VirtualBox

Changeset 93132 in vbox for trunk


Timestamp: Jan 6, 2022 12:38:02 PM (3 years ago)
Author: vboxsync
Message:

VMM,{HMVMXR0.cpp,VMXTemplate.cpp.h}: Make use of the VMX template code in HM, getting rid of the temporary code duplication, bugref:10136

Location: trunk/src/VBox/VMM
Files: 2 edited

Legend:

Unmodified lines show both the r93115 and the r93132 line number.
Removed lines show only the r93115 (left) line number.
Added lines show only the r93132 (right) line number, shifted one column to the right.
  • trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h

    r93115 r93132  
    127 127 #endif
    128 128
    129 #ifdef IN_RING0
     129 #ifndef IN_NEM_DARWIN
    130 130 /** Assert that preemption is disabled or covered by thread-context hooks. */
    131 131 # define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)          Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu))   \
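Context for the guard change above: VMXAllTemplate.cpp.h is compiled once for ring-0 HM and once for the macOS NEM backend, so ring-0-only code is now fenced with #ifndef IN_NEM_DARWIN instead of #ifdef IN_RING0. A minimal, self-contained sketch of that compile-the-body-per-context pattern; the X-macro scaffolding and names like vmxHCDescribe are invented for illustration:

    #include <stdio.h>

    /* The real tree compiles VMXAllTemplate.cpp.h once for ring-0 HM and once
       for the macOS NEM backend.  Here an X-macro fakes the double inclusion
       so the sketch stays a single runnable file. */
    #define TEMPLATE_BODY(ctx_suffix, is_nem_darwin)                        \
        static const char *vmxHCDescribe_##ctx_suffix(void)                 \
        {                                                                   \
            if (!(is_nem_darwin))                                           \
                return "ring-0 path (preemption asserts, world switch)";    \
            return "NEM/darwin path (Hypervisor framework backend)";        \
        }

    TEMPLATE_BODY(r0,  0)   /* the #ifndef IN_NEM_DARWIN branch is taken */
    TEMPLATE_BODY(nem, 1)   /* the #ifdef  IN_NEM_DARWIN branch is taken */

    int main(void)
    {
        printf("HM:  %s\n", vmxHCDescribe_r0());
        printf("NEM: %s\n", vmxHCDescribe_nem());
        return 0;
    }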
     
    743 743
    744 744
    745 #ifdef IN_RING0
    746 /**
    747  * Checks if the given MSR is part of the lastbranch-from-IP MSR stack.
    748  * @returns @c true if it's part of LBR stack, @c false otherwise.
    749  *
    750  * @param   pVM         The cross context VM structure.
    751  * @param   idMsr       The MSR.
    752  * @param   pidxMsr     Where to store the index of the MSR in the LBR MSR array.
    753  *                      Optional, can be NULL.
    754  *
    755  * @remarks Must only be called when LBR is enabled.
    756  */
    757 DECL_FORCE_INLINE(bool) vmxHCIsLbrBranchFromMsr(PCVMCC pVM, uint32_t idMsr, uint32_t *pidxMsr)
    758 {
    759     Assert(VM_IS_VMX_LBR(pVM));
    760     Assert(pVM->hmr0.s.vmx.idLbrFromIpMsrFirst);
    761     uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrFromIpMsrLast - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst + 1;
    762     uint32_t const idxMsr    = idMsr - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst;
    763     if (idxMsr < cLbrStack)
    764     {
    765         if (pidxMsr)
    766             *pidxMsr = idxMsr;
    767         return true;
    768     }
    769     return false;
    770 }
    771 
    772 
    773 /**
    774  * Checks if the given MSR is part of the lastbranch-to-IP MSR stack.
    775  * @returns @c true if it's part of LBR stack, @c false otherwise.
    776  *
    777  * @param   pVM         The cross context VM structure.
    778  * @param   idMsr       The MSR.
    779  * @param   pidxMsr     Where to store the index of the MSR in the LBR MSR array.
    780  *                      Optional, can be NULL.
    781  *
    782  * @remarks Must only be called when LBR is enabled and when lastbranch-to-IP MSRs
    783  *          are supported by the CPU (see vmxHCSetupLbrMsrRange).
    784  */
    785 DECL_FORCE_INLINE(bool) vmxHCIsLbrBranchToMsr(PCVMCC pVM, uint32_t idMsr, uint32_t *pidxMsr)
    786 {
    787     Assert(VM_IS_VMX_LBR(pVM));
    788     if (pVM->hmr0.s.vmx.idLbrToIpMsrFirst)
    789     {
    790         uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrToIpMsrLast - pVM->hmr0.s.vmx.idLbrToIpMsrFirst + 1;
    791         uint32_t const idxMsr    = idMsr - pVM->hmr0.s.vmx.idLbrToIpMsrFirst;
    792         if (idxMsr < cLbrStack)
    793         {
    794             if (pidxMsr)
    795                 *pidxMsr = idxMsr;
    796             return true;
    797         }
    798     }
    799     return false;
    800 }
    801 #endif
    802 
    803 
    804 745 /**
    805 746  * Gets the CR0 guest/host mask.
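The two LBR helpers removed above share one trick worth noting: because idMsr - idFirst is computed in unsigned arithmetic, a single compare against the stack size rejects MSRs both below and above the range (values below idFirst wrap around to huge numbers). A standalone sketch of just that check; the MSR numbers are chosen loosely after the last-branch-from-IP stack and are not authoritative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Unsigned-wraparound range test, as in vmxHCIsLbrBranchFromMsr. */
    static bool isInMsrStack(uint32_t idMsr, uint32_t idFirst, uint32_t idLast, uint32_t *pidx)
    {
        uint32_t const cStack = idLast - idFirst + 1;
        uint32_t const idx    = idMsr - idFirst;   /* wraps if idMsr < idFirst */
        if (idx < cStack)
        {
            if (pidx)
                *pidx = idx;
            return true;
        }
        return false;
    }

    int main(void)
    {
        uint32_t idx = 0;
        /* Hypothetical 4-deep stack at MSRs 0x680..0x683. */
        printf("0x682 -> %d (idx %u)\n", isInMsrStack(0x682, 0x680, 0x683, &idx), idx);
        printf("0x67f -> %d\n",          isInMsrStack(0x67f, 0x680, 0x683, NULL));
        return 0;
    }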
     
    888 829
    889 830 /**
    890  * Returns whether the VM-exit MSR-store area differs from the VM-exit MSR-load
    891  * area.
    892  *
    893  * @returns @c true if it's different, @c false otherwise.
    894  * @param   pVmcsInfo   The VMCS info. object.
    895  */
    896 DECL_FORCE_INLINE(bool) vmxHCIsSeparateExitMsrStoreAreaVmcs(PCVMXVMCSINFO pVmcsInfo)
    897 {
    898     return RT_BOOL(   pVmcsInfo->pvGuestMsrStore != pVmcsInfo->pvGuestMsrLoad
    899                    && pVmcsInfo->pvGuestMsrStore);
    900 }
    901 
    902 #ifdef IN_RING0
    903 /**
    904  * Sets the given Processor-based VM-execution controls.
    905  *
    906  * @param   pVCpu           The cross context virtual CPU structure.
    907  * @param   pVmxTransient   The VMX-transient structure.
    908  * @param   uProcCtls       The Processor-based VM-execution controls to set.
    909  */
    910 static void vmxHCSetProcCtlsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uProcCtls)
    911 {
    912     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    913     if ((pVmcsInfo->u32ProcCtls & uProcCtls) != uProcCtls)
    914     {
    915         pVmcsInfo->u32ProcCtls |= uProcCtls;
    916         int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    917         AssertRC(rc);
    918     }
    919 }
    920 
    921 
    922 /**
    923  * Removes the given Processor-based VM-execution controls.
    924  *
    925  * @param   pVCpu           The cross context virtual CPU structure.
    926  * @param   pVmxTransient   The VMX-transient structure.
    927  * @param   uProcCtls       The Processor-based VM-execution controls to remove.
    928  *
    929  * @remarks When executing a nested-guest, this will not remove any of the specified
    930  *          controls if the nested hypervisor has set any one of them.
    931  */
    932 static void vmxHCRemoveProcCtlsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uProcCtls)
    933 {
    934     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    935     if (pVmcsInfo->u32ProcCtls & uProcCtls)
    936     {
    937 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    938         if (   !pVmxTransient->fIsNestedGuest
    939             || !CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uProcCtls))
    940 #else
    941         NOREF(pVCpu);
    942         if (!pVmxTransient->fIsNestedGuest)
    943 #endif
    944         {
    945             pVmcsInfo->u32ProcCtls &= ~uProcCtls;
    946             int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    947             AssertRC(rc);
    948         }
    949     }
    950 }
    951 
    952 
    953 /**
    954  * Sets the TSC offset for the current VMCS.
    955  *
    956  * @param   pVCpu           The cross context virtual CPU structure.
    957  * @param   uTscOffset      The TSC offset to set.
    958  * @param   pVmcsInfo       The VMCS info. object.
    959  */
    960 static void vmxHCSetTscOffsetVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t uTscOffset)
    961 {
    962     if (pVmcsInfo->u64TscOffset != uTscOffset)
    963     {
    964         int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
    965         AssertRC(rc);
    966         pVmcsInfo->u64TscOffset = uTscOffset;
    967     }
    968 }
    969 #endif
    970 
    971 /**
    972 831  * Adds one or more exceptions to the exception bitmap and commits it to the current
    973 832  * VMCS.
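The removed vmxHCSetProcCtlsVmcs and vmxHCSetTscOffsetVmcs bodies above illustrate a pattern used throughout this file: each VMCS control is shadowed in a host-side cache so the (comparatively expensive) VMWRITE is only issued when the value actually changes. A reduced sketch, where VMCSCACHE and vmcsWrite32 are simplified stand-ins for VMXVMCSINFO and VMX_VMCS_WRITE_32:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct VMCSCACHE { uint32_t u32ProcCtls; unsigned cWrites; } VMCSCACHE;

    /* Stand-in for the real VMWRITE wrapper; just counts invocations. */
    static void vmcsWrite32(VMCSCACHE *p, uint32_t uVal) { p->cWrites++; (void)uVal; }

    static void setProcCtls(VMCSCACHE *p, uint32_t fCtls)
    {
        if ((p->u32ProcCtls & fCtls) != fCtls)    /* anything not yet set?  */
        {
            p->u32ProcCtls |= fCtls;
            vmcsWrite32(p, p->u32ProcCtls);       /* commit to "hardware"   */
        }
    }

    int main(void)
    {
        VMCSCACHE Cache = { 0, 0 };
        setProcCtls(&Cache, 0x8);   /* first call issues the write  */
        setProcCtls(&Cache, 0x8);   /* second call is a cache hit   */
        printf("VMWRITEs issued: %u\n", Cache.cWrites);   /* prints 1 */
        return 0;
    }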
     
    1046 905        {
    1047 906            /* Validate we are not removing any essential exception intercepts. */
    1048 #ifdef IN_RING0
     907 #ifndef IN_NEM_DARWIN
    1049 908            Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
    1050 909 #else
     
    1144 1003    if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    1145 1004    {
    1146         int rc = vmxHCClearVmcs(pVmcsInfoFrom);
     1005        int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
    1147 1006        if (RT_SUCCESS(rc))
    1148 1007        {
     
    1167 1026    if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    1168 1027    {
    1169         int rc = vmxHCClearVmcs(pVmcsInfoTo);
     1028        int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
    1170 1029        if (RT_SUCCESS(rc))
    1171 1030        { /* likely */ }
     
    1177 1036     * Finally, load the VMCS we are switching to.
    1178 1037     */
    1179     return vmxHCLoadVmcs(pVmcsInfoTo);
     1038    return hmR0VmxLoadVmcs(pVmcsInfoTo);
    1180 1039 }
    1181 1040
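The three hunks above rename the clear/load helpers back to their ring-0 names (hmR0VmxClearVmcs/hmR0VmxLoadVmcs), since VMCS switching is inherently ring-0 work. The rule they enforce: a VMCS whose launch state is not CLEAR must be VMCLEARed before another VMCS is loaded as current. A toy state machine under that assumption; all types and helper names here are invented:

    #include <stdio.h>

    typedef enum { VMCS_STATE_CLEAR, VMCS_STATE_CURRENT, VMCS_STATE_LAUNCHED } VMCSSTATE;
    typedef struct VMCS { const char *pszName; VMCSSTATE enmState; } VMCS;

    static void vmcsClear(VMCS *p) { p->enmState = VMCS_STATE_CLEAR;   }  /* VMCLEAR */
    static void vmcsLoad(VMCS *p)  { p->enmState = VMCS_STATE_CURRENT; }  /* VMPTRLD */

    static void switchVmcs(VMCS *pFrom, VMCS *pTo)
    {
        if (pFrom->enmState != VMCS_STATE_CLEAR)   /* flush CPU-side state      */
            vmcsClear(pFrom);
        if (pTo->enmState != VMCS_STATE_CLEAR)     /* e.g. stale from elsewhere */
            vmcsClear(pTo);
        vmcsLoad(pTo);                             /* make it the current VMCS  */
    }

    int main(void)
    {
        VMCS Guest  = { "guest",  VMCS_STATE_LAUNCHED };
        VMCS Nested = { "nested", VMCS_STATE_LAUNCHED };
        switchVmcs(&Guest, &Nested);
        printf("%s is now current\n", Nested.pszName);
        return 0;
    }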
     
    1218 1077    {
    1219 1078        pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs           = fSwitchToNstGstVmcs;
    1220         VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
     1079        pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
    1221 1080
    1222 1081        /*
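The hunk above switches the mirror update from the VCPU_2_VMXSTATE macro to a direct pVCpu->hm.s field: the authoritative flag lives in ring-0-only state (hmr0) while a copy is kept where ring-3 can read it. A compact sketch of that mirroring idea; the struct layout is invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct VCPUSTATE
    {
        struct { bool fSwitchedToNstGstVmcs; } r0;                  /* ring-0 private  */
        struct { bool fSwitchedToNstGstVmcsCopyForRing3; } shared;  /* ring-3 visible  */
    } VCPUSTATE;

    static void setNestedVmcsActive(VCPUSTATE *pVCpu, bool fActive)
    {
        pVCpu->r0.fSwitchedToNstGstVmcs                 = fActive;  /* the truth  */
        pVCpu->shared.fSwitchedToNstGstVmcsCopyForRing3 = fActive;  /* the mirror */
    }

    int main(void)
    {
        VCPUSTATE VCpu = { { false }, { false } };
        setNestedVmcsActive(&VCpu, true);
        printf("ring-3 sees: %d\n", VCpu.shared.fSwitchedToNstGstVmcsCopyForRing3);
        return 0;
    }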
     
    1251 1110 }
    1252 1111 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    1253 
    1254 
    1255 #ifdef IN_RING0
    1256 /**
    1257  * Updates the VM's last error record.
    1258  *
    1259  * If there was a VMX instruction error, reads the error data from the VMCS and
    1260  * updates VCPU's last error record as well.
    1261  *
    1262  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    1263  *                  Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
    1264  *                  VERR_VMX_INVALID_VMCS_FIELD.
    1265  * @param   rc      The error code.
    1266  */
    1267 static void vmxHCUpdateErrorRecord(PVMCPUCC pVCpu, int rc)
    1268 {
    1269     if (   rc == VERR_VMX_INVALID_VMCS_FIELD
    1270         || rc == VERR_VMX_UNABLE_TO_START_VM)
    1271     {
    1272         AssertPtrReturnVoid(pVCpu);
    1273         VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32InstrError);
    1274     }
    1275     pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc;
    1276 }
    1277 #endif
    1278 1112
    1279 1113
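vmxHCUpdateErrorRecord, deleted above (it stays a ring-0 concern in HMVMXR0.cpp), reads the VM-instruction error field only for the two failure codes where the VMCS actually carries one, and always records the last status. A reduced sketch; the VERR_* values and vmcsReadInstrError are fabricated stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    #define VERR_VMX_INVALID_VMCS_FIELD   (-4001)   /* made-up values */
    #define VERR_VMX_UNABLE_TO_START_VM   (-4002)

    static uint32_t vmcsReadInstrError(void) { return 8; /* pretend VMREAD */ }

    typedef struct ERRREC { int rcInit; uint32_t u32InstrError; } ERRREC;

    static void updateErrorRecord(ERRREC *pRec, int rc)
    {
        /* Only these two codes leave a meaningful VM-instruction error behind,
           so the extra VMREAD is done just for them. */
        if (   rc == VERR_VMX_INVALID_VMCS_FIELD
            || rc == VERR_VMX_UNABLE_TO_START_VM)
            pRec->u32InstrError = vmcsReadInstrError();
        pRec->rcInit = rc;   /* always remember the last status */
    }

    int main(void)
    {
        ERRREC Rec = { 0, 0 };
        updateErrorRecord(&Rec, VERR_VMX_UNABLE_TO_START_VM);
        printf("rcInit=%d instr-error=%u\n", Rec.rcInit, Rec.u32InstrError);
        return 0;
    }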
     
    1531 1365 #endif
    1532 1366
    1533 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1534 /**
    1535  * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not.
    1536  *
    1537  * @returns @c true if the MSR is intercepted, @c false otherwise.
    1538  * @param   pbMsrBitmap     The MSR bitmap.
    1539  * @param   offMsr          The MSR byte offset.
    1540  * @param   iBit            The bit offset from the byte offset.
    1541  */
    1542 DECLINLINE(bool) vmxHCIsMsrBitSet(uint8_t const *pbMsrBitmap, uint16_t offMsr, int32_t iBit)
    1543 {
    1544     Assert(offMsr + (iBit >> 3) <= X86_PAGE_4K_SIZE);
    1545     return ASMBitTest(pbMsrBitmap + offMsr, iBit);
    1546 }
    1547 #endif
    1548 
    1549 #ifdef IN_RING0
    1550 /**
    1551  * Sets the permission bits for the specified MSR in the given MSR bitmap.
    1552  *
    1553  * If the passed VMCS is a nested-guest VMCS, this function ensures that the
    1554  * read/write intercept is cleared from the MSR bitmap used for hardware-assisted
    1555  * VMX execution of the nested-guest, only if nested-guest is also not intercepting
    1556  * the read/write access of this MSR.
    1557  *
    1558  * @param   pVCpu           The cross context virtual CPU structure.
    1559  * @param   pVmcsInfo       The VMCS info. object.
    1560  * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
    1561  * @param   idMsr           The MSR value.
    1562  * @param   fMsrpm          The MSR permissions (see VMXMSRPM_XXX). This must
    1563  *                          include both a read -and- a write permission!
    1564  *
    1565  * @sa      CPUMGetVmxMsrPermission.
    1566  * @remarks Can be called with interrupts disabled.
    1567  */
    1568 static void vmxHCSetMsrPermission(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
    1569 {
    1570     uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
    1571     Assert(pbMsrBitmap);
    1572     Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm));
    1573 
    1574     /*
    1575      * MSR-bitmap Layout:
    1576      *   Byte index            MSR range            Interpreted as
    1577      * 0x000 - 0x3ff    0x00000000 - 0x00001fff    Low MSR read bits.
    1578      * 0x400 - 0x7ff    0xc0000000 - 0xc0001fff    High MSR read bits.
    1579      * 0x800 - 0xbff    0x00000000 - 0x00001fff    Low MSR write bits.
    1580      * 0xc00 - 0xfff    0xc0000000 - 0xc0001fff    High MSR write bits.
    1581      *
    1582      * A bit corresponding to an MSR within the above range causes a VM-exit
    1583      * if the bit is 1 on executions of RDMSR/WRMSR.  If an MSR falls out of
    1584      * the MSR range, it always cause a VM-exit.
    1585      *
    1586      * See Intel spec. 24.6.9 "MSR-Bitmap Address".
    1587      */
    1588     uint16_t const offBitmapRead  = 0;
    1589     uint16_t const offBitmapWrite = 0x800;
    1590     uint16_t       offMsr;
    1591     int32_t        iBit;
    1592     if (idMsr <= UINT32_C(0x00001fff))
    1593     {
    1594         offMsr = 0;
    1595         iBit   = idMsr;
    1596     }
    1597     else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
    1598     {
    1599         offMsr = 0x400;
    1600         iBit   = idMsr - UINT32_C(0xc0000000);
    1601     }
    1602     else
    1603         AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr));
    1604 
    1605     /*
    1606      * Set the MSR read permission.
    1607      */
    1608     uint16_t const offMsrRead = offBitmapRead + offMsr;
    1609     Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
    1610     if (fMsrpm & VMXMSRPM_ALLOW_RD)
    1611     {
    1612 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1613         bool const fClear = !fIsNstGstVmcs ? true
    1614                           : !vmxHCIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrRead, iBit);
    1615 #else
    1616         RT_NOREF2(pVCpu, fIsNstGstVmcs);
    1617         bool const fClear = true;
    1618 #endif
    1619         if (fClear)
    1620             ASMBitClear(pbMsrBitmap + offMsrRead, iBit);
    1621     }
    1622     else
    1623         ASMBitSet(pbMsrBitmap + offMsrRead, iBit);
    1624 
    1625     /*
    1626      * Set the MSR write permission.
    1627      */
    1628     uint16_t const offMsrWrite = offBitmapWrite + offMsr;
    1629     Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
    1630     if (fMsrpm & VMXMSRPM_ALLOW_WR)
    1631     {
    1632 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1633         bool const fClear = !fIsNstGstVmcs ? true
    1634                           : !vmxHCIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrWrite, iBit);
    1635 #else
    1636         RT_NOREF2(pVCpu, fIsNstGstVmcs);
    1637         bool const fClear = true;
    1638 #endif
    1639         if (fClear)
    1640             ASMBitClear(pbMsrBitmap + offMsrWrite, iBit);
    1641     }
    1642     else
    1643         ASMBitSet(pbMsrBitmap + offMsrWrite, iBit);
    1644 }
    1645 
    1646 
    1647 /**
    1648  * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
    1649  * area.
    1650  *
    1651  * @returns VBox status code.
    1652  * @param   pVCpu       The cross context virtual CPU structure.
    1653  * @param   pVmcsInfo   The VMCS info. object.
    1654  * @param   cMsrs       The number of MSRs.
    1655  */
    1656 static int vmxHCSetAutoLoadStoreMsrCount(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs)
    1657 {
    1658     /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
    1659     uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc);
    1660     if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
    1661     {
    1662         /* Commit the MSR counts to the VMCS and update the cache. */
    1663         if (pVmcsInfo->cEntryMsrLoad != cMsrs)
    1664         {
    1665             int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);   AssertRC(rc);
    1666             rc     = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);   AssertRC(rc);
    1667             rc     = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);   AssertRC(rc);
    1668             pVmcsInfo->cEntryMsrLoad = cMsrs;
    1669             pVmcsInfo->cExitMsrStore = cMsrs;
    1670             pVmcsInfo->cExitMsrLoad  = cMsrs;
    1671         }
    1672         return VINF_SUCCESS;
    1673     }
    1674 
    1675     LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
    1676     VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
    1677     return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    1678 }
    1679 
    1680 
    1681 /**
    1682  * Adds a new (or updates the value of an existing) guest/host MSR
    1683  * pair to be swapped during the world-switch as part of the
    1684  * auto-load/store MSR area in the VMCS.
    1685  *
    1686  * @returns VBox status code.
    1687  * @param   pVCpu           The cross context virtual CPU structure.
    1688  * @param   pVmxTransient   The VMX-transient structure.
    1689  * @param   idMsr           The MSR.
    1690  * @param   uGuestMsrValue  Value of the guest MSR.
    1691  * @param   fSetReadWrite   Whether to set the guest read/write access of this
    1692  *                          MSR (thus not causing a VM-exit).
    1693  * @param   fUpdateHostMsr  Whether to update the value of the host MSR if
    1694  *                          necessary.
    1695  */
    1696 static int vmxHCAddAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
    1697                                     bool fSetReadWrite, bool fUpdateHostMsr)
    1698 {
    1699     PVMXVMCSINFO  pVmcsInfo     = pVmxTransient->pVmcsInfo;
    1700     bool const      fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
    1701     PVMXAUTOMSR     pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
    1702     uint32_t        cMsrs         = pVmcsInfo->cEntryMsrLoad;
    1703     uint32_t        i;
    1704 
    1705     /* Paranoia. */
    1706     Assert(pGuestMsrLoad);
    1707 
    1708 #ifndef DEBUG_bird
    1709     LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
    1710 #endif
    1711 
    1712     /* Check if the MSR already exists in the VM-entry MSR-load area. */
    1713     for (i = 0; i < cMsrs; i++)
    1714     {
    1715         if (pGuestMsrLoad[i].u32Msr == idMsr)
    1716             break;
    1717     }
    1718 
    1719     bool fAdded = false;
    1720     if (i == cMsrs)
    1721     {
    1722         /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
    1723         ++cMsrs;
    1724         int rc = vmxHCSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
    1725         AssertMsgRCReturn(rc, ("Insufficient space to add MSR to VM-entry MSR-load/store area %u\n", idMsr), rc);
    1726 
    1727         /* Set the guest to read/write this MSR without causing VM-exits. */
    1728         if (   fSetReadWrite
    1729             && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
    1730             vmxHCSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD_WR);
    1731 
    1732         Log4Func(("Added MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
    1733         fAdded = true;
    1734     }
    1735 
    1736     /* Update the MSR value for the newly added or already existing MSR. */
    1737     pGuestMsrLoad[i].u32Msr   = idMsr;
    1738     pGuestMsrLoad[i].u64Value = uGuestMsrValue;
    1739 
    1740     /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
    1741     if (vmxHCIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
    1742     {
    1743         PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
    1744         pGuestMsrStore[i].u32Msr   = idMsr;
    1745         pGuestMsrStore[i].u64Value = uGuestMsrValue;
    1746     }
    1747 
    1748     /* Update the corresponding slot in the host MSR area. */
    1749     PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
    1750     Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
    1751     Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
    1752     pHostMsr[i].u32Msr = idMsr;
    1753 
    1754     /*
    1755      * Only if the caller requests to update the host MSR value AND we've newly added the
    1756      * MSR to the host MSR area do we actually update the value. Otherwise, it will be
    1757      * updated by vmxHCUpdateAutoLoadHostMsrs().
    1758      *
    1759      * We do this for performance reasons since reading MSRs may be quite expensive.
    1760      */
    1761     if (fAdded)
    1762     {
    1763         if (fUpdateHostMsr)
    1764         {
    1765             Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    1766             Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1767             pHostMsr[i].u64Value = ASMRdMsr(idMsr);
    1768         }
    1769         else
    1770         {
    1771             /* Someone else can do the work. */
    1772             pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
    1773         }
    1774     }
    1775 
    1776     return VINF_SUCCESS;
    1777 }
    1778 
    1779 
    1780 /**
    1781  * Removes a guest/host MSR pair to be swapped during the world-switch from the
    1782  * auto-load/store MSR area in the VMCS.
    1783  *
    1784  * @returns VBox status code.
    1785  * @param   pVCpu           The cross context virtual CPU structure.
    1786  * @param   pVmxTransient   The VMX-transient structure.
    1787  * @param   idMsr           The MSR.
    1788  */
    1789 static int vmxHCRemoveAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr)
    1790 {
    1791     PVMXVMCSINFO  pVmcsInfo     = pVmxTransient->pVmcsInfo;
    1792     bool const      fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
    1793     PVMXAUTOMSR     pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
    1794     uint32_t        cMsrs         = pVmcsInfo->cEntryMsrLoad;
    1795 
    1796 #ifndef DEBUG_bird
    1797     LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
    1798 #endif
    1799 
    1800     for (uint32_t i = 0; i < cMsrs; i++)
    1801     {
    1802         /* Find the MSR. */
    1803         if (pGuestMsrLoad[i].u32Msr == idMsr)
    1804         {
    1805             /*
    1806              * If it's the last MSR, we only need to reduce the MSR count.
    1807              * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
    1808              */
    1809             if (i < cMsrs - 1)
    1810             {
    1811                 /* Remove it from the VM-entry MSR-load area. */
    1812                 pGuestMsrLoad[i].u32Msr   = pGuestMsrLoad[cMsrs - 1].u32Msr;
    1813                 pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
    1814 
    1815                 /* Remove it from the VM-exit MSR-store area if it's in a different page. */
    1816                 if (vmxHCIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
    1817                 {
    1818                     PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
    1819                     Assert(pGuestMsrStore[i].u32Msr == idMsr);
    1820                     pGuestMsrStore[i].u32Msr   = pGuestMsrStore[cMsrs - 1].u32Msr;
    1821                     pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
    1822                 }
    1823 
    1824                 /* Remove it from the VM-exit MSR-load area. */
    1825                 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
    1826                 Assert(pHostMsr[i].u32Msr == idMsr);
    1827                 pHostMsr[i].u32Msr   = pHostMsr[cMsrs - 1].u32Msr;
    1828                 pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
    1829             }
    1830 
    1831             /* Reduce the count to reflect the removed MSR and bail. */
    1832             --cMsrs;
    1833             break;
    1834         }
    1835     }
    1836 
    1837     /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
    1838     if (cMsrs != pVmcsInfo->cEntryMsrLoad)
    1839     {
    1840         int rc = vmxHCSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
    1841         AssertRCReturn(rc, rc);
    1842 
    1843         /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
    1844         if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    1845             vmxHCSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
    1846 
    1847         Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
    1848         return VINF_SUCCESS;
    1849     }
    1850 
    1851     return VERR_NOT_FOUND;
    1852 }
    1853 
    1854 
    1855 /**
    1856  * Checks if the specified guest MSR is part of the VM-entry MSR-load area.
    1857  *
    1858  * @returns @c true if found, @c false otherwise.
    1859  * @param   pVmcsInfo   The VMCS info. object.
    1860  * @param   idMsr       The MSR to find.
    1861  */
    1862 static bool vmxHCIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
    1863 {
    1864     PCVMXAUTOMSR   pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
    1865     uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
    1866     Assert(pMsrs);
    1867     Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
    1868     for (uint32_t i = 0; i < cMsrs; i++)
    1869     {
    1870         if (pMsrs[i].u32Msr == idMsr)
    1871             return true;
    1872     }
    1873     return false;
    1874 }
    1875 #endif
    1876 
    1877 
    1878 1367 /**
    1879 1368  * Verifies that our cached values of the VMCS fields are all consistent with
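Among the helpers removed in this block, vmxHCSetMsrPermission documents the 4 KiB MSR-bitmap layout it navigates (Intel SDM 24.6.9): low MSRs 0x00000000..0x1fff map to bytes 0x000..0x3ff, high MSRs 0xc0000000..0xc0001fff to bytes 0x400..0x7ff, with the write halves at +0x800. The byte/bit arithmetic can be checked standalone; msrToBitmapPos is a hypothetical helper name:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Map an MSR id to (byte offset, bit) in the VMX MSR bitmap. */
    static bool msrToBitmapPos(uint32_t idMsr, bool fWrite, uint32_t *poffByte, uint32_t *piBit)
    {
        uint32_t offMsr, iBit;
        if (idMsr <= UINT32_C(0x00001fff))
        {
            offMsr = 0;                            /* low read/write quarter  */
            iBit   = idMsr;
        }
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
        {
            offMsr = 0x400;                        /* high read/write quarter */
            iBit   = idMsr - UINT32_C(0xc0000000);
        }
        else
            return false;                          /* out of range: always exits */
        *poffByte = (fWrite ? 0x800 : 0) + offMsr + (iBit >> 3);
        *piBit    = iBit & 7;
        return true;
    }

    int main(void)
    {
        uint32_t offByte, iBit;
        if (msrToBitmapPos(UINT32_C(0xc0000102) /* MSR_K8_KERNEL_GS_BASE */, true, &offByte, &iBit))
            printf("write-intercept bit: byte %#x, bit %u\n", offByte, iBit);  /* 0xc20, 2 */
        return 0;
    }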
     
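vmxHCRemoveAutoLoadStoreMsr, also removed above, deletes from the unordered auto-load/store MSR arrays by moving the last entry into the vacated slot and shrinking the count, keeping the guest-load and host-load arrays index-aligned. A minimal sketch of that O(1) swap-with-last removal, with AUTOMSR standing in for VMXAUTOMSR:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct AUTOMSR { uint32_t u32Msr; uint64_t u64Value; } AUTOMSR;

    static int removeMsr(AUTOMSR *paGuest, AUTOMSR *paHost, uint32_t *pcMsrs, uint32_t idMsr)
    {
        uint32_t const cMsrs = *pcMsrs;
        for (uint32_t i = 0; i < cMsrs; i++)
            if (paGuest[i].u32Msr == idMsr)
            {
                if (i < cMsrs - 1)                 /* not last: move last into hole */
                {
                    paGuest[i] = paGuest[cMsrs - 1];
                    paHost[i]  = paHost[cMsrs - 1];
                }
                *pcMsrs = cMsrs - 1;
                return 0;
            }
        return -1;                                 /* VERR_NOT_FOUND equivalent */
    }

    int main(void)
    {
        AUTOMSR aGuest[3] = { {0x10, 1}, {0x20, 2}, {0x30, 3} };
        AUTOMSR aHost[3]  = { {0x10, 9}, {0x20, 8}, {0x30, 7} };
        uint32_t cMsrs = 3;
        removeMsr(aGuest, aHost, &cMsrs, 0x10);
        printf("count=%u slot0=%#x/%#x\n", cMsrs, aGuest[0].u32Msr, aHost[0].u32Msr);
        return 0;
    }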
    1960 1449    return VINF_SUCCESS;
    1961 1450 }
    1962 
    1963 
    1964 #ifdef IN_RING0
    1965 /**
    1966  * Sets up the LBR MSR ranges based on the host CPU.
    1967  *
    1968  * @returns VBox status code.
    1969  * @param   pVM     The cross context VM structure.
    1970  */
    1971 static int vmxHCSetupLbrMsrRange(PVMCC pVM)
    1972 {
    1973     Assert(VM_IS_VMX_LBR(pVM));
    1974     uint32_t idLbrFromIpMsrFirst;
    1975     uint32_t idLbrFromIpMsrLast;
    1976     uint32_t idLbrToIpMsrFirst;
    1977     uint32_t idLbrToIpMsrLast;
    1978     uint32_t idLbrTosMsr;
    1979 
    1980     /*
    1981      * Determine the LBR MSRs supported for this host CPU family and model.
    1982      *
    1983      * See Intel spec. 17.4.8 "LBR Stack".
    1984      * See Intel "Model-Specific Registers" spec.
    1985      */
    1986     uint32_t const uFamilyModel = (pVM->cpum.ro.HostFeatures.uFamily << 8)
    1987                                 | pVM->cpum.ro.HostFeatures.uModel;
    1988     switch (uFamilyModel)
    1989     {
    1990         case 0x0f01: case 0x0f02:
    1991             idLbrFromIpMsrFirst = MSR_P4_LASTBRANCH_0;
    1992             idLbrFromIpMsrLast  = MSR_P4_LASTBRANCH_3;
    1993             idLbrToIpMsrFirst   = 0x0;
    1994             idLbrToIpMsrLast    = 0x0;
    1995             idLbrTosMsr         = MSR_P4_LASTBRANCH_TOS;
    1996             break;
    1997 
    1998         case 0x065c: case 0x065f: case 0x064e: case 0x065e: case 0x068e:
    1999         case 0x069e: case 0x0655: case 0x0666: case 0x067a: case 0x0667:
    2000         case 0x066a: case 0x066c: case 0x067d: case 0x067e:
    2001             idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
    2002             idLbrFromIpMsrLast  = MSR_LASTBRANCH_31_FROM_IP;
    2003             idLbrToIpMsrFirst   = MSR_LASTBRANCH_0_TO_IP;
    2004             idLbrToIpMsrLast    = MSR_LASTBRANCH_31_TO_IP;
    2005             idLbrTosMsr         = MSR_LASTBRANCH_TOS;
    2006             break;
    2007 
    2008         case 0x063d: case 0x0647: case 0x064f: case 0x0656: case 0x063c:
    2009         case 0x0645: case 0x0646: case 0x063f: case 0x062a: case 0x062d:
    2010         case 0x063a: case 0x063e: case 0x061a: case 0x061e: case 0x061f:
    2011         case 0x062e: case 0x0625: case 0x062c: case 0x062f:
    2012             idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
    2013             idLbrFromIpMsrLast  = MSR_LASTBRANCH_15_FROM_IP;
    2014             idLbrToIpMsrFirst   = MSR_LASTBRANCH_0_TO_IP;
    2015             idLbrToIpMsrLast    = MSR_LASTBRANCH_15_TO_IP;
    2016             idLbrTosMsr         = MSR_LASTBRANCH_TOS;
    2017             break;
    2018 
    2019         case 0x0617: case 0x061d: case 0x060f:
    2020             idLbrFromIpMsrFirst = MSR_CORE2_LASTBRANCH_0_FROM_IP;
    2021             idLbrFromIpMsrLast  = MSR_CORE2_LASTBRANCH_3_FROM_IP;
    2022             idLbrToIpMsrFirst   = MSR_CORE2_LASTBRANCH_0_TO_IP;
    2023             idLbrToIpMsrLast    = MSR_CORE2_LASTBRANCH_3_TO_IP;
    2024             idLbrTosMsr         = MSR_CORE2_LASTBRANCH_TOS;
    2025             break;
    2026 
    2027         /* Atom and related microarchitectures we don't care about:
    2028         case 0x0637: case 0x064a: case 0x064c: case 0x064d: case 0x065a:
    2029         case 0x065d: case 0x061c: case 0x0626: case 0x0627: case 0x0635:
    2030         case 0x0636: */
    2031         /* All other CPUs: */
    2032         default:
    2033         {
    2034             LogRelFunc(("Could not determine LBR stack size for the CPU model %#x\n", uFamilyModel));
    2035             VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_UNKNOWN;
    2036             return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2037         }
    2038     }
    2039 
    2040     /*
    2041      * Validate.
    2042      */
    2043     uint32_t const cLbrStack = idLbrFromIpMsrLast - idLbrFromIpMsrFirst + 1;
    2044     PCVMCPU pVCpu0 = VMCC_GET_CPU_0(pVM);
    2045     AssertCompile(   RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrFromIpMsr)
    2046                   == RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrToIpMsr));
    2047     if (cLbrStack > RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrFromIpMsr))
    2048     {
    2049         LogRelFunc(("LBR stack size of the CPU (%u) exceeds our buffer size\n", cLbrStack));
    2050         VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_OVERFLOW;
    2051         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2052     }
    2053     NOREF(pVCpu0);
    2054 
    2055     /*
    2056      * Update the LBR info. to the VM struct. for use later.
    2057      */
    2058     pVM->hmr0.s.vmx.idLbrTosMsr = idLbrTosMsr;
    2059 
    2060     pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
    2061     pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast  = pVM->hmr0.s.vmx.idLbrFromIpMsrLast  = idLbrFromIpMsrLast;
    2062 
    2063     pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst   = pVM->hmr0.s.vmx.idLbrToIpMsrFirst   = idLbrToIpMsrFirst;
    2064     pVM->hm.s.ForR3.vmx.idLbrToIpMsrLast    = pVM->hmr0.s.vmx.idLbrToIpMsrLast    = idLbrToIpMsrLast;
    2065     return VINF_SUCCESS;
    2066 }
    2067 
    2068 
    2069 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2070 /**
    2071  * Sets up the shadow VMCS fields arrays.
    2072  *
    2073  * This function builds arrays of VMCS fields to sync the shadow VMCS later while
    2074  * executing the guest.
    2075  *
    2076  * @returns VBox status code.
    2077  * @param   pVM     The cross context VM structure.
    2078  */
    2079 static int vmxHCSetupShadowVmcsFieldsArrays(PVMCC pVM)
    2080 {
    2081     /*
    2082      * Paranoia. Ensure we haven't exposed the VMWRITE-All VMX feature to the guest
    2083      * when the host does not support it.
    2084      */
    2085     bool const fGstVmwriteAll = pVM->cpum.ro.GuestFeatures.fVmxVmwriteAll;
    2086     if (   !fGstVmwriteAll
    2087         || (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL))
    2088     { /* likely. */ }
    2089     else
    2090     {
    2091         LogRelFunc(("VMX VMWRITE-All feature exposed to the guest but host CPU does not support it!\n"));
    2092         VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_GST_HOST_VMWRITE_ALL;
    2093         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2094     }
    2095 
    2096     uint32_t const cVmcsFields = RT_ELEMENTS(g_aVmcsFields);
    2097     uint32_t       cRwFields   = 0;
    2098     uint32_t       cRoFields   = 0;
    2099     for (uint32_t i = 0; i < cVmcsFields; i++)
    2100     {
    2101         VMXVMCSFIELD VmcsField;
    2102         VmcsField.u = g_aVmcsFields[i];
    2103 
    2104         /*
    2105          * We will be writing "FULL" (64-bit) fields while syncing the shadow VMCS.
    2106          * Therefore, "HIGH" (32-bit portion of 64-bit) fields must not be included
    2107          * in the shadow VMCS fields array as they would be redundant.
    2108          *
    2109          * If the VMCS field depends on a CPU feature that is not exposed to the guest,
    2110          * we must not include it in the shadow VMCS fields array. Guests attempting to
    2111          * VMREAD/VMWRITE such VMCS fields would cause a VM-exit and we shall emulate
    2112          * the required behavior.
    2113          */
    2114         if (   VmcsField.n.fAccessType == VMX_VMCSFIELD_ACCESS_FULL
    2115             && CPUMIsGuestVmxVmcsFieldValid(pVM, VmcsField.u))
    2116         {
    2117             /*
    2118              * Read-only fields are placed in a separate array so that while syncing shadow
    2119              * VMCS fields later (which is more performance critical) we can avoid branches.
    2120              *
    2121              * However, if the guest can write to all fields (including read-only fields),
    2122              * we treat it a as read/write field. Otherwise, writing to these fields would
    2123              * cause a VMWRITE instruction error while syncing the shadow VMCS.
    2124              */
    2125             if (   fGstVmwriteAll
    2126                 || !VMXIsVmcsFieldReadOnly(VmcsField.u))
    2127                 pVM->hmr0.s.vmx.paShadowVmcsFields[cRwFields++] = VmcsField.u;
    2128             else
    2129                 pVM->hmr0.s.vmx.paShadowVmcsRoFields[cRoFields++] = VmcsField.u;
    2130         }
    2131     }
    2132 
    2133     /* Update the counts. */
    2134     pVM->hmr0.s.vmx.cShadowVmcsFields   = cRwFields;
    2135     pVM->hmr0.s.vmx.cShadowVmcsRoFields = cRoFields;
    2136     return VINF_SUCCESS;
    2137 }
    2138 
    2139 
    2140 /**
    2141  * Sets up the VMREAD and VMWRITE bitmaps.
    2142  *
    2143  * @param   pVM     The cross context VM structure.
    2144  */
    2145 static void vmxHCSetupVmreadVmwriteBitmaps(PVMCC pVM)
    2146 {
    2147     /*
    2148      * By default, ensure guest attempts to access any VMCS fields cause VM-exits.
    2149      */
    2150     uint32_t const cbBitmap        = X86_PAGE_4K_SIZE;
    2151     uint8_t       *pbVmreadBitmap  = (uint8_t *)pVM->hmr0.s.vmx.pvVmreadBitmap;
    2152     uint8_t       *pbVmwriteBitmap = (uint8_t *)pVM->hmr0.s.vmx.pvVmwriteBitmap;
    2153     ASMMemFill32(pbVmreadBitmap,  cbBitmap, UINT32_C(0xffffffff));
    2154     ASMMemFill32(pbVmwriteBitmap, cbBitmap, UINT32_C(0xffffffff));
    2155 
    2156     /*
    2157      * Skip intercepting VMREAD/VMWRITE to guest read/write fields in the
    2158      * VMREAD and VMWRITE bitmaps.
    2159      */
    2160     {
    2161         uint32_t const *paShadowVmcsFields = pVM->hmr0.s.vmx.paShadowVmcsFields;
    2162         uint32_t const  cShadowVmcsFields  = pVM->hmr0.s.vmx.cShadowVmcsFields;
    2163         for (uint32_t i = 0; i < cShadowVmcsFields; i++)
    2164         {
    2165             uint32_t const uVmcsField = paShadowVmcsFields[i];
    2166             Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
    2167             Assert(uVmcsField >> 3 < cbBitmap);
    2168             ASMBitClear(pbVmreadBitmap  + (uVmcsField >> 3), uVmcsField & 7);
    2169             ASMBitClear(pbVmwriteBitmap + (uVmcsField >> 3), uVmcsField & 7);
    2170         }
    2171     }
    2172 
    2173     /*
    2174      * Skip intercepting VMREAD for guest read-only fields in the VMREAD bitmap
    2175      * if the host supports VMWRITE to all supported VMCS fields.
    2176      */
    2177     if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
    2178     {
    2179         uint32_t const *paShadowVmcsRoFields = pVM->hmr0.s.vmx.paShadowVmcsRoFields;
    2180         uint32_t const  cShadowVmcsRoFields  = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
    2181         for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
    2182         {
    2183             uint32_t const uVmcsField = paShadowVmcsRoFields[i];
    2184             Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
    2185             Assert(uVmcsField >> 3 < cbBitmap);
    2186             ASMBitClear(pbVmreadBitmap + (uVmcsField >> 3), uVmcsField & 7);
    2187         }
    2188     }
    2189 }
    2190 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    2191 
    2192 
    2193 /**
    2194  * Sets up the APIC-access page address for the VMCS.
    2195  *
    2196  * @param   pVCpu   The cross context virtual CPU structure.
    2197  */
    2198 DECLINLINE(void) vmxHCSetupVmcsApicAccessAddr(PVMCPUCC pVCpu)
    2199 {
    2200     RTHCPHYS const HCPhysApicAccess = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysApicAccess;
    2201     Assert(HCPhysApicAccess != NIL_RTHCPHYS);
    2202     Assert(!(HCPhysApicAccess & 0xfff));                     /* Bits 11:0 MBZ. */
    2203     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
    2204     AssertRC(rc);
    2205 }
    2206 
    2207 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2208 
    2209 /**
    2210  * Sets up the VMREAD bitmap address for the VMCS.
    2211  *
    2212  * @param   pVCpu   The cross context virtual CPU structure.
    2213  */
    2214 DECLINLINE(void) vmxHCSetupVmcsVmreadBitmapAddr(PVMCPUCC pVCpu)
    2215 {
    2216     RTHCPHYS const HCPhysVmreadBitmap = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysVmreadBitmap;
    2217     Assert(HCPhysVmreadBitmap != NIL_RTHCPHYS);
    2218     Assert(!(HCPhysVmreadBitmap & 0xfff));                     /* Bits 11:0 MBZ. */
    2219     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL, HCPhysVmreadBitmap);
    2220     AssertRC(rc);
    2221 }
    2222 
    2223 
    2224 /**
    2225  * Sets up the VMWRITE bitmap address for the VMCS.
    2226  *
    2227  * @param   pVCpu   The cross context virtual CPU structure.
    2228  */
    2229 DECLINLINE(void) vmxHCSetupVmcsVmwriteBitmapAddr(PVMCPUCC pVCpu)
    2230 {
    2231     RTHCPHYS const HCPhysVmwriteBitmap = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysVmwriteBitmap;
    2232     Assert(HCPhysVmwriteBitmap != NIL_RTHCPHYS);
    2233     Assert(!(HCPhysVmwriteBitmap & 0xfff));                     /* Bits 11:0 MBZ. */
    2234     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL, HCPhysVmwriteBitmap);
    2235     AssertRC(rc);
    2236 }
    2237 
    2238 #endif
    2239 
    2240 /**
    2241  * Sets up MSR permissions in the MSR bitmap of a VMCS info. object.
    2242  *
    2243  * @param   pVCpu           The cross context virtual CPU structure.
    2244  * @param   pVmcsInfo       The VMCS info. object.
    2245  */
    2246 static void vmxHCSetupVmcsMsrPermissions(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2247 {
    2248     Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
    2249 
    2250     /*
    2251      * By default, ensure guest attempts to access any MSR cause VM-exits.
    2252      * This shall later be relaxed for specific MSRs as necessary.
    2253      *
    2254      * Note: For nested-guests, the entire bitmap will be merged prior to
    2255      * executing the nested-guest using hardware-assisted VMX and hence there
    2256      * is no need to perform this operation. See vmxHCMergeMsrBitmapNested.
    2257      */
    2258     Assert(pVmcsInfo->pvMsrBitmap);
    2259     ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff));
    2260 
    2261     /*
    2262      * The guest can access the following MSRs (read, write) without causing
    2263      * VM-exits; they are loaded/stored automatically using fields in the VMCS.
    2264      */
    2265     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2266     vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_CS,  VMXMSRPM_ALLOW_RD_WR);
    2267     vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_ESP, VMXMSRPM_ALLOW_RD_WR);
    2268     vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_EIP, VMXMSRPM_ALLOW_RD_WR);
    2269     vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_GS_BASE,        VMXMSRPM_ALLOW_RD_WR);
    2270     vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_FS_BASE,        VMXMSRPM_ALLOW_RD_WR);
    2271 
    2272     /*
    2273      * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and has no state
    2274      * associated with then. We never need to intercept access (writes need to be
    2275      * executed without causing a VM-exit, reads will #GP fault anyway).
    2276      *
    2277      * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
    2278      * read/write them. We swap the guest/host MSR value using the
    2279      * auto-load/store MSR area.
    2280      */
    2281     if (pVM->cpum.ro.GuestFeatures.fIbpb)
    2282         vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_PRED_CMD,  VMXMSRPM_ALLOW_RD_WR);
    2283     if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
    2284         vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD_WR);
    2285     if (pVM->cpum.ro.GuestFeatures.fIbrs)
    2286         vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD_WR);
    2287 
    2288     /*
    2289      * Allow full read/write access for the following MSRs (mandatory for VT-x)
    2290      * required for 64-bit guests.
    2291      */
    2292     if (pVM->hmr0.s.fAllow64BitGuests)
    2293     {
    2294         vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_LSTAR,          VMXMSRPM_ALLOW_RD_WR);
    2295         vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K6_STAR,           VMXMSRPM_ALLOW_RD_WR);
    2296         vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_SF_MASK,        VMXMSRPM_ALLOW_RD_WR);
    2297         vmxHCSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_KERNEL_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
    2298     }
    2299 
    2300     /*
    2301      * IA32_EFER MSR is always intercepted, see @bugref{9180#c37}.
    2302      */
    2303 #ifdef VBOX_STRICT
    2304     Assert(pVmcsInfo->pvMsrBitmap);
    2305     uint32_t const fMsrpmEfer = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K6_EFER);
    2306     Assert(fMsrpmEfer == VMXMSRPM_EXIT_RD_WR);
    2307 #endif
    2308 }
    2309 
    2310 
    2311 /**
    2312  * Sets up pin-based VM-execution controls in the VMCS.
    2313  *
    2314  * @returns VBox status code.
    2315  * @param   pVCpu       The cross context virtual CPU structure.
    2316  * @param   pVmcsInfo   The VMCS info. object.
    2317  */
    2318 static int vmxHCSetupVmcsPinCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2319 {
    2320     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2321     uint32_t       fVal = g_HmMsrs.u.vmx.PinCtls.n.allowed0;      /* Bits set here must always be set. */
    2322     uint32_t const fZap = g_HmMsrs.u.vmx.PinCtls.n.allowed1;      /* Bits cleared here must always be cleared. */
    2323 
    2324     fVal |= VMX_PIN_CTLS_EXT_INT_EXIT                        /* External interrupts cause a VM-exit. */
    2325          |  VMX_PIN_CTLS_NMI_EXIT;                           /* Non-maskable interrupts (NMIs) cause a VM-exit. */
    2326 
    2327     if (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
    2328         fVal |= VMX_PIN_CTLS_VIRT_NMI;                       /* Use virtual NMIs and virtual-NMI blocking features. */
    2329 
    2330     /* Enable the VMX-preemption timer. */
    2331     if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
    2332     {
    2333         Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
    2334         fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
    2335     }
    2336 
    2337 #if 0
    2338     /* Enable posted-interrupt processing. */
    2339     if (pVM->hm.s.fPostedIntrs)
    2340     {
    2341         Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1  & VMX_PIN_CTLS_POSTED_INT);
    2342         Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
    2343         fVal |= VMX_PIN_CTLS_POSTED_INT;
    2344     }
    2345 #endif
    2346 
    2347     if ((fVal & fZap) != fVal)
    2348     {
    2349         LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    2350                     g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap));
    2351         VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PIN_EXEC;
    2352         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2353     }
    2354 
    2355     /* Commit it to the VMCS and update our cache. */
    2356     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, fVal);
    2357     AssertRC(rc);
    2358     pVmcsInfo->u32PinCtls = fVal;
    2359 
    2360     return VINF_SUCCESS;
    2361 }
    2362 
    2363 
    2364 /**
    2365  * Sets up secondary processor-based VM-execution controls in the VMCS.
    2366  *
    2367  * @returns VBox status code.
    2368  * @param   pVCpu       The cross context virtual CPU structure.
    2369  * @param   pVmcsInfo   The VMCS info. object.
    2370  */
    2371 static int vmxHCSetupVmcsProcCtls2(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2372 {
    2373     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2374     uint32_t       fVal = g_HmMsrs.u.vmx.ProcCtls2.n.allowed0;    /* Bits set here must be set in the VMCS. */
    2375     uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
    2376 
    2377     /* WBINVD causes a VM-exit. */
    2378     if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
    2379         fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
    2380 
    2381     /* Enable EPT (aka nested-paging). */
    2382     if (VM_IS_VMX_NESTED_PAGING(pVM))
    2383         fVal |= VMX_PROC_CTLS2_EPT;
    2384 
    2385     /* Enable the INVPCID instruction if we expose it to the guest and is supported
    2386        by the hardware. Without this, guest executing INVPCID would cause a #UD. */
    2387     if (   pVM->cpum.ro.GuestFeatures.fInvpcid
    2388         && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID))
    2389         fVal |= VMX_PROC_CTLS2_INVPCID;
    2390 
    2391     /* Enable VPID. */
    2392     if (pVM->hmr0.s.vmx.fVpid)
    2393         fVal |= VMX_PROC_CTLS2_VPID;
    2394 
    2395     /* Enable unrestricted guest execution. */
    2396     if (VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
    2397         fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
    2398 
    2399 #if 0
    2400     if (pVM->hm.s.fVirtApicRegs)
    2401     {
    2402         /* Enable APIC-register virtualization. */
    2403         Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
    2404         fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
    2405 
    2406         /* Enable virtual-interrupt delivery. */
    2407         Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
    2408         fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
    2409     }
    2410 #endif
    2411 
    2412     /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is
    2413        where the TPR shadow resides. */
    2414     /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
    2415      *        done dynamically. */
    2416     if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    2417     {
    2418         fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
    2419         vmxHCSetupVmcsApicAccessAddr(pVCpu);
    2420    }
    2421 
    2422     /* Enable the RDTSCP instruction if we expose it to the guest and is supported
    2423        by the hardware. Without this, guest executing RDTSCP would cause a #UD. */
    2424     if (   pVM->cpum.ro.GuestFeatures.fRdTscP
    2425         && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP))
    2426         fVal |= VMX_PROC_CTLS2_RDTSCP;
    2427 
    2428     /* Enable Pause-Loop exiting. */
    2429     if (   (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
    2430         && pVM->hm.s.vmx.cPleGapTicks
    2431         && pVM->hm.s.vmx.cPleWindowTicks)
    2432     {
    2433         fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
    2434 
    2435         int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);          AssertRC(rc);
    2436         rc     = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);    AssertRC(rc);
    2437     }
    2438 
    2439     if ((fVal & fZap) != fVal)
    2440     {
    2441         LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    2442                     g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap));
    2443         VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
    2444         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2445     }
    2446 
    2447     /* Commit it to the VMCS and update our cache. */
    2448     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
    2449     AssertRC(rc);
    2450     pVmcsInfo->u32ProcCtls2 = fVal;
    2451 
    2452     return VINF_SUCCESS;
    2453 }
    2454 
    2455 
    2456 /**
    2457  * Sets up processor-based VM-execution controls in the VMCS.
    2458  *
    2459  * @returns VBox status code.
    2460  * @param   pVCpu       The cross context virtual CPU structure.
    2461  * @param   pVmcsInfo   The VMCS info. object.
    2462  */
    2463 static int vmxHCSetupVmcsProcCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2464 {
    2465     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2466     uint32_t       fVal = g_HmMsrs.u.vmx.ProcCtls.n.allowed0;     /* Bits set here must be set in the VMCS. */
    2467     uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
    2468 
    2469     fVal |= VMX_PROC_CTLS_HLT_EXIT                                    /* HLT causes a VM-exit. */
    2470          |  VMX_PROC_CTLS_USE_TSC_OFFSETTING                          /* Use TSC-offsetting. */
    2471          |  VMX_PROC_CTLS_MOV_DR_EXIT                                 /* MOV DRx causes a VM-exit. */
    2472          |  VMX_PROC_CTLS_UNCOND_IO_EXIT                              /* All IO instructions cause a VM-exit. */
    2473          |  VMX_PROC_CTLS_RDPMC_EXIT                                  /* RDPMC causes a VM-exit. */
    2474          |  VMX_PROC_CTLS_MONITOR_EXIT                                /* MONITOR causes a VM-exit. */
    2475          |  VMX_PROC_CTLS_MWAIT_EXIT;                                 /* MWAIT causes a VM-exit. */
    2476 
    2477     /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */
    2478     if (   !(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
    2479         ||  (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
    2480     {
    2481         VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
    2482         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2483     }
    2484 
    2485     /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
    2486     if (!VM_IS_VMX_NESTED_PAGING(pVM))
    2487     {
    2488         Assert(!VM_IS_VMX_UNRESTRICTED_GUEST(pVM));
    2489         fVal |= VMX_PROC_CTLS_INVLPG_EXIT
    2490              |  VMX_PROC_CTLS_CR3_LOAD_EXIT
    2491              |  VMX_PROC_CTLS_CR3_STORE_EXIT;
    2492     }
    2493 
    2494 #ifdef IN_INRG0
    2495     /* Use TPR shadowing if supported by the CPU. */
    2496     if (   PDMHasApic(pVM)
    2497         && (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
    2498     {
    2499         fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW;                /* CR8 reads from the Virtual-APIC page. */
    2500                                                              /* CR8 writes cause a VM-exit based on TPR threshold. */
    2501         Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
    2502         Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
    2503         vmxHCSetupVmcsVirtApicAddr(pVmcsInfo);
    2504     }
    2505     else
    2506     {
    2507         /* Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is
    2508            invalid on 32-bit Intel CPUs. Set this control only for 64-bit guests. */
    2509         if (pVM->hmr0.s.fAllow64BitGuests)
    2510             fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT             /* CR8 reads cause a VM-exit. */
    2511                  |  VMX_PROC_CTLS_CR8_LOAD_EXIT;             /* CR8 writes cause a VM-exit. */
    2512     }
    2513 
    2514     /* Use MSR-bitmaps if supported by the CPU. */
    2515     if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    2516     {
    2517         fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;
    2518         vmxHCSetupVmcsMsrBitmapAddr(pVmcsInfo);
    2519     }
    2520 #endif
    2521 
    2522     /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
    2523     if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    2524         fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
    2525 
    2526     if ((fVal & fZap) != fVal)
    2527     {
    2528         LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    2529                     g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap));
    2530         VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC;
    2531         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2532     }
    2533 
    2534     /* Commit it to the VMCS and update our cache. */
    2535     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
    2536     AssertRC(rc);
    2537     pVmcsInfo->u32ProcCtls = fVal;
    2538 
    2539     /* Set up MSR permissions that don't change through the lifetime of the VM. */
    2540     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    2541         vmxHCSetupVmcsMsrPermissions(pVCpu, pVmcsInfo);
    2542 
    2543     /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
    2544     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    2545         return vmxHCSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
    2546 
    2547     /* Sanity check, should not really happen. */
    2548     if (RT_LIKELY(!VM_IS_VMX_UNRESTRICTED_GUEST(pVM)))
    2549     { /* likely */ }
    2550     else
    2551     {
    2552         VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INVALID_UX_COMBO;
    2553         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2554     }
    2555 
    2556     /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
    2557     return VINF_SUCCESS;
    2558 }
    2559 
    2560 
    2561 /**
    2562  * Sets up miscellaneous (everything other than Pin, Processor and secondary
    2563  * Processor-based VM-execution) control fields in the VMCS.
    2564  *
    2565  * @returns VBox status code.
    2566  * @param   pVCpu       The cross context virtual CPU structure.
    2567  * @param   pVmcsInfo   The VMCS info. object.
    2568  */
    2569 static int vmxHCSetupVmcsMiscCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2570 {
    2571 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2572     if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUseVmcsShadowing)
    2573     {
    2574         vmxHCSetupVmcsVmreadBitmapAddr(pVCpu);
    2575         vmxHCSetupVmcsVmwriteBitmapAddr(pVCpu);
    2576     }
    2577 #endif
    2578 
    2579     Assert(pVmcsInfo->u64VmcsLinkPtr == NIL_RTHCPHYS);
    2580     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
    2581     AssertRC(rc);
    2582 
    2583     rc = vmxHCSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
    2584     if (RT_SUCCESS(rc))
    2585     {
    2586         uint64_t const u64Cr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
    2587         uint64_t const u64Cr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
    2588 
    2589         rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);    AssertRC(rc);
    2590         rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);    AssertRC(rc);
    2591 
    2592         pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
    2593         pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
    2594 
    2595         if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fLbr)
    2596         {
    2597             rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
    2598             AssertRC(rc);
    2599         }
    2600         return VINF_SUCCESS;
    2601     }
    2602     else
    2603         LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
    2604     return rc;
    2605 }
    2606 
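For background on the CR0/CR4 guest/host masks written above: a bit set in the mask makes that control-register bit host-owned, so guest reads of it return the read shadow, and a guest write that changes it relative to the shadow causes a VM-exit (per the Intel SDM). A sketch of that predicate, with an illustrative helper name:

    static bool crWriteCausesVmExit(uint64_t fGuestHostMask, uint64_t uReadShadow, uint64_t uNewValue)
    {
        /* VM-exit if any host-owned bit would differ from the read shadow. */
        return ((uNewValue ^ uReadShadow) & fGuestHostMask) != 0;
    }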
    2607 
    2608 /**
    2609  * Sets up the initial exception bitmap in the VMCS based on static conditions.
    2610  *
    2611  * We shall set up those exception intercepts that don't change during the
    2612  * lifetime of the VM here. The rest are set up dynamically while loading
    2613  * the guest state.
    2614  *
    2615  * @param   pVCpu       The cross context virtual CPU structure.
    2616  * @param   pVmcsInfo   The VMCS info. object.
    2617  */
    2618 static void vmxHCSetupVmcsXcptBitmap(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2619 {
    2620     /*
    2621      * The following exceptions are always intercepted:
    2622      *
    2623      * #AC - To prevent the guest from hanging the CPU and for dealing with
    2624      *       split-lock detecting host configs.
    2625      * #DB - To maintain the DR6 state even when intercepting DRx reads/writes,
    2626      *       and because recursive #DBs can cause a CPU hang.
    2627      * #PF - To sync our shadow page tables when nested-paging is not used.
    2628      */
    2629     bool const fNestedPaging = pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging;
    2630     uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
    2631                                | RT_BIT(X86_XCPT_DB)
    2632                                | (fNestedPaging ? 0 : RT_BIT(X86_XCPT_PF));
    2633 
    2634     /* Commit it to the VMCS. */
    2635     int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    2636     AssertRC(rc);
    2637 
    2638     /* Update our cache of the exception bitmap. */
    2639     pVmcsInfo->u32XcptBitmap = uXcptBitmap;
    2640 }
    2641 
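The exception bitmap is indexed by vector number: bit n set means exception vector n unconditionally causes a VM-exit. A trivial query sketch using the same IPRT bit-macro family as this file (the helper name is illustrative):

    static bool isXcptIntercepted(uint32_t uXcptBitmap, uint8_t uVector)
    {
        return (uXcptBitmap & RT_BIT_32(uVector)) != 0;
    }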
    2642 
    2643 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2644 /**
    2645  * Sets up the VMCS for executing a nested-guest using hardware-assisted VMX.
    2646  *
    2647  * @returns VBox status code.
    2648  * @param   pVCpu       The cross context virtual CPU structure.
          * @param   pVmcsInfo   The VMCS info. object.
    2649  */
    2650 static int vmxHCSetupVmcsCtlsNested(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2651 {
    2652     Assert(pVmcsInfo->u64VmcsLinkPtr == NIL_RTHCPHYS);
    2653     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
    2654     AssertRC(rc);
    2655 
    2656     rc = vmxHCSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
    2657     if (RT_SUCCESS(rc))
    2658     {
    2659         if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    2660             vmxHCSetupVmcsMsrBitmapAddr(pVmcsInfo);
    2661 
    2662         /* Paranoia - We've not yet initialized these; that will be done while merging the VMCS. */
    2663         Assert(!pVmcsInfo->u64Cr0Mask);
    2664         Assert(!pVmcsInfo->u64Cr4Mask);
    2665         return VINF_SUCCESS;
    2666     }
    2667     LogRelFunc(("Failed to set up the VMCS link pointer in the nested-guest VMCS. rc=%Rrc\n", rc));
    2668     return rc;
    2669 }
    2670 #endif
    2671 #endif /* !IN_RING0 */
    26721451
    26731452
     
    27341513             */
    27351514            if (   g_fHmVmxSupportsVmcsEfer
    2736                 && vmxHCShouldSwapEferMsr(pVCpu, pVmxTransient))
     1515#ifndef IN_NEM_DARWIN
     1516                && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient)
     1517#endif
     1518                )
    27371519                fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
    27381520            else
     
    27941576            fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
    27951577
    2796 #ifdef IN_RING0
     1578#ifndef IN_NEM_DARWIN
    27971579            /*
    27981580             * If the VMCS EFER MSR fields are supported by the hardware, we use it.
     
    28021584             */
    28031585            if (   g_fHmVmxSupportsVmcsEfer
    2804                 && vmxHCShouldSwapEferMsr(pVCpu, pVmxTransient))
     1586                && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
    28051587            {
    28061588                fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
     
    30391821
    30401822
    3041 #ifdef IN_RING0
    3042 /**
    3043  * Exports the guest's RSP into the guest-state area in the VMCS.
    3044  *
    3045  * @param   pVCpu   The cross context virtual CPU structure.
    3046  *
    3047  * @remarks No-long-jump zone!!!
    3048  */
    3049 static void vmxHCExportGuestRsp(PVMCPUCC pVCpu)
    3050 {
    3051     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RSP)
    3052     {
    3053         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
    3054 
    3055         int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RSP, pVCpu->cpum.GstCtx.rsp);
    3056         AssertRC(rc);
    3057 
    3058         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RSP);
    3059         Log4Func(("rsp=%#RX64\n", pVCpu->cpum.GstCtx.rsp));
    3060     }
    3061 }
    3062 #endif
    3063 
    3064 
    30651823/**
    30661824 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
     
    30841842        Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
    30851843
    3086 #ifdef IN_RING0
     1844#ifndef IN_NEM_DARWIN
    30871845        /*
    30881846         * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
     
    31751933
    31761934        rc  = vmxHCClearShadowVmcs(pVmcsInfo);
    3177         rc |= vmxHCLoadVmcs(pVmcsInfo);
     1935        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
    31781936    }
    31791937
     
    32191977
    32201978        rc  = vmxHCClearShadowVmcs(pVmcsInfo);
    3221         rc |= vmxHCLoadVmcs(pVmcsInfo);
     1979        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
    32221980    }
    32231981    return rc;
     
    32762034        Log4Func(("Disabled\n"));
    32772035    }
    3278 }
    3279 #endif
    3280 
    3281 
    3282 #ifdef IN_RING0
    3283 /**
    3284  * Exports the guest hardware-virtualization state.
    3285  *
    3286  * @returns VBox status code.
    3287  * @param   pVCpu           The cross context virtual CPU structure.
    3288  * @param   pVmxTransient   The VMX-transient structure.
    3289  *
    3290  * @remarks No-long-jump zone!!!
    3291  */
    3292 static int vmxHCExportGuestHwvirtState(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    3293 {
    3294     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_HWVIRT)
    3295     {
    3296 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3297         /*
    3298          * Check if the VMX feature is exposed to the guest and if the host CPU supports
    3299          * VMCS shadowing.
    3300          */
    3301         if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUseVmcsShadowing)
    3302         {
    3303             /*
    3304              * If the nested hypervisor has loaded a current VMCS and is in VMX root mode,
    3305              * copy the nested hypervisor's current VMCS into the shadow VMCS and enable
    3306              * VMCS shadowing to skip intercepting some or all VMREAD/VMWRITE VM-exits.
    3307              *
    3308              * We check for VMX root mode here in case the guest executes VMXOFF without
    3309              * clearing the current VMCS pointer and our VMXOFF instruction emulation does
    3310              * not clear the current VMCS pointer.
    3311              */
    3312             PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    3313             if (   CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx)
    3314                 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
    3315                 && CPUMIsGuestVmxCurrentVmcsValid(&pVCpu->cpum.GstCtx))
    3316             {
    3317                 /* Paranoia. */
    3318                 Assert(!pVmxTransient->fIsNestedGuest);
    3319 
    3320                 /*
    3321                  * For performance reasons, also check if the nested hypervisor's current VMCS
    3322                  * was newly loaded or modified before copying it to the shadow VMCS.
    3323                  */
    3324                 if (!VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs)
    3325                 {
    3326                     int rc = vmxHCCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
    3327                     AssertRCReturn(rc, rc);
    3328                     VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs = true;
    3329                 }
    3330                 vmxHCEnableVmcsShadowing(pVmcsInfo);
    3331             }
    3332             else
    3333                 vmxHCDisableVmcsShadowing(pVmcsInfo);
    3334         }
    3335 #else
    3336         NOREF(pVmxTransient);
    3337 #endif
    3338         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT);
    3339     }
    3340     return VINF_SUCCESS;
    33412036}
    33422037#endif
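The shadowing condition spelled out in the comments above condenses to three CPUM checks. As a sketch (the wrapper name is illustrative; the CPUM predicates are the ones actually used in the code):

    static bool shouldEnableVmcsShadowing(PCCPUMCTX pCtx)
    {
        return CPUMIsGuestInVmxRootMode(pCtx)          /* guest entered VMX operation */
            && !CPUMIsGuestInVmxNonRootMode(pCtx)      /* but isn't running its nested guest */
            && CPUMIsGuestVmxCurrentVmcsValid(pCtx);   /* and has a current VMCS loaded */
    }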
     
    34232118             */
    34242119            uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
    3425 #ifdef IN_RING0
     2120#ifndef IN_NEM_DARWIN
    34262121            if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    34272122            {
     
    35522247        if (VM_IS_VMX_NESTED_PAGING(pVM))
    35532248        {
    3554 #ifdef IN_RING0
     2249#ifndef IN_NEM_DARWIN
    35552250            PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    35562251            pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
     
    35972292                 */
    35982293            }
    3599 #ifdef IN_RING0
     2294#ifndef IN_NEM_DARWIN
    36002295            else
    36012296            {
     
    36692364        Assert(!RT_HI_U32(u64GuestCr4));
    36702365
    3671 #ifdef IN_RING0
     2366#ifndef IN_NEM_DARWIN
    36722367        /*
    36732368         * Setup VT-x's view of the guest CR4.
     
    37472442        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4);   AssertRC(rc);
    37482443
    3749 #ifdef IN_RING0
     2444#ifndef IN_NEM_DARWIN
    37502445        /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
    37512446        bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     
    37532448        {
    37542449            pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    3755             vmxHCUpdateStartVmFunction(pVCpu);
     2450            hmR0VmxUpdateStartVmFunction(pVCpu);
    37562451        }
    37572452#endif
     
    37632458    return rc;
    37642459}
    3765 
    3766 
    3767 #ifdef IN_RING0
    3768 /**
    3769  * Exports the guest debug registers into the guest-state area in the VMCS.
    3770  * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
    3771  *
    3772  * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
    3773  *
    3774  * @returns VBox status code.
    3775  * @param   pVCpu           The cross context virtual CPU structure.
    3776  * @param   pVmxTransient   The VMX-transient structure.
    3777  *
    3778  * @remarks No-long-jump zone!!!
    3779  */
    3780 static int vmxHCExportSharedDebugState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    3781 {
    3782     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    3783 
    3784     /** @todo NSTVMX: Figure out what we want to do with nested-guest instruction
    3785      *        stepping. */
    3786     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    3787 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3788     if (pVmxTransient->fIsNestedGuest)
    3789     {
    3790         int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, CPUMGetGuestDR7(pVCpu));
    3791         AssertRC(rc);
    3792 
    3793         /*
    3794          * We don't want to always intercept MOV DRx for nested-guests as it causes
    3795          * problems when the nested hypervisor isn't intercepting them, see @bugref{10080}.
    3796          * Instead, they are strictly only requested when the nested hypervisor intercepts
    3797          * them -- handled while merging VMCS controls.
    3798          *
    3799          * If neither the outer nor the nested hypervisor is intercepting MOV DRx,
    3800          * then the nested-guest debug state should be actively loaded on the host so
    3801          * that the nested-guest reads its own debug registers without causing VM-exits.
    3802          */
    3803         if (   !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
    3804             && !CPUMIsGuestDebugStateActive(pVCpu))
    3805             CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
    3806         return VINF_SUCCESS;
    3807     }
    3808 #endif
    3809 
    3810 #ifdef VBOX_STRICT
    3811     /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
    3812     if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
    3813     {
    3814         /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
    3815         Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
    3816         Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
    3817     }
    3818 #endif
    3819 
    3820     bool     fSteppingDB      = false;
    3821     bool     fInterceptMovDRx = false;
    3822     uint32_t uProcCtls        = pVmcsInfo->u32ProcCtls;
    3823     if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    3824     {
    3825         /* If the CPU supports the monitor trap flag, use it for single-stepping in DBGF and avoid intercepting #DB. */
    3826         if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
    3827         {
    3828             uProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
    3829             Assert(fSteppingDB == false);
    3830         }
    3831         else
    3832         {
    3833             pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
    3834             VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
    3835             pVCpu->hmr0.s.fClearTrapFlag = true;
    3836             fSteppingDB = true;
    3837         }
    3838     }
    3839 
    3840     uint64_t u64GuestDr7;
    3841     if (   fSteppingDB
    3842         || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
    3843     {
    3844         /*
    3845          * Use the combined guest and host DRx values found in the hypervisor register set
    3846          * because the hypervisor debugger has breakpoints active or someone is single-stepping
    3847          * on the host side without a monitor trap flag.
    3848          *
    3849          * Note! DBGF expects a clean DR6 state before executing guest code.
    3850          */
    3851         if (!CPUMIsHyperDebugStateActive(pVCpu))
    3852         {
    3853             CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
    3854             Assert(CPUMIsHyperDebugStateActive(pVCpu));
    3855             Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    3856         }
    3857 
    3858         /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
    3859         u64GuestDr7 = CPUMGetHyperDR7(pVCpu);
    3860         pVCpu->hmr0.s.fUsingHyperDR7 = true;
    3861         fInterceptMovDRx = true;
    3862     }
    3863     else
    3864     {
    3865         /*
    3866          * If the guest has enabled debug registers, we need to load them prior to
    3867          * executing guest code so they'll trigger at the right time.
    3868          */
    3869         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
    3870         if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
    3871         {
    3872             if (!CPUMIsGuestDebugStateActive(pVCpu))
    3873             {
    3874                 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
    3875                 Assert(CPUMIsGuestDebugStateActive(pVCpu));
    3876                 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    3877                 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxArmed);
    3878             }
    3879             Assert(!fInterceptMovDRx);
    3880         }
    3881         else if (!CPUMIsGuestDebugStateActive(pVCpu))
    3882         {
    3883             /*
    3884              * If no debugging is enabled, we'll lazily load DR0-3.  Unlike on AMD-V, we
    3885              * must intercept #DB in order to maintain a correct DR6 guest value, and
    3886              * since we also need to intercept it to prevent nested #DBs from hanging the
    3887              * CPU, we end up always intercepting it. See vmxHCSetupVmcsXcptBitmap().
    3888              */
    3889             fInterceptMovDRx = true;
    3890         }
    3891 
    3892         /* Update DR7 with the actual guest value. */
    3893         u64GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
    3894         pVCpu->hmr0.s.fUsingHyperDR7 = false;
    3895     }
    3896 
    3897     if (fInterceptMovDRx)
    3898         uProcCtls |= VMX_PROC_CTLS_MOV_DR_EXIT;
    3899     else
    3900         uProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
    3901 
    3902     /*
    3903      * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
    3904      * monitor-trap flag and update our cache.
    3905      */
    3906     if (uProcCtls != pVmcsInfo->u32ProcCtls)
    3907     {
    3908         int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
    3909         AssertRC(rc);
    3910         pVmcsInfo->u32ProcCtls = uProcCtls;
    3911     }
    3912 
    3913     /*
    3914      * Update guest DR7.
    3915      */
    3916     int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_DR7, u64GuestDr7);
    3917     AssertRC(rc);
    3918 
    3919     /*
    3920      * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
    3921      * we need to clear any interrupt inhibition, as otherwise it causes a VM-entry failure.
    3922      *
    3923      * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
    3924      */
    3925     if (fSteppingDB)
    3926     {
    3927         Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
    3928         Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
    3929 
    3930         uint32_t fIntrState = 0;
    3931         rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
    3932         AssertRC(rc);
    3933 
    3934         if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
    3935         {
    3936             fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
    3937             rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
    3938             AssertRC(rc);
    3939         }
    3940     }
    3941 
    3942     return VINF_SUCCESS;
    3943 }
    3944 #endif /* !IN_RING0 */
    39452460
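The MOV DRx intercept decision above boils down to three cases. A condensed sketch under the same assumptions (hypervisor debugging wins, an armed guest DR7 gets the real registers loaded, everything else stays lazy; the helper name is illustrative):

    static bool mustInterceptMovDRx(bool fHyperDebugActive, uint64_t uGuestDr7)
    {
        if (fHyperDebugActive)                                  /* host-side debugger owns DRx */
            return true;
        if (uGuestDr7 & (X86_DR7_ENABLED_MASK | X86_DR7_GD))    /* guest DRx loaded for real */
            return false;
        return true;                                            /* lazy DRx: trap guest access */
    }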
    39462461
     
    40782593        /* vmxHCExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
    40792594        uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
    4080 #ifdef IN_RING0
     2595#ifndef IN_NEM_DARWIN
    40812596        if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    40822597        {
     
    41412656
    41422657    uint32_t u32Access = pSelReg->Attr.u;
    4143 #ifdef IN_RING0
     2658#ifndef IN_NEM_DARWIN
    41442659    if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    41452660#endif
     
    41572672            u32Access = X86DESCATTR_UNUSABLE;
    41582673    }
    4159 #ifdef IN_RING0
     2674#ifndef IN_NEM_DARWIN
    41602675    else
    41612676    {
     
    42042719{
    42052720    int                 rc              = VERR_INTERNAL_ERROR_5;
    4206 #ifdef IN_RING0
     2721#ifndef IN_NEM_DARWIN
    42072722    PVMCC               pVM             = pVCpu->CTX_SUFF(pVM);
    42082723#endif
    42092724    PCCPUMCTX           pCtx            = &pVCpu->cpum.GstCtx;
    42102725    PVMXVMCSINFO        pVmcsInfo       = pVmxTransient->pVmcsInfo;
    4211 #ifdef IN_RING0
     2726#ifndef IN_NEM_DARWIN
    42122727    PVMXVMCSINFOSHARED  pVmcsInfoShared = pVmcsInfo->pShared;
    42132728#endif
     
    42212736        {
    42222737            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
    4223 #ifdef IN_RING0
     2738#ifndef IN_NEM_DARWIN
    42242739            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42252740                pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
     
    42332748        {
    42342749            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
    4235 #ifdef IN_RING0
     2750#ifndef IN_NEM_DARWIN
    42362751            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42372752                pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
     
    42452760        {
    42462761            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
    4247 #ifdef IN_RING0
     2762#ifndef IN_NEM_DARWIN
    42482763            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42492764                pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
     
    42572772        {
    42582773            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
    4259 #ifdef IN_RING0
     2774#ifndef IN_NEM_DARWIN
    42602775            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42612776                pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
     
    42692784        {
    42702785            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
    4271 #ifdef IN_RING0
     2786#ifndef IN_NEM_DARWIN
    42722787            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42732788                pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
     
    42812796        {
    42822797            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
    4283 #ifdef IN_RING0
     2798#ifndef IN_NEM_DARWIN
    42842799            if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    42852800                pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
     
    43132828        uint64_t u64Base;
    43142829        uint32_t u32AccessRights;
    4315 #ifdef IN_RING0
     2830#ifndef IN_NEM_DARWIN
    43162831        if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
    43172832#endif
     
    43222837            u32AccessRights = pCtx->tr.Attr.u;
    43232838        }
    4324 #ifdef IN_RING0
     2839#ifndef IN_NEM_DARWIN
    43252840        else
    43262841        {
     
    44442959    return VINF_SUCCESS;
    44452960}
    4446 
    4447 
    4448 #ifdef IN_RING0
    4449 /**
    4450  * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
    4451  * areas.
    4452  *
    4453  * These MSRs will automatically be loaded into the CPU on every successful
    4454  * VM-entry and stored back from the CPU on every successful VM-exit.
    4455  *
    4456  * We create/update MSR slots for the host MSRs in the VM-exit MSR-load area.
    4457  * The actual host MSR values are not updated here, for performance reasons; see
    4458  * vmxHCExportHostMsrs().
    4459  *
    4460  * We also export the guest sysenter MSRs into the guest-state area in the VMCS.
    4461  *
    4462  * @returns VBox status code.
    4463  * @param   pVCpu           The cross context virtual CPU structure.
    4464  * @param   pVmxTransient   The VMX-transient structure.
    4465  *
    4466  * @remarks No-long-jump zone!!!
    4467  */
    4468 static int vmxHCExportGuestMsrs(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    4469 {
    4470     AssertPtr(pVCpu);
    4471     AssertPtr(pVmxTransient);
    4472 
    4473     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    4474     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    4475 
    4476     /*
    4477      * MSRs for which we use the auto-load/store MSR area in the VMCS.
    4478      * For 64-bit hosts, we load/restore them lazily (see vmxHCLazyLoadGuestMsrs()),
    4479      * so there is nothing to do here. The host MSR values are updated when it's
    4480      * safe, in vmxHCLazySaveHostMsrs().
    4481      *
    4482      * For nested-guests, the guest's MSRs from the VM-entry MSR-load area are already
    4483      * loaded (into the guest-CPU context) by the VMLAUNCH/VMRESUME instruction
    4484      * emulation. The merged MSR permission bitmap will ensure that we get VM-exits
    4485      * for any MSRs that are not part of the lazy MSRs, so we do not need to place
    4486      * those MSRs into the auto-load/store MSR area. Nothing to do here.
    4487      */
    4488     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
    4489     {
    4490         /* No auto-load/store MSRs currently. */
    4491         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    4492     }
    4493 
    4494     /*
    4495      * Guest Sysenter MSRs.
    4496      */
    4497     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
    4498     {
    4499         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
    4500 
    4501         if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
    4502         {
    4503             int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
    4504             AssertRC(rc);
    4505             ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
    4506         }
    4507 
    4508         if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
    4509         {
    4510             int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
    4511             AssertRC(rc);
    4512             ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
    4513         }
    4514 
    4515         if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
    4516         {
    4517             int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
    4518             AssertRC(rc);
    4519             ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
    4520         }
    4521     }
    4522 
    4523     /*
    4524      * Guest/host EFER MSR.
    4525      */
    4526     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
    4527     {
    4528         /* Whether we are using the VMCS to swap the EFER MSR must have been
    4529            determined earlier while exporting VM-entry/VM-exit controls. */
    4530         Assert(!(ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));
    4531         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
    4532 
    4533         if (vmxHCShouldSwapEferMsr(pVCpu, pVmxTransient))
    4534         {
    4535             /*
    4536              * EFER.LME is written by software, while EFER.LMA is set by the CPU to (CR0.PG & EFER.LME).
    4537              * This means a guest can set EFER.LME=1 while CR0.PG=0 and EFER.LMA can remain 0.
    4538              * VT-x requires that the "IA-32e mode guest" VM-entry control be identical to EFER.LMA
    4539              * and to CR0.PG. Without unrestricted execution, CR0.PG (used for VT-x, not the shadow)
    4540              * must always be 1. This forces us to effectively clear both EFER.LMA and EFER.LME until
    4541              * the guest has also set CR0.PG=1. Otherwise, we would run into an invalid
    4542              * guest-state error during VM-entry.
    4543              */
    4544             uint64_t uGuestEferMsr = pCtx->msrEFER;
    4545             if (!VM_IS_VMX_UNRESTRICTED_GUEST(pVM))
    4546             {
    4547                 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
    4548                     uGuestEferMsr &= ~MSR_K6_EFER_LME;
    4549                 else
    4550                     Assert((pCtx->msrEFER & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
    4551             }
    4552 
    4553             /*
    4554              * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
    4555              * but to use the auto-load/store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
    4556              */
    4557             if (g_fHmVmxSupportsVmcsEfer)
    4558             {
    4559                 int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, uGuestEferMsr);
    4560                 AssertRC(rc);
    4561             }
    4562             else
    4563             {
    4564                 /*
    4565                  * We shall use the auto-load/store MSR area only for loading the EFER MSR but we must
    4566                  * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}.
    4567                  */
    4568                 int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, uGuestEferMsr,
    4569                                                     false /* fSetReadWrite */, false /* fUpdateHostMsr */);
    4570                 AssertRCReturn(rc, rc);
    4571             }
    4572 
    4573             Log4Func(("efer=%#RX64 shadow=%#RX64\n", uGuestEferMsr, pCtx->msrEFER));
    4574         }
    4575         else if (!g_fHmVmxSupportsVmcsEfer)
    4576             vmxHCRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER);
    4577 
    4578         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
    4579     }
    4580 
    4581     /*
    4582      * Other MSRs.
    4583      */
    4584     if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)
    4585     {
    4586         /* Speculation Control (R/W). */
    4587         HMVMX_CPUMCTX_ASSERT(pVCpu, HM_CHANGED_GUEST_OTHER_MSRS);
    4588         if (pVM->cpum.ro.GuestFeatures.fIbrs)
    4589         {
    4590             int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu),
    4591                                                 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
    4592             AssertRCReturn(rc, rc);
    4593         }
    4594 
    4595         /* Last Branch Record. */
    4596         if (VM_IS_VMX_LBR(pVM))
    4597         {
    4598             PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmxTransient->pVmcsInfo->pShared;
    4599             uint32_t const idFromIpMsrStart = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst;
    4600             uint32_t const idToIpMsrStart   = pVM->hmr0.s.vmx.idLbrToIpMsrFirst;
    4601             uint32_t const cLbrStack        = pVM->hmr0.s.vmx.idLbrFromIpMsrLast - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst + 1;
    4602             Assert(cLbrStack <= 32);
    4603             for (uint32_t i = 0; i < cLbrStack; i++)
    4604             {
    4605                 int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, idFromIpMsrStart + i,
    4606                                                     pVmcsInfoShared->au64LbrFromIpMsr[i],
    4607                                                     false /* fSetReadWrite */, false /* fUpdateHostMsr */);
    4608                 AssertRCReturn(rc, rc);
    4609 
    4610                 /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
    4611                 if (idToIpMsrStart != 0)
    4612                 {
    4613                     rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, idToIpMsrStart + i,
    4614                                                     pVmcsInfoShared->au64LbrToIpMsr[i],
    4615                                                     false /* fSetReadWrite */, false /* fUpdateHostMsr */);
    4616                     AssertRCReturn(rc, rc);
    4617                 }
    4618             }
    4619 
    4620             /* Add LBR top-of-stack MSR (which contains the index to the most recent record). */
    4621             int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, pVM->hmr0.s.vmx.idLbrTosMsr,
    4622                                                 pVmcsInfoShared->u64LbrTosMsr, false /* fSetReadWrite */,
    4623                                                 false /* fUpdateHostMsr */);
    4624             AssertRCReturn(rc, rc);
    4625         }
    4626 
    4627         ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
    4628     }
    4629 
    4630     return VINF_SUCCESS;
    4631 }
    4632 
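A worked restatement of the EFER fixup described in the comments above (a sketch only; the shipped logic is inline in the function body):

    static uint64_t fixupGuestEferForVmEntry(uint64_t uEfer, bool fUnrestrictedGuest)
    {
        /* Without unrestricted execution CR0.PG is always 1 as far as VT-x is
           concerned, so hide LME until the guest has actually enabled paging
           (i.e. until the CPU has set LMA). */
        if (!fUnrestrictedGuest && !(uEfer & MSR_K6_EFER_LMA))
            uEfer &= ~MSR_K6_EFER_LME;
        return uEfer;
    }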
    4633 
    4634 /**
    4635  * Sets up the usage of TSC-offsetting and updates the VMCS.
    4636  *
    4637  * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
    4638  * VMX-preemption timer.
    4639  *
    4640  * @returns VBox status code.
    4641  * @param   pVCpu           The cross context virtual CPU structure.
    4642  * @param   pVmxTransient   The VMX-transient structure.
    4643  * @param   idCurrentCpu    The current CPU number.
    4644  *
    4645  * @remarks No-long-jump zone!!!
    4646  */
    4647 static void vmxHCUpdateTscOffsettingAndPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, RTCPUID idCurrentCpu)
    4648 {
    4649     bool         fOffsettedTsc;
    4650     bool         fParavirtTsc;
    4651     uint64_t     uTscOffset;
    4652     PVMCC        pVM       = pVCpu->CTX_SUFF(pVM);
    4653     PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    4654 
    4655     if (VM_IS_VMX_PREEMPT_TIMER_USED(pVM))
    4656     {
    4657         /* The TMCpuTickGetDeadlineAndTscOffset function is expensive (calling it on
    4658            every entry slowed down the bs2-test1 CPUID testcase by ~33% on a 10980xe). */
    4659         uint64_t cTicksToDeadline;
    4660         if (   idCurrentCpu == pVCpu->hmr0.s.idLastCpu
    4661             && TMVirtualSyncIsCurrentDeadlineVersion(pVM, pVCpu->hmr0.s.vmx.uTscDeadlineVersion))
    4662         {
    4663             STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionReusingDeadline);
    4664             fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
    4665             cTicksToDeadline = pVCpu->hmr0.s.vmx.uTscDeadline - SUPReadTsc();
    4666             if ((int64_t)cTicksToDeadline > 0)
    4667             { /* hopefully */ }
    4668             else
    4669             {
    4670                 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionReusingDeadlineExpired);
    4671                 cTicksToDeadline = 0;
    4672             }
    4673         }
    4674         else
    4675         {
    4676             STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionRecalcingDeadline);
    4677             cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc,
    4678                                                                 &pVCpu->hmr0.s.vmx.uTscDeadline,
    4679                                                                 &pVCpu->hmr0.s.vmx.uTscDeadlineVersion);
    4680             pVCpu->hmr0.s.vmx.uTscDeadline += cTicksToDeadline;
    4681             if (cTicksToDeadline >= 128)
    4682             { /* hopefully */ }
    4683             else
    4684                 STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatVmxPreemptionRecalcingDeadlineExpired);
    4685         }
    4686 
    4687         /* Make sure the returned values have sane upper and lower boundaries. */
    4688         uint64_t const u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
    4689         cTicksToDeadline   = RT_MIN(cTicksToDeadline, u64CpuHz / 64);      /* 1/64th of a second,  15.625ms. */ /** @todo r=bird: Once real+virtual timers move to separate thread, we can raise the upper limit (16ms isn't much). ASSUMES working poke cpu function. */
    4690         cTicksToDeadline   = RT_MAX(cTicksToDeadline, u64CpuHz / 32768);   /* 1/32768th of a second,  ~30us. */
    4691         cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
    4692 
    4693         /** @todo r=ramshankar: We need to find a way to integrate nested-guest
    4694          *        preemption timers here. We probably need to clamp the preemption timer,
    4695          *        after converting the timer value to the host. */
    4696         uint32_t const cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
    4697         int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
    4698         AssertRC(rc);
    4699     }
    4700     else
    4701         fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
    4702 
    4703     if (fParavirtTsc)
    4704     {
    4705         /* Currently neither Hyper-V nor KVM needs to update its paravirt. TSC
    4706            information before every VM-entry, hence this is disabled for performance's sake. */
    4707 #if 0
    4708         int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
    4709         AssertRC(rc);
    4710 #endif
    4711         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatTscParavirt);
    4712     }
    4713 
    4714     if (   fOffsettedTsc
    4715         && RT_LIKELY(!pVCpu->hmr0.s.fDebugWantRdTscExit))
    4716     {
    4717         if (pVmxTransient->fIsNestedGuest)
    4718             uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
    4719         vmxHCSetTscOffsetVmcs(pVCpu, pVmcsInfo, uTscOffset);
    4720         vmxHCRemoveProcCtlsVmcs(pVCpu, pVmxTransient, VMX_PROC_CTLS_RDTSC_EXIT);
    4721     }
    4722     else
    4723     {
    4724         /* We can't use TSC-offsetting (non-fixed TSC, warp drive active, etc.), so VM-exit on RDTSC(P). */
    4725         vmxHCSetProcCtlsVmcs(pVmxTransient, VMX_PROC_CTLS_RDTSC_EXIT);
    4726     }
    4727 }
    4728 #endif /* !IN_RING0 */
    47292961
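To make the clamping above concrete, a worked example assuming a 3 GHz TSC and a CPU preemption-timer shift of 5 (both values are illustrative):

    uint64_t const u64CpuHz         = UINT64_C(3000000000);
    uint64_t       cTicksToDeadline = UINT64_C(200000000);          /* ~66 ms requested */
    cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64);     /* -> 46875000 ticks (15.625 ms cap) */
    cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 32768);  /* floor of ~91552 ticks (~30 us) */
    uint32_t const cTimerTicks = (uint32_t)RT_MIN(cTicksToDeadline >> 5, UINT32_MAX - 16);
    /* cTimerTicks == 1464843, the value a VMX_VMCS32_PREEMPT_TIMER_VALUE write like the one above would receive. */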
    47302962
     
    49593191
    49603192#ifdef VBOX_STRICT
    4961 # ifdef IN_RING0
     3193# ifndef IN_NEM_DARWIN
    49623194    VMMRZCallRing3Disable(pVCpu);
    49633195# endif
     
    49683200               pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
    49693201# endif
    4970 # ifdef IN_RING0
     3202# ifndef IN_NEM_DARWIN
    49713203    VMMRZCallRing3Enable(pVCpu);
    49723204# endif
     
    51153347
    51163348        pCtx->rflags.u64 = u64Val;
    5117 #ifdef IN_RING0
     3349#ifndef IN_NEM_DARWIN
    51183350        PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
    51193351        if (pVmcsInfoShared->RealMode.fRealOnV86Active)
     
    52043436    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState, x);
    52053437
    5206 #ifdef IN_RING0
     3438#ifndef IN_NEM_DARWIN
    52073439    /*
    52083440     * We disable interrupts to make the updating of the state and in particular
     
    53033535                if (fWhat & CPUMCTX_EXTRN_TR)
    53043536                {
    5305 #ifdef IN_RING0
     3537#ifndef IN_NEM_DARWIN
    53063538                    /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
    53073539                       don't need to import that one. */
     
    53143546            if (fWhat & CPUMCTX_EXTRN_DR7)
    53153547            {
    5316 #ifdef IN_RING0
     3548#ifndef IN_NEM_DARWIN
    53173549                if (!pVCpu->hmr0.s.fUsingHyperDR7)
    53183550#endif
     
    53313563            }
    53323564
    5333 #ifdef IN_RING0
     3565#ifndef IN_NEM_DARWIN
    53343566            if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
    53353567            {
     
    53713603                            if (VM_IS_VMX_LBR(pVM))
    53723604                            {
    5373                                 if (vmxHCIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
     3605                                if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
    53743606                                {
    53753607                                    Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
     
    53773609                                    break;
    53783610                                }
    5379                                 if (vmxHCIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
     3611                                if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
    53803612                                {
    53813613                                    Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
     
    54323664                    }
    54333665#endif
    5434 #ifdef IN_RING0
     3666#ifndef IN_NEM_DARWIN
    54353667                    VMMRZCallRing3Disable(pVCpu);   /* May call into PGM which has Log statements. */
    54363668#endif
    54373669                    CPUMSetGuestCR0(pVCpu, u64Cr0);
    5438 #ifdef IN_RING0
     3670#ifndef IN_NEM_DARWIN
    54393671                    VMMRZCallRing3Enable(pVCpu);
    54403672#endif
     
    55443776        }
    55453777    }
    5546 #ifdef IN_RING0
     3778#ifndef IN_NEM_DARWIN
    55473779    else
    55483780        AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
     
    55783810     */
    55793811    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
    5580 #ifdef IN_RING0
     3812#ifndef IN_NEM_DARWIN
    55813813        && VMMRZCallRing3IsEnabled(pVCpu)
    55823814#endif
     
    56183850static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
    56193851{
    5620 #ifdef IN_RING0
     3852#ifndef IN_NEM_DARWIN
    56213853    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    56223854#endif
     
    58984130
    58994131
    5900 #ifdef IN_RING0
    5901 /**
    5902  * Does the necessary state syncing before returning to ring-3 for any reason
    5903  * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
    5904  *
    5905  * @returns VBox status code.
    5906  * @param   pVCpu           The cross context virtual CPU structure.
    5907  * @param   fImportState    Whether to import the guest state from the VMCS back
    5908  *                          to the guest-CPU context.
    5909  *
    5910  * @remarks No-long-jmp zone!!!
    5911  */
    5912 static int vmxHCLeave(PVMCPUCC pVCpu, bool fImportState)
    5913 {
    5914     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    5915     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    5916 
    5917     RTCPUID const idCpu = RTMpCpuId();
    5918     Log4Func(("HostCpuId=%u\n", idCpu));
    5919 
    5920     /*
    5921      * !!! IMPORTANT !!!
    5922      * If you modify code here, check whether VMXR0CallRing3Callback() needs to be updated too.
    5923      */
    5924 
    5925     /* Save the guest state if necessary. */
    5926     PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    5927     if (fImportState)
    5928     {
    5929         int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    5930         AssertRCReturn(rc, rc);
    5931     }
    5932 
    5933     /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
    5934     CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
    5935     Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    5936 
    5937     /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
    5938 #ifdef VBOX_STRICT
    5939     if (CPUMIsHyperDebugStateActive(pVCpu))
    5940         Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
    5941 #endif
    5942     CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
    5943     Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    5944     Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    5945 
    5946     /* Restore host-state bits that VT-x only restores partially. */
    5947     if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
    5948     {
    5949         Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hmr0.s.vmx.fRestoreHostFlags, idCpu));
    5950         VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
    5951     }
    5952     pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
    5953 
    5954     /* Restore the lazy host MSRs as we're leaving VT-x context. */
    5955     if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
    5956     {
    5957         /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
    5958         if (!fImportState)
    5959         {
    5960             int rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
    5961             AssertRCReturn(rc, rc);
    5962         }
    5963         vmxHCLazyRestoreHostMsrs(pVCpu);
    5964         Assert(!pVCpu->hmr0.s.vmx.fLazyMsrs);
    5965     }
    5966     else
    5967         pVCpu->hmr0.s.vmx.fLazyMsrs = 0;
    5968 
    5969     /* Update auto-load/store host MSR values when we re-enter VT-x (as we could be on a different CPU). */
    5970     pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
    5971 
    5972     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatEntry);
    5973     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatImportGuestState);
    5974     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExportGuestState);
    5975     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatPreExit);
    5976     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling);
    5977     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitIO);
    5978     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitMovCRx);
    5979     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitXcptNmi);
    5980     STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATS(pVCpu).StatExitVmentry);
    5981     STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchLongJmpToR3);
    5982 
    5983     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    5984 
    5985     /** @todo This partially defeats the purpose of having preemption hooks.
    5986      *  The problem is, deregistering the hooks should be moved to a place that
    5987      *  lasts until the EMT is about to be destroyed, not done every time we
    5988      *  leave HM context.
    5989      */
    5990     int rc = vmxHCClearVmcs(pVmcsInfo);
    5991     AssertRCReturn(rc, rc);
    5992 
    5993 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    5994     /*
    5995      * A valid shadow VMCS is made active as part of VM-entry. It is necessary to
    5996      * clear a shadow VMCS before allowing that VMCS to become active on another
    5997      * logical processor. We may or may not be importing guest state which clears
    5998      * it, so cover for it here.
    5999      *
    6000      * See Intel spec. 24.11.1 "Software Use of Virtual-Machine Control Structures".
    6001      */
    6002     if (   pVmcsInfo->pvShadowVmcs
    6003         && pVmcsInfo->fShadowVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    6004     {
    6005         rc = vmxHCClearShadowVmcs(pVmcsInfo);
    6006         AssertRCReturn(rc, rc);
    6007     }
    6008 
    6009     /*
    6010      * Flag that we need to re-export the host state if we switch to this VMCS before
    6011      * executing guest or nested-guest code.
    6012      */
    6013     pVmcsInfo->idHostCpuState = NIL_RTCPUID;
    6014 #endif
    6015 
    6016     Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
    6017     NOREF(idCpu);
    6018     return VINF_SUCCESS;
    6019 }
    6020 
    6021 
    6022 /**
    6023  * Leaves the VT-x session.
    6024  *
    6025  * @returns VBox status code.
    6026  * @param   pVCpu   The cross context virtual CPU structure.
    6027  *
    6028  * @remarks No-long-jmp zone!!!
    6029  */
    6030 static int vmxHCLeaveSession(PVMCPUCC pVCpu)
    6031 {
    6032     HM_DISABLE_PREEMPT(pVCpu);
    6033     HMVMX_ASSERT_CPU_SAFE(pVCpu);
    6034     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    6035     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    6036 
    6037     /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
    6038        and had already done this from VMXR0ThreadCtxCallback(). */
    6039     if (!pVCpu->hmr0.s.fLeaveDone)
    6040     {
    6041         int rc2 = vmxHCLeave(pVCpu, true /* fImportState */);
    6042         AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
    6043         pVCpu->hmr0.s.fLeaveDone = true;
    6044     }
    6045     Assert(!pVCpu->cpum.GstCtx.fExtrn);
    6046 
    6047     /*
    6048      * !!! IMPORTANT !!!
    6049      * If you modify code here, make sure to check whether VMXR0CallRing3Callback() needs to be updated too.
    6050      */
    6051 
    6052     /* Deregister the hook now that we've left HM context, before re-enabling preemption. */
    6053     /** @todo Deregistering here means we need to VMCLEAR always
    6054      *        (longjmp/exit-to-r3) in VT-x, which is not efficient; eliminate the
    6055      *        need for calling VMMR0ThreadCtxHookDisable here! */
    6056     VMMR0ThreadCtxHookDisable(pVCpu);
    6057 
    6058     /* Leave HM context. This takes care of local init (term) and deregistering the longjmp-to-ring-3 callback. */
    6059     int rc = HMR0LeaveCpu(pVCpu);
    6060     HM_RESTORE_PREEMPT();
    6061     return rc;
    6062 }
    6063 
    6064 
    6065 /**
    6066  * Does the necessary state syncing before doing a longjmp to ring-3.
    6067  *
    6068  * @returns VBox status code.
    6069  * @param   pVCpu   The cross context virtual CPU structure.
    6070  *
    6071  * @remarks No-long-jmp zone!!!
    6072  */
    6073 DECLINLINE(int) vmxHCLongJmpToRing3(PVMCPUCC pVCpu)
    6074 {
    6075     return vmxHCLeaveSession(pVCpu);
    6076 }
    6077 
    6078 
    6079 /**
    6080  * Take necessary actions before going back to ring-3.
    6081  *
    6082  * An action requires us to go back to ring-3. This function does the necessary
    6083  * steps before we can safely return to ring-3. This is not the same as longjmps
    6084  * to ring-3; this is voluntary and prepares the guest so it may continue
    6085  * executing outside HM (recompiler/IEM).
    6086  *
    6087  * @returns VBox status code.
    6088  * @param   pVCpu   The cross context virtual CPU structure.
    6089  * @param   rcExit  The reason for exiting to ring-3. Can be
    6090  *                  VINF_VMM_UNKNOWN_RING3_CALL.
    6091  */
    6092 static int vmxHCExitToRing3(PVMCPUCC pVCpu, VBOXSTRICTRC rcExit)
    6093 {
    6094     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    6095 
    6096     PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    6097     if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
    6098     {
    6099         VMXGetCurrentVmcs(&VCPU_2_VMXSTATE(pVCpu).vmx.LastError.HCPhysCurrentVmcs);
    6100         VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32VmcsRev   = *(uint32_t *)pVmcsInfo->pvVmcs;
    6101         VCPU_2_VMXSTATE(pVCpu).vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
    6102         /* LastError.idCurrentCpu was updated in vmxHCPreRunGuestCommitted(). */
    6103     }
    6104 
    6105     /* Please, no longjumps here (any log flushing could jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
    6106     VMMRZCallRing3Disable(pVCpu);
    6107     Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
    6108 
    6109     /*
    6110      * Convert any pending HM events back to TRPM due to premature exits to ring-3.
    6111      * We need to do this only on returns to ring-3 and not for longjmps to ring-3.
    6112      *
    6113      * This is because execution may continue from ring-3 and we would need to inject
    6114      * the event from there (hence place it back in TRPM).
    6115      */
    6116     if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
    6117     {
    6118         vmxHCPendingEventToTrpmTrap(pVCpu);
    6119         Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    6120 
    6121         /* Clear the events from the VMCS. */
    6122         int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);    AssertRC(rc);
    6123         rc     = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0);         AssertRC(rc);
    6124     }
    6125 #ifdef VBOX_STRICT
    6126     /*
    6127      * We check for rcExit here since for errors like VERR_VMX_UNABLE_TO_START_VM (which are
    6128      * fatal), we don't care about verifying duplicate injection of events. Errors like
    6129      * VERR_EM_INTERPRET are converted to their VINF_* counterparts -prior- to calling this
    6130      * function so those should and will be checked below.
    6131      */
    6132     else if (RT_SUCCESS(rcExit))
    6133     {
    6134         /*
    6135          * Ensure we don't accidentally clear a pending HM event without clearing the VMCS.
    6136          * This can be pretty hard to debug otherwise, interrupts might get injected twice
    6137          * occasionally, see @bugref{9180#c42}.
    6138          *
    6139          * However, if the VM-entry failed, any VM entry-interruption info. field would
    6140          * be left unmodified as the event would not have been injected to the guest. In
    6141          * such cases, don't assert, we're not going to continue guest execution anyway.
    6142          */
    6143         uint32_t uExitReason;
    6144         uint32_t uEntryIntInfo;
    6145         int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
    6146         rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &uEntryIntInfo);
    6147         AssertRC(rc);
    6148         AssertMsg(VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason) || !VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo),
    6149                   ("uExitReason=%#RX32 uEntryIntInfo=%#RX32 rcExit=%d\n", uExitReason, uEntryIntInfo, VBOXSTRICTRC_VAL(rcExit)));
    6150     }
    6151 #endif
    6152 
    6153     /*
    6154      * Clear the interrupt-window and NMI-window VMCS controls as we could have got
    6155      * a VM-exit with higher priority than interrupt-window or NMI-window VM-exits
    6156      * (e.g. TPR below threshold).
    6157      */
    6158     if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    6159     {
    6160         vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
    6161         vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
    6162     }
    6163 
    6164     /* If we're emulating an instruction, we shouldn't have any TRPM traps pending,
    6165        and if we're injecting an event we should have a TRPM trap pending. */
    6166     AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
    6167 #ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
    6168     AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
    6169 #endif
    6170 
    6171     /* Save guest state and restore host state bits. */
    6172     int rc = vmxHCLeaveSession(pVCpu);
    6173     AssertRCReturn(rc, rc);
    6174     STAM_COUNTER_DEC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchLongJmpToR3);
    6175 
    6176     /* Thread-context hooks are unregistered at this point!!! */
    6177     /* Ring-3 callback notifications are unregistered at this point!!! */
    6178 
    6179     /* Sync recompiler state. */
    6180     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    6181     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
    6182                              | CPUM_CHANGED_LDTR
    6183                              | CPUM_CHANGED_GDTR
    6184                              | CPUM_CHANGED_IDTR
    6185                              | CPUM_CHANGED_TR
    6186                              | CPUM_CHANGED_HIDDEN_SEL_REGS);
    6187     if (   pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging
    6188         && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
    6189         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    6190 
    6191     Assert(!pVCpu->hmr0.s.fClearTrapFlag);
    6192 
    6193     /* Update the exit-to-ring-3 reason. */
    6194     VCPU_2_VMXSTATE(pVCpu).rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
    6195 
    6196     /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
    6197     if (   rcExit != VINF_EM_RAW_INTERRUPT
    6198         || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    6199     {
    6200         Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL));
    6201         ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    6202     }
    6203 
    6204     STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchExitToR3);
    6205     VMMRZCallRing3Enable(pVCpu);
    6206     return rc;
    6207 }
    6208 
    6209 
    6210 /**
    6211  * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
    6212  * stack.
    6213  *
    6214  * @returns Strict VBox status code (i.e. informational status codes too).
    6215  * @retval  VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
    6216  * @param   pVCpu   The cross context virtual CPU structure.
    6217  * @param   uValue  The value to push to the guest stack.
    6218  */
    6219 static VBOXSTRICTRC vmxHCRealModeGuestStackPush(PVMCPUCC pVCpu, uint16_t uValue)
    6220 {
    6221     /*
    6222      * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
    6223      * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
    6224      * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
    6225      */
    6226     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    6227     if (pCtx->sp == 1)
    6228         return VINF_EM_RESET;
    6229     pCtx->sp -= sizeof(uint16_t);       /* May wrap around, which is expected behaviour. */
    6230     int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
    6231     AssertRC(rc);
    6232     return rc;
    6233 }
    6234 #endif /* IN_RING0 */
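
A side note on the wraparound behaviour above: a minimal, self-contained sketch (hypothetical names, not VBox code) of a 16-bit push with segment wraparound and the sp == 1 triple-fault check:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical illustration of the vmxHCRealModeGuestStackPush semantics. */
    static int PushWord(uint16_t *pSp, uint8_t *pbStackSeg, uint16_t uValue)
    {
        if (*pSp == 1)             /* The word would straddle the segment wrap: triple fault. */
            return -1;
        *pSp -= sizeof(uint16_t);  /* sp == 0 wraps to 0xfffe, which is architecturally fine. */
        pbStackSeg[*pSp]                 = (uint8_t)(uValue & 0xff);
        pbStackSeg[(uint16_t)(*pSp + 1)] = (uint8_t)(uValue >> 8);
        return 0;
    }

    int main(void)
    {
        static uint8_t abSeg[0x10000];     /* 64 KiB stack segment. */
        uint16_t       sp = 0;             /* Pushing from sp == 0 wraps to 0xfffe. */
        int rc = PushWord(&sp, abSeg, 0x1234);
        printf("rc=%d sp=%#x\n", rc, sp);  /* rc=0 sp=0xfffe */
        return 0;
    }
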
    6235 
    62364132/**
    62374133 * Injects an event into the guest upon VM-entry by updating the relevant fields
     
    62594155    Assert(pfIntrState);
    62604156
    6261 #ifndef IN_RING0
     4157#ifdef IN_NEM_DARWIN
    62624158    RT_NOREF(fIsNestedGuest, fStepping, pfIntrState);
    62634159#endif
     
    63274223    if (CPUMIsGuestInRealModeEx(pCtx))     /* CR0.PE bit changes are always intercepted, so it's up to date. */
    63284224    {
    6329 #ifdef IN_RING0
     4225#ifndef IN_NEM_DARWIN
    63304226        if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
    63314227#endif
     
    63394235            u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
    63404236        }
    6341 #ifdef IN_RING0
     4237#ifndef IN_NEM_DARWIN
    63424238        else
    63434239        {
     
    64094305            /* Construct the stack frame for the interrupt/exception handler. */
    64104306            VBOXSTRICTRC rcStrict;
    6411             rcStrict = vmxHCRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
     4307            rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
    64124308            if (rcStrict == VINF_SUCCESS)
    64134309            {
    6414                 rcStrict = vmxHCRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
     4310                rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
    64154311                if (rcStrict == VINF_SUCCESS)
    6416                     rcStrict = vmxHCRealModeGuestStackPush(pVCpu, uGuestIp);
     4312                    rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
    64174313            }
    64184314
     
    66754571{
    66764572    HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    6677 #ifdef IN_RING0
     4573#ifndef IN_NEM_DARWIN
    66784574    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    66794575#endif
     
    67954691
    67964692
    6797 #ifdef IN_RING0
    6798 /**
    6799  * Exports the guest state into the VMCS guest-state area.
    6800  *
    6801  * This will typically be done before VM-entry when the guest-CPU state and the
    6802  * VMCS state may potentially be out of sync.
    6803  *
    6804  * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
    6805  * VM-entry controls.
    6806  * Sets up the appropriate VMX non-root function to execute guest code based on
    6807  * the guest CPU mode.
    6808  *
    6809  * @returns VBox strict status code.
    6810  * @retval  VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
    6811  *          without unrestricted guest execution and the VMMDev is not presently
    6812  *          mapped (e.g. EFI32).
    6813  *
    6814  * @param   pVCpu           The cross context virtual CPU structure.
    6815  * @param   pVmxTransient   The VMX-transient structure.
    6816  *
    6817  * @remarks No-long-jump zone!!!
    6818  */
    6819 static VBOXSTRICTRC vmxHCExportGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    6820 {
    6821     AssertPtr(pVCpu);
    6822     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    6823     LogFlowFunc(("pVCpu=%p\n", pVCpu));
    6824 
    6825     STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatExportGuestState, x);
    6826 
    6827     /*
    6828      * Determine real-on-v86 mode.
    6829      * Used when the guest is in real-mode and unrestricted guest execution is not used.
    6830      */
    6831     PVMXVMCSINFOSHARED pVmcsInfoShared = pVmxTransient->pVmcsInfo->pShared;
    6832     if (    pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest
    6833         || !CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
    6834         pVmcsInfoShared->RealMode.fRealOnV86Active = false;
    6835     else
    6836     {
    6837         Assert(!pVmxTransient->fIsNestedGuest);
    6838         pVmcsInfoShared->RealMode.fRealOnV86Active = true;
    6839     }
    6840 
    6841     /*
    6842      * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
    6843      * Ideally, assert that the cross-dependent bits are up-to-date at the point of use.
    6844      */
    6845     int rc = vmxHCExportGuestEntryExitCtls(pVCpu, pVmxTransient);
    6846     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6847 
    6848     rc = vmxHCExportGuestCR0(pVCpu, pVmxTransient);
    6849     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6850 
    6851     VBOXSTRICTRC rcStrict = vmxHCExportGuestCR3AndCR4(pVCpu, pVmxTransient);
    6852     if (rcStrict == VINF_SUCCESS)
    6853     { /* likely */ }
    6854     else
    6855     {
    6856         Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
    6857         return rcStrict;
    6858     }
    6859 
    6860     rc = vmxHCExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
    6861     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6862 
    6863     rc = vmxHCExportGuestMsrs(pVCpu, pVmxTransient);
    6864     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6865 
    6866     vmxHCExportGuestApicTpr(pVCpu, pVmxTransient);
    6867     vmxHCExportGuestXcptIntercepts(pVCpu, pVmxTransient);
    6868     vmxHCExportGuestRip(pVCpu);
    6869     vmxHCExportGuestRsp(pVCpu);
    6870     vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    6871 
    6872     rc = vmxHCExportGuestHwvirtState(pVCpu, pVmxTransient);
    6873     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    6874 
    6875     /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
    6876     ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~(  (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
    6877                                                   |  HM_CHANGED_GUEST_CR2
    6878                                                   | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
    6879                                                   |  HM_CHANGED_GUEST_X87
    6880                                                   |  HM_CHANGED_GUEST_SSE_AVX
    6881                                                   |  HM_CHANGED_GUEST_OTHER_XSAVE
    6882                                                   |  HM_CHANGED_GUEST_XCRx
    6883                                                   |  HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
    6884                                                   |  HM_CHANGED_GUEST_SYSCALL_MSRS   /* Part of lazy or auto load-store MSRs. */
    6885                                                   |  HM_CHANGED_GUEST_TSC_AUX
    6886                                                   |  HM_CHANGED_GUEST_OTHER_MSRS
    6887                                                   | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
    6888 
    6889     STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExportGuestState, x);
    6890     return rc;
    6891 }
    6892 
    6893 
    6894 /**
    6895  * Exports the state shared between the host and guest into the VMCS.
    6896  *
    6897  * @param   pVCpu           The cross context virtual CPU structure.
    6898  * @param   pVmxTransient   The VMX-transient structure.
    6899  *
    6900  * @remarks No-long-jump zone!!!
    6901  */
    6902 static void vmxHCExportSharedState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    6903 {
    6904     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    6905     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    6906 
    6907     if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
    6908     {
    6909         int rc = vmxHCExportSharedDebugState(pVCpu, pVmxTransient);
    6910         AssertRC(rc);
    6911         VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
    6912 
    6913         /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
    6914         if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
    6915             vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    6916     }
    6917 
    6918     if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
    6919     {
    6920         vmxHCLazyLoadGuestMsrs(pVCpu);
    6921         VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
    6922     }
    6923 
    6924     AssertMsg(!(VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
    6925               ("fCtxChanged=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).fCtxChanged));
    6926 }
    6927 
    6928 
    6929 /**
    6930  * Worker for loading the guest-state bits in the inner VT-x execution loop.
    6931  *
    6932  * @returns Strict VBox status code (i.e. informational status codes too).
    6933  * @retval  VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
    6934  *          without unrestricted guest execution and the VMMDev is not presently
    6935  *          mapped (e.g. EFI32).
    6936  *
    6937  * @param   pVCpu           The cross context virtual CPU structure.
    6938  * @param   pVmxTransient   The VMX-transient structure.
    6939  *
    6940  * @remarks No-long-jump zone!!!
    6941  */
    6942 static VBOXSTRICTRC vmxHCExportGuestStateOptimal(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    6943 {
    6944     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    6945     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    6946 
    6947 #ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
    6948     ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    6949 #endif
    6950 
    6951     /*
    6952      * For many VM-exits only RIP/RSP/RFLAGS (and HWVIRT state when executing a nested-guest)
    6953      * changes. First try to export only these without going through all other changed-flag checks.
    6954      */
    6955     VBOXSTRICTRC   rcStrict;
    6956     uint64_t const fCtxMask     = HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE;
    6957     uint64_t const fMinimalMask = HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT;
    6958     uint64_t const fCtxChanged  = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged);
    6959 
    6960     /* If only RIP/RSP/RFLAGS/HWVIRT changed, export only those (quicker, happens more often). */
    6961     if (    (fCtxChanged & fMinimalMask)
    6962         && !(fCtxChanged & (fCtxMask & ~fMinimalMask)))
    6963     {
    6964         vmxHCExportGuestRip(pVCpu);
    6965         vmxHCExportGuestRsp(pVCpu);
    6966         vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    6967         rcStrict = vmxHCExportGuestHwvirtState(pVCpu, pVmxTransient);
    6968         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExportMinimal);
    6969     }
    6970     /* If anything else also changed, go through the full export routine and export as required. */
    6971     else if (fCtxChanged & fCtxMask)
    6972     {
    6973         rcStrict = vmxHCExportGuestState(pVCpu, pVmxTransient);
    6974         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    6975         { /* likely */ }
    6976         else
    6977         {
    6978             AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("Failed to export guest state! rc=%Rrc\n",
    6979                                                            VBOXSTRICTRC_VAL(rcStrict)));
    6980             Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    6981             return rcStrict;
    6982         }
    6983         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExportFull);
    6984     }
    6985     /* Nothing changed, nothing to load here. */
    6986     else
    6987         rcStrict = VINF_SUCCESS;
    6988 
    6989 #ifdef VBOX_STRICT
    6990     /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
    6991     uint64_t const fCtxChangedCur = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged);
    6992     AssertMsg(!(fCtxChangedCur & fCtxMask), ("fCtxChangedCur=%#RX64\n", fCtxChangedCur));
    6993 #endif
    6994     return rcStrict;
    6995 }
    6996 #endif
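
The fast path in vmxHCExportGuestStateOptimal above is a two-part mask test: at least one of the "minimal" bits must be dirty and no bit outside the minimal set may be. A tiny sketch with hypothetical flag values (not the real HM_CHANGED_* encoding):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define F_RIP    UINT64_C(0x01)   /* hypothetical changed-state flags */
    #define F_RSP    UINT64_C(0x02)
    #define F_RFLAGS UINT64_C(0x04)
    #define F_CR0    UINT64_C(0x08)
    #define F_ALL    UINT64_C(0x0f)

    static bool CanUseFastExport(uint64_t fChanged)
    {
        uint64_t const fMinimal = F_RIP | F_RSP | F_RFLAGS;
        return (fChanged & fMinimal)                   /* something minimal is dirty...  */
            && !(fChanged & (F_ALL & ~fMinimal));      /* ...and nothing non-minimal is. */
    }

    int main(void)
    {
        printf("%d\n", CanUseFastExport(F_RIP));          /* 1: fast path   */
        printf("%d\n", CanUseFastExport(F_RIP | F_CR0));  /* 0: full export */
        return 0;
    }
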
    6997 
    6998 
    69994693/**
    70004694 * Tries to determine what part of the guest-state VT-x has deemed invalid
     
    70214715    uint32_t uError = VMX_IGS_ERROR;
    70224716    uint32_t u32IntrState = 0;
    7023 #ifdef IN_RING0
     4717#ifndef IN_NEM_DARWIN
    70244718    PVMCC    pVM    = pVCpu->CTX_SUFF(pVM);
    70254719    bool const fUnrestrictedGuest = VM_IS_VMX_UNRESTRICTED_GUEST(pVM);
     
    71524846            HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
    71534847
    7154 #ifdef IN_RING0
     4848#ifndef IN_NEM_DARWIN
    71554849        rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
    71564850        AssertRC(rc);
     
    75165210        }
    75175211
    7518 #ifdef IN_RING0
     5212#ifndef IN_NEM_DARWIN
    75195213        /* VMCS link pointer. */
    75205214        rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
     
    75715265#undef HMVMX_CHECK_BREAK
    75725266}
    7573 
    7574 
    7575 #ifdef IN_RING0
    7576 /**
    7577  * Map the APIC-access page for virtualizing APIC accesses.
    7578  *
    7579  * This can cause a longjumps to R3 due to the acquisition of the PGM lock. Hence,
    7580  * this not done as part of exporting guest state, see @bugref{8721}.
    7581  *
    7582  * @returns VBox status code.
    7583  * @param   pVCpu   The cross context virtual CPU structure.
    7584  */
    7585 static int vmxHCMapHCApicAccessPage(PVMCPUCC pVCpu)
    7586 {
    7587     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    7588     uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
    7589 
    7590     Assert(PDMHasApic(pVM));
    7591     Assert(u64MsrApicBase);
    7592 
    7593     RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
    7594     Log4Func(("Mappping HC APIC-access page at %#RGp\n", GCPhysApicBase));
    7595 
    7596     /* Unalias the existing mapping. */
    7597     int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
    7598     AssertRCReturn(rc, rc);
    7599 
    7600     /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
    7601     Assert(pVM->hmr0.s.vmx.HCPhysApicAccess != NIL_RTHCPHYS);
    7602     rc = IOMR0MmioMapMmioHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hmr0.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
    7603     AssertRCReturn(rc, rc);
    7604 
    7605     /* Update the per-VCPU cache of the APIC base MSR. */
    7606     VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase = u64MsrApicBase;
    7607     return VINF_SUCCESS;
    7608 }
    7609 
    7610 
    7611 /**
    7612  * Worker function passed to RTMpOnSpecific() that is to be called on the target
    7613  * CPU.
    7614  *
    7615  * @param   idCpu       The ID for the CPU the function is called on.
    7616  * @param   pvUser1     Null, not used.
    7617  * @param   pvUser2     Null, not used.
    7618  */
    7619 static DECLCALLBACK(void) hmR0DispatchHostNmi(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    7620 {
    7621     RT_NOREF3(idCpu, pvUser1, pvUser2);
    7622     VMXDispatchHostNmi();
    7623 }
    7624 
    7625 
    7626 /**
    7627  * Dispatching an NMI on the host CPU that received it.
    7628  *
    7629  * @returns VBox status code.
    7630  * @param   pVCpu       The cross context virtual CPU structure.
    7631  * @param   pVmcsInfo   The VMCS info. object corresponding to the VMCS that was
    7632  *                      executing when receiving the host NMI in VMX non-root
    7633  *                      operation.
    7634  */
    7635 static int vmxHCExitHostNmi(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
    7636 {
    7637     RTCPUID const idCpu = pVmcsInfo->idHostCpuExec;
    7638     Assert(idCpu != NIL_RTCPUID);
    7639 
    7640     /*
    7641      * We don't want to delay dispatching the NMI any more than we have to. However,
    7642      * we have already chosen -not- to dispatch NMIs when interrupts were still disabled
    7643      * after executing guest or nested-guest code for the following reasons:
    7644      *
    7645      *   - We would need to perform VMREADs with interrupts disabled, which is orders of
    7646      *     magnitude worse when we run as a nested hypervisor without VMCS shadowing
    7647      *     support from the host hypervisor.
    7648      *
    7649      *   - It affects the common VM-exit scenario and keeps interrupts disabled for a
    7650      *     longer period of time just for handling an edge case like host NMIs which do
    7651      *     not occur nearly as frequently as other VM-exits.
    7652      *
    7653      * Let's cover the most likely scenario first. Check if we are on the target CPU
    7654      * and dispatch the NMI right away. This should be much faster than calling into
    7655      * the RTMpOnSpecific() machinery.
    7656      */
    7657     bool fDispatched = false;
    7658     RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    7659     if (idCpu == RTMpCpuId())
    7660     {
    7661         VMXDispatchHostNmi();
    7662         fDispatched = true;
    7663     }
    7664     ASMSetFlags(fEFlags);
    7665     if (fDispatched)
    7666     {
    7667         STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitHostNmiInGC);
    7668         return VINF_SUCCESS;
    7669     }
    7670 
    7671     /*
    7672      * RTMpOnSpecific() waits until the worker function has run on the target CPU. So
    7673      * there should be no race or recursion even if we are unlucky enough to be preempted
    7674      * (to the target CPU) without dispatching the host NMI above.
    7675      */
    7676     STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitHostNmiInGCIpi);
    7677     return RTMpOnSpecific(idCpu, &hmR0DispatchHostNmi, NULL /* pvUser1 */,  NULL /* pvUser2 */);
    7678 }
    7679 
    7680 
    7681 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    7682 /**
    7683  * Merges the guest with the nested-guest MSR bitmap in preparation of executing the
    7684  * nested-guest using hardware-assisted VMX.
    7685  *
    7686  * @param   pVCpu               The cross context virtual CPU structure.
    7687  * @param   pVmcsInfoNstGst     The nested-guest VMCS info. object.
    7688  * @param   pVmcsInfoGst        The guest VMCS info. object.
    7689  */
    7690 static void vmxHCMergeMsrBitmapNested(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst)
    7691 {
    7692     uint32_t const cbMsrBitmap    = X86_PAGE_4K_SIZE;
    7693     uint64_t       *pu64MsrBitmap = (uint64_t *)pVmcsInfoNstGst->pvMsrBitmap;
    7694     Assert(pu64MsrBitmap);
    7695 
    7696     /*
    7697      * We merge the guest MSR bitmap with the nested-guest MSR bitmap such that any
    7698      * MSR that is intercepted by the guest is also intercepted while executing the
    7699      * nested-guest using hardware-assisted VMX.
    7700      *
    7701      * Note! If the nested-guest is not using an MSR bitmap, every MSR must cause a
    7702      *       nested-guest VM-exit even if the outer guest is not intercepting some
    7703      *       MSRs. We cannot assume the caller has initialized the nested-guest
    7704      *       MSR bitmap in this case.
    7705      *
    7706      *       The nested hypervisor may also switch whether it uses MSR bitmaps for
    7707      *       each of its VM-entry, hence initializing it once per-VM while setting
    7708      *       up the nested-guest VMCS is not sufficient.
    7709      */
    7710     PCVMXVVMCS const pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    7711     if (pVmcsNstGst->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    7712     {
    7713         uint64_t const *pu64MsrBitmapNstGst = (uint64_t const *)&pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap[0];
    7714         uint64_t const *pu64MsrBitmapGst    = (uint64_t const *)pVmcsInfoGst->pvMsrBitmap;
    7715         Assert(pu64MsrBitmapNstGst);
    7716         Assert(pu64MsrBitmapGst);
    7717 
    7718         /** @todo Detect and use EVEX.POR? */
    7719         uint32_t const cFrags = cbMsrBitmap / sizeof(uint64_t);
    7720         for (uint32_t i = 0; i < cFrags; i++)
    7721             pu64MsrBitmap[i] = pu64MsrBitmapNstGst[i] | pu64MsrBitmapGst[i];
    7722     }
    7723     else
    7724         ASMMemFill32(pu64MsrBitmap, cbMsrBitmap, UINT32_C(0xffffffff));
    7725 }
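
For reference, the OR-merge above means an MSR access is intercepted if either bitmap intercepts it, and a set bit means "cause a VM-exit" (which is why the ASMMemFill32 with 0xffffffff intercepts everything). A minimal sketch (hypothetical helper, not a VBox API) of testing a read-intercept bit against the 4K MSR-bitmap layout from the Intel SDM (read bits for MSRs 0x00000000..0x00001fff at offset 0x000, read bits for 0xc0000000..0xc0001fff at 0x400, write bits at 0x800 and 0xc00):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical: one bit per MSR, bit set => the read causes a VM-exit. */
    static bool IsMsrReadIntercepted(uint8_t const *pbMsrBitmap, uint32_t idMsr)
    {
        uint32_t offBase;
        if (idMsr <= UINT32_C(0x00001fff))
            offBase = 0x000;                           /* low MSR read bitmap */
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        {
            offBase = 0x400;                           /* high MSR read bitmap */
            idMsr  -= UINT32_C(0xc0000000);
        }
        else
            return true;                               /* outside both ranges: always exits */
        return (pbMsrBitmap[offBase + idMsr / 8] >> (idMsr % 8)) & 1;
    }
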
    7726 
    7727 
    7728 /**
    7729  * Merges the guest VMCS in to the nested-guest VMCS controls in preparation of
    7730  * hardware-assisted VMX execution of the nested-guest.
    7731  *
    7732  * For a guest, we don't modify these controls once we set up the VMCS and hence
    7733  * this function is never called.
    7734  *
    7735  * For nested-guests, since the nested hypervisor provides these controls on every
    7736  * nested-guest VM-entry and could potentially change them each time, we need to
    7737  * merge them before every nested-guest VM-entry.
    7738  *
    7739  * @returns VBox status code.
    7740  * @param   pVCpu   The cross context virtual CPU structure.
    7741  */
    7742 static int vmxHCMergeVmcsNested(PVMCPUCC pVCpu)
    7743 {
    7744     PVMCC const         pVM          = pVCpu->CTX_SUFF(pVM);
    7745     PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
    7746     PCVMXVVMCS const    pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    7747 
    7748     /*
    7749      * Merge the controls with the requirements of the guest VMCS.
    7750      *
    7751      * We do not need to validate the nested-guest VMX features specified in the nested-guest
    7752      * VMCS with the features supported by the physical CPU as it's already done by the
    7753      * VMLAUNCH/VMRESUME instruction emulation.
    7754      *
    7755      * This is because the VMX features exposed by CPUM (through CPUID/MSRs) to the guest are
    7756      * derived from the VMX features supported by the physical CPU.
    7757      */
    7758 
    7759     /* Pin-based VM-execution controls. */
    7760     uint32_t const u32PinCtls = pVmcsNstGst->u32PinCtls | pVmcsInfoGst->u32PinCtls;
    7761 
    7762     /* Processor-based VM-execution controls. */
    7763     uint32_t       u32ProcCtls = (pVmcsNstGst->u32ProcCtls  & ~VMX_PROC_CTLS_USE_IO_BITMAPS)
    7764                                | (pVmcsInfoGst->u32ProcCtls & ~(  VMX_PROC_CTLS_INT_WINDOW_EXIT
    7765                                                                 | VMX_PROC_CTLS_NMI_WINDOW_EXIT
    7766                                                                 | VMX_PROC_CTLS_MOV_DR_EXIT
    7767                                                                 | VMX_PROC_CTLS_USE_TPR_SHADOW
    7768                                                                 | VMX_PROC_CTLS_MONITOR_TRAP_FLAG));
    7769 
    7770     /* Secondary processor-based VM-execution controls. */
    7771     uint32_t const u32ProcCtls2 = (pVmcsNstGst->u32ProcCtls2  & ~VMX_PROC_CTLS2_VPID)
    7772                                 | (pVmcsInfoGst->u32ProcCtls2 & ~(  VMX_PROC_CTLS2_VIRT_APIC_ACCESS
    7773                                                                   | VMX_PROC_CTLS2_INVPCID
    7774                                                                   | VMX_PROC_CTLS2_VMCS_SHADOWING
    7775                                                                   | VMX_PROC_CTLS2_RDTSCP
    7776                                                                   | VMX_PROC_CTLS2_XSAVES_XRSTORS
    7777                                                                   | VMX_PROC_CTLS2_APIC_REG_VIRT
    7778                                                                   | VMX_PROC_CTLS2_VIRT_INT_DELIVERY
    7779                                                                   | VMX_PROC_CTLS2_VMFUNC));
    7780 
    7781     /*
    7782      * VM-entry controls:
    7783      * These controls contain state that depends on the nested-guest state (primarily
    7784      * the EFER MSR) and are thus not constant between VMLAUNCH/VMRESUME and the nested-guest
    7785      * VM-exit. Although the nested hypervisor cannot change them, we need to in order to
    7786      * properly continue executing the nested-guest if the EFER MSR changes but does not
    7787      * cause a nested-guest VM-exit.
    7788      *
    7789      * VM-exit controls:
    7790      * These controls specify the host state on return. We cannot use the controls from
    7791      * the nested hypervisor state as-is, as they would contain the guest state rather than
    7792      * the host state. Since the host state is subject to change (e.g. preemption, trips
    7793      * to ring-3, longjmp and rescheduling to a different host CPU) they are not constant
    7794      * through VMLAUNCH/VMRESUME and the nested-guest VM-exit.
    7795      *
    7796      * VM-entry MSR-load:
    7797      * The guest MSRs from the VM-entry MSR-load area are already loaded into the guest-CPU
    7798      * context by the VMLAUNCH/VMRESUME instruction emulation.
    7799      *
    7800      * VM-exit MSR-store:
    7801      * The VM-exit emulation will take care of populating the MSRs from the guest-CPU context
    7802      * back into the VM-exit MSR-store area.
    7803      *
    7804      * VM-exit MSR-load areas:
    7805      * This must contain the real host MSRs with hardware-assisted VMX execution. Hence, we
    7806      * can entirely ignore what the nested hypervisor wants to load here.
    7807      */
    7808 
    7809     /*
    7810      * Exception bitmap.
    7811      *
    7812      * We could remove #UD from the guest bitmap and merge it with the nested-guest bitmap
    7813      * here (and avoid doing anything while exporting nested-guest state), but to keep the
    7814      * code more flexible if intercepting exceptions become more dynamic in the future we do
    7815      * it as part of exporting the nested-guest state.
    7816      */
    7817     uint32_t const u32XcptBitmap = pVmcsNstGst->u32XcptBitmap | pVmcsInfoGst->u32XcptBitmap;
    7818 
    7819     /*
    7820      * CR0/CR4 guest/host mask.
    7821      *
    7822      * Modifications by the nested-guest to CR0/CR4 bits owned by the host and the guest must
    7823      * cause VM-exits, so we need to merge them here.
    7824      */
    7825     uint64_t const u64Cr0Mask = pVmcsNstGst->u64Cr0Mask.u | pVmcsInfoGst->u64Cr0Mask;
    7826     uint64_t const u64Cr4Mask = pVmcsNstGst->u64Cr4Mask.u | pVmcsInfoGst->u64Cr4Mask;
    7827 
    7828     /*
    7829      * Page-fault error-code mask and match.
    7830      *
    7831      * Although we require unrestricted guest execution (and thereby nested-paging) for
    7832      * hardware-assisted VMX execution of nested-guests, and thus the outer guest doesn't
    7833      * normally intercept #PFs, it might still intercept them for debugging purposes.
    7834      *
    7835      * If the outer guest is not intercepting #PFs, we can use the nested-guest #PF filters.
    7836      * If the outer guest is intercepting #PFs, we must intercept all #PFs.
    7837      */
    7838     uint32_t u32XcptPFMask;
    7839     uint32_t u32XcptPFMatch;
    7840     if (!(pVmcsInfoGst->u32XcptBitmap & RT_BIT(X86_XCPT_PF)))
    7841     {
    7842         u32XcptPFMask  = pVmcsNstGst->u32XcptPFMask;
    7843         u32XcptPFMatch = pVmcsNstGst->u32XcptPFMatch;
    7844     }
    7845     else
    7846     {
    7847         u32XcptPFMask  = 0;
    7848         u32XcptPFMatch = 0;
    7849     }
    7850 
    7851     /*
    7852      * Pause-Loop exiting.
    7853      */
    7854     /** @todo r=bird: given that both pVM->hm.s.vmx.cPleGapTicks and
    7855      *        pVM->hm.s.vmx.cPleWindowTicks defaults to zero, I cannot see how
    7856      *        this will work... */
    7857     uint32_t const cPleGapTicks    = RT_MIN(pVM->hm.s.vmx.cPleGapTicks,    pVmcsNstGst->u32PleGap);
    7858     uint32_t const cPleWindowTicks = RT_MIN(pVM->hm.s.vmx.cPleWindowTicks, pVmcsNstGst->u32PleWindow);
    7859 
    7860     /*
    7861      * Pending debug exceptions.
    7862      * Currently just copy whatever the nested-guest provides us.
    7863      */
    7864     uint64_t const uPendingDbgXcpts = pVmcsNstGst->u64GuestPendingDbgXcpts.u;
    7865 
    7866     /*
    7867      * I/O Bitmap.
    7868      *
    7869      * We do not use the I/O bitmap that may be provided by the nested hypervisor as we always
    7870      * intercept all I/O port accesses.
    7871      */
    7872     Assert(u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
    7873     Assert(!(u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS));
    7874 
    7875     /*
    7876      * VMCS shadowing.
    7877      *
    7878      * We do not yet expose VMCS shadowing to the guest and thus VMCS shadowing should not be
    7879      * enabled while executing the nested-guest.
    7880      */
    7881     Assert(!(u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING));
    7882 
    7883     /*
    7884      * APIC-access page.
    7885      */
    7886     RTHCPHYS HCPhysApicAccess;
    7887     if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    7888     {
    7889         Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
    7890         RTGCPHYS const GCPhysApicAccess = pVmcsNstGst->u64AddrApicAccess.u;
    7891 
    7892         /** @todo NSTVMX: This is not really correct but currently is required to make
    7893          *        things work. We need to re-enable the page handler when we fallback to
    7894          *        IEM execution of the nested-guest! */
    7895         PGMHandlerPhysicalPageTempOff(pVM, GCPhysApicAccess, GCPhysApicAccess);
    7896 
    7897         void          *pvPage;
    7898         PGMPAGEMAPLOCK PgLockApicAccess;
    7899         int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysApicAccess, &pvPage, &PgLockApicAccess);
    7900         if (RT_SUCCESS(rc))
    7901         {
    7902             rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysApicAccess, &HCPhysApicAccess);
    7903             AssertMsgRCReturn(rc, ("Failed to get host-physical address for APIC-access page at %#RGp\n", GCPhysApicAccess), rc);
    7904 
    7905             /** @todo Handle proper releasing of page-mapping lock later. */
    7906             PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &PgLockApicAccess);
    7907         }
    7908         else
    7909             return rc;
    7910     }
    7911     else
    7912         HCPhysApicAccess = 0;
    7913 
    7914     /*
    7915      * Virtual-APIC page and TPR threshold.
    7916      */
    7917     RTHCPHYS HCPhysVirtApic;
    7918     uint32_t u32TprThreshold;
    7919     if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    7920     {
    7921         Assert(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW);
    7922         RTGCPHYS const GCPhysVirtApic = pVmcsNstGst->u64AddrVirtApic.u;
    7923 
    7924         void          *pvPage;
    7925         PGMPAGEMAPLOCK PgLockVirtApic;
    7926         int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysVirtApic, &pvPage, &PgLockVirtApic);
    7927         if (RT_SUCCESS(rc))
    7928         {
    7929             rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysVirtApic, &HCPhysVirtApic);
    7930             AssertMsgRCReturn(rc, ("Failed to get host-physical address for virtual-APIC page at %#RGp\n", GCPhysVirtApic), rc);
    7931 
    7932             /** @todo Handle proper releasing of page-mapping lock later. */
    7933             PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &PgLockVirtApic);
    7934         }
    7935         else
    7936             return rc;
    7937 
    7938         u32TprThreshold = pVmcsNstGst->u32TprThreshold;
    7939     }
    7940     else
    7941     {
    7942         HCPhysVirtApic  = 0;
    7943         u32TprThreshold = 0;
    7944 
    7945         /*
    7946          * We must make sure CR8 reads/writes cause VM-exits when TPR shadowing is not
    7947          * used by the nested hypervisor. Preventing MMIO accesses to the physical APIC will
    7948          * be taken care of by EPT/shadow paging.
    7949          */
    7950         if (pVM->hmr0.s.fAllow64BitGuests)
    7951             u32ProcCtls |= VMX_PROC_CTLS_CR8_STORE_EXIT
    7952                         |  VMX_PROC_CTLS_CR8_LOAD_EXIT;
    7953     }
    7954 
    7955     /*
    7956      * Validate basic assumptions.
    7957      */
    7958     PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
    7959     Assert(VM_IS_VMX_UNRESTRICTED_GUEST(pVM));
    7960     Assert(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
    7961     Assert(hmGetVmxActiveVmcsInfo(pVCpu) == pVmcsInfoNstGst);
    7962 
    7963     /*
    7964      * Commit it to the nested-guest VMCS.
    7965      */
    7966     int rc = VINF_SUCCESS;
    7967     if (pVmcsInfoNstGst->u32PinCtls != u32PinCtls)
    7968         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, u32PinCtls);
    7969     if (pVmcsInfoNstGst->u32ProcCtls != u32ProcCtls)
    7970         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, u32ProcCtls);
    7971     if (pVmcsInfoNstGst->u32ProcCtls2 != u32ProcCtls2)
    7972         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, u32ProcCtls2);
    7973     if (pVmcsInfoNstGst->u32XcptBitmap != u32XcptBitmap)
    7974         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
    7975     if (pVmcsInfoNstGst->u64Cr0Mask != u64Cr0Mask)
    7976         rc |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
    7977     if (pVmcsInfoNstGst->u64Cr4Mask != u64Cr4Mask)
    7978         rc |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
    7979     if (pVmcsInfoNstGst->u32XcptPFMask != u32XcptPFMask)
    7980         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, u32XcptPFMask);
    7981     if (pVmcsInfoNstGst->u32XcptPFMatch != u32XcptPFMatch)
    7982         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, u32XcptPFMatch);
    7983     if (   !(u32ProcCtls  & VMX_PROC_CTLS_PAUSE_EXIT)
    7984         &&  (u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
    7985     {
    7986         Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
    7987         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_GAP, cPleGapTicks);
    7988         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PLE_WINDOW, cPleWindowTicks);
    7989     }
    7990     if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    7991     {
    7992         rc |= VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
    7993         rc |= VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
    7994     }
    7995     if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    7996         rc |= VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
    7997     rc |= VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, uPendingDbgXcpts);
    7998     AssertRC(rc);
    7999 
    8000     /*
    8001      * Update the nested-guest VMCS cache.
    8002      */
    8003     pVmcsInfoNstGst->u32PinCtls     = u32PinCtls;
    8004     pVmcsInfoNstGst->u32ProcCtls    = u32ProcCtls;
    8005     pVmcsInfoNstGst->u32ProcCtls2   = u32ProcCtls2;
    8006     pVmcsInfoNstGst->u32XcptBitmap  = u32XcptBitmap;
    8007     pVmcsInfoNstGst->u64Cr0Mask     = u64Cr0Mask;
    8008     pVmcsInfoNstGst->u64Cr4Mask     = u64Cr4Mask;
    8009     pVmcsInfoNstGst->u32XcptPFMask  = u32XcptPFMask;
    8010     pVmcsInfoNstGst->u32XcptPFMatch = u32XcptPFMatch;
    8011     pVmcsInfoNstGst->HCPhysVirtApic = HCPhysVirtApic;
    8012 
    8013     /*
    8014      * We need to flush the TLB if we are switching the APIC-access page address.
    8015      * See Intel spec. 28.3.3.4 "Guidelines for Use of the INVEPT Instruction".
    8016      */
    8017     if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    8018         VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedNstGstFlushTlb = true;
    8019 
    8020     /*
    8021      * MSR bitmap.
    8022      *
    8023      * The MSR bitmap address has already been initialized while setting up the nested-guest
    8024      * VMCS, here we need to merge the MSR bitmaps.
    8025      */
    8026     if (u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    8027         vmxHCMergeMsrBitmapNested(pVCpu, pVmcsInfoNstGst, pVmcsInfoGst);
    8028 
    8029     return VINF_SUCCESS;
    8030 }
    8031 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
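
The control merging above keeps returning to one shape: take what the nested hypervisor asked for, drop the bits only the outer VMM may own, and OR in what our own guest VMCS requires; the result is then written to the VMCS only where it differs from the cached value. A minimal sketch of the merge rule (hypothetical names and masks, not VBox code):

    #include <stdint.h>

    static uint32_t MergeCtls(uint32_t uNstGstCtls, uint32_t fNstGstDrop,
                              uint32_t uGstCtls,    uint32_t fGstDrop)
    {
        return (uNstGstCtls & ~fNstGstDrop)  /* nested hypervisor's controls, minus forbidden bits */
             | (uGstCtls    & ~fGstDrop);    /* plus what our own guest VMCS needs */
    }

Note also the RT_MIN() used for the PLE gap/window: with the zero defaults called out in the @todo, RT_MIN(0, x) is always 0, so the nested hypervisor's values are effectively overridden, which is exactly the concern raised there.
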
    8032 
    8033 
    8034 /**
    8035  * Runs the guest code using hardware-assisted VMX the normal way.
    8036  *
    8037  * @returns VBox status code.
    8038  * @param   pVCpu       The cross context virtual CPU structure.
    8039  * @param   pcLoops     Pointer to the number of executed loops.
    8040  */
    8041 static VBOXSTRICTRC vmxHCRunGuestCodeNormal(PVMCPUCC pVCpu, uint32_t *pcLoops)
    8042 {
    8043     uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
    8044     Assert(pcLoops);
    8045     Assert(*pcLoops <= cMaxResumeLoops);
    8046     Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
    8047 
    8048 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    8049     /*
    8050      * Switch to the guest VMCS as we may have transitioned from executing the nested-guest
    8051      * without leaving ring-0. Otherwise, if we came from ring-3 we would have loaded the
    8052      * guest VMCS while entering the VMX ring-0 session.
    8053      */
    8054     if (pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
    8055     {
    8056         int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, false /* fSwitchToNstGstVmcs */);
    8057         if (RT_SUCCESS(rc))
    8058         { /* likely */ }
    8059         else
    8060         {
    8061             LogRelFunc(("Failed to switch to the guest VMCS. rc=%Rrc\n", rc));
    8062             return rc;
    8063         }
    8064     }
    8065 #endif
    8066 
    8067     VMXTRANSIENT VmxTransient;
    8068     RT_ZERO(VmxTransient);
    8069     VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    8070 
    8071     /* Paranoia. */
    8072     Assert(VmxTransient.pVmcsInfo == &pVCpu->hmr0.s.vmx.VmcsInfo);
    8073 
    8074     VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
    8075     for (;;)
    8076     {
    8077         Assert(!HMR0SuspendPending());
    8078         HMVMX_ASSERT_CPU_SAFE(pVCpu);
    8079         STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    8080 
    8081         /*
    8082          * Preparatory work for running guest code; this may force us to
    8083          * return to ring-3.
    8084          *
    8085          * Warning! This bugger disables interrupts on VINF_SUCCESS!
    8086          */
    8087         rcStrict = vmxHCPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
    8088         if (rcStrict != VINF_SUCCESS)
    8089             break;
    8090 
    8091         /* Interrupts are disabled at this point! */
    8092         vmxHCPreRunGuestCommitted(pVCpu, &VmxTransient);
    8093         int rcRun = vmxHCRunGuest(pVCpu, &VmxTransient);
    8094         vmxHCPostRunGuest(pVCpu, &VmxTransient, rcRun);
    8095         /* Interrupts are re-enabled at this point! */
    8096 
    8097         /*
    8098          * Check for errors with running the VM (VMLAUNCH/VMRESUME).
    8099          */
    8100         if (RT_SUCCESS(rcRun))
    8101         { /* very likely */ }
    8102         else
    8103         {
    8104             STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, x);
    8105             vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    8106             return rcRun;
    8107         }
    8108 
    8109         /*
    8110          * Profile the VM-exit.
    8111          */
    8112         AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    8113         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitAll);
    8114         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    8115         STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, &VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    8116         HMVMX_START_EXIT_DISPATCH_PROF();
    8117 
    8118         VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
    8119 
    8120         /*
    8121          * Handle the VM-exit.
    8122          */
    8123 #ifdef HMVMX_USE_FUNCTION_TABLE
    8124         rcStrict = g_aVMExitHandlers[VmxTransient.uExitReason].pfn(pVCpu, &VmxTransient);
    8125 #else
    8126         rcStrict = vmxHCHandleExit(pVCpu, &VmxTransient);
    8127 #endif
    8128         STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    8129         if (rcStrict == VINF_SUCCESS)
    8130         {
    8131             if (++(*pcLoops) <= cMaxResumeLoops)
    8132                 continue;
    8133             STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchMaxResumeLoops);
    8134             rcStrict = VINF_EM_RAW_INTERRUPT;
    8135         }
    8136         break;
    8137     }
    8138 
    8139     STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    8140     return rcStrict;
    8141 }
    8142 
    8143 
    8144 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    8145 /**
    8146  * Runs the nested-guest code using hardware-assisted VMX.
    8147  *
    8148  * @returns VBox status code.
    8149  * @param   pVCpu       The cross context virtual CPU structure.
    8150  * @param   pcLoops     Pointer to the number of executed loops.
    8151  *
    8152  * @sa      vmxHCRunGuestCodeNormal.
    8153  */
    8154 static VBOXSTRICTRC vmxHCRunGuestCodeNested(PVMCPUCC pVCpu, uint32_t *pcLoops)
    8155 {
    8156     uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
    8157     Assert(pcLoops);
    8158     Assert(*pcLoops <= cMaxResumeLoops);
    8159     Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
    8160 
    8161     /*
    8162      * Switch to the nested-guest VMCS as we may have transitioned from executing the
    8163      * guest without leaving ring-0. Otherwise, if we came from ring-3 we would have
    8164      * loaded the nested-guest VMCS while entering the VMX ring-0 session.
    8165      */
    8166     if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
    8167     {
    8168         int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
    8169         if (RT_SUCCESS(rc))
    8170         { /* likely */ }
    8171         else
    8172         {
    8173             LogRelFunc(("Failed to switch to the nested-guest VMCS. rc=%Rrc\n", rc));
    8174             return rc;
    8175         }
    8176     }
    8177 
    8178     VMXTRANSIENT VmxTransient;
    8179     RT_ZERO(VmxTransient);
    8180     VmxTransient.pVmcsInfo      = hmGetVmxActiveVmcsInfo(pVCpu);
    8181     VmxTransient.fIsNestedGuest = true;
    8182 
    8183     /* Paranoia. */
    8184     Assert(VmxTransient.pVmcsInfo == &pVCpu->hmr0.s.vmx.VmcsInfoNstGst);
    8185 
    8186     VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
    8187     for (;;)
    8188     {
    8189         Assert(!HMR0SuspendPending());
    8190         HMVMX_ASSERT_CPU_SAFE(pVCpu);
    8191         STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    8192 
    8193         /*
    8194          * Preparatory work for running nested-guest code; this may force us to
    8195          * return to ring-3.
    8196          *
    8197          * Warning! This bugger disables interrupts on VINF_SUCCESS!
    8198          */
    8199         rcStrict = vmxHCPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
    8200         if (rcStrict != VINF_SUCCESS)
    8201             break;
    8202 
    8203         /* Interrupts are disabled at this point! */
    8204         vmxHCPreRunGuestCommitted(pVCpu, &VmxTransient);
    8205         int rcRun = vmxHCRunGuest(pVCpu, &VmxTransient);
    8206         vmxHCPostRunGuest(pVCpu, &VmxTransient, rcRun);
    8207         /* Interrupts are re-enabled at this point! */
    8208 
    8209         /*
    8210          * Check for errors with running the VM (VMLAUNCH/VMRESUME).
    8211          */
    8212         if (RT_SUCCESS(rcRun))
    8213         { /* very likely */ }
    8214         else
    8215         {
    8216             STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, x);
    8217             vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    8218             return rcRun;
    8219         }
    8220 
    8221         /*
    8222          * Profile the VM-exit.
    8223          */
    8224         AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    8225         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitAll);
    8226         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatNestedExitAll);
    8227         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatNestedExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    8228         STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, &VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    8229         HMVMX_START_EXIT_DISPATCH_PROF();
    8230 
    8231         VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
    8232 
    8233         /*
    8234          * Handle the VM-exit.
    8235          */
    8236         rcStrict = vmxHCHandleExitNested(pVCpu, &VmxTransient);
    8237         STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    8238         if (rcStrict == VINF_SUCCESS)
    8239         {
    8240             if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    8241             {
    8242                 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchNstGstVmexit);
    8243                 rcStrict = VINF_VMX_VMEXIT;
    8244             }
    8245             else
    8246             {
    8247                 if (++(*pcLoops) <= cMaxResumeLoops)
    8248                     continue;
    8249                 STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchMaxResumeLoops);
    8250                 rcStrict = VINF_EM_RAW_INTERRUPT;
    8251             }
    8252         }
    8253         else
    8254             Assert(rcStrict != VINF_VMX_VMEXIT);
    8255         break;
    8256     }
    8257 
    8258     STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    8259     return rcStrict;
    8260 }
    8261 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    8262 
    8263 
    8264 /** @name Execution loop for single stepping, DBGF events and expensive Dtrace
    8265  *  probes.
    8266  *
    8267  * The following few functions and associated structure contain the bloat
    8268  * necessary for providing detailed debug events and dtrace probes as well as
    8269  * reliable host side single stepping.  This works on the principle of
    8270  * "subclassing" the normal execution loop and workers.  We replace the loop
    8271  * method completely and override selected helpers to add necessary adjustments
    8272  * to their core operation.
    8273  *
    8274  * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
    8275  * any performance for debug and analysis features.
    8276  *
    8277  * @{
    8278  */
    8279 
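
The save/apply/run/revert shape this describes can be sketched in a few lines (hypothetical stand-ins, not the real loop):

    #include <stdint.h>
    #include <stdio.h>

    /* The debug loop remembers the initial control state, widens the intercepts
       before each entry, reuses the same inner worker as the normal loop, and
       restores the controls on the way out. */
    static uint32_t g_fCtls = UINT32_C(0x0f);            /* stand-in VMCS control field */
    static int RunInner(int i) { return i < 3 ? 0 : 1; } /* stub for the shared worker  */

    static int RunLoopDebug(void)
    {
        uint32_t const fCtlsInitial = g_fCtls;           /* init: remember what to restore */
        uint32_t const fExtra       = UINT32_C(0x80);    /* extra intercepts for debugging */
        int rc = 0;
        for (int i = 0; rc == 0; i++)
        {
            g_fCtls |= fExtra;                           /* apply before each entry */
            rc = RunInner(i);                            /* unchanged inner worker  */
        }
        g_fCtls = fCtlsInitial;                          /* revert for the normal loop */
        return rc;
    }

    int main(void)
    {
        printf("rc=%d fCtls=%#x\n", RunLoopDebug(), g_fCtls); /* rc=1 fCtls=0xf */
        return 0;
    }
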
    8280 /**
    8281  * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
    8282  * the debug run loop.
    8283  */
    8284 typedef struct VMXRUNDBGSTATE
    8285 {
    8286     /** The RIP we started executing at.  This is for detecting that we stepped.  */
    8287     uint64_t    uRipStart;
    8288     /** The CS we started executing with.  */
    8289     uint16_t    uCsStart;
    8290 
    8291     /** Whether we've actually modified the 1st execution control field. */
    8292     bool        fModifiedProcCtls : 1;
    8293     /** Whether we've actually modified the 2nd execution control field. */
    8294     bool        fModifiedProcCtls2 : 1;
    8295     /** Whether we've actually modified the exception bitmap. */
    8296     bool        fModifiedXcptBitmap : 1;
    8297 
    8298     /** We desire the CR0 mask to be cleared. */
    8299     bool        fClearCr0Mask : 1;
    8300     /** We desire the CR4 mask to be cleared. */
    8301     bool        fClearCr4Mask : 1;
    8302     /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
    8303     uint32_t    fCpe1Extra;
    8304     /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
    8305     uint32_t    fCpe1Unwanted;
    8306     /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
    8307     uint32_t    fCpe2Extra;
    8308     /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
    8309     uint32_t    bmXcptExtra;
    8310     /** The sequence number of the Dtrace provider settings the state was
    8311      *  configured against. */
    8312     uint32_t    uDtraceSettingsSeqNo;
    8313     /** VM-exits to check (one bit per VM-exit). */
    8314     uint32_t    bmExitsToCheck[3];
    8315 
    8316     /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
    8317     uint32_t    fProcCtlsInitial;
    8318     /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
    8319     uint32_t    fProcCtls2Initial;
    8320     /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
    8321     uint32_t    bmXcptInitial;
    8322 } VMXRUNDBGSTATE;
    8323 AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
    8324 typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
    8325 
    8326 
    8327 /**
    8328  * Initializes the VMXRUNDBGSTATE structure.
    8329  *
    8330  * @param   pVCpu           The cross context virtual CPU structure of the
    8331  *                          calling EMT.
    8332  * @param   pVmxTransient   The VMX-transient structure.
    8333  * @param   pDbgState       The debug state to initialize.
    8334  */
    8335 static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    8336 {
    8337     pDbgState->uRipStart            = pVCpu->cpum.GstCtx.rip;
    8338     pDbgState->uCsStart             = pVCpu->cpum.GstCtx.cs.Sel;
    8339 
    8340     pDbgState->fModifiedProcCtls    = false;
    8341     pDbgState->fModifiedProcCtls2   = false;
    8342     pDbgState->fModifiedXcptBitmap  = false;
    8343     pDbgState->fClearCr0Mask        = false;
    8344     pDbgState->fClearCr4Mask        = false;
    8345     pDbgState->fCpe1Extra           = 0;
    8346     pDbgState->fCpe1Unwanted        = 0;
    8347     pDbgState->fCpe2Extra           = 0;
    8348     pDbgState->bmXcptExtra          = 0;
    8349     pDbgState->fProcCtlsInitial     = pVmxTransient->pVmcsInfo->u32ProcCtls;
    8350     pDbgState->fProcCtls2Initial    = pVmxTransient->pVmcsInfo->u32ProcCtls2;
    8351     pDbgState->bmXcptInitial        = pVmxTransient->pVmcsInfo->u32XcptBitmap;
    8352 }
    8353 
    8354 
    8355 /**
    8356  * Updates the VMCS fields with changes requested by @a pDbgState.
    8357  *
    8358  * This is performed after vmxHCPreRunGuestDebugStateUpdate as well as
    8359  * immediately before executing guest code, i.e. when interrupts are disabled.
    8360  * We don't check status codes here as we cannot easily assert or return in the
    8361  * latter case.
    8362  *
    8363  * @param   pVCpu           The cross context virtual CPU structure.
    8364  * @param   pVmxTransient   The VMX-transient structure.
    8365  * @param   pDbgState       The debug state.
    8366  */
    8367 static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    8368 {
    8369     /*
    8370      * Ensure desired flags in VMCS control fields are set.
    8371      * (Ignoring write failure here, as we're committed and it's just debug extras.)
    8372      *
    8373      * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
    8374      *       there should be no stale data in pCtx at this point.
    8375      */
    8376     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    8377     if (   (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
    8378         || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
    8379     {
    8380         pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
    8381         pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
    8382         VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    8383         Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
    8384         pDbgState->fModifiedProcCtls   = true;
    8385     }
    8386 
    8387     if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
    8388     {
    8389         pVmcsInfo->u32ProcCtls2  |= pDbgState->fCpe2Extra;
    8390         VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
    8391         Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
    8392         pDbgState->fModifiedProcCtls2  = true;
    8393     }
    8394 
    8395     if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
    8396     {
    8397         pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
    8398         VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
    8399         Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
    8400         pDbgState->fModifiedXcptBitmap = true;
    8401     }
    8402 
    8403     if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
    8404     {
    8405         pVmcsInfo->u64Cr0Mask = 0;
    8406         VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0);
    8407         Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
    8408     }
    8409 
    8410     if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
    8411     {
    8412         pVmcsInfo->u64Cr4Mask = 0;
    8413         VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0);
    8414         Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
    8415     }
    8416 
    8417     NOREF(pVCpu);
    8418 }
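
/* Illustrative sketch: the read-modify-write pattern used above, shown
   standalone with a hypothetical helper name (exampleMergeCtl is not a real
   function in this file); puCur caches a VMCS control value, while
   fExtra/fUnwanted come from the debug state:

       static bool exampleMergeCtl(uint32_t *puCur, uint32_t fExtra, uint32_t fUnwanted)
       {
           uint32_t const uNew = (*puCur | fExtra) & ~fUnwanted;
           if (uNew == *puCur)
               return false;   // nothing changed, so the VMWRITE can be skipped
           *puCur = uNew;      // refresh the cache; the caller does the VMWRITE
           return true;        // remembered so vmxHCRunDebugStateRevert restores it
       }
*/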
    8419 
    8420 
    8421 /**
    8422  * Restores VMCS fields that were changed by vmxHCPreRunGuestDebugStateApply for
    8423  * re-entry next time around.
    8424  *
    8425  * @returns Strict VBox status code (i.e. informational status codes too).
    8426  * @param   pVCpu           The cross context virtual CPU structure.
    8427  * @param   pVmxTransient   The VMX-transient structure.
    8428  * @param   pDbgState       The debug state.
    8429  * @param   rcStrict        The return code from executing the guest using single
    8430  *                          stepping.
    8431  */
    8432 static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
    8433                                                VBOXSTRICTRC rcStrict)
    8434 {
    8435     /*
    8436      * Restore VM-execution control settings as we may not re-enter this
    8437      * function the next time around.
    8438      */
    8439     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    8440 
    8441     /* We reload the initial value and trigger what recalculations we can the
    8442        next time around.  From the looks of things, that's all that's required atm. */
    8443     if (pDbgState->fModifiedProcCtls)
    8444     {
    8445         if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
    8446             pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in vmxHCLeave */
    8447         int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
    8448         AssertRC(rc2);
    8449         pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
    8450     }
    8451 
    8452     /* We're currently the only ones messing with this one, so just restore the
    8453        cached value and reload the field. */
    8454     if (   pDbgState->fModifiedProcCtls2
    8455         && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
    8456     {
    8457         int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
    8458         AssertRC(rc2);
    8459         pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
    8460     }
    8461 
    8462     /* If we've modified the exception bitmap, we restore it and trigger
    8463        reloading and partial recalculation the next time around. */
    8464     if (pDbgState->fModifiedXcptBitmap)
    8465         pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
    8466 
    8467     return rcStrict;
    8468 }
    8469 
    8470 
    8471 /**
    8472  * Configures VM-exit controls for current DBGF and DTrace settings.
    8473  *
    8474  * This updates @a pDbgState and the VMCS execution control fields to reflect
    8475  * the necessary VM-exits demanded by DBGF and DTrace.
    8476  *
    8477  * @param   pVCpu           The cross context virtual CPU structure.
    8478  * @param   pVmxTransient   The VMX-transient structure. May update
    8479  *                          fUpdatedTscOffsettingAndPreemptTimer.
    8480  * @param   pDbgState       The debug state.
    8481  */
    8482 static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    8483 {
    8484     /*
    8485      * Take down the dtrace settings sequence number so we can spot changes.
    8486      */
    8487     pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
    8488     ASMCompilerBarrier();
    8489 
    8490     /*
    8491      * We'll rebuild most of the middle block of data members (holding the
    8492      * current settings) as we go along here, so start by clearing it all.
    8493      */
    8494     pDbgState->bmXcptExtra      = 0;
    8495     pDbgState->fCpe1Extra       = 0;
    8496     pDbgState->fCpe1Unwanted    = 0;
    8497     pDbgState->fCpe2Extra       = 0;
    8498     for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
    8499         pDbgState->bmExitsToCheck[i] = 0;
    8500 
    8501     /*
    8502      * Software interrupts (INT XXh) - no idea how to trigger these...
    8503      */
    8504     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    8505     if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
    8506         || VBOXVMM_INT_SOFTWARE_ENABLED())
    8507     {
    8508         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
    8509     }
    8510 
    8511     /*
    8512      * INT3 breakpoints - triggered by #BP exceptions.
    8513      */
    8514     if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
    8515         pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
    8516 
    8517     /*
    8518      * Exception bitmap and XCPT events+probes.
    8519      */
    8520     for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
    8521         if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
    8522             pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
    8523 
    8524     if (VBOXVMM_XCPT_DE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
    8525     if (VBOXVMM_XCPT_DB_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
    8526     if (VBOXVMM_XCPT_BP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
    8527     if (VBOXVMM_XCPT_OF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
    8528     if (VBOXVMM_XCPT_BR_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
    8529     if (VBOXVMM_XCPT_UD_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
    8530     if (VBOXVMM_XCPT_NM_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
    8531     if (VBOXVMM_XCPT_DF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
    8532     if (VBOXVMM_XCPT_TS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
    8533     if (VBOXVMM_XCPT_NP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
    8534     if (VBOXVMM_XCPT_SS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
    8535     if (VBOXVMM_XCPT_GP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
    8536     if (VBOXVMM_XCPT_PF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
    8537     if (VBOXVMM_XCPT_MF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
    8538     if (VBOXVMM_XCPT_AC_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
    8539     if (VBOXVMM_XCPT_XF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
    8540     if (VBOXVMM_XCPT_VE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
    8541     if (VBOXVMM_XCPT_SX_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
    8542 
    8543     if (pDbgState->bmXcptExtra)
    8544         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
    8545 
    8546     /*
    8547      * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
    8548      *
    8549      * Note! This is the reverse of what vmxHCHandleExitDtraceEvents does.
    8550      *       So, when adding/changing/removing please don't forget to update it.
    8551      *
    8552      * Some of the macros are picking up local variables to save horizontal space
    8553      * (being able to see it all in a table is the lesser evil here).
    8554      */
    8555 #define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
    8556         (    DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
    8557          ||  RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
    8558 #define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
    8559         if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
    8560         {   AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
    8561             ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
    8562         } else do { } while (0)
    8563 #define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
    8564         if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
    8565         { \
    8566             (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
    8567             AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
    8568             ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
    8569         } else do { } while (0)
    8570 #define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
    8571         if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
    8572         { \
    8573             (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
    8574             AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
    8575             ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
    8576         } else do { } while (0)
    8577 #define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
    8578         if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
    8579         { \
    8580             (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
    8581             AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
    8582             ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
    8583         } else do { } while (0)
    8584 
    8585     SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH,         VMX_EXIT_TASK_SWITCH);   /* unconditional */
    8586     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION,   VMX_EXIT_EPT_VIOLATION); /* unconditional */
    8587     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG,   VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
    8588     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS,    VMX_EXIT_APIC_ACCESS);   /* feature dependent, nothing to enable here */
    8589     SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE,     VMX_EXIT_APIC_WRITE);    /* feature dependent, nothing to enable here */
    8590 
    8591     SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID,              VMX_EXIT_CPUID);         /* unconditional */
    8592     SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID,              VMX_EXIT_CPUID);
    8593     SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC,             VMX_EXIT_GETSEC);        /* unconditional */
    8594     SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC,             VMX_EXIT_GETSEC);
    8595     SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT,               VMX_EXIT_HLT,      VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
    8596     SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT,               VMX_EXIT_HLT);
    8597     SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD,               VMX_EXIT_INVD);          /* unconditional */
    8598     SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD,               VMX_EXIT_INVD);
    8599     SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG,             VMX_EXIT_INVLPG,   VMX_PROC_CTLS_INVLPG_EXIT);
    8600     SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG,             VMX_EXIT_INVLPG);
    8601     SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC,              VMX_EXIT_RDPMC,    VMX_PROC_CTLS_RDPMC_EXIT);
    8602     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC,              VMX_EXIT_RDPMC);
    8603     SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC,              VMX_EXIT_RDTSC,    VMX_PROC_CTLS_RDTSC_EXIT);
    8604     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC,              VMX_EXIT_RDTSC);
    8605     SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM,                VMX_EXIT_RSM);           /* unconditional */
    8606     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM,                VMX_EXIT_RSM);
    8607     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL,           VMX_EXIT_VMCALL);        /* unconditional */
    8608     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL,           VMX_EXIT_VMCALL);
    8609     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);       /* unconditional */
    8610     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);
    8611     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);      /* unconditional */
    8612     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);
    8613     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);       /* unconditional */
    8614     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);
    8615     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST,        VMX_EXIT_VMPTRST);       /* unconditional */
    8616     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST,        VMX_EXIT_VMPTRST);
    8617     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD,         VMX_EXIT_VMREAD);        /* unconditional */
    8618     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD,         VMX_EXIT_VMREAD);
    8619     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME,       VMX_EXIT_VMRESUME);      /* unconditional */
    8620     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME,       VMX_EXIT_VMRESUME);
    8621     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE,        VMX_EXIT_VMWRITE);       /* unconditional */
    8622     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE,        VMX_EXIT_VMWRITE);
    8623     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF,         VMX_EXIT_VMXOFF);        /* unconditional */
    8624     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF,         VMX_EXIT_VMXOFF);
    8625     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON,          VMX_EXIT_VMXON);         /* unconditional */
    8626     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON,          VMX_EXIT_VMXON);
    8627 
    8628     if (   IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
    8629         || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
    8630     {
    8631         int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
    8632                                                                         | CPUMCTX_EXTRN_APIC_TPR);
    8633         AssertRC(rc);
    8634 
    8635 #if 0 /** @todo fix me */
    8636         pDbgState->fClearCr0Mask = true;
    8637         pDbgState->fClearCr4Mask = true;
    8638 #endif
    8639         if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
    8640             pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
    8641         if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
    8642             pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
    8643         pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
    8644         /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT.  It would
    8645                  require clearing here and in the loop if we start using it. */
    8646         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
    8647     }
    8648     else
    8649     {
    8650         if (pDbgState->fClearCr0Mask)
    8651         {
    8652             pDbgState->fClearCr0Mask = false;
    8653             ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
    8654         }
    8655         if (pDbgState->fClearCr4Mask)
    8656         {
    8657             pDbgState->fClearCr4Mask = false;
    8658             ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
    8659         }
    8660     }
    8661     SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ,           VMX_EXIT_MOV_CRX);
    8662     SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE,          VMX_EXIT_MOV_CRX);
    8663 
    8664     if (   IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
    8665         || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
    8666     {
    8667         /** @todo later, need to fix handler as it assumes this won't usually happen. */
    8668         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
    8669     }
    8670     SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ,           VMX_EXIT_MOV_DRX);
    8671     SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE,          VMX_EXIT_MOV_DRX);
    8672 
    8673     SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR,              VMX_EXIT_RDMSR,    VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
    8674     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR,              VMX_EXIT_RDMSR);
    8675     SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR,              VMX_EXIT_WRMSR,    VMX_PROC_CTLS_USE_MSR_BITMAPS);
    8676     SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR,              VMX_EXIT_WRMSR);
    8677     SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT,              VMX_EXIT_MWAIT,    VMX_PROC_CTLS_MWAIT_EXIT);   /* paranoia */
    8678     SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT,              VMX_EXIT_MWAIT);
    8679     SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR,            VMX_EXIT_MONITOR,  VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
    8680     SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR,            VMX_EXIT_MONITOR);
    8681 #if 0 /** @todo too slow, fix handler. */
    8682     SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE,              VMX_EXIT_PAUSE,    VMX_PROC_CTLS_PAUSE_EXIT);
    8683 #endif
    8684     SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE,              VMX_EXIT_PAUSE);
    8685 
    8686     if (   IS_EITHER_ENABLED(pVM, INSTR_SGDT)
    8687         || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
    8688         || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
    8689         || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
    8690     {
    8691         pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
    8692         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
    8693     }
    8694     SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT,               VMX_EXIT_GDTR_IDTR_ACCESS);
    8695     SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT,               VMX_EXIT_GDTR_IDTR_ACCESS);
    8696     SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT,               VMX_EXIT_GDTR_IDTR_ACCESS);
    8697     SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT,               VMX_EXIT_GDTR_IDTR_ACCESS);
    8698 
    8699     if (   IS_EITHER_ENABLED(pVM, INSTR_SLDT)
    8700         || IS_EITHER_ENABLED(pVM, INSTR_STR)
    8701         || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
    8702         || IS_EITHER_ENABLED(pVM, INSTR_LTR))
    8703     {
    8704         pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
    8705         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
    8706     }
    8707     SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT,               VMX_EXIT_LDTR_TR_ACCESS);
    8708     SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR,                VMX_EXIT_LDTR_TR_ACCESS);
    8709     SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT,               VMX_EXIT_LDTR_TR_ACCESS);
    8710     SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR,                VMX_EXIT_LDTR_TR_ACCESS);
    8711 
    8712     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT,         VMX_EXIT_INVEPT);        /* unconditional */
    8713     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT,         VMX_EXIT_INVEPT);
    8714     SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP,             VMX_EXIT_RDTSCP,   VMX_PROC_CTLS_RDTSC_EXIT);
    8715     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP,             VMX_EXIT_RDTSCP);
    8716     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID,        VMX_EXIT_INVVPID);       /* unconditional */
    8717     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID,        VMX_EXIT_INVVPID);
    8718     SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD,             VMX_EXIT_WBINVD,   VMX_PROC_CTLS2_WBINVD_EXIT);
    8719     SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD,             VMX_EXIT_WBINVD);
    8720     SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV,             VMX_EXIT_XSETBV);        /* unconditional */
    8721     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV,             VMX_EXIT_XSETBV);
    8722     SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND,             VMX_EXIT_RDRAND,   VMX_PROC_CTLS2_RDRAND_EXIT);
    8723     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND,             VMX_EXIT_RDRAND);
    8724     SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID,        VMX_EXIT_INVPCID,  VMX_PROC_CTLS_INVLPG_EXIT);
    8725     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID,        VMX_EXIT_INVPCID);
    8726     SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC,         VMX_EXIT_VMFUNC);        /* unconditional for the current setup */
    8727     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC,         VMX_EXIT_VMFUNC);
    8728     SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED,             VMX_EXIT_RDSEED,   VMX_PROC_CTLS2_RDSEED_EXIT);
    8729     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED,             VMX_EXIT_RDSEED);
    8730     SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES,             VMX_EXIT_XSAVES);        /* unconditional (enabled by host, guest cfg) */
    8731     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES,             VMX_EXIT_XSAVES);
    8732     SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS,            VMX_EXIT_XRSTORS);       /* unconditional (enabled by host, guest cfg) */
    8733     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS,            VMX_EXIT_XRSTORS);
    8734 
    8735 #undef IS_EITHER_ENABLED
    8736 #undef SET_ONLY_XBM_IF_EITHER_EN
    8737 #undef SET_CPE1_XBM_IF_EITHER_EN
    8738 #undef SET_CPEU_XBM_IF_EITHER_EN
    8739 #undef SET_CPE2_XBM_IF_EITHER_EN
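
/* Illustrative expansion (not in the changeset): for the HALT row above,
   SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
   unfolds (AssertCompile omitted) to roughly:

       if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
           || VBOXVMM_INSTR_HALT_ENABLED())
       {
           pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;     // force HLT VM-exits
           ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);  // route the exit to the filter
       }
*/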
    8740 
    8741     /*
    8742      * Sanitize the execution controls against what the CPU supports.
    8743      */
    8744     pDbgState->fCpe2Extra       &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
    8745     if (pDbgState->fCpe2Extra)
    8746         pDbgState->fCpe1Extra   |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
    8747     pDbgState->fCpe1Extra       &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
    8748     pDbgState->fCpe1Unwanted    &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
    8749     if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
    8750     {
    8751         pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
    8752         pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    8753     }
    8754 
    8755     Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
    8756           pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
    8757           pDbgState->fClearCr0Mask ? " clr-cr0" : "",
    8758           pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
    8759 }
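
/* Background sketch (editorial): the VMX capability MSRs mirrored in g_HmMsrs
   report each control field as an allowed-1 mask (bits that may be set) and an
   allowed-0 mask (bits fixed to 1), so the sanitizing above reduces to:

       fExtra    &= allowed1;     // request only bits the CPU can set to 1
       fUnwanted &= ~allowed0;    // never try to clear bits fixed to 1
*/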
    8760 
    8761 
    8762 /**
    8763  * Fires off DBGF events and dtrace probes for a VM-exit, when it's
    8764  * appropriate.
    8765  *
    8766  * The caller has checked the VM-exit against the
    8767  * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has already checked for NMIs,
    8768  * so we don't have to handle those here.
    8769  *
    8770  * @returns Strict VBox status code (i.e. informational status codes too).
    8771  * @param   pVCpu           The cross context virtual CPU structure.
    8772  * @param   pVmxTransient   The VMX-transient structure.
    8773  * @param   uExitReason     The VM-exit reason.
    8774  *
    8775  * @remarks The name of this function is displayed by dtrace, so keep it short
    8776  *          and to the point. No longer than 33 chars long, please.
    8777  */
    8778 static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
    8779 {
    8780     /*
    8781      * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
    8782      * same time check whether any corresponding Dtrace event is enabled (fDtrace).
    8783      *
    8784      * Note! This is the reverse operation of what vmxHCPreRunGuestDebugStateUpdate
    8785      *       does.  Must add/change/remove both places.  Same ordering, please.
    8786      *
    8787      *       Added/removed events must also be reflected in the next section
    8788      *       where we dispatch dtrace events.
    8789      */
    8790     bool            fDtrace1   = false;
    8791     bool            fDtrace2   = false;
    8792     DBGFEVENTTYPE   enmEvent1  = DBGFEVENT_END;
    8793     DBGFEVENTTYPE   enmEvent2  = DBGFEVENT_END;
    8794     uint32_t        uEventArg  = 0;
    8795 #define SET_EXIT(a_EventSubName) \
    8796         do { \
    8797             enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_,  a_EventSubName); \
    8798             fDtrace2  = RT_CONCAT3(VBOXVMM_EXIT_,   a_EventSubName, _ENABLED)(); \
    8799         } while (0)
    8800 #define SET_BOTH(a_EventSubName) \
    8801         do { \
    8802             enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
    8803             enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_,  a_EventSubName); \
    8804             fDtrace1  = RT_CONCAT3(VBOXVMM_INSTR_,  a_EventSubName, _ENABLED)(); \
    8805             fDtrace2  = RT_CONCAT3(VBOXVMM_EXIT_,   a_EventSubName, _ENABLED)(); \
    8806         } while (0)
    8807     switch (uExitReason)
    8808     {
    8809         case VMX_EXIT_MTF:
    8810             return vmxHCExitMtf(pVCpu, pVmxTransient);
    8811 
    8812         case VMX_EXIT_XCPT_OR_NMI:
    8813         {
    8814             uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    8815             switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
    8816             {
    8817                 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
    8818                 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
    8819                 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
    8820                     if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
    8821                     {
    8822                         if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
    8823                         {
    8824                             vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
    8825                             uEventArg = pVmxTransient->uExitIntErrorCode;
    8826                         }
    8827                         enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
    8828                         switch (enmEvent1)
    8829                         {
    8830                             case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
    8831                             case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
    8832                             case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
    8833                             case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
    8834                             case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
    8835                             case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
    8836                             case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
    8837                             case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
    8838                             case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
    8839                             case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
    8840                             case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
    8841                             case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
    8842                             case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
    8843                             case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
    8844                             case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
    8845                             case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
    8846                             case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
    8847                             case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
    8848                             default:                                                      break;
    8849                         }
    8850                     }
    8851                     else
    8852                         AssertFailed();
    8853                     break;
    8854 
    8855                 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
    8856                     uEventArg = idxVector;
    8857                     enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
    8858                     fDtrace1  = VBOXVMM_INT_SOFTWARE_ENABLED();
    8859                     break;
    8860             }
    8861             break;
    8862         }
    8863 
    8864         case VMX_EXIT_TRIPLE_FAULT:
    8865             enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
    8866             //fDtrace1  = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
    8867             break;
    8868         case VMX_EXIT_TASK_SWITCH:      SET_EXIT(TASK_SWITCH); break;
    8869         case VMX_EXIT_EPT_VIOLATION:    SET_EXIT(VMX_EPT_VIOLATION); break;
    8870         case VMX_EXIT_EPT_MISCONFIG:    SET_EXIT(VMX_EPT_MISCONFIG); break;
    8871         case VMX_EXIT_APIC_ACCESS:      SET_EXIT(VMX_VAPIC_ACCESS); break;
    8872         case VMX_EXIT_APIC_WRITE:       SET_EXIT(VMX_VAPIC_WRITE); break;
    8873 
    8874         /* Instruction specific VM-exits: */
    8875         case VMX_EXIT_CPUID:            SET_BOTH(CPUID); break;
    8876         case VMX_EXIT_GETSEC:           SET_BOTH(GETSEC); break;
    8877         case VMX_EXIT_HLT:              SET_BOTH(HALT); break;
    8878         case VMX_EXIT_INVD:             SET_BOTH(INVD); break;
    8879         case VMX_EXIT_INVLPG:           SET_BOTH(INVLPG); break;
    8880         case VMX_EXIT_RDPMC:            SET_BOTH(RDPMC); break;
    8881         case VMX_EXIT_RDTSC:            SET_BOTH(RDTSC); break;
    8882         case VMX_EXIT_RSM:              SET_BOTH(RSM); break;
    8883         case VMX_EXIT_VMCALL:           SET_BOTH(VMM_CALL); break;
    8884         case VMX_EXIT_VMCLEAR:          SET_BOTH(VMX_VMCLEAR); break;
    8885         case VMX_EXIT_VMLAUNCH:         SET_BOTH(VMX_VMLAUNCH); break;
    8886         case VMX_EXIT_VMPTRLD:          SET_BOTH(VMX_VMPTRLD); break;
    8887         case VMX_EXIT_VMPTRST:          SET_BOTH(VMX_VMPTRST); break;
    8888         case VMX_EXIT_VMREAD:           SET_BOTH(VMX_VMREAD); break;
    8889         case VMX_EXIT_VMRESUME:         SET_BOTH(VMX_VMRESUME); break;
    8890         case VMX_EXIT_VMWRITE:          SET_BOTH(VMX_VMWRITE); break;
    8891         case VMX_EXIT_VMXOFF:           SET_BOTH(VMX_VMXOFF); break;
    8892         case VMX_EXIT_VMXON:            SET_BOTH(VMX_VMXON); break;
    8893         case VMX_EXIT_MOV_CRX:
    8894             vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    8895             if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
    8896                 SET_BOTH(CRX_READ);
    8897             else
    8898                 SET_BOTH(CRX_WRITE);
    8899             uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
    8900             break;
    8901         case VMX_EXIT_MOV_DRX:
    8902             vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    8903             if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
    8904                 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
    8905                 SET_BOTH(DRX_READ);
    8906             else
    8907                 SET_BOTH(DRX_WRITE);
    8908             uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
    8909             break;
    8910         case VMX_EXIT_RDMSR:            SET_BOTH(RDMSR); break;
    8911         case VMX_EXIT_WRMSR:            SET_BOTH(WRMSR); break;
    8912         case VMX_EXIT_MWAIT:            SET_BOTH(MWAIT); break;
    8913         case VMX_EXIT_MONITOR:          SET_BOTH(MONITOR); break;
    8914         case VMX_EXIT_PAUSE:            SET_BOTH(PAUSE); break;
    8915         case VMX_EXIT_GDTR_IDTR_ACCESS:
    8916             vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
    8917             switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
    8918             {
    8919                 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
    8920                 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
    8921                 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
    8922                 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
    8923             }
    8924             break;
    8925 
    8926         case VMX_EXIT_LDTR_TR_ACCESS:
    8927             vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
    8928             switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
    8929             {
    8930                 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
    8931                 case VMX_YYTR_INSINFO_II_STR:  SET_BOTH(STR); break;
    8932                 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
    8933                 case VMX_YYTR_INSINFO_II_LTR:  SET_BOTH(LTR); break;
    8934             }
    8935             break;
    8936 
    8937         case VMX_EXIT_INVEPT:           SET_BOTH(VMX_INVEPT); break;
    8938         case VMX_EXIT_RDTSCP:           SET_BOTH(RDTSCP); break;
    8939         case VMX_EXIT_INVVPID:          SET_BOTH(VMX_INVVPID); break;
    8940         case VMX_EXIT_WBINVD:           SET_BOTH(WBINVD); break;
    8941         case VMX_EXIT_XSETBV:           SET_BOTH(XSETBV); break;
    8942         case VMX_EXIT_RDRAND:           SET_BOTH(RDRAND); break;
    8943         case VMX_EXIT_INVPCID:          SET_BOTH(VMX_INVPCID); break;
    8944         case VMX_EXIT_VMFUNC:           SET_BOTH(VMX_VMFUNC); break;
    8945         case VMX_EXIT_RDSEED:           SET_BOTH(RDSEED); break;
    8946         case VMX_EXIT_XSAVES:           SET_BOTH(XSAVES); break;
    8947         case VMX_EXIT_XRSTORS:          SET_BOTH(XRSTORS); break;
    8948 
    8949         /* Events that aren't relevant at this point. */
    8950         case VMX_EXIT_EXT_INT:
    8951         case VMX_EXIT_INT_WINDOW:
    8952         case VMX_EXIT_NMI_WINDOW:
    8953         case VMX_EXIT_TPR_BELOW_THRESHOLD:
    8954         case VMX_EXIT_PREEMPT_TIMER:
    8955         case VMX_EXIT_IO_INSTR:
    8956             break;
    8957 
    8958         /* Errors and unexpected events. */
    8959         case VMX_EXIT_INIT_SIGNAL:
    8960         case VMX_EXIT_SIPI:
    8961         case VMX_EXIT_IO_SMI:
    8962         case VMX_EXIT_SMI:
    8963         case VMX_EXIT_ERR_INVALID_GUEST_STATE:
    8964         case VMX_EXIT_ERR_MSR_LOAD:
    8965         case VMX_EXIT_ERR_MACHINE_CHECK:
    8966         case VMX_EXIT_PML_FULL:
    8967         case VMX_EXIT_VIRTUALIZED_EOI:
    8968             break;
    8969 
    8970         default:
    8971             AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
    8972             break;
    8973     }
    8974 #undef SET_BOTH
    8975 #undef SET_EXIT
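
/* Illustrative expansion (not in the changeset): SET_BOTH(CPUID) in the switch
   above unfolds to roughly:

       enmEvent1 = DBGFEVENT_INSTR_CPUID;
       enmEvent2 = DBGFEVENT_EXIT_CPUID;
       fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
       fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
*/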
    8976 
    8977     /*
    8978      * Dtrace tracepoints go first.  We do them here at once so we don't
    8979      * have to duplicate the guest state saving and related code a few dozen
    8980      * times.  The downside is that we've got to repeat the switch, though this
    8981      * time we use enmEvent since the probes are a subset of what DBGF does.
    8982      */
    8983     if (fDtrace1 || fDtrace2)
    8984     {
    8985         vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    8986         vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    8987         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    8988         switch (enmEvent1)
    8989         {
    8990             /** @todo consider which extra parameters would be helpful for each probe.   */
    8991             case DBGFEVENT_END: break;
    8992             case DBGFEVENT_XCPT_DE:                 VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
    8993             case DBGFEVENT_XCPT_DB:                 VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
    8994             case DBGFEVENT_XCPT_BP:                 VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
    8995             case DBGFEVENT_XCPT_OF:                 VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
    8996             case DBGFEVENT_XCPT_BR:                 VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
    8997             case DBGFEVENT_XCPT_UD:                 VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
    8998             case DBGFEVENT_XCPT_NM:                 VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
    8999             case DBGFEVENT_XCPT_DF:                 VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
    9000             case DBGFEVENT_XCPT_TS:                 VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
    9001             case DBGFEVENT_XCPT_NP:                 VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
    9002             case DBGFEVENT_XCPT_SS:                 VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
    9003             case DBGFEVENT_XCPT_GP:                 VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
    9004             case DBGFEVENT_XCPT_PF:                 VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
    9005             case DBGFEVENT_XCPT_MF:                 VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
    9006             case DBGFEVENT_XCPT_AC:                 VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
    9007             case DBGFEVENT_XCPT_XF:                 VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
    9008             case DBGFEVENT_XCPT_VE:                 VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
    9009             case DBGFEVENT_XCPT_SX:                 VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
    9010             case DBGFEVENT_INTERRUPT_SOFTWARE:      VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9011             case DBGFEVENT_INSTR_CPUID:             VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
    9012             case DBGFEVENT_INSTR_GETSEC:            VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
    9013             case DBGFEVENT_INSTR_HALT:              VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
    9014             case DBGFEVENT_INSTR_INVD:              VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
    9015             case DBGFEVENT_INSTR_INVLPG:            VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
    9016             case DBGFEVENT_INSTR_RDPMC:             VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
    9017             case DBGFEVENT_INSTR_RDTSC:             VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
    9018             case DBGFEVENT_INSTR_RSM:               VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
    9019             case DBGFEVENT_INSTR_CRX_READ:          VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9020             case DBGFEVENT_INSTR_CRX_WRITE:         VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9021             case DBGFEVENT_INSTR_DRX_READ:          VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9022             case DBGFEVENT_INSTR_DRX_WRITE:         VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9023             case DBGFEVENT_INSTR_RDMSR:             VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
    9024             case DBGFEVENT_INSTR_WRMSR:             VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
    9025                                                                         RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
    9026             case DBGFEVENT_INSTR_MWAIT:             VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
    9027             case DBGFEVENT_INSTR_MONITOR:           VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
    9028             case DBGFEVENT_INSTR_PAUSE:             VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
    9029             case DBGFEVENT_INSTR_SGDT:              VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
    9030             case DBGFEVENT_INSTR_SIDT:              VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
    9031             case DBGFEVENT_INSTR_LGDT:              VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
    9032             case DBGFEVENT_INSTR_LIDT:              VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
    9033             case DBGFEVENT_INSTR_SLDT:              VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
    9034             case DBGFEVENT_INSTR_STR:               VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
    9035             case DBGFEVENT_INSTR_LLDT:              VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
    9036             case DBGFEVENT_INSTR_LTR:               VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
    9037             case DBGFEVENT_INSTR_RDTSCP:            VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
    9038             case DBGFEVENT_INSTR_WBINVD:            VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
    9039             case DBGFEVENT_INSTR_XSETBV:            VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
    9040             case DBGFEVENT_INSTR_RDRAND:            VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
    9041             case DBGFEVENT_INSTR_RDSEED:            VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
    9042             case DBGFEVENT_INSTR_XSAVES:            VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
    9043             case DBGFEVENT_INSTR_XRSTORS:           VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
    9044             case DBGFEVENT_INSTR_VMM_CALL:          VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
    9045             case DBGFEVENT_INSTR_VMX_VMCLEAR:       VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
    9046             case DBGFEVENT_INSTR_VMX_VMLAUNCH:      VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
    9047             case DBGFEVENT_INSTR_VMX_VMPTRLD:       VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
    9048             case DBGFEVENT_INSTR_VMX_VMPTRST:       VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
    9049             case DBGFEVENT_INSTR_VMX_VMREAD:        VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
    9050             case DBGFEVENT_INSTR_VMX_VMRESUME:      VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
    9051             case DBGFEVENT_INSTR_VMX_VMWRITE:       VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
    9052             case DBGFEVENT_INSTR_VMX_VMXOFF:        VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
    9053             case DBGFEVENT_INSTR_VMX_VMXON:         VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
    9054             case DBGFEVENT_INSTR_VMX_INVEPT:        VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
    9055             case DBGFEVENT_INSTR_VMX_INVVPID:       VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
    9056             case DBGFEVENT_INSTR_VMX_INVPCID:       VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
    9057             case DBGFEVENT_INSTR_VMX_VMFUNC:        VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
    9058             default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
    9059         }
    9060         switch (enmEvent2)
    9061         {
    9062             /** @todo consider which extra parameters would be helpful for each probe. */
    9063             case DBGFEVENT_END: break;
    9064             case DBGFEVENT_EXIT_TASK_SWITCH:        VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
    9065             case DBGFEVENT_EXIT_CPUID:              VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
    9066             case DBGFEVENT_EXIT_GETSEC:             VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
    9067             case DBGFEVENT_EXIT_HALT:               VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
    9068             case DBGFEVENT_EXIT_INVD:               VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
    9069             case DBGFEVENT_EXIT_INVLPG:             VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
    9070             case DBGFEVENT_EXIT_RDPMC:              VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
    9071             case DBGFEVENT_EXIT_RDTSC:              VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
    9072             case DBGFEVENT_EXIT_RSM:                VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
    9073             case DBGFEVENT_EXIT_CRX_READ:           VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9074             case DBGFEVENT_EXIT_CRX_WRITE:          VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9075             case DBGFEVENT_EXIT_DRX_READ:           VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9076             case DBGFEVENT_EXIT_DRX_WRITE:          VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
    9077             case DBGFEVENT_EXIT_RDMSR:              VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
    9078             case DBGFEVENT_EXIT_WRMSR:              VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
    9079                                                                        RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
    9080             case DBGFEVENT_EXIT_MWAIT:              VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
    9081             case DBGFEVENT_EXIT_MONITOR:            VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
    9082             case DBGFEVENT_EXIT_PAUSE:              VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
    9083             case DBGFEVENT_EXIT_SGDT:               VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
    9084             case DBGFEVENT_EXIT_SIDT:               VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
    9085             case DBGFEVENT_EXIT_LGDT:               VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
    9086             case DBGFEVENT_EXIT_LIDT:               VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
    9087             case DBGFEVENT_EXIT_SLDT:               VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
    9088             case DBGFEVENT_EXIT_STR:                VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
    9089             case DBGFEVENT_EXIT_LLDT:               VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
    9090             case DBGFEVENT_EXIT_LTR:                VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
    9091             case DBGFEVENT_EXIT_RDTSCP:             VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
    9092             case DBGFEVENT_EXIT_WBINVD:             VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
    9093             case DBGFEVENT_EXIT_XSETBV:             VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
    9094             case DBGFEVENT_EXIT_RDRAND:             VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
    9095             case DBGFEVENT_EXIT_RDSEED:             VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
    9096             case DBGFEVENT_EXIT_XSAVES:             VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
    9097             case DBGFEVENT_EXIT_XRSTORS:            VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
    9098             case DBGFEVENT_EXIT_VMM_CALL:           VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
    9099             case DBGFEVENT_EXIT_VMX_VMCLEAR:        VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
    9100             case DBGFEVENT_EXIT_VMX_VMLAUNCH:       VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
    9101             case DBGFEVENT_EXIT_VMX_VMPTRLD:        VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
    9102             case DBGFEVENT_EXIT_VMX_VMPTRST:        VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
    9103             case DBGFEVENT_EXIT_VMX_VMREAD:         VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
    9104             case DBGFEVENT_EXIT_VMX_VMRESUME:       VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
    9105             case DBGFEVENT_EXIT_VMX_VMWRITE:        VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
    9106             case DBGFEVENT_EXIT_VMX_VMXOFF:         VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
    9107             case DBGFEVENT_EXIT_VMX_VMXON:          VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
    9108             case DBGFEVENT_EXIT_VMX_INVEPT:         VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
    9109             case DBGFEVENT_EXIT_VMX_INVVPID:        VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
    9110             case DBGFEVENT_EXIT_VMX_INVPCID:        VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
    9111             case DBGFEVENT_EXIT_VMX_VMFUNC:         VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
    9112             case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG:  VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
    9113             case DBGFEVENT_EXIT_VMX_EPT_VIOLATION:  VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
    9114             case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS:   VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
    9115             case DBGFEVENT_EXIT_VMX_VAPIC_WRITE:    VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
    9116             default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
    9117         }
    9118     }
    9119 
    9120     /*
    9121      * Fire off the DBGF event, if enabled (our check here is just a quick one,
    9122      * the DBGF call will do a full check).
    9123      *
    9124      * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
    9125      * Note! If we have two events, we prioritize the first, i.e. the instruction
    9126      *       one, in order to avoid event nesting.
    9127      */
    9128     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    9129     if (   enmEvent1 != DBGFEVENT_END
    9130         && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
    9131     {
    9132         vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    9133         VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
    9134         if (rcStrict != VINF_SUCCESS)
    9135             return rcStrict;
    9136     }
    9137     else if (   enmEvent2 != DBGFEVENT_END
    9138              && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
    9139     {
    9140         vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    9141         VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
    9142         if (rcStrict != VINF_SUCCESS)
    9143             return rcStrict;
    9144     }
    9145 
    9146     return VINF_SUCCESS;
    9147 }
    9148 
    9149 
    9150 /**
    9151  * Single-stepping VM-exit filtering.
    9152  *
    9153  * This is preprocessing the VM-exits and deciding whether we've gotten far
    9154  * enough to return VINF_EM_DBG_STEPPED already.  If not, normal VM-exit
    9155  * handling is performed.
    9156  *
    9157  * @returns Strict VBox status code (i.e. informational status codes too).
    9158  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    9159  * @param   pVmxTransient   The VMX-transient structure.
    9160  * @param   pDbgState       The debug state.
    9161  */
    9162 DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    9163 {
    9164     /*
    9165      * Expensive (saves context) generic dtrace VM-exit probe.
    9166      */
    9167     uint32_t const uExitReason = pVmxTransient->uExitReason;
    9168     if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
    9169     { /* more likely */ }
    9170     else
    9171     {
    9172         vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    9173         int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    9174         AssertRC(rc);
    9175         VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
    9176     }
    9177 
    9178 #ifdef IN_RING0 /* NMIs should never reach R3. */
    9179     /*
    9180      * Check for host NMI, just to get that out of the way.
    9181      */
    9182     if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
    9183     { /* normally likely */ }
    9184     else
    9185     {
    9186         vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
    9187         uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
    9188         if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
    9189             return vmxHCExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
    9190     }
    9191 #endif
    9192 
    9193     /*
    9194      * Check for single stepping event if we're stepping.
    9195      */
    9196     if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    9197     {
    9198         switch (uExitReason)
    9199         {
    9200             case VMX_EXIT_MTF:
    9201                 return vmxHCExitMtf(pVCpu, pVmxTransient);
    9202 
    9203             /* Various events: */
    9204             case VMX_EXIT_XCPT_OR_NMI:
    9205             case VMX_EXIT_EXT_INT:
    9206             case VMX_EXIT_TRIPLE_FAULT:
    9207             case VMX_EXIT_INT_WINDOW:
    9208             case VMX_EXIT_NMI_WINDOW:
    9209             case VMX_EXIT_TASK_SWITCH:
    9210             case VMX_EXIT_TPR_BELOW_THRESHOLD:
    9211             case VMX_EXIT_APIC_ACCESS:
    9212             case VMX_EXIT_EPT_VIOLATION:
    9213             case VMX_EXIT_EPT_MISCONFIG:
    9214             case VMX_EXIT_PREEMPT_TIMER:
    9215 
    9216             /* Instruction specific VM-exits: */
    9217             case VMX_EXIT_CPUID:
    9218             case VMX_EXIT_GETSEC:
    9219             case VMX_EXIT_HLT:
    9220             case VMX_EXIT_INVD:
    9221             case VMX_EXIT_INVLPG:
    9222             case VMX_EXIT_RDPMC:
    9223             case VMX_EXIT_RDTSC:
    9224             case VMX_EXIT_RSM:
    9225             case VMX_EXIT_VMCALL:
    9226             case VMX_EXIT_VMCLEAR:
    9227             case VMX_EXIT_VMLAUNCH:
    9228             case VMX_EXIT_VMPTRLD:
    9229             case VMX_EXIT_VMPTRST:
    9230             case VMX_EXIT_VMREAD:
    9231             case VMX_EXIT_VMRESUME:
    9232             case VMX_EXIT_VMWRITE:
    9233             case VMX_EXIT_VMXOFF:
    9234             case VMX_EXIT_VMXON:
    9235             case VMX_EXIT_MOV_CRX:
    9236             case VMX_EXIT_MOV_DRX:
    9237             case VMX_EXIT_IO_INSTR:
    9238             case VMX_EXIT_RDMSR:
    9239             case VMX_EXIT_WRMSR:
    9240             case VMX_EXIT_MWAIT:
    9241             case VMX_EXIT_MONITOR:
    9242             case VMX_EXIT_PAUSE:
    9243             case VMX_EXIT_GDTR_IDTR_ACCESS:
    9244             case VMX_EXIT_LDTR_TR_ACCESS:
    9245             case VMX_EXIT_INVEPT:
    9246             case VMX_EXIT_RDTSCP:
    9247             case VMX_EXIT_INVVPID:
    9248             case VMX_EXIT_WBINVD:
    9249             case VMX_EXIT_XSETBV:
    9250             case VMX_EXIT_RDRAND:
    9251             case VMX_EXIT_INVPCID:
    9252             case VMX_EXIT_VMFUNC:
    9253             case VMX_EXIT_RDSEED:
    9254             case VMX_EXIT_XSAVES:
    9255             case VMX_EXIT_XRSTORS:
    9256             {
    9257                 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    9258                 AssertRCReturn(rc, rc);
    9259                 if (   pVCpu->cpum.GstCtx.rip    != pDbgState->uRipStart
    9260                     || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
    9261                     return VINF_EM_DBG_STEPPED;
    9262                 break;
    9263             }
    9264 
    9265             /* Errors and unexpected events: */
    9266             case VMX_EXIT_INIT_SIGNAL:
    9267             case VMX_EXIT_SIPI:
    9268             case VMX_EXIT_IO_SMI:
    9269             case VMX_EXIT_SMI:
    9270             case VMX_EXIT_ERR_INVALID_GUEST_STATE:
    9271             case VMX_EXIT_ERR_MSR_LOAD:
    9272             case VMX_EXIT_ERR_MACHINE_CHECK:
    9273             case VMX_EXIT_PML_FULL:
    9274             case VMX_EXIT_VIRTUALIZED_EOI:
    9275             case VMX_EXIT_APIC_WRITE:  /* Some talk about this being fault-like, so I guess we must process it? */
    9276                 break;
    9277 
    9278             default:
    9279                 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
    9280                 break;
    9281         }
    9282     }
    9283 
    9284     /*
    9285      * Check for debugger event breakpoints and dtrace probes.
    9286      */
    9287     if (   uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
    9288         && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
    9289     {
    9290         VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
    9291         if (rcStrict != VINF_SUCCESS)
    9292             return rcStrict;
    9293     }
    9294 
    9295     /*
    9296      * Normal processing.
    9297      */
    9298 #ifdef HMVMX_USE_FUNCTION_TABLE
    9299     return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
    9300 #else
    9301     return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
    9302 #endif
    9303 }
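For illustration, the "normal processing" step above dispatches through g_aVMExitHandlers when HMVMX_USE_FUNCTION_TABLE is defined, and through a switch in vmxHCHandleExit otherwise. The table variant boils down to the following minimal, self-contained C sketch; DemoHandler, g_aDemoHandlers and the demoHandle* names are hypothetical stand-ins, not VirtualBox APIs:

    #include <stdio.h>

    /* Hypothetical handler type mirroring the role of PFNVMXEXITHANDLER. */
    typedef int (*DemoHandler)(unsigned uExitReason);

    static int demoHandleCpuid(unsigned uExit)      { printf("CPUID exit (%u)\n", uExit); return 0; }
    static int demoHandleHlt(unsigned uExit)        { printf("HLT exit (%u)\n", uExit);   return 0; }
    static int demoHandleUnexpected(unsigned uExit) { printf("unexpected exit %u\n", uExit); return -1; }

    /* One entry per exit reason; holes share a catch-all handler, the same
       pattern the real table uses with hmR0VmxExitErrUnexpected. */
    static const DemoHandler g_aDemoHandlers[] =
    {
        /* 0 */ demoHandleCpuid,
        /* 1 */ demoHandleHlt,
        /* 2 */ demoHandleUnexpected,
    };

    int main(void)
    {
        unsigned const uExitReason = 1;
        if (uExitReason < sizeof(g_aDemoHandlers) / sizeof(g_aDemoHandlers[0]))
            return g_aDemoHandlers[uExitReason](uExitReason); /* O(1) dispatch, no switch */
        return demoHandleUnexpected(uExitReason);
    }

The table trades a little read-only data for constant-time dispatch; the switch variant instead gives the compiler a chance to inline handlers.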
    9304 
    9305 
    9306 /**
    9307  * Single steps guest code using hardware-assisted VMX.
    9308  *
    9309  * This is -not- the same as the guest single-stepping itself (say using EFLAGS.TF)
    9310  * but single-stepping through the hypervisor debugger.
    9311  *
    9312  * @returns Strict VBox status code (i.e. informational status codes too).
    9313  * @param   pVCpu       The cross context virtual CPU structure.
    9314  * @param   pcLoops     Pointer to the number of executed loops.
    9315  *
    9316  * @note    Mostly the same as vmxHCRunGuestCodeNormal().
    9317  */
    9318 static VBOXSTRICTRC vmxHCRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops)
    9319 {
    9320     uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
    9321     Assert(pcLoops);
    9322     Assert(*pcLoops <= cMaxResumeLoops);
    9323 
    9324     VMXTRANSIENT VmxTransient;
    9325     RT_ZERO(VmxTransient);
    9326     VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    9327 
    9328     /* Set HMCPU indicators.  */
    9329     bool const fSavedSingleInstruction = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
    9330     VCPU_2_VMXSTATE(pVCpu).fSingleInstruction     = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction || DBGFIsStepping(pVCpu);
    9331     pVCpu->hmr0.s.fDebugWantRdTscExit    = false;
    9332     pVCpu->hmr0.s.fUsingDebugLoop        = true;
    9333 
    9334     /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps.  */
    9335     VMXRUNDBGSTATE DbgState;
    9336     vmxHCRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);
    9337     vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
    9338 
    9339     /*
    9340      * The loop.
    9341      */
    9342     VBOXSTRICTRC rcStrict  = VERR_INTERNAL_ERROR_5;
    9343     for (;;)
    9344     {
    9345         Assert(!HMR0SuspendPending());
    9346         HMVMX_ASSERT_CPU_SAFE(pVCpu);
    9347         STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    9348         bool fStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
    9349 
    9350         /* Set up VM-execution controls the next two can respond to. */
    9351         vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
    9352 
    9353         /*
    9354          * Preparatory work for running guest code, this may force us to
    9355          * return to ring-3.
    9356          *
    9357          * Warning! This bugger disables interrupts on VINF_SUCCESS!
    9358          */
    9359         rcStrict = vmxHCPreRunGuest(pVCpu, &VmxTransient, fStepping);
    9360         if (rcStrict != VINF_SUCCESS)
    9361             break;
    9362 
    9363         /* Interrupts are disabled at this point! */
    9364         vmxHCPreRunGuestCommitted(pVCpu, &VmxTransient);
    9365 
    9366         /* Override any obnoxious code in the above two calls. */
    9367         vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
    9368 
    9369         /*
    9370          * Finally execute the guest.
    9371          */
    9372         int rcRun = vmxHCRunGuest(pVCpu, &VmxTransient);
    9373 
    9374         vmxHCPostRunGuest(pVCpu, &VmxTransient, rcRun);
    9375         /* Interrupts are re-enabled at this point! */
    9376 
    9377         /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
    9378         if (RT_SUCCESS(rcRun))
    9379         { /* very likely */ }
    9380         else
    9381         {
    9382             STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, x);
    9383             vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    9384             return rcRun;
    9385         }
    9386 
    9387         /* Profile the VM-exit. */
    9388         AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    9389         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitAll);
    9390         STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    9391         STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATS(pVCpu).StatPreExit, &VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    9392         HMVMX_START_EXIT_DISPATCH_PROF();
    9393 
    9394         VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
    9395 
    9396         /*
    9397          * Handle the VM-exit - we quit earlier on certain VM-exits, see vmxHCHandleExitDebug().
    9398          */
    9399         rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
    9400         STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatExitHandling, x);
    9401         if (rcStrict != VINF_SUCCESS)
    9402             break;
    9403         if (++(*pcLoops) > cMaxResumeLoops)
    9404         {
    9405             STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatSwitchMaxResumeLoops);
    9406             rcStrict = VINF_EM_RAW_INTERRUPT;
    9407             break;
    9408         }
    9409 
    9410         /*
    9411          * Stepping: Did the RIP change? If so, consider it a single step.
    9412          * Otherwise, make sure one of the TFs gets set.
    9413          */
    9414         if (fStepping)
    9415         {
    9416             int rc = vmxHCImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    9417             AssertRC(rc);
    9418             if (   pVCpu->cpum.GstCtx.rip    != DbgState.uRipStart
    9419                 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
    9420             {
    9421                 rcStrict = VINF_EM_DBG_STEPPED;
    9422                 break;
    9423             }
    9424             ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
    9425         }
    9426 
    9427         /*
    9428          * Update when the dtrace settings change (DBGF kicks us, so no need to check).
    9429          */
    9430         if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
    9431             vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
    9432 
    9433         /* Restore all controls applied by vmxHCPreRunGuestDebugStateApply above. */
    9434         rcStrict = vmxHCRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
    9435         Assert(rcStrict == VINF_SUCCESS);
    9436     }
    9437 
    9438     /*
    9439      * Clear the X86_EFL_TF if necessary.
    9440      */
    9441     if (pVCpu->hmr0.s.fClearTrapFlag)
    9442     {
    9443         int rc = vmxHCImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
    9444         AssertRC(rc);
    9445         pVCpu->hmr0.s.fClearTrapFlag = false;
    9446         pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
    9447     }
    9448     /** @todo there seem to be issues with the resume flag when the monitor trap
    9449      *        flag is pending without being used. Seen early in BIOS init when
    9450      *        accessing APIC page in protected mode. */
    9451 
    9452     /* Restore HMCPU indicators. */
    9453     pVCpu->hmr0.s.fUsingDebugLoop     = false;
    9454     pVCpu->hmr0.s.fDebugWantRdTscExit = false;
    9455     VCPU_2_VMXSTATE(pVCpu).fSingleInstruction  = fSavedSingleInstruction;
    9456 
    9457     STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATS(pVCpu).StatEntry, x);
    9458     return rcStrict;
    9459 }
    9460 #endif
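Both step checks above (in the exit handler and again in the run loop) use the same idea: record RIP and CS when the step is armed, then report VINF_EM_DBG_STEPPED once either has moved. A minimal sketch of that comparison under hypothetical names (DEMODBGSTATE, demoStepCompleted), standing in for the uRipStart/uCsStart fields of VMXRUNDBGSTATE:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the step-start fields of VMXRUNDBGSTATE. */
    typedef struct DEMODBGSTATE
    {
        uint64_t uRipStart; /* guest RIP when the step was armed */
        uint16_t uCsStart;  /* guest CS selector when the step was armed */
    } DEMODBGSTATE;

    /* True once the guest has left the starting instruction: either the RIP
       moved or a far transfer changed CS (mirrors the checks above). */
    static bool demoStepCompleted(const DEMODBGSTATE *pDbg, uint64_t uRip, uint16_t uCs)
    {
        return uRip != pDbg->uRipStart || uCs != pDbg->uCsStart;
    }

    int main(void)
    {
        DEMODBGSTATE Dbg = { 0x1000, 0x08 };
        printf("same insn: %d\n", demoStepCompleted(&Dbg, 0x1000, 0x08)); /* 0 */
        printf("rip moved: %d\n", demoStepCompleted(&Dbg, 0x1002, 0x08)); /* 1 */
        return 0;
    }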
    9461 
    94625267/** @} */
    94635268
     
    97195524
    97205525#ifdef VBOX_STRICT
    9721 # ifdef IN_RING0
     5526# ifndef IN_NEM_DARWIN
 97225527/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
    97235528# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
     
    104706275            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
    104716276            STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitCR4Write);
    10472 #ifdef IN_RING0
     6277#ifndef IN_NEM_DARWIN
    104736278            Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
    104746279                      pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
     
    105086313    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    105096314
    10510 #ifdef IN_RING0
     6315#ifndef IN_NEM_DARWIN
    105116316    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    105126317    if (!VM_IS_VMX_NESTED_PAGING(pVM))
     
    105156320#endif
    105166321    {
    10517 #if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && defined(IN_RING0)
     6322#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && !defined(IN_NEM_DARWIN)
    105186323        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
    105196324#endif
     
    106916496        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    106926497        if (   !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
    10693 #ifdef IN_RING0
     6498#ifndef IN_NEM_DARWIN
    106946499            && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
    106956500#endif
     
    108166621         * See Intel spec. 27.1 "Architectural State before a VM-Exit".
    108176622         */
    10818 #ifdef IN_RING0
     6623#ifndef IN_NEM_DARWIN
    108196624        VMMRZCallRing3Disable(pVCpu);
    108206625        HM_DISABLE_PREEMPT(pVCpu);
     
    109516756    PCPUMCTX            pCtx            = &pVCpu->cpum.GstCtx;
    109526757    PVMXVMCSINFO        pVmcsInfo       = pVmxTransient->pVmcsInfo;
    10953 #ifdef IN_RING0
     6758#ifndef IN_NEM_DARWIN
    109546759    PVMXVMCSINFOSHARED  pVmcsInfoShared = pVmcsInfo->pShared;
    109556760    if (pVmcsInfoShared->RealMode.fRealOnV86Active)
     
    109596764    {
    109606765#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    10961 # ifdef IN_RING0
     6766# ifndef IN_NEM_DARWIN
    109626767        Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
    109636768# else
     
    109846789    }
    109856790
    10986 #ifdef IN_RING0
     6791#ifndef IN_NEM_DARWIN
    109876792    Assert(CPUMIsGuestInRealModeEx(pCtx));
    109886793    Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
     
    110406845
    110416846#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    11042 # ifdef IN_RING0
     6847# ifndef IN_NEM_DARWIN
    110436848    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    110446849    AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
     
    111676972    STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitExtInt);
    111686973
    11169 #ifdef IN_RING0
     6974#ifndef IN_NEM_DARWIN
    111706975    /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
    111716976    if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
     
    112017006    switch (uExitIntType)
    112027007    {
    11203 #ifdef IN_RING0 /* NMIs should never reach R3. */
     7008#ifndef IN_NEM_DARWIN /* NMIs should never reach R3. */
    112047009        /*
    112057010         * Host physical NMIs:
     
    112137018        case VMX_EXIT_INT_INFO_TYPE_NMI:
    112147019        {
    11215             rcStrict = vmxHCExitHostNmi(pVCpu, pVmcsInfo);
     7020            rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
    112167021            break;
    112177022        }
     
    115487353{
    115497354    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11550 #ifdef IN_RING0
     7355#ifndef IN_NEM_DARWIN
    115517356    Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
    115527357#endif
     
    117037508                                                                                : HM_CHANGED_RAISED_XCPT_MASK);
    117047509
    11705 #ifdef IN_RING0
     7510#ifndef IN_NEM_DARWIN
    117067511    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    117077512    bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
     
    117097514    {
    117107515        pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    11711         vmxHCUpdateStartVmFunction(pVCpu);
     7516        hmR0VmxUpdateStartVmFunction(pVCpu);
    117127517    }
    117137518#endif
     
    118017606    rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val);             AssertRC(rc);
    118027607    Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW              %#RX64\n", u64Val));
    11803 # ifdef IN_RING0
     7608# ifndef IN_NEM_DARWIN
    118047609    if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
    118057610    {
     
    119297734    Log4Func(("ecx=%#RX32\n", idMsr));
    119307735
    11931 #if defined(VBOX_STRICT) && defined(IN_RING0)
     7736#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
    119327737    Assert(!pVmxTransient->fIsNestedGuest);
    119337738    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    119347739    {
    11935         if (   vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
     7740        if (   hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
    119367741            && idMsr != MSR_K6_EFER)
    119377742        {
     
    119397744            HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
    119407745        }
    11941         if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
     7746        if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    119427747        {
    119437748            Assert(pVmcsInfo->pvMsrBitmap);
     
    120467851                default:
    120477852                {
    12048 #ifdef IN_RING0
    12049                     if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
     7853#ifndef IN_NEM_DARWIN
     7854                    if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    120507855                        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
    12051                     else if (vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
     7856                    else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
    120527857                        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    120537858#else
     
    120587863            }
    120597864        }
    12060 #if defined(VBOX_STRICT) && defined(IN_RING0)
     7865#if defined(VBOX_STRICT) && !defined(IN_NEM_DARWIN)
    120617866        else
    120627867        {
     
    120777882                default:
    120787883                {
    12079                     if (vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
     7884                    if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
    120807885                    {
    120817886                        /* EFER MSR writes are always intercepted. */
     
    120887893                    }
    120897894
    12090                     if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
     7895                    if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    120917896                    {
    120927897                        Assert(pVmcsInfo->pvMsrBitmap);
     
    121937998
    121947999            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    12195 #ifdef IN_RING0
     8000#ifndef IN_NEM_DARWIN
    121968001            uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
    121978002#endif
     
    122058010             *   - We are executing in the VM debug loop.
    122068011             */
    12207 #ifdef IN_RING0
     8012#ifndef IN_NEM_DARWIN
    122088013            Assert(   iCrReg != 3
    122098014                   || !VM_IS_VMX_NESTED_PAGING(pVM)
     
    122238028                      || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    122248029
    12225 #ifdef IN_RING0
     8030#ifndef IN_NEM_DARWIN
    122268031            /*
    122278032             * This is a kludge for handling switches back to real mode when we try to use
     
    122658070             *   - We are executing in the VM debug loop.
    122668071             */
    12267 #ifdef IN_RING0
     8072#ifndef IN_NEM_DARWIN
    122688073            Assert(   iCrReg != 3
    122698074                   || !VM_IS_VMX_NESTED_PAGING(pVM)
     
    124328237                rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
    124338238                STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitIOWrite);
    12434 #ifdef IN_RING0
     8239#ifndef IN_NEM_DARWIN
    124358240                if (    rcStrict == VINF_IOM_R3_IOPORT_WRITE
    124368241                    && !pCtx->eflags.Bits.u1TF)
     
    124478252                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
    124488253                }
    12449 #ifdef IN_RING0
     8254#ifndef IN_NEM_DARWIN
    124508255                if (    rcStrict == VINF_IOM_R3_IOPORT_READ
    124518256                    && !pCtx->eflags.Bits.u1TF)
     
    124918296                STAM_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatDRxIoCheck);
    124928297
    12493 #ifdef IN_RING0
     8298#ifndef IN_NEM_DARWIN
    124948299                /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
    124958300                VMMRZCallRing3Disable(pVCpu);
     
    126798484    switch (uAccessType)
    126808485    {
    12681 #ifdef IN_RING0
     8486#ifndef IN_NEM_DARWIN
    126828487        case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
    126838488        case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
     
    127588563            AssertRC(rc);
    127598564
    12760 #ifdef IN_RING0
     8565#ifndef IN_NEM_DARWIN
    127618566            /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
    127628567            VMMRZCallRing3Disable(pVCpu);
     
    128338638    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    128348639
    12835 #ifdef IN_RING0
     8640#ifndef IN_NEM_DARWIN
    128368641    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
    128378642
     
    129468751{
    129478752    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    12948 #ifdef IN_RING0
     8753#ifndef IN_NEM_DARWIN
    129498754    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
    129508755
     
    134739278    switch (uExitIntType)
    134749279    {
     9280#ifndef IN_NEM_DARWIN
    134759281        /*
    134769282         * Physical NMIs:
     
    134789284         */
    134799285        case VMX_EXIT_INT_INFO_TYPE_NMI:
    13480             return vmxHCExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
     9286            return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
     9287#endif
    134819288
    134829289        /*
     
    1422310030    {
    1422410031        Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
    14225         vmxHCReadExitInstrLenVmcs(ppVCpu, VmxTransient);
     10032        vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    1422610033        vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    1422710034        vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r93115 r93132  
    4040#include <VBox/vmm/hmvmxinline.h>
    4141#include "HMVMXR0.h"
     42#include "VMXInternal.h"
    4243#include "dtrace/VBoxVMM.h"
    4344
     
    5960*   Defined Constants And Macros                                                                                                 *
    6061*********************************************************************************************************************************/
    61 /** Use the function table. */
    62 #define HMVMX_USE_FUNCTION_TABLE
    63 
    64 /** Determine which tagged-TLB flush handler to use. */
    65 #define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID             0
    66 #define HMVMX_FLUSH_TAGGED_TLB_EPT                  1
    67 #define HMVMX_FLUSH_TAGGED_TLB_VPID                 2
    68 #define HMVMX_FLUSH_TAGGED_TLB_NONE                 3
    69 
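The four HMVMX_FLUSH_TAGGED_TLB_* values removed above pick a tagged-TLB flush strategy from the host's EPT/VPID capabilities. The selection logic itself is not part of this hunk, so the following is only a plausible sketch; the fEpt/fVpid parameters are hypothetical stand-ins for the real capability checks:

    #include <stdio.h>

    /* Hypothetical selector; the return values match the constants above. */
    static int demoPickTaggedTlbFlushType(int fEpt, int fVpid)
    {
        if (fEpt && fVpid)
            return 0;   /* HMVMX_FLUSH_TAGGED_TLB_EPT_VPID */
        if (fEpt)
            return 1;   /* HMVMX_FLUSH_TAGGED_TLB_EPT */
        if (fVpid)
            return 2;   /* HMVMX_FLUSH_TAGGED_TLB_VPID */
        return 3;       /* HMVMX_FLUSH_TAGGED_TLB_NONE */
    }

    int main(void)
    {
        printf("EPT+VPID -> %d\n", demoPickTaggedTlbFlushType(1, 1));
        return 0;
    }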
    70 /**
    71  * Flags to skip redundant reads of some common VMCS fields that are not part of
    72  * the guest-CPU or VCPU state but are needed while handling VM-exits.
    73  */
    74 #define HMVMX_READ_IDT_VECTORING_INFO               RT_BIT_32(0)
    75 #define HMVMX_READ_IDT_VECTORING_ERROR_CODE         RT_BIT_32(1)
    76 #define HMVMX_READ_EXIT_QUALIFICATION               RT_BIT_32(2)
    77 #define HMVMX_READ_EXIT_INSTR_LEN                   RT_BIT_32(3)
    78 #define HMVMX_READ_EXIT_INTERRUPTION_INFO           RT_BIT_32(4)
    79 #define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE     RT_BIT_32(5)
    80 #define HMVMX_READ_EXIT_INSTR_INFO                  RT_BIT_32(6)
    81 #define HMVMX_READ_GUEST_LINEAR_ADDR                RT_BIT_32(7)
    82 #define HMVMX_READ_GUEST_PHYSICAL_ADDR              RT_BIT_32(8)
    83 #define HMVMX_READ_GUEST_PENDING_DBG_XCPTS          RT_BIT_32(9)
    84 
    85 /** All the VMCS fields required for processing of exception/NMI VM-exits. */
    86 #define HMVMX_READ_XCPT_INFO         (  HMVMX_READ_EXIT_INTERRUPTION_INFO        \
    87                                       | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE  \
    88                                       | HMVMX_READ_EXIT_INSTR_LEN                \
    89                                       | HMVMX_READ_IDT_VECTORING_INFO            \
    90                                       | HMVMX_READ_IDT_VECTORING_ERROR_CODE)
    91 
    92 /** Assert that all the given fields have been read from the VMCS. */
    93 #ifdef VBOX_STRICT
    94 # define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
    95         do { \
    96             uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead); \
    97             Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
    98         } while (0)
    99 #else
    100 # define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
    101 #endif
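The HMVMX_READ_XXX bits and HMVMX_ASSERT_READ above implement a fetch-once cache: each VMCS field is VMREAD once per VM-exit, its bit is set in fVmcsFieldsRead, and later users take the cached copy from the transient structure instead of issuing another VMREAD. A self-contained sketch of the pattern; DEMOTRANSIENT and the demo* helpers are hypothetical, with a plain function standing in for the VMREAD:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_READ_EXIT_QUALIFICATION  (1u << 0)  /* mirrors HMVMX_READ_EXIT_QUALIFICATION */

    typedef struct DEMOTRANSIENT
    {
        uint32_t fFieldsRead;   /* bitmask of fields already fetched this exit */
        uint64_t uExitQual;     /* cached value */
    } DEMOTRANSIENT;

    /* Stand-in for a VMREAD; in real code this goes to the CPU. */
    static uint64_t demoVmreadExitQual(void) { puts("VMREAD issued"); return 0x42; }

    /* Fetch-once accessor: the second and later calls are served from the cache. */
    static uint64_t demoGetExitQual(DEMOTRANSIENT *pTransient)
    {
        if (!(pTransient->fFieldsRead & DEMO_READ_EXIT_QUALIFICATION))
        {
            pTransient->uExitQual    = demoVmreadExitQual();
            pTransient->fFieldsRead |= DEMO_READ_EXIT_QUALIFICATION;
        }
        return pTransient->uExitQual;
    }

    int main(void)
    {
        DEMOTRANSIENT Transient = { 0, 0 };
        demoGetExitQual(&Transient);   /* issues the (stand-in) VMREAD */
        demoGetExitQual(&Transient);   /* free: served from the cache */
        return 0;
    }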
    102 
    103 /**
    104  * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
    105  * guest using hardware-assisted VMX.
    106  *
    107  * This excludes state like GPRs (other than RSP) which are always
    108  * swapped and restored across the world-switch, and also registers like the
    109  * EFER MSR which cannot be modified by the guest without causing a VM-exit.
    110  */
    111 #define HMVMX_CPUMCTX_EXTRN_ALL      (  CPUMCTX_EXTRN_RIP             \
    112                                       | CPUMCTX_EXTRN_RFLAGS          \
    113                                       | CPUMCTX_EXTRN_RSP             \
    114                                       | CPUMCTX_EXTRN_SREG_MASK       \
    115                                       | CPUMCTX_EXTRN_TABLE_MASK      \
    116                                       | CPUMCTX_EXTRN_KERNEL_GS_BASE  \
    117                                       | CPUMCTX_EXTRN_SYSCALL_MSRS    \
    118                                       | CPUMCTX_EXTRN_SYSENTER_MSRS   \
    119                                       | CPUMCTX_EXTRN_TSC_AUX         \
    120                                       | CPUMCTX_EXTRN_OTHER_MSRS      \
    121                                       | CPUMCTX_EXTRN_CR0             \
    122                                       | CPUMCTX_EXTRN_CR3             \
    123                                       | CPUMCTX_EXTRN_CR4             \
    124                                       | CPUMCTX_EXTRN_DR7             \
    125                                       | CPUMCTX_EXTRN_HWVIRT          \
    126                                       | CPUMCTX_EXTRN_INHIBIT_INT     \
    127                                       | CPUMCTX_EXTRN_INHIBIT_NMI)
    128 
    129 /**
    130  * Exception bitmap mask for real-mode guests (real-on-v86).
    131  *
    132  * We need to intercept all exceptions manually except:
    133  * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
    134  *   due to bugs in Intel CPUs.
    135  * - \#PF need not be intercepted even in real-mode if we have nested paging
    136  * support.
    137  */
    138 #define HMVMX_REAL_MODE_XCPT_MASK    (  RT_BIT(X86_XCPT_DE)  /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI)   \
    139                                       | RT_BIT(X86_XCPT_BP)             | RT_BIT(X86_XCPT_OF)    | RT_BIT(X86_XCPT_BR)    \
    140                                       | RT_BIT(X86_XCPT_UD)             | RT_BIT(X86_XCPT_NM)    | RT_BIT(X86_XCPT_DF)    \
    141                                       | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS)    | RT_BIT(X86_XCPT_NP)    \
    142                                       | RT_BIT(X86_XCPT_SS)             | RT_BIT(X86_XCPT_GP)   /* RT_BIT(X86_XCPT_PF) */ \
    143                                       | RT_BIT(X86_XCPT_MF)  /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC)    \
    144                                       | RT_BIT(X86_XCPT_XF))
    145 
    146 /** Maximum VM-instruction error number. */
    147 #define HMVMX_INSTR_ERROR_MAX        28
    148 
    149 /** Profiling macro. */
    150 #ifdef HM_PROFILE_EXIT_DISPATCH
    151 # define HMVMX_START_EXIT_DISPATCH_PROF()           STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
    152 # define HMVMX_STOP_EXIT_DISPATCH_PROF()            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
    153 #else
    154 # define HMVMX_START_EXIT_DISPATCH_PROF()           do { } while (0)
    155 # define HMVMX_STOP_EXIT_DISPATCH_PROF()            do { } while (0)
    156 #endif
    157 
    158 /** Assert that preemption is disabled or covered by thread-context hooks. */
    159 #define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)          Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu))   \
    160                                                            || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    161 
    162 /** Assert that we haven't migrated CPUs when thread-context hooks are not
    163  *  used. */
    164 #define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)              AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
    165                                                               || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
    166                                                               ("Illegal migration! Entered on CPU %u Current %u\n", \
    167                                                               (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
    168 
    169 /** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
    170  *  context. */
    171 #define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz)  AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
    172                                                               ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
    173                                                               (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
    174 
    175 /** Log the VM-exit reason with an easily visible marker to identify it in a
    176  *  potential sea of logging data. */
    177 #define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
    178     do { \
    179         Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
    180              HMGetVmxExitName(a_uExitReason))); \
    181     } while (0) \
    18262
    18363
     
    18565*   Structures and Typedefs                                                                                                      *
    18666*********************************************************************************************************************************/
    187 /**
    188  * VMX per-VCPU transient state.
    189  *
    190  * A state structure holding miscellaneous information across
    191  * VMX non-root operation, restored after the transition.
    192  *
    193  * Note: The members are ordered and aligned such that the most
    194  * frequently used ones (in the guest execution loop) fall within
    195  * the first cache line.
    196  */
    197 typedef struct VMXTRANSIENT
    198 {
    199    /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
    200    uint32_t            fVmcsFieldsRead;
    201    /** The guest's TPR value used for TPR shadowing. */
    202    uint8_t             u8GuestTpr;
    203    uint8_t             abAlignment0[3];
    204 
    205    /** Whether the VM-exit was caused by a page-fault during delivery of an
    206     *  external interrupt or NMI. */
    207    bool                fVectoringPF;
    208    /** Whether the VM-exit was caused by a page-fault during delivery of a
    209     *  contributory exception or a page-fault. */
    210    bool                fVectoringDoublePF;
    211    /** Whether the VM-entry failed or not. */
    212    bool                fVMEntryFailed;
    213    /** Whether the TSC_AUX MSR needs to be removed from the auto-load/store MSR
    214     *  area after VM-exit. */
    215    bool                fRemoveTscAuxMsr;
    216    /** Whether TSC-offsetting and VMX-preemption timer was updated before VM-entry. */
    217    bool                fUpdatedTscOffsettingAndPreemptTimer;
    218    /** Whether we are currently executing a nested-guest. */
    219    bool                fIsNestedGuest;
    220    /** Whether the guest debug state was active at the time of VM-exit. */
    221    bool                fWasGuestDebugStateActive;
    222    /** Whether the hyper debug state was active at the time of VM-exit. */
    223    bool                fWasHyperDebugStateActive;
    224 
    225    /** The basic VM-exit reason. */
    226    uint32_t            uExitReason;
    227    /** The VM-exit interruption error code. */
    228    uint32_t            uExitIntErrorCode;
    229 
    230    /** The host's rflags/eflags. */
    231    RTCCUINTREG         fEFlags;
    232 
    233    /** The VM-exit qualification. */
    234    uint64_t            uExitQual;
    235 
    236    /** The VMCS info. object. */
    237    PVMXVMCSINFO         pVmcsInfo;
    238 
    239    /** The VM-exit interruption-information field. */
    240    uint32_t            uExitIntInfo;
    241    /** The VM-exit instruction-length field. */
    242    uint32_t            cbExitInstr;
    243 
    244    /** The VM-exit instruction-information field. */
    245    VMXEXITINSTRINFO    ExitInstrInfo;
    246    /** IDT-vectoring information field. */
    247    uint32_t            uIdtVectoringInfo;
    248 
    249    /** IDT-vectoring error code. */
    250    uint32_t            uIdtVectoringErrorCode;
    251    uint32_t            u32Alignment0;
    252 
    253    /** The Guest-linear address. */
    254    uint64_t            uGuestLinearAddr;
    255 
    256    /** The Guest-physical address. */
    257    uint64_t            uGuestPhysicalAddr;
    258 
    259    /** The Guest pending-debug exceptions. */
    260    uint64_t            uGuestPendingDbgXcpts;
    261 
    262    /** The VM-entry interruption-information field. */
    263    uint32_t            uEntryIntInfo;
    264    /** The VM-entry exception error code field. */
    265    uint32_t            uEntryXcptErrorCode;
    266 
    267    /** The VM-entry instruction length field. */
    268    uint32_t            cbEntryInstr;
    269 } VMXTRANSIENT;
    270 AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
    271 AssertCompileMemberAlignment(VMXTRANSIENT, fVmcsFieldsRead,        8);
    272 AssertCompileMemberAlignment(VMXTRANSIENT, fVectoringPF,           8);
    273 AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason,            8);
    274 AssertCompileMemberAlignment(VMXTRANSIENT, fEFlags,                8);
    275 AssertCompileMemberAlignment(VMXTRANSIENT, uExitQual,              8);
    276 AssertCompileMemberAlignment(VMXTRANSIENT, pVmcsInfo,              8);
    277 AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo,           8);
    278 AssertCompileMemberAlignment(VMXTRANSIENT, ExitInstrInfo,          8);
    279 AssertCompileMemberAlignment(VMXTRANSIENT, uIdtVectoringErrorCode, 8);
    280 AssertCompileMemberAlignment(VMXTRANSIENT, uGuestLinearAddr,       8);
    281 AssertCompileMemberAlignment(VMXTRANSIENT, uGuestPhysicalAddr,     8);
    282 AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo,          8);
    283 AssertCompileMemberAlignment(VMXTRANSIENT, cbEntryInstr,           8);
    284 /** Pointer to VMX transient state. */
    285 typedef VMXTRANSIENT *PVMXTRANSIENT;
    286 /** Pointer to a const VMX transient state. */
    287 typedef const VMXTRANSIENT *PCVMXTRANSIENT;
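The AssertCompileMemberAlignment lines above pin the hot VMXTRANSIENT members to 8-byte boundaries at build time, which is what keeps the "first cache line" promise in the structure comment honest. The same check expressed in portable C11, using a hypothetical two-member struct:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical layout with the same intent: hot fields first, aligned. */
    typedef struct DEMOTRANS
    {
        uint32_t fFieldsRead;
        uint32_t u32Padding0;
        uint64_t uExitQual;
    } DEMOTRANS;

    /* C11 equivalent of AssertCompileMemberAlignment: the build breaks the
       moment a new member pushes uExitQual off its 8-byte boundary. */
    _Static_assert(offsetof(DEMOTRANS, uExitQual) % 8 == 0,
                   "uExitQual must stay 8-byte aligned");

    int main(void) { return 0; }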
    28867
    28968/**
     
    30382AssertCompileSizeAlignment(VMXPAGEALLOCINFO, 8);
    30483
    305 /**
    306  * Memory operand read or write access.
    307  */
    308 typedef enum VMXMEMACCESS
    309 {
    310     VMXMEMACCESS_READ  = 0,
    311     VMXMEMACCESS_WRITE = 1
    312 } VMXMEMACCESS;
    313 
    314 /**
    315  * VMX VM-exit handler.
    316  *
    317  * @returns Strict VBox status code (i.e. informational status codes too).
    318  * @param   pVCpu           The cross context virtual CPU structure.
    319  * @param   pVmxTransient   The VMX-transient structure.
    320  */
    321 #ifndef HMVMX_USE_FUNCTION_TABLE
    322 typedef VBOXSTRICTRC               FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
    323 #else
    324 typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
    325 /** Pointer to VM-exit handler. */
    326 typedef FNVMXEXITHANDLER          *PFNVMXEXITHANDLER;
    327 #endif
    328 
    329 /**
    330  * VMX VM-exit handler, non-strict status code.
    331  *
    332  * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
    333  *
    334  * @returns VBox status code, no informational status code returned.
    335  * @param   pVCpu           The cross context virtual CPU structure.
    336  * @param   pVmxTransient   The VMX-transient structure.
    337  *
    338  * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
    339  *          use of that status code will be replaced with VINF_EM_SOMETHING
    340  *          later when switching over to IEM.
    341  */
    342 #ifndef HMVMX_USE_FUNCTION_TABLE
    343 typedef int                        FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
    344 #else
    345 typedef FNVMXEXITHANDLER           FNVMXEXITHANDLERNSRC;
    346 #endif
    347 
    34884
    34985/*********************************************************************************************************************************
    35086*   Internal Functions                                                                                                           *
    35187*********************************************************************************************************************************/
    352 #ifndef HMVMX_USE_FUNCTION_TABLE
    353 DECLINLINE(VBOXSTRICTRC)           hmR0VmxHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
    354 # define HMVMX_EXIT_DECL           DECLINLINE(VBOXSTRICTRC)
    355 # define HMVMX_EXIT_NSRC_DECL      DECLINLINE(int)
    356 #else
    357 # define HMVMX_EXIT_DECL           static DECLCALLBACK(VBOXSTRICTRC)
    358 # define HMVMX_EXIT_NSRC_DECL      HMVMX_EXIT_DECL
    359 #endif
    360 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    361 DECLINLINE(VBOXSTRICTRC)           hmR0VmxHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
    362 #endif
    363 
    364 static int hmR0VmxImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
    365 
    366 /** @name VM-exit handler prototypes.
    367  * @{
    368  */
    369 static FNVMXEXITHANDLER            hmR0VmxExitXcptOrNmi;
    370 static FNVMXEXITHANDLER            hmR0VmxExitExtInt;
    371 static FNVMXEXITHANDLER            hmR0VmxExitTripleFault;
    372 static FNVMXEXITHANDLERNSRC        hmR0VmxExitIntWindow;
    373 static FNVMXEXITHANDLERNSRC        hmR0VmxExitNmiWindow;
    374 static FNVMXEXITHANDLER            hmR0VmxExitTaskSwitch;
    375 static FNVMXEXITHANDLER            hmR0VmxExitCpuid;
    376 static FNVMXEXITHANDLER            hmR0VmxExitGetsec;
    377 static FNVMXEXITHANDLER            hmR0VmxExitHlt;
    378 static FNVMXEXITHANDLERNSRC        hmR0VmxExitInvd;
    379 static FNVMXEXITHANDLER            hmR0VmxExitInvlpg;
    380 static FNVMXEXITHANDLER            hmR0VmxExitRdpmc;
    381 static FNVMXEXITHANDLER            hmR0VmxExitVmcall;
    382 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    383 static FNVMXEXITHANDLER            hmR0VmxExitVmclear;
    384 static FNVMXEXITHANDLER            hmR0VmxExitVmlaunch;
    385 static FNVMXEXITHANDLER            hmR0VmxExitVmptrld;
    386 static FNVMXEXITHANDLER            hmR0VmxExitVmptrst;
    387 static FNVMXEXITHANDLER            hmR0VmxExitVmread;
    388 static FNVMXEXITHANDLER            hmR0VmxExitVmresume;
    389 static FNVMXEXITHANDLER            hmR0VmxExitVmwrite;
    390 static FNVMXEXITHANDLER            hmR0VmxExitVmxoff;
    391 static FNVMXEXITHANDLER            hmR0VmxExitVmxon;
    392 static FNVMXEXITHANDLER            hmR0VmxExitInvvpid;
    393 #endif
    394 static FNVMXEXITHANDLER            hmR0VmxExitRdtsc;
    395 static FNVMXEXITHANDLER            hmR0VmxExitMovCRx;
    396 static FNVMXEXITHANDLER            hmR0VmxExitMovDRx;
    397 static FNVMXEXITHANDLER            hmR0VmxExitIoInstr;
    398 static FNVMXEXITHANDLER            hmR0VmxExitRdmsr;
    399 static FNVMXEXITHANDLER            hmR0VmxExitWrmsr;
    400 static FNVMXEXITHANDLER            hmR0VmxExitMwait;
    401 static FNVMXEXITHANDLER            hmR0VmxExitMtf;
    402 static FNVMXEXITHANDLER            hmR0VmxExitMonitor;
    403 static FNVMXEXITHANDLER            hmR0VmxExitPause;
    404 static FNVMXEXITHANDLERNSRC        hmR0VmxExitTprBelowThreshold;
    405 static FNVMXEXITHANDLER            hmR0VmxExitApicAccess;
    406 static FNVMXEXITHANDLER            hmR0VmxExitEptViolation;
    407 static FNVMXEXITHANDLER            hmR0VmxExitEptMisconfig;
    408 static FNVMXEXITHANDLER            hmR0VmxExitRdtscp;
    409 static FNVMXEXITHANDLER            hmR0VmxExitPreemptTimer;
    410 static FNVMXEXITHANDLERNSRC        hmR0VmxExitWbinvd;
    411 static FNVMXEXITHANDLER            hmR0VmxExitXsetbv;
    412 static FNVMXEXITHANDLER            hmR0VmxExitInvpcid;
    413 static FNVMXEXITHANDLERNSRC        hmR0VmxExitSetPendingXcptUD;
    414 static FNVMXEXITHANDLERNSRC        hmR0VmxExitErrInvalidGuestState;
    415 static FNVMXEXITHANDLERNSRC        hmR0VmxExitErrUnexpected;
    416 /** @} */
    417 
    418 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    419 /** @name Nested-guest VM-exit handler prototypes.
    420  * @{
    421  */
    422 static FNVMXEXITHANDLER            hmR0VmxExitXcptOrNmiNested;
    423 static FNVMXEXITHANDLER            hmR0VmxExitTripleFaultNested;
    424 static FNVMXEXITHANDLERNSRC        hmR0VmxExitIntWindowNested;
    425 static FNVMXEXITHANDLERNSRC        hmR0VmxExitNmiWindowNested;
    426 static FNVMXEXITHANDLER            hmR0VmxExitTaskSwitchNested;
    427 static FNVMXEXITHANDLER            hmR0VmxExitHltNested;
    428 static FNVMXEXITHANDLER            hmR0VmxExitInvlpgNested;
    429 static FNVMXEXITHANDLER            hmR0VmxExitRdpmcNested;
    430 static FNVMXEXITHANDLER            hmR0VmxExitVmreadVmwriteNested;
    431 static FNVMXEXITHANDLER            hmR0VmxExitRdtscNested;
    432 static FNVMXEXITHANDLER            hmR0VmxExitMovCRxNested;
    433 static FNVMXEXITHANDLER            hmR0VmxExitMovDRxNested;
    434 static FNVMXEXITHANDLER            hmR0VmxExitIoInstrNested;
    435 static FNVMXEXITHANDLER            hmR0VmxExitRdmsrNested;
    436 static FNVMXEXITHANDLER            hmR0VmxExitWrmsrNested;
    437 static FNVMXEXITHANDLER            hmR0VmxExitMwaitNested;
    438 static FNVMXEXITHANDLER            hmR0VmxExitMtfNested;
    439 static FNVMXEXITHANDLER            hmR0VmxExitMonitorNested;
    440 static FNVMXEXITHANDLER            hmR0VmxExitPauseNested;
    441 static FNVMXEXITHANDLERNSRC        hmR0VmxExitTprBelowThresholdNested;
    442 static FNVMXEXITHANDLER            hmR0VmxExitApicAccessNested;
    443 static FNVMXEXITHANDLER            hmR0VmxExitApicWriteNested;
    444 static FNVMXEXITHANDLER            hmR0VmxExitVirtEoiNested;
    445 static FNVMXEXITHANDLER            hmR0VmxExitRdtscpNested;
    446 static FNVMXEXITHANDLERNSRC        hmR0VmxExitWbinvdNested;
    447 static FNVMXEXITHANDLER            hmR0VmxExitInvpcidNested;
    448 static FNVMXEXITHANDLERNSRC        hmR0VmxExitErrInvalidGuestStateNested;
    449 static FNVMXEXITHANDLER            hmR0VmxExitInstrNested;
    450 static FNVMXEXITHANDLER            hmR0VmxExitInstrWithInfoNested;
    451 /** @} */
    452 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    45388
    45489
     
    45691*   Global Variables                                                                                                             *
    45792*********************************************************************************************************************************/
    458 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    459 /**
    460  * Array of all VMCS fields.
    461  * Any fields added to the VT-x spec. should be added here.
    462  *
    463  * Currently only used to derive shadow VMCS fields for hardware-assisted execution
    464  * of nested-guests.
    465  */
    466 static const uint32_t g_aVmcsFields[] =
    467 {
    468     /* 16-bit control fields. */
    469     VMX_VMCS16_VPID,
    470     VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
    471     VMX_VMCS16_EPTP_INDEX,
    472 
    473     /* 16-bit guest-state fields. */
    474     VMX_VMCS16_GUEST_ES_SEL,
    475     VMX_VMCS16_GUEST_CS_SEL,
    476     VMX_VMCS16_GUEST_SS_SEL,
    477     VMX_VMCS16_GUEST_DS_SEL,
    478     VMX_VMCS16_GUEST_FS_SEL,
    479     VMX_VMCS16_GUEST_GS_SEL,
    480     VMX_VMCS16_GUEST_LDTR_SEL,
    481     VMX_VMCS16_GUEST_TR_SEL,
    482     VMX_VMCS16_GUEST_INTR_STATUS,
    483     VMX_VMCS16_GUEST_PML_INDEX,
    484 
    485     /* 16-bit host-state fields. */
    486     VMX_VMCS16_HOST_ES_SEL,
    487     VMX_VMCS16_HOST_CS_SEL,
    488     VMX_VMCS16_HOST_SS_SEL,
    489     VMX_VMCS16_HOST_DS_SEL,
    490     VMX_VMCS16_HOST_FS_SEL,
    491     VMX_VMCS16_HOST_GS_SEL,
    492     VMX_VMCS16_HOST_TR_SEL,
    493 
    494     /* 64-bit control fields. */
    495     VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
    496     VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
    497     VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
    498     VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
    499     VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
    500     VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
    501     VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
    502     VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
    503     VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
    504     VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
    505     VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
    506     VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
    507     VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
    508     VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
    509     VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
    510     VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
    511     VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
    512     VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
    513     VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
    514     VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
    515     VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
    516     VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
    517     VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
    518     VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
    519     VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
    520     VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
    521     VMX_VMCS64_CTRL_EPTP_FULL,
    522     VMX_VMCS64_CTRL_EPTP_HIGH,
    523     VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
    524     VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
    525     VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
    526     VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
    527     VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
    528     VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
    529     VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
    530     VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
    531     VMX_VMCS64_CTRL_EPTP_LIST_FULL,
    532     VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
    533     VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
    534     VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
    535     VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
    536     VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
    537     VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
    538     VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
    539     VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
    540     VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
    541     VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
    542     VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
    543     VMX_VMCS64_CTRL_SPPTP_FULL,
    544     VMX_VMCS64_CTRL_SPPTP_HIGH,
    545     VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
    546     VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
    547     VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
    548     VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
    549     VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
    550     VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
    551 
    552     /* 64-bit read-only data fields. */
    553     VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
    554     VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
    555 
    556     /* 64-bit guest-state fields. */
    557     VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
    558     VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
    559     VMX_VMCS64_GUEST_DEBUGCTL_FULL,
    560     VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
    561     VMX_VMCS64_GUEST_PAT_FULL,
    562     VMX_VMCS64_GUEST_PAT_HIGH,
    563     VMX_VMCS64_GUEST_EFER_FULL,
    564     VMX_VMCS64_GUEST_EFER_HIGH,
    565     VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
    566     VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
    567     VMX_VMCS64_GUEST_PDPTE0_FULL,
    568     VMX_VMCS64_GUEST_PDPTE0_HIGH,
    569     VMX_VMCS64_GUEST_PDPTE1_FULL,
    570     VMX_VMCS64_GUEST_PDPTE1_HIGH,
    571     VMX_VMCS64_GUEST_PDPTE2_FULL,
    572     VMX_VMCS64_GUEST_PDPTE2_HIGH,
    573     VMX_VMCS64_GUEST_PDPTE3_FULL,
    574     VMX_VMCS64_GUEST_PDPTE3_HIGH,
    575     VMX_VMCS64_GUEST_BNDCFGS_FULL,
    576     VMX_VMCS64_GUEST_BNDCFGS_HIGH,
    577     VMX_VMCS64_GUEST_RTIT_CTL_FULL,
    578     VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
    579     VMX_VMCS64_GUEST_PKRS_FULL,
    580     VMX_VMCS64_GUEST_PKRS_HIGH,
    581 
    582     /* 64-bit host-state fields. */
    583     VMX_VMCS64_HOST_PAT_FULL,
    584     VMX_VMCS64_HOST_PAT_HIGH,
    585     VMX_VMCS64_HOST_EFER_FULL,
    586     VMX_VMCS64_HOST_EFER_HIGH,
    587     VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
    588     VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
    589     VMX_VMCS64_HOST_PKRS_FULL,
    590     VMX_VMCS64_HOST_PKRS_HIGH,
    591 
    592     /* 32-bit control fields. */
    593     VMX_VMCS32_CTRL_PIN_EXEC,
    594     VMX_VMCS32_CTRL_PROC_EXEC,
    595     VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
    596     VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
    597     VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
    598     VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
    599     VMX_VMCS32_CTRL_EXIT,
    600     VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
    601     VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
    602     VMX_VMCS32_CTRL_ENTRY,
    603     VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
    604     VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
    605     VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
    606     VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
    607     VMX_VMCS32_CTRL_TPR_THRESHOLD,
    608     VMX_VMCS32_CTRL_PROC_EXEC2,
    609     VMX_VMCS32_CTRL_PLE_GAP,
    610     VMX_VMCS32_CTRL_PLE_WINDOW,
    611 
    612     /* 32-bit read-only fields. */
    613     VMX_VMCS32_RO_VM_INSTR_ERROR,
    614     VMX_VMCS32_RO_EXIT_REASON,
    615     VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
    616     VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
    617     VMX_VMCS32_RO_IDT_VECTORING_INFO,
    618     VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
    619     VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
    620     VMX_VMCS32_RO_EXIT_INSTR_INFO,
    621 
    622     /* 32-bit guest-state fields. */
    623     VMX_VMCS32_GUEST_ES_LIMIT,
    624     VMX_VMCS32_GUEST_CS_LIMIT,
    625     VMX_VMCS32_GUEST_SS_LIMIT,
    626     VMX_VMCS32_GUEST_DS_LIMIT,
    627     VMX_VMCS32_GUEST_FS_LIMIT,
    628     VMX_VMCS32_GUEST_GS_LIMIT,
    629     VMX_VMCS32_GUEST_LDTR_LIMIT,
    630     VMX_VMCS32_GUEST_TR_LIMIT,
    631     VMX_VMCS32_GUEST_GDTR_LIMIT,
    632     VMX_VMCS32_GUEST_IDTR_LIMIT,
    633     VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
    634     VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
    635     VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
    636     VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
    637     VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
    638     VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
    639     VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
    640     VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
    641     VMX_VMCS32_GUEST_INT_STATE,
    642     VMX_VMCS32_GUEST_ACTIVITY_STATE,
    643     VMX_VMCS32_GUEST_SMBASE,
    644     VMX_VMCS32_GUEST_SYSENTER_CS,
    645     VMX_VMCS32_PREEMPT_TIMER_VALUE,
    646 
    647     /* 32-bit host-state fields. */
    648     VMX_VMCS32_HOST_SYSENTER_CS,
    649 
    650     /* Natural-width control fields. */
    651     VMX_VMCS_CTRL_CR0_MASK,
    652     VMX_VMCS_CTRL_CR4_MASK,
    653     VMX_VMCS_CTRL_CR0_READ_SHADOW,
    654     VMX_VMCS_CTRL_CR4_READ_SHADOW,
    655     VMX_VMCS_CTRL_CR3_TARGET_VAL0,
    656     VMX_VMCS_CTRL_CR3_TARGET_VAL1,
    657     VMX_VMCS_CTRL_CR3_TARGET_VAL2,
    658     VMX_VMCS_CTRL_CR3_TARGET_VAL3,
    659 
    660     /* Natural-width read-only data fields. */
    661     VMX_VMCS_RO_EXIT_QUALIFICATION,
    662     VMX_VMCS_RO_IO_RCX,
    663     VMX_VMCS_RO_IO_RSI,
    664     VMX_VMCS_RO_IO_RDI,
    665     VMX_VMCS_RO_IO_RIP,
    666     VMX_VMCS_RO_GUEST_LINEAR_ADDR,
    667 
    668     /* Natural-width guest-state fields. */
    669     VMX_VMCS_GUEST_CR0,
    670     VMX_VMCS_GUEST_CR3,
    671     VMX_VMCS_GUEST_CR4,
    672     VMX_VMCS_GUEST_ES_BASE,
    673     VMX_VMCS_GUEST_CS_BASE,
    674     VMX_VMCS_GUEST_SS_BASE,
    675     VMX_VMCS_GUEST_DS_BASE,
    676     VMX_VMCS_GUEST_FS_BASE,
    677     VMX_VMCS_GUEST_GS_BASE,
    678     VMX_VMCS_GUEST_LDTR_BASE,
    679     VMX_VMCS_GUEST_TR_BASE,
    680     VMX_VMCS_GUEST_GDTR_BASE,
    681     VMX_VMCS_GUEST_IDTR_BASE,
    682     VMX_VMCS_GUEST_DR7,
    683     VMX_VMCS_GUEST_RSP,
    684     VMX_VMCS_GUEST_RIP,
    685     VMX_VMCS_GUEST_RFLAGS,
    686     VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
    687     VMX_VMCS_GUEST_SYSENTER_ESP,
    688     VMX_VMCS_GUEST_SYSENTER_EIP,
    689     VMX_VMCS_GUEST_S_CET,
    690     VMX_VMCS_GUEST_SSP,
    691     VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
    692 
    693     /* Natural-width host-state fields. */
    694     VMX_VMCS_HOST_CR0,
    695     VMX_VMCS_HOST_CR3,
    696     VMX_VMCS_HOST_CR4,
    697     VMX_VMCS_HOST_FS_BASE,
    698     VMX_VMCS_HOST_GS_BASE,
    699     VMX_VMCS_HOST_TR_BASE,
    700     VMX_VMCS_HOST_GDTR_BASE,
    701     VMX_VMCS_HOST_IDTR_BASE,
    702     VMX_VMCS_HOST_SYSENTER_ESP,
    703     VMX_VMCS_HOST_SYSENTER_EIP,
    704     VMX_VMCS_HOST_RSP,
    705     VMX_VMCS_HOST_RIP,
    706     VMX_VMCS_HOST_S_CET,
    707     VMX_VMCS_HOST_SSP,
    708     VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
    709 };
    710 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    711 
    712 #ifdef VBOX_STRICT
    713 static const uint32_t g_aVmcsSegBase[] =
    714 {
    715     VMX_VMCS_GUEST_ES_BASE,
    716     VMX_VMCS_GUEST_CS_BASE,
    717     VMX_VMCS_GUEST_SS_BASE,
    718     VMX_VMCS_GUEST_DS_BASE,
    719     VMX_VMCS_GUEST_FS_BASE,
    720     VMX_VMCS_GUEST_GS_BASE
    721 };
    722 static const uint32_t g_aVmcsSegSel[] =
    723 {
    724     VMX_VMCS16_GUEST_ES_SEL,
    725     VMX_VMCS16_GUEST_CS_SEL,
    726     VMX_VMCS16_GUEST_SS_SEL,
    727     VMX_VMCS16_GUEST_DS_SEL,
    728     VMX_VMCS16_GUEST_FS_SEL,
    729     VMX_VMCS16_GUEST_GS_SEL
    730 };
    731 static const uint32_t g_aVmcsSegLimit[] =
    732 {
    733     VMX_VMCS32_GUEST_ES_LIMIT,
    734     VMX_VMCS32_GUEST_CS_LIMIT,
    735     VMX_VMCS32_GUEST_SS_LIMIT,
    736     VMX_VMCS32_GUEST_DS_LIMIT,
    737     VMX_VMCS32_GUEST_FS_LIMIT,
    738     VMX_VMCS32_GUEST_GS_LIMIT
    739 };
    740 static const uint32_t g_aVmcsSegAttr[] =
    741 {
    742     VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
    743     VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
    744     VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
    745     VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
    746     VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
    747     VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
    748 };
    749 AssertCompile(RT_ELEMENTS(g_aVmcsSegSel)   == X86_SREG_COUNT);
    750 AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
    751 AssertCompile(RT_ELEMENTS(g_aVmcsSegBase)  == X86_SREG_COUNT);
    752 AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr)  == X86_SREG_COUNT);
    753 #endif /* VBOX_STRICT */
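The g_aVmcsSegBase/Sel/Limit/Attr arrays are parallel tables indexed by X86_SREG_*, so the strict-build checks can walk all six segment registers in one loop instead of six copies of the code. A sketch of that access pattern; demoVmcsRead is a stand-in for the real VMCS accessor, and the field IDs are listed only for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SREG_COUNT 6 /* ES, CS, SS, DS, FS, GS, as in X86_SREG_COUNT */

    /* Illustrative VMCS field encodings for the guest segment bases. */
    static const uint32_t g_aDemoSegBase[DEMO_SREG_COUNT] =
    { 0x6806, 0x6808, 0x680a, 0x680c, 0x680e, 0x6810 };

    /* Stand-in for a VMCS read; real code would issue a VMREAD. */
    static uint64_t demoVmcsRead(uint32_t idField) { return (uint64_t)idField << 4; }

    int main(void)
    {
        /* One loop covers every segment register. */
        for (unsigned iSeg = 0; iSeg < DEMO_SREG_COUNT; iSeg++)
            printf("seg %u: base field %#x -> %#llx\n", iSeg,
                   (unsigned)g_aDemoSegBase[iSeg],
                   (unsigned long long)demoVmcsRead(g_aDemoSegBase[iSeg]));
        return 0;
    }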
    754 
    755 #ifdef HMVMX_USE_FUNCTION_TABLE
    756 /**
    757  * VMX_EXIT dispatch table.
    758  */
    759 static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
    760 {
    761     /*  0  VMX_EXIT_XCPT_OR_NMI             */  { hmR0VmxExitXcptOrNmi },
    762     /*  1  VMX_EXIT_EXT_INT                 */  { hmR0VmxExitExtInt },
    763     /*  2  VMX_EXIT_TRIPLE_FAULT            */  { hmR0VmxExitTripleFault },
    764     /*  3  VMX_EXIT_INIT_SIGNAL             */  { hmR0VmxExitErrUnexpected },
    765     /*  4  VMX_EXIT_SIPI                    */  { hmR0VmxExitErrUnexpected },
    766     /*  5  VMX_EXIT_IO_SMI                  */  { hmR0VmxExitErrUnexpected },
    767     /*  6  VMX_EXIT_SMI                     */  { hmR0VmxExitErrUnexpected },
    768     /*  7  VMX_EXIT_INT_WINDOW              */  { hmR0VmxExitIntWindow },
    769     /*  8  VMX_EXIT_NMI_WINDOW              */  { hmR0VmxExitNmiWindow },
    770     /*  9  VMX_EXIT_TASK_SWITCH             */  { hmR0VmxExitTaskSwitch },
    771     /* 10  VMX_EXIT_CPUID                   */  { hmR0VmxExitCpuid },
    772     /* 11  VMX_EXIT_GETSEC                  */  { hmR0VmxExitGetsec },
    773     /* 12  VMX_EXIT_HLT                     */  { hmR0VmxExitHlt },
    774     /* 13  VMX_EXIT_INVD                    */  { hmR0VmxExitInvd },
    775     /* 14  VMX_EXIT_INVLPG                  */  { hmR0VmxExitInvlpg },
    776     /* 15  VMX_EXIT_RDPMC                   */  { hmR0VmxExitRdpmc },
    777     /* 16  VMX_EXIT_RDTSC                   */  { hmR0VmxExitRdtsc },
    778     /* 17  VMX_EXIT_RSM                     */  { hmR0VmxExitErrUnexpected },
    779     /* 18  VMX_EXIT_VMCALL                  */  { hmR0VmxExitVmcall },
    780 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    781     /* 19  VMX_EXIT_VMCLEAR                 */  { hmR0VmxExitVmclear },
    782     /* 20  VMX_EXIT_VMLAUNCH                */  { hmR0VmxExitVmlaunch },
    783     /* 21  VMX_EXIT_VMPTRLD                 */  { hmR0VmxExitVmptrld },
    784     /* 22  VMX_EXIT_VMPTRST                 */  { hmR0VmxExitVmptrst },
    785     /* 23  VMX_EXIT_VMREAD                  */  { hmR0VmxExitVmread },
    786     /* 24  VMX_EXIT_VMRESUME                */  { hmR0VmxExitVmresume },
    787     /* 25  VMX_EXIT_VMWRITE                 */  { hmR0VmxExitVmwrite },
    788     /* 26  VMX_EXIT_VMXOFF                  */  { hmR0VmxExitVmxoff },
    789     /* 27  VMX_EXIT_VMXON                   */  { hmR0VmxExitVmxon },
    790 #else
    791     /* 19  VMX_EXIT_VMCLEAR                 */  { hmR0VmxExitSetPendingXcptUD },
    792     /* 20  VMX_EXIT_VMLAUNCH                */  { hmR0VmxExitSetPendingXcptUD },
    793     /* 21  VMX_EXIT_VMPTRLD                 */  { hmR0VmxExitSetPendingXcptUD },
    794     /* 22  VMX_EXIT_VMPTRST                 */  { hmR0VmxExitSetPendingXcptUD },
    795     /* 23  VMX_EXIT_VMREAD                  */  { hmR0VmxExitSetPendingXcptUD },
    796     /* 24  VMX_EXIT_VMRESUME                */  { hmR0VmxExitSetPendingXcptUD },
    797     /* 25  VMX_EXIT_VMWRITE                 */  { hmR0VmxExitSetPendingXcptUD },
    798     /* 26  VMX_EXIT_VMXOFF                  */  { hmR0VmxExitSetPendingXcptUD },
    799     /* 27  VMX_EXIT_VMXON                   */  { hmR0VmxExitSetPendingXcptUD },
    800 #endif
    801     /* 28  VMX_EXIT_MOV_CRX                 */  { hmR0VmxExitMovCRx },
    802     /* 29  VMX_EXIT_MOV_DRX                 */  { hmR0VmxExitMovDRx },
    803     /* 30  VMX_EXIT_IO_INSTR                */  { hmR0VmxExitIoInstr },
    804     /* 31  VMX_EXIT_RDMSR                   */  { hmR0VmxExitRdmsr },
    805     /* 32  VMX_EXIT_WRMSR                   */  { hmR0VmxExitWrmsr },
    806     /* 33  VMX_EXIT_ERR_INVALID_GUEST_STATE */  { hmR0VmxExitErrInvalidGuestState },
    807     /* 34  VMX_EXIT_ERR_MSR_LOAD            */  { hmR0VmxExitErrUnexpected },
    808     /* 35  UNDEFINED                        */  { hmR0VmxExitErrUnexpected },
    809     /* 36  VMX_EXIT_MWAIT                   */  { hmR0VmxExitMwait },
    810     /* 37  VMX_EXIT_MTF                     */  { hmR0VmxExitMtf },
    811     /* 38  UNDEFINED                        */  { hmR0VmxExitErrUnexpected },
    812     /* 39  VMX_EXIT_MONITOR                 */  { hmR0VmxExitMonitor },
    813     /* 40  VMX_EXIT_PAUSE                   */  { hmR0VmxExitPause },
    814     /* 41  VMX_EXIT_ERR_MACHINE_CHECK       */  { hmR0VmxExitErrUnexpected },
    815     /* 42  UNDEFINED                        */  { hmR0VmxExitErrUnexpected },
    816     /* 43  VMX_EXIT_TPR_BELOW_THRESHOLD     */  { hmR0VmxExitTprBelowThreshold },
    817     /* 44  VMX_EXIT_APIC_ACCESS             */  { hmR0VmxExitApicAccess },
    818     /* 45  VMX_EXIT_VIRTUALIZED_EOI         */  { hmR0VmxExitErrUnexpected },
    819     /* 46  VMX_EXIT_GDTR_IDTR_ACCESS        */  { hmR0VmxExitErrUnexpected },
    820     /* 47  VMX_EXIT_LDTR_TR_ACCESS          */  { hmR0VmxExitErrUnexpected },
    821     /* 48  VMX_EXIT_EPT_VIOLATION           */  { hmR0VmxExitEptViolation },
    822     /* 49  VMX_EXIT_EPT_MISCONFIG           */  { hmR0VmxExitEptMisconfig },
    823     /* 50  VMX_EXIT_INVEPT                  */  { hmR0VmxExitSetPendingXcptUD },
    824     /* 51  VMX_EXIT_RDTSCP                  */  { hmR0VmxExitRdtscp },
    825     /* 52  VMX_EXIT_PREEMPT_TIMER           */  { hmR0VmxExitPreemptTimer },
    826 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    827     /* 53  VMX_EXIT_INVVPID                 */  { hmR0VmxExitInvvpid },
    828 #else
    829     /* 53  VMX_EXIT_INVVPID                 */  { hmR0VmxExitSetPendingXcptUD },
    830 #endif
    831     /* 54  VMX_EXIT_WBINVD                  */  { hmR0VmxExitWbinvd },
    832     /* 55  VMX_EXIT_XSETBV                  */  { hmR0VmxExitXsetbv },
    833     /* 56  VMX_EXIT_APIC_WRITE              */  { hmR0VmxExitErrUnexpected },
    834     /* 57  VMX_EXIT_RDRAND                  */  { hmR0VmxExitErrUnexpected },
    835     /* 58  VMX_EXIT_INVPCID                 */  { hmR0VmxExitInvpcid },
    836     /* 59  VMX_EXIT_VMFUNC                  */  { hmR0VmxExitErrUnexpected },
    837     /* 60  VMX_EXIT_ENCLS                   */  { hmR0VmxExitErrUnexpected },
    838     /* 61  VMX_EXIT_RDSEED                  */  { hmR0VmxExitErrUnexpected },
    839     /* 62  VMX_EXIT_PML_FULL                */  { hmR0VmxExitErrUnexpected },
    840     /* 63  VMX_EXIT_XSAVES                  */  { hmR0VmxExitErrUnexpected },
    841     /* 64  VMX_EXIT_XRSTORS                 */  { hmR0VmxExitErrUnexpected },
    842     /* 65  UNDEFINED                        */  { hmR0VmxExitErrUnexpected },
    843     /* 66  VMX_EXIT_SPP_EVENT               */  { hmR0VmxExitErrUnexpected },
    844     /* 67  VMX_EXIT_UMWAIT                  */  { hmR0VmxExitErrUnexpected },
    845     /* 68  VMX_EXIT_TPAUSE                  */  { hmR0VmxExitErrUnexpected },
    846     /* 69  VMX_EXIT_LOADIWKEY               */  { hmR0VmxExitErrUnexpected },
    847 };
    848 #endif /* HMVMX_USE_FUNCTION_TABLE */
    849 
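Editor's note: as an illustration of how a table like g_aVMExitHandlers is consumed, here is a minimal dispatch sketch. It is not code from this changeset; the handler signature mirrors PFNVMXEXITHANDLER and the bounds handling is an assumption.

    /* Illustrative sketch only: route a VM-exit through the dispatch table.
     * Assumes uExitReason was already read from the VMCS into the transient struct. */
    static VBOXSTRICTRC vmxDispatchExitSketch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    {
        uint32_t const uExitReason = pVmxTransient->uExitReason;
        if (RT_LIKELY(uExitReason <= VMX_EXIT_MAX))    /* Table holds VMX_EXIT_MAX + 1 entries. */
            return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
        return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient);
    }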
    850 #if defined(VBOX_STRICT) && defined(LOG_ENABLED)
    851 static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
    852 {
    853     /*  0 */ "(Not Used)",
    854     /*  1 */ "VMCALL executed in VMX root operation.",
    855     /*  2 */ "VMCLEAR with invalid physical address.",
    856     /*  3 */ "VMCLEAR with VMXON pointer.",
    857     /*  4 */ "VMLAUNCH with non-clear VMCS.",
    858     /*  5 */ "VMRESUME with non-launched VMCS.",
    859     /*  6 */ "VMRESUME after VMXOFF.",
    860     /*  7 */ "VM-entry with invalid control fields.",
    861     /*  8 */ "VM-entry with invalid host state fields.",
    862     /*  9 */ "VMPTRLD with invalid physical address.",
    863     /* 10 */ "VMPTRLD with VMXON pointer.",
    864     /* 11 */ "VMPTRLD with incorrect revision identifier.",
    865     /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
    866     /* 13 */ "VMWRITE to read-only VMCS component.",
    867     /* 14 */ "(Not Used)",
    868     /* 15 */ "VMXON executed in VMX root operation.",
    869     /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
    870     /* 17 */ "VM-entry with non-launched executing VMCS.",
    871     /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
    872     /* 19 */ "VMCALL with non-clear VMCS.",
    873     /* 20 */ "VMCALL with invalid VM-exit control fields.",
    874     /* 21 */ "(Not Used)",
    875     /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
    876     /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
    877     /* 24 */ "VMCALL with invalid SMM-monitor features.",
    878     /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
    879     /* 26 */ "VM-entry with events blocked by MOV SS.",
    880     /* 27 */ "(Not Used)",
    881     /* 28 */ "Invalid operand to INVEPT/INVVPID."
    882 };
    883 #endif /* VBOX_STRICT && LOG_ENABLED */
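Editor's note: a typical consumer of this string table guards the index before lookup. The helper below is a hypothetical sketch, not part of the changeset.

    /* Hypothetical lookup helper for logging VMX instruction errors. */
    static const char *vmxGetInstrErrorDescSketch(uint32_t uInstrError)
    {
        if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
            return g_apszVmxInstrErrors[uInstrError];
        return "(Unknown VMX instruction error)";    /* Guard against out-of-range values. */
    }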
     93static bool hmR0VmxShouldSwapEferMsr(PCVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient);
     94static int hmR0VmxExitHostNmi(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo);
    88495
    88596
     
    938149    }
    939150    return false;
    940 }
    941 
    942 
    943 /**
    944  * Gets the CR0 guest/host mask.
    945  *
    946      * These bits typically do not change through the lifetime of a VM. Any bit set in
    947  * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
    948  * by the guest.
    949  *
    950  * @returns The CR0 guest/host mask.
    951  * @param   pVCpu   The cross context virtual CPU structure.
    952  */
    953 static uint64_t hmR0VmxGetFixedCr0Mask(PCVMCPUCC pVCpu)
    954 {
    955     /*
    956      * Modifications to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW) and
    957      * to CR0 bits that we require for shadow paging (PG) by the guest must cause VM-exits.
    958      *
    959      * Furthermore, modifications to any bits that are reserved/unspecified currently
    960      * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
    961      * when future CPUs specify and use currently reserved/unspecified bits.
    962      */
    963     /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
    964      *        enmGuestMode to be in-sync with the current mode. See @bugref{6398}
    965      *        and @bugref{6944}. */
    966     PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
    967     return (  X86_CR0_PE
    968             | X86_CR0_NE
    969             | (pVM->hmr0.s.fNestedPaging ? 0 : X86_CR0_WP)
    970             | X86_CR0_PG
    971             | VMX_EXIT_HOST_CR0_IGNORE_MASK);
    972 }
    973 
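Editor's note: to make the guest/host mask semantics concrete, a guest MOV to CR0 traps exactly when the new value differs from the CR0 read shadow in a bit set in this mask. The predicate below is illustrative only; the parameter names are assumptions, not VirtualBox identifiers.

    /* Illustrative only: would a guest CR0 write cause a VM-exit?
     * uCr0ReadShadow is the CR0 value the guest currently observes. */
    static bool vmxCr0WriteWouldTrapSketch(uint64_t fCr0GstHostMask, uint64_t uCr0ReadShadow, uint64_t uNewCr0)
    {
        /* Only changes to host-owned bits (mask bit set) are intercepted. */
        return ((uCr0ReadShadow ^ uNewCr0) & fCr0GstHostMask) != 0;
    }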
    974 
    975 /**
    976  * Gets the CR4 guest/host mask.
    977  *
    978      * These bits typically do not change through the lifetime of a VM. Any bit set in
    979  * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
    980  * by the guest.
    981  *
    982  * @returns The CR4 guest/host mask.
    983  * @param   pVCpu   The cross context virtual CPU structure.
    984  */
    985 static uint64_t hmR0VmxGetFixedCr4Mask(PCVMCPUCC pVCpu)
    986 {
    987     /*
    988      * We construct a mask of all CR4 bits that the guest can modify without causing
    989      * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
    990      * a VM-exit when the guest attempts to modify them when executing using
    991      * hardware-assisted VMX.
    992      *
    993      * When a feature is not exposed to the guest (and may be present on the host),
    994      * we want to intercept guest modifications to the bit so we can emulate proper
    995      * behavior (e.g., #GP).
    996      *
    997      * Furthermore, only modifications to those bits that don't require immediate
    998      * emulation are allowed. For example, PCIDE is excluded because the behavior
    999      * depends on CR3 which might not always be the guest value while executing
    1000      * using hardware-assisted VMX.
    1001      */
    1002     PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
    1003     bool const fFsGsBase    = pVM->cpum.ro.GuestFeatures.fFsGsBase;
    1004     bool const fXSaveRstor  = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
    1005     bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
    1006 
    1007     /*
    1008      * Paranoia.
    1009      * Ensure features exposed to the guest are present on the host.
    1010      */
    1011     Assert(!fFsGsBase    || pVM->cpum.ro.HostFeatures.fFsGsBase);
    1012     Assert(!fXSaveRstor  || pVM->cpum.ro.HostFeatures.fXSaveRstor);
    1013     Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
    1014 
    1015     uint64_t const fGstMask = (  X86_CR4_PVI
    1016                                | X86_CR4_TSD
    1017                                | X86_CR4_DE
    1018                                | X86_CR4_MCE
    1019                                | X86_CR4_PCE
    1020                                | X86_CR4_OSXMMEEXCPT
    1021                                | (fFsGsBase    ? X86_CR4_FSGSBASE : 0)
    1022                                | (fXSaveRstor  ? X86_CR4_OSXSAVE  : 0)
    1023                                | (fFxSaveRstor ? X86_CR4_OSFXSR   : 0));
    1024     return ~fGstMask;
    1025151}
    1026152
     
    1132258
    1133259/**
    1134  * Adds one or more exceptions to the exception bitmap and commits it to the current
    1135  * VMCS.
    1136  *
    1137  * @param   pVmxTransient   The VMX-transient structure.
    1138  * @param   uXcptMask       The exception(s) to add.
    1139  */
    1140 static void hmR0VmxAddXcptInterceptMask(PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
    1141 {
    1142     PVMXVMCSINFO pVmcsInfo   = pVmxTransient->pVmcsInfo;
    1143     uint32_t       uXcptBitmap = pVmcsInfo->u32XcptBitmap;
    1144     if ((uXcptBitmap & uXcptMask) != uXcptMask)
    1145     {
    1146         uXcptBitmap |= uXcptMask;
    1147         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    1148         AssertRC(rc);
    1149         pVmcsInfo->u32XcptBitmap = uXcptBitmap;
    1150     }
    1151 }
    1152 
    1153 
    1154 /**
    1155  * Adds an exception to the exception bitmap and commits it to the current VMCS.
    1156  *
    1157  * @param   pVmxTransient   The VMX-transient structure.
    1158  * @param   uXcpt           The exception to add.
    1159  */
    1160 static void hmR0VmxAddXcptIntercept(PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
    1161 {
    1162     Assert(uXcpt <= X86_XCPT_LAST);
    1163     hmR0VmxAddXcptInterceptMask(pVmxTransient, RT_BIT_32(uXcpt));
    1164 }
    1165 
    1166 
    1167 /**
    1168  * Removes one or more exceptions from the exception bitmap and commits it to the
    1169  * current VMCS.
    1170  *
    1171  * This takes care of not removing the exception intercept if a nested-guest
    1172  * requires the exception to be intercepted.
    1173  *
    1174  * @returns VBox status code.
    1175  * @param   pVCpu           The cross context virtual CPU structure.
    1176  * @param   pVmxTransient   The VMX-transient structure.
    1177  * @param   uXcptMask       The exception(s) to remove.
    1178  */
    1179 static int hmR0VmxRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
    1180 {
    1181     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1182     uint32_t   u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
    1183     if (u32XcptBitmap & uXcptMask)
    1184     {
    1185 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1186         if (!pVmxTransient->fIsNestedGuest)
    1187         { /* likely */ }
    1188         else
    1189             uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
    1190 #endif
    1191 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    1192         uXcptMask &= ~(  RT_BIT(X86_XCPT_BP)
    1193                        | RT_BIT(X86_XCPT_DE)
    1194                        | RT_BIT(X86_XCPT_NM)
    1195                        | RT_BIT(X86_XCPT_TS)
    1196                        | RT_BIT(X86_XCPT_UD)
    1197                        | RT_BIT(X86_XCPT_NP)
    1198                        | RT_BIT(X86_XCPT_SS)
    1199                        | RT_BIT(X86_XCPT_GP)
    1200                        | RT_BIT(X86_XCPT_PF)
    1201                        | RT_BIT(X86_XCPT_MF));
    1202 #elif defined(HMVMX_ALWAYS_TRAP_PF)
    1203         uXcptMask &= ~RT_BIT(X86_XCPT_PF);
    1204 #endif
    1205         if (uXcptMask)
    1206         {
    1207             /* Validate we are not removing any essential exception intercepts. */
    1208             Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
    1209             NOREF(pVCpu);
    1210             Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
    1211             Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
    1212 
    1213             /* Remove it from the exception bitmap. */
    1214             u32XcptBitmap &= ~uXcptMask;
    1215 
    1216             /* Commit and update the cache if necessary. */
    1217             if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
    1218             {
    1219                 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
    1220                 AssertRC(rc);
    1221                 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
    1222             }
    1223         }
    1224     }
    1225     return VINF_SUCCESS;
    1226 }
    1227 
    1228 
    1229 /**
    1230  * Removes an exception from the exception bitmap and commits it to the current
    1231  * VMCS.
    1232  *
    1233  * @returns VBox status code.
    1234  * @param   pVCpu           The cross context virtual CPU structure.
    1235  * @param   pVmxTransient   The VMX-transient structure.
    1236  * @param   uXcpt           The exception to remove.
    1237  */
    1238 static int hmR0VmxRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
    1239 {
    1240     return hmR0VmxRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
    1241 }
    1242 
    1243 
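Editor's note: a hypothetical call site pairing the add/remove helpers above; the chosen exceptions and placement are illustrative only.

    /* Hypothetical usage: temporarily intercept #GP and #UD, then restore. */
    uint32_t const fXcptMask = RT_BIT_32(X86_XCPT_GP) | RT_BIT_32(X86_XCPT_UD);
    hmR0VmxAddXcptInterceptMask(pVmxTransient, fXcptMask);
    /* ... run the guest and handle the intercepted exceptions ... */
    int rc = hmR0VmxRemoveXcptInterceptMask(pVCpu, pVmxTransient, fXcptMask);
    AssertRC(rc);    /* Removal honours nested-guest intercepts, see above. */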
    1244 /**
    1245260 * Loads the VMCS specified by the VMCS info. object.
    1246261 *
     
    1279294        pVmcsInfo->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
    1280295    return rc;
    1281 }
    1282 
    1283 
    1284 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1285 /**
    1286  * Loads the shadow VMCS specified by the VMCS info. object.
    1287  *
    1288  * @returns VBox status code.
    1289  * @param   pVmcsInfo   The VMCS info. object.
    1290  *
    1291  * @remarks Can be called with interrupts disabled.
    1292  */
    1293 static int hmR0VmxLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
    1294 {
    1295     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1296     Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
    1297 
    1298     int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
    1299     if (RT_SUCCESS(rc))
    1300         pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
    1301     return rc;
    1302 }
    1303 
    1304 
    1305 /**
    1306  * Clears the shadow VMCS specified by the VMCS info. object.
    1307  *
    1308  * @returns VBox status code.
    1309  * @param   pVmcsInfo   The VMCS info. object.
    1310  *
    1311  * @remarks Can be called with interrupts disabled.
    1312  */
    1313 static int hmR0VmxClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
    1314 {
    1315     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1316     Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
    1317 
    1318     int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
    1319     if (RT_SUCCESS(rc))
    1320         pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
    1321     return rc;
    1322 }
    1323 
    1324 
    1325 /**
    1326  * Switches from and to the specified VMCSes.
    1327  *
    1328  * @returns VBox status code.
    1329  * @param   pVmcsInfoFrom   The VMCS info. object we are switching from.
    1330  * @param   pVmcsInfoTo     The VMCS info. object we are switching to.
    1331  *
    1332  * @remarks Called with interrupts disabled.
    1333  */
    1334 static int hmR0VmxSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
    1335 {
    1336     /*
    1337      * Clear the VMCS we are switching out if it has not already been cleared.
    1338      * This will sync any CPU internal data back to the VMCS.
    1339      */
    1340     if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    1341     {
    1342         int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
    1343         if (RT_SUCCESS(rc))
    1344         {
    1345             /*
    1346              * The shadow VMCS, if any, would not be active at this point since we
    1347              * would have cleared it while importing the virtual hardware-virtualization
    1348              * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
    1349              * clear the shadow VMCS here, just assert for safety.
    1350              */
    1351             Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
    1352         }
    1353         else
    1354             return rc;
    1355     }
    1356 
    1357     /*
    1358      * Clear the VMCS we are switching to if it has not already been cleared.
    1359      * This will initialize the VMCS launch state to "clear" required for loading it.
    1360      *
    1361      * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
    1362      */
    1363     if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    1364     {
    1365         int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
    1366         if (RT_SUCCESS(rc))
    1367         { /* likely */ }
    1368         else
    1369             return rc;
    1370     }
    1371 
    1372     /*
    1373      * Finally, load the VMCS we are switching to.
    1374      */
    1375     return hmR0VmxLoadVmcs(pVmcsInfoTo);
    1376 }
    1377 
    1378 
    1379 /**
    1380  * Switches between the guest VMCS and the nested-guest VMCS as specified by the
    1381  * caller.
    1382  *
    1383  * @returns VBox status code.
    1384  * @param   pVCpu                   The cross context virtual CPU structure.
    1385  * @param   fSwitchToNstGstVmcs     Whether to switch to the nested-guest VMCS (pass
    1386  *                                  true) or guest VMCS (pass false).
    1387  */
    1388 static int hmR0VmxSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
    1389 {
    1390     /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
    1391     HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1392 
    1393     PVMXVMCSINFO pVmcsInfoFrom;
    1394     PVMXVMCSINFO pVmcsInfoTo;
    1395     if (fSwitchToNstGstVmcs)
    1396     {
    1397         pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
    1398         pVmcsInfoTo   = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
    1399     }
    1400     else
    1401     {
    1402         pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
    1403         pVmcsInfoTo   = &pVCpu->hmr0.s.vmx.VmcsInfo;
    1404     }
    1405 
    1406     /*
    1407      * Disable interrupts to prevent being preempted while we switch the current VMCS as the
    1408      * preemption hook code path acquires the current VMCS.
    1409      */
    1410     RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    1411 
    1412     int rc = hmR0VmxSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
    1413     if (RT_SUCCESS(rc))
    1414     {
    1415         pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs           = fSwitchToNstGstVmcs;
    1416         pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
    1417 
    1418         /*
    1419          * If we are switching to a VMCS that was executed on a different host CPU or was
    1420          * never executed before, flag that we need to export the host state before executing
    1421          * guest/nested-guest code using hardware-assisted VMX.
    1422          *
    1423          * This could probably be done in a preemptible context since the preemption hook
    1424          * will flag the necessary change in host context. However, since preemption is
    1425          * already disabled and to avoid making assumptions about host specific code in
    1426          * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
    1427          * disabled.
    1428          */
    1429         if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
    1430         { /* likely */ }
    1431         else
    1432             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
    1433 
    1434         ASMSetFlags(fEFlags);
    1435 
    1436         /*
    1437          * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
    1438          * flag that we need to update the host MSR values there. Even if we decide in the
    1439          * future to share the VM-exit MSR-store area page between the guest and nested-guest,
    1440          * if its content differs, we would have to update the host MSRs anyway.
    1441          */
    1442         pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
    1443     }
    1444     else
    1445         ASMSetFlags(fEFlags);
    1446     return rc;
    1447 }
    1448 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    1449 
    1450 
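Editor's note: the clear-before-load protocol implemented above distills to the sequence below. This is an editorial summary, not additional changeset code.

    /* Distilled switch protocol (illustrative):
     *  1. VMCLEAR the outgoing VMCS so CPU-internal state is synced to memory.
     *  2. VMCLEAR the incoming VMCS if its launch state is not "clear".
     *  3. VMPTRLD the incoming VMCS to make it current on this CPU. */
    int rc = VINF_SUCCESS;
    if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
        rc = hmR0VmxClearVmcs(pVmcsInfoFrom);        /* 1. Sync outgoing VMCS to memory. */
    if (RT_SUCCESS(rc) && pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
        rc = hmR0VmxClearVmcs(pVmcsInfoTo);          /* 2. Incoming must be "clear" before load. */
    if (RT_SUCCESS(rc))
        rc = hmR0VmxLoadVmcs(pVmcsInfoTo);           /* 3. VMPTRLD makes it current. */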
    1451 /**
    1452  * Updates the VM's last error record.
    1453  *
    1454  * If there was a VMX instruction error, reads the error data from the VMCS and
    1455  * updates the VCPU's last error record as well.
    1456  *
    1457  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    1458  *                  Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
    1459  *                  VERR_VMX_INVALID_VMCS_FIELD.
    1460  * @param   rc      The error code.
    1461  */
    1462 static void hmR0VmxUpdateErrorRecord(PVMCPUCC pVCpu, int rc)
    1463 {
    1464     if (   rc == VERR_VMX_INVALID_VMCS_FIELD
    1465         || rc == VERR_VMX_UNABLE_TO_START_VM)
    1466     {
    1467         AssertPtrReturnVoid(pVCpu);
    1468         VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
    1469     }
    1470     pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc;
    1471 }
    1472 
    1473 
    1474 #ifdef VBOX_STRICT
    1475 /**
    1476  * Reads the VM-entry interruption-information field from the VMCS into the VMX
    1477  * transient structure.
    1478  *
    1479  * @param   pVmxTransient   The VMX-transient structure.
    1480  */
    1481 DECLINLINE(void) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
    1482 {
    1483     int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
    1484     AssertRC(rc);
    1485 }
    1486 
    1487 
    1488 /**
    1489  * Reads the VM-entry exception error code field from the VMCS into
    1490  * the VMX transient structure.
    1491  *
    1492  * @param   pVmxTransient   The VMX-transient structure.
    1493  */
    1494 DECLINLINE(void) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
    1495 {
    1496     int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
    1497     AssertRC(rc);
    1498 }
    1499 
    1500 
    1501 /**
    1502  * Reads the VM-entry instruction length field from the VMCS into
    1503  * the VMX transient structure.
    1504  *
    1505  * @param   pVmxTransient   The VMX-transient structure.
    1506  */
    1507 DECLINLINE(void) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
    1508 {
    1509     int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
    1510     AssertRC(rc);
    1511 }
    1512 #endif /* VBOX_STRICT */
    1513 
    1514 
    1515 /**
    1516  * Reads the VM-exit interruption-information field from the VMCS into the VMX
    1517  * transient structure.
    1518  *
    1519  * @param   pVmxTransient   The VMX-transient structure.
    1520  */
    1521 DECLINLINE(void) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
    1522 {
    1523     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
    1524     {
    1525         int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
    1526         AssertRC(rc);
    1527         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
    1528     }
    1529 }
    1530 
    1531 
    1532 /**
    1533  * Reads the VM-exit interruption error code from the VMCS into the VMX
    1534  * transient structure.
    1535  *
    1536  * @param   pVmxTransient   The VMX-transient structure.
    1537  */
    1538 DECLINLINE(void) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
    1539 {
    1540     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
    1541     {
    1542         int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
    1543         AssertRC(rc);
    1544         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
    1545     }
    1546 }
    1547 
    1548 
    1549 /**
    1550  * Reads the VM-exit instruction length field from the VMCS into the VMX
    1551  * transient structure.
    1552  *
    1553  * @param   pVmxTransient   The VMX-transient structure.
    1554  */
    1555 DECLINLINE(void) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
    1556 {
    1557     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
    1558     {
    1559         int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
    1560         AssertRC(rc);
    1561         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
    1562     }
    1563 }
    1564 
    1565 
    1566 /**
    1567  * Reads the VM-exit instruction-information field from the VMCS into
    1568  * the VMX transient structure.
    1569  *
    1570  * @param   pVmxTransient   The VMX-transient structure.
    1571  */
    1572 DECLINLINE(void) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
    1573 {
    1574     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
    1575     {
    1576         int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
    1577         AssertRC(rc);
    1578         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
    1579     }
    1580 }
    1581 
    1582 
    1583 /**
    1584  * Reads the Exit Qualification from the VMCS into the VMX transient structure.
    1585  *
    1586  * @param   pVmxTransient   The VMX-transient structure.
    1587  */
    1588 DECLINLINE(void) hmR0VmxReadExitQualVmcs(PVMXTRANSIENT pVmxTransient)
    1589 {
    1590     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
    1591     {
    1592         int rc = VMXReadVmcsNw(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
    1593         AssertRC(rc);
    1594         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
    1595     }
    1596 }
    1597 
    1598 
    1599 /**
    1600  * Reads the Guest-linear address from the VMCS into the VMX transient structure.
    1601  *
    1602  * @param   pVmxTransient   The VMX-transient structure.
    1603  */
    1604 DECLINLINE(void) hmR0VmxReadGuestLinearAddrVmcs(PVMXTRANSIENT pVmxTransient)
    1605 {
    1606     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
    1607     {
    1608         int rc = VMXReadVmcsNw(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
    1609         AssertRC(rc);
    1610         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
    1611     }
    1612 }
    1613 
    1614 
    1615 /**
    1616  * Reads the Guest-physical address from the VMCS into the VMX transient structure.
    1617  *
    1618  * @param   pVmxTransient   The VMX-transient structure.
    1619  */
    1620 DECLINLINE(void) hmR0VmxReadGuestPhysicalAddrVmcs(PVMXTRANSIENT pVmxTransient)
    1621 {
    1622     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
    1623     {
    1624         int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
    1625         AssertRC(rc);
    1626         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
    1627     }
    1628 }
    1629 
    1630 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1631 /**
    1632  * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
    1633  * structure.
    1634  *
    1635  * @param   pVmxTransient   The VMX-transient structure.
    1636  */
    1637 DECLINLINE(void) hmR0VmxReadGuestPendingDbgXctps(PVMXTRANSIENT pVmxTransient)
    1638 {
    1639     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
    1640     {
    1641         int rc = VMXReadVmcsNw(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
    1642         AssertRC(rc);
    1643         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
    1644     }
    1645 }
    1646 #endif
    1647 
    1648 /**
    1649  * Reads the IDT-vectoring information field from the VMCS into the VMX
    1650  * transient structure.
    1651  *
    1652  * @param   pVmxTransient   The VMX-transient structure.
    1653  *
    1654  * @remarks No-long-jump zone!!!
    1655  */
    1656 DECLINLINE(void) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
    1657 {
    1658     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
    1659     {
    1660         int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
    1661         AssertRC(rc);
    1662         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
    1663     }
    1664 }
    1665 
    1666 
    1667 /**
    1668  * Reads the IDT-vectoring error code from the VMCS into the VMX
    1669  * transient structure.
    1670  *
    1671  * @param   pVmxTransient   The VMX-transient structure.
    1672  */
    1673 DECLINLINE(void) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
    1674 {
    1675     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
    1676     {
    1677         int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
    1678         AssertRC(rc);
    1679         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
    1680     }
    1681 }
    1682 
    1683 #ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
    1684 /**
    1685  * Reads all relevant read-only VMCS fields into the VMX transient structure.
    1686  *
    1687  * @param   pVmxTransient   The VMX-transient structure.
    1688  */
    1689 static void hmR0VmxReadAllRoFieldsVmcs(PVMXTRANSIENT pVmxTransient)
    1690 {
    1691     int rc = VMXReadVmcsNw(VMX_VMCS_RO_EXIT_QUALIFICATION,             &pVmxTransient->uExitQual);
    1692     rc    |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH,            &pVmxTransient->cbExitInstr);
    1693     rc    |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO,              &pVmxTransient->ExitInstrInfo.u);
    1694     rc    |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO,           &pVmxTransient->uIdtVectoringInfo);
    1695     rc    |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,     &pVmxTransient->uIdtVectoringErrorCode);
    1696     rc    |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,       &pVmxTransient->uExitIntInfo);
    1697     rc    |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
    1698     rc    |= VMXReadVmcsNw(VMX_VMCS_RO_GUEST_LINEAR_ADDR,              &pVmxTransient->uGuestLinearAddr);
    1699     rc    |= VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,         &pVmxTransient->uGuestPhysicalAddr);
    1700     AssertRC(rc);
    1701     pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
    1702                                    |  HMVMX_READ_EXIT_INSTR_LEN
    1703                                    |  HMVMX_READ_EXIT_INSTR_INFO
    1704                                    |  HMVMX_READ_IDT_VECTORING_INFO
    1705                                    |  HMVMX_READ_IDT_VECTORING_ERROR_CODE
    1706                                    |  HMVMX_READ_EXIT_INTERRUPTION_INFO
    1707                                    |  HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
    1708                                    |  HMVMX_READ_GUEST_LINEAR_ADDR
    1709                                    |  HMVMX_READ_GUEST_PHYSICAL_ADDR;
    1710 }
    1711 #endif
    1712 
    1713 /**
    1714  * Enters VMX root mode operation on the current CPU.
    1715  *
    1716  * @returns VBox status code.
    1717  * @param   pHostCpu        The HM physical-CPU structure.
    1718  * @param   pVM             The cross context VM structure. Can be
    1719  *                          NULL after a resume.
    1720  * @param   HCPhysCpuPage   Physical address of the VMXON region.
    1721  * @param   pvCpuPage       Pointer to the VMXON region.
    1722  */
    1723 static int hmR0VmxEnterRootMode(PHMPHYSCPU pHostCpu, PVMCC pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
    1724 {
    1725     Assert(pHostCpu);
    1726     Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
    1727     Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
    1728     Assert(pvCpuPage);
    1729     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1730 
    1731     if (pVM)
    1732     {
    1733         /* Write the VMCS revision identifier to the VMXON region. */
    1734         *(uint32_t *)pvCpuPage = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID);
    1735     }
    1736 
    1737     /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
    1738     RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    1739 
    1740     /* Enable the VMX bit in CR4 if necessary. */
    1741     RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
    1742 
    1743     /* Record whether VMXE was already prior to us enabling it above. */
    1744     pHostCpu->fVmxeAlreadyEnabled = RT_BOOL(uOldCr4 & X86_CR4_VMXE);
    1745 
    1746     /* Enter VMX root mode. */
    1747     int rc = VMXEnable(HCPhysCpuPage);
    1748     if (RT_FAILURE(rc))
    1749     {
    1750         /* Restore CR4.VMXE if it was not set prior to our attempt to set it above. */
    1751         if (!pHostCpu->fVmxeAlreadyEnabled)
    1752             SUPR0ChangeCR4(0 /* fOrMask */, ~(uint64_t)X86_CR4_VMXE);
    1753 
    1754         if (pVM)
    1755             pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
    1756     }
    1757 
    1758     /* Restore interrupts. */
    1759     ASMSetFlags(fEFlags);
    1760     return rc;
    1761 }
    1762 
    1763 
    1764 /**
    1765  * Exits VMX root mode operation on the current CPU.
    1766  *
    1767  * @returns VBox status code.
    1768  * @param   pHostCpu        The HM physical-CPU structure.
    1769  */
    1770 static int hmR0VmxLeaveRootMode(PHMPHYSCPU pHostCpu)
    1771 {
    1772     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1773 
    1774     /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
    1775     RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    1776 
    1777     /* If we're for some reason not in VMX root mode, then don't leave it. */
    1778     RTCCUINTREG const uHostCr4 = ASMGetCR4();
    1779 
    1780     int rc;
    1781     if (uHostCr4 & X86_CR4_VMXE)
    1782     {
    1783         /* Exit VMX root mode and clear the VMX bit in CR4. */
    1784         VMXDisable();
    1785 
    1786         /* Clear CR4.VMXE only if it was clear prior to us setting it. */
    1787         if (!pHostCpu->fVmxeAlreadyEnabled)
    1788             SUPR0ChangeCR4(0 /* fOrMask */, ~(uint64_t)X86_CR4_VMXE);
    1789 
    1790         rc = VINF_SUCCESS;
    1791     }
    1792     else
    1793         rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
    1794 
    1795     /* Restore interrupts. */
    1796     ASMSetFlags(fEFlags);
    1797     return rc;
    1798 }
    1799 
    1800 
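Editor's note: these two functions bracket per-CPU VMX work; a hypothetical pairing (with placeholder variables) looks like this.

    /* Hypothetical pairing: HCPhysCpuPage/pvCpuPage describe this host CPU's
     * 4K-aligned VMXON region; preemption must already be disabled. */
    int rc = hmR0VmxEnterRootMode(pHostCpu, pVM, HCPhysCpuPage, pvCpuPage);
    if (RT_SUCCESS(rc))
    {
        /* ... load/clear VMCSes and run guest code on this CPU ... */
        rc = hmR0VmxLeaveRootMode(pHostCpu);   /* Also restores CR4.VMXE if we set it. */
    }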
    1801 /**
    1802  * Allocates pages as specified by an array of VMX page-allocation info
    1803  * objects.
    1804  *
    1805  * The pages' contents are zeroed after allocation.
    1806  *
    1807  * @returns VBox status code.
    1808  * @param   phMemObj        Where to return the handle to the allocation.
    1809  * @param   paAllocInfo     The pointer to the first element of the VMX
    1810  *                          page-allocation info object array.
    1811  * @param   cEntries        The number of elements in the @a paAllocInfo array.
    1812  */
    1813 static int hmR0VmxPagesAllocZ(PRTR0MEMOBJ phMemObj, PVMXPAGEALLOCINFO paAllocInfo, uint32_t cEntries)
    1814 {
    1815     *phMemObj = NIL_RTR0MEMOBJ;
    1816 
    1817     /* Figure out how many pages to allocate. */
    1818     uint32_t cPages = 0;
    1819     for (uint32_t iPage = 0; iPage < cEntries; iPage++)
    1820         cPages += !!paAllocInfo[iPage].fValid;
    1821 
    1822     /* Allocate the pages. */
    1823     if (cPages)
    1824     {
    1825         size_t const cbPages = cPages << PAGE_SHIFT;
    1826         int rc = RTR0MemObjAllocPage(phMemObj, cbPages, false /* fExecutable */);
    1827         if (RT_FAILURE(rc))
    1828             return rc;
    1829 
    1830         /* Zero the contents and assign each page to the corresponding VMX page-allocation entry. */
    1831         void *pvFirstPage = RTR0MemObjAddress(*phMemObj);
    1832         RT_BZERO(pvFirstPage, cbPages);
    1833 
    1834         uint32_t iPage = 0;
    1835         for (uint32_t i = 0; i < cEntries; i++)
    1836             if (paAllocInfo[i].fValid)
    1837             {
    1838                 RTHCPHYS const HCPhysPage = RTR0MemObjGetPagePhysAddr(*phMemObj, iPage);
    1839                 void          *pvPage     = (void *)((uintptr_t)pvFirstPage + (iPage << X86_PAGE_4K_SHIFT));
    1840                 Assert(HCPhysPage && HCPhysPage != NIL_RTHCPHYS);
    1841                 AssertPtr(pvPage);
    1842 
    1843                 Assert(paAllocInfo[i].pHCPhys);   /* Index with i (info array), not iPage (page counter). */
    1844                 Assert(paAllocInfo[i].ppVirt);
    1845                 *paAllocInfo[i].pHCPhys = HCPhysPage;
    1846                 *paAllocInfo[i].ppVirt  = pvPage;
    1847 
    1848                 /* Move to next page. */
    1849                 ++iPage;
    1850             }
    1851 
    1852         /* Make sure all valid (requested) pages have been assigned. */
    1853         Assert(iPage == cPages);
    1854     }
    1855     return VINF_SUCCESS;
    1856 }
    1857 
    1858 
    1859 /**
    1860  * Frees pages allocated using hmR0VmxPagesAllocZ.
    1861  *
    1862  * @param   phMemObj    Pointer to the memory object handle.  Will be set to
    1863  *                      NIL.
    1864  */
    1865 DECL_FORCE_INLINE(void) hmR0VmxPagesFree(PRTR0MEMOBJ phMemObj)
    1866 {
    1867     /* We can cleanup wholesale since it's all one allocation. */
    1868     if (*phMemObj != NIL_RTR0MEMOBJ)
    1869     {
    1870         RTR0MemObjFree(*phMemObj, true /* fFreeMappings */);
    1871         *phMemObj = NIL_RTR0MEMOBJ;
    1872     }
    1873 }
    1874 
    1875 
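Editor's note: the allocation-info pattern lets a caller describe a batch of optional pages in one array and release them with a single free, as hmR0VmxAllocVmcsInfo does below. A stripped-down sketch with placeholder outputs:

    /* Sketch only: entries with fValid = false are skipped, not allocated. */
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    RTHCPHYS   HCPhysA = NIL_RTHCPHYS,  HCPhysB = NIL_RTHCPHYS;
    void      *pvA     = NULL,         *pvB     = NULL;
    VMXPAGEALLOCINFO aAllocInfo[] =
    {
        { true,  0 /* Unused */, &HCPhysA, &pvA },
        { false, 0 /* Unused */, &HCPhysB, &pvB },   /* No page assigned. */
    };
    int rc = hmR0VmxPagesAllocZ(&hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    if (RT_SUCCESS(rc))
    {
        /* ... use the zeroed page behind pvA/HCPhysA ... */
        hmR0VmxPagesFree(&hMemObj);    /* One object, wholesale cleanup. */
    }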
    1876 /**
    1877  * Initializes a VMCS info. object.
    1878  *
    1879  * @param   pVmcsInfo           The VMCS info. object.
    1880  * @param   pVmcsInfoShared     The VMCS info. object shared with ring-3.
    1881  */
    1882 static void hmR0VmxVmcsInfoInit(PVMXVMCSINFO pVmcsInfo, PVMXVMCSINFOSHARED pVmcsInfoShared)
    1883 {
    1884     RT_ZERO(*pVmcsInfo);
    1885     RT_ZERO(*pVmcsInfoShared);
    1886 
    1887     pVmcsInfo->pShared             = pVmcsInfoShared;
    1888     Assert(pVmcsInfo->hMemObj == NIL_RTR0MEMOBJ);
    1889     pVmcsInfo->HCPhysVmcs          = NIL_RTHCPHYS;
    1890     pVmcsInfo->HCPhysShadowVmcs    = NIL_RTHCPHYS;
    1891     pVmcsInfo->HCPhysMsrBitmap     = NIL_RTHCPHYS;
    1892     pVmcsInfo->HCPhysGuestMsrLoad  = NIL_RTHCPHYS;
    1893     pVmcsInfo->HCPhysGuestMsrStore = NIL_RTHCPHYS;
    1894     pVmcsInfo->HCPhysHostMsrLoad   = NIL_RTHCPHYS;
    1895     pVmcsInfo->HCPhysVirtApic      = NIL_RTHCPHYS;
    1896     pVmcsInfo->HCPhysEPTP          = NIL_RTHCPHYS;
    1897     pVmcsInfo->u64VmcsLinkPtr      = NIL_RTHCPHYS;
    1898     pVmcsInfo->idHostCpuState      = NIL_RTCPUID;
    1899     pVmcsInfo->idHostCpuExec       = NIL_RTCPUID;
    1900 }
    1901 
    1902 
    1903 /**
    1904  * Frees the VT-x structures for a VMCS info. object.
    1905  *
    1906  * @param   pVmcsInfo           The VMCS info. object.
    1907  * @param   pVmcsInfoShared     The VMCS info. object shared with ring-3.
    1908  */
    1909 static void hmR0VmxVmcsInfoFree(PVMXVMCSINFO pVmcsInfo, PVMXVMCSINFOSHARED pVmcsInfoShared)
    1910 {
    1911     hmR0VmxPagesFree(&pVmcsInfo->hMemObj);
    1912     hmR0VmxVmcsInfoInit(pVmcsInfo, pVmcsInfoShared);
    1913 }
    1914 
    1915 
    1916 /**
    1917  * Allocates the VT-x structures for a VMCS info. object.
    1918  *
    1919  * @returns VBox status code.
    1920  * @param   pVCpu           The cross context virtual CPU structure.
    1921  * @param   pVmcsInfo       The VMCS info. object.
    1922  * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
    1923  *
    1924  * @remarks The caller is expected to take care of any and all allocation failures.
    1925  *          This function will not perform any cleanup for failures half-way
    1926  *          through.
    1927  */
    1928 static int hmR0VmxAllocVmcsInfo(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
    1929 {
    1930     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    1931 
    1932     bool const fMsrBitmaps = RT_BOOL(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS);
    1933     bool const fShadowVmcs = !fIsNstGstVmcs ? pVM->hmr0.s.vmx.fUseVmcsShadowing : pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing;
    1934     Assert(!pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing);  /* VMCS shadowing is not yet exposed to the guest. */
    1935     VMXPAGEALLOCINFO aAllocInfo[] =
    1936     {
    1937         { true,        0 /* Unused */, &pVmcsInfo->HCPhysVmcs,         &pVmcsInfo->pvVmcs         },
    1938         { true,        0 /* Unused */, &pVmcsInfo->HCPhysGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad },
    1939         { true,        0 /* Unused */, &pVmcsInfo->HCPhysHostMsrLoad,  &pVmcsInfo->pvHostMsrLoad  },
    1940         { fMsrBitmaps, 0 /* Unused */, &pVmcsInfo->HCPhysMsrBitmap,    &pVmcsInfo->pvMsrBitmap    },
    1941         { fShadowVmcs, 0 /* Unused */, &pVmcsInfo->HCPhysShadowVmcs,   &pVmcsInfo->pvShadowVmcs   },
    1942     };
    1943 
    1944     int rc = hmR0VmxPagesAllocZ(&pVmcsInfo->hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    1945     if (RT_FAILURE(rc))
    1946         return rc;
    1947 
    1948     /*
    1949      * We use the same page for the VM-entry MSR-load and VM-exit MSR-store areas,
    1950      * because they contain a symmetric list of guest MSRs to load on VM-entry and store on VM-exit.
    1951      */
    1952     AssertCompile(RT_ELEMENTS(aAllocInfo) > 0);
    1953     Assert(pVmcsInfo->HCPhysGuestMsrLoad != NIL_RTHCPHYS);
    1954     pVmcsInfo->pvGuestMsrStore     = pVmcsInfo->pvGuestMsrLoad;
    1955     pVmcsInfo->HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrLoad;
    1956 
    1957     /*
    1958      * Get the virtual-APIC page rather than allocating it again.
    1959      */
    1960     if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
    1961     {
    1962         if (!fIsNstGstVmcs)
    1963         {
    1964             if (PDMHasApic(pVM))
    1965             {
    1966                 rc = APICGetApicPageForCpu(pVCpu, &pVmcsInfo->HCPhysVirtApic, (PRTR0PTR)&pVmcsInfo->pbVirtApic, NULL /*pR3Ptr*/);
    1967                 if (RT_FAILURE(rc))
    1968                     return rc;
    1969                 Assert(pVmcsInfo->pbVirtApic);
    1970                 Assert(pVmcsInfo->HCPhysVirtApic && pVmcsInfo->HCPhysVirtApic != NIL_RTHCPHYS);
    1971             }
    1972         }
    1973         else
    1974         {
    1975             pVmcsInfo->pbVirtApic     = &pVCpu->cpum.GstCtx.hwvirt.vmx.abVirtApicPage[0];
    1976             pVmcsInfo->HCPhysVirtApic = GVMMR0ConvertGVMPtr2HCPhys(pVM, pVmcsInfo->pbVirtApic);
    1977             Assert(pVmcsInfo->HCPhysVirtApic && pVmcsInfo->HCPhysVirtApic != NIL_RTHCPHYS);
    1978         }
    1979     }
    1980 
    1981     return VINF_SUCCESS;
    1982 }
    1983 
    1984 
    1985 /**
    1986  * Frees all VT-x structures for the VM.
    1987  *
    1989  * @param   pVM     The cross context VM structure.
    1990  */
    1991 static void hmR0VmxStructsFree(PVMCC pVM)
    1992 {
    1993     hmR0VmxPagesFree(&pVM->hmr0.s.vmx.hMemObj);
    1994 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1995     if (pVM->hmr0.s.vmx.fUseVmcsShadowing)
    1996     {
    1997         RTMemFree(pVM->hmr0.s.vmx.paShadowVmcsFields);
    1998         pVM->hmr0.s.vmx.paShadowVmcsFields = NULL;
    1999         RTMemFree(pVM->hmr0.s.vmx.paShadowVmcsRoFields);
    2000         pVM->hmr0.s.vmx.paShadowVmcsRoFields = NULL;
    2001     }
    2002 #endif
    2003 
    2004     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    2005     {
    2006         PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
    2007         hmR0VmxVmcsInfoFree(&pVCpu->hmr0.s.vmx.VmcsInfo, &pVCpu->hm.s.vmx.VmcsInfo);
    2008 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2009         if (pVM->cpum.ro.GuestFeatures.fVmx)
    2010             hmR0VmxVmcsInfoFree(&pVCpu->hmr0.s.vmx.VmcsInfoNstGst, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
    2011 #endif
    2012     }
    2013 }
    2014 
    2015 
    2016 /**
    2017  * Allocates all VT-x structures for the VM.
    2018  *
    2019  * @returns VBox status code.
    2020  * @param   pVM     The cross context VM structure.
    2021  *
    2022  * @remarks This function will clean up on memory allocation failures.
    2023  */
    2024 static int hmR0VmxStructsAlloc(PVMCC pVM)
    2025 {
    2026     /*
    2027      * Sanity check the VMCS size reported by the CPU as we assume 4KB allocations.
    2028      * The VMCS size cannot be more than 4096 bytes.
    2029      *
    2030      * See Intel spec. Appendix A.1 "Basic VMX Information".
    2031      */
    2032     uint32_t const cbVmcs = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_SIZE);
    2033     if (cbVmcs <= X86_PAGE_4K_SIZE)
    2034     { /* likely */ }
    2035     else
    2036     {
    2037         VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE;
    2038         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2039     }
    2040 
    2041     /*
    2042      * Allocate per-VM VT-x structures.
    2043      */
    2044     bool const fVirtApicAccess   = RT_BOOL(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
    2045     bool const fUseVmcsShadowing = pVM->hmr0.s.vmx.fUseVmcsShadowing;
    2046     VMXPAGEALLOCINFO aAllocInfo[] =
    2047     {
    2048         { fVirtApicAccess,   0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysApicAccess,    (PRTR0PTR)&pVM->hmr0.s.vmx.pbApicAccess },
    2049         { fUseVmcsShadowing, 0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysVmreadBitmap,  &pVM->hmr0.s.vmx.pvVmreadBitmap         },
    2050         { fUseVmcsShadowing, 0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysVmwriteBitmap, &pVM->hmr0.s.vmx.pvVmwriteBitmap        },
    2051 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
    2052         { true,              0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysScratch,       (PRTR0PTR)&pVM->hmr0.s.vmx.pbScratch    },
    2053 #endif
    2054     };
    2055 
    2056     int rc = hmR0VmxPagesAllocZ(&pVM->hmr0.s.vmx.hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    2057     if (RT_SUCCESS(rc))
    2058     {
    2059 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2060         /* Allocate the shadow VMCS-fields array. */
    2061         if (fUseVmcsShadowing)
    2062         {
    2063             Assert(!pVM->hmr0.s.vmx.cShadowVmcsFields);
    2064             Assert(!pVM->hmr0.s.vmx.cShadowVmcsRoFields);
    2065             pVM->hmr0.s.vmx.paShadowVmcsFields   = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
    2066             pVM->hmr0.s.vmx.paShadowVmcsRoFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
    2067             if (!pVM->hmr0.s.vmx.paShadowVmcsFields || !pVM->hmr0.s.vmx.paShadowVmcsRoFields)
    2068                 rc = VERR_NO_MEMORY;
    2069         }
    2070 #endif
    2071 
    2072         /*
    2073          * Allocate per-VCPU VT-x structures.
    2074          */
    2075         for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
    2076         {
    2077             /* Allocate the guest VMCS structures. */
    2078             PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
    2079             rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
    2080 
    2081 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2082             /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */
    2083             if (pVM->cpum.ro.GuestFeatures.fVmx && RT_SUCCESS(rc))
    2084                 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
    2085 #endif
    2086         }
    2087         if (RT_SUCCESS(rc))
    2088             return VINF_SUCCESS;
    2089     }
    2090     hmR0VmxStructsFree(pVM);
    2091     return rc;
    2092 }
    2093 
    2094 
    2095 /**
    2096  * Pre-initializes non-zero fields in VMX structures that will be allocated.
    2097  *
    2098  * @param   pVM     The cross context VM structure.
    2099  */
    2100 static void hmR0VmxStructsInit(PVMCC pVM)
    2101 {
    2102     /* Paranoia. */
    2103     Assert(pVM->hmr0.s.vmx.pbApicAccess == NULL);
    2104 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
    2105     Assert(pVM->hmr0.s.vmx.pbScratch == NULL);
    2106 #endif
    2107 
    2108     /*
    2109      * Initialize members up-front so we can cleanup en masse on allocation failures.
    2110      */
    2111 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
    2112     pVM->hmr0.s.vmx.HCPhysScratch       = NIL_RTHCPHYS;
    2113 #endif
    2114     pVM->hmr0.s.vmx.HCPhysApicAccess    = NIL_RTHCPHYS;
    2115     pVM->hmr0.s.vmx.HCPhysVmreadBitmap  = NIL_RTHCPHYS;
    2116     pVM->hmr0.s.vmx.HCPhysVmwriteBitmap = NIL_RTHCPHYS;
    2117     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    2118     {
    2119         PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
    2120         hmR0VmxVmcsInfoInit(&pVCpu->hmr0.s.vmx.VmcsInfo,       &pVCpu->hm.s.vmx.VmcsInfo);
    2121         hmR0VmxVmcsInfoInit(&pVCpu->hmr0.s.vmx.VmcsInfoNstGst, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
    2122     }
    2123 }
    2124 
    2125 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2126 /**
    2127  * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not.
    2128  *
    2129  * @returns @c true if the MSR is intercepted, @c false otherwise.
    2130  * @param   pbMsrBitmap     The MSR bitmap.
    2131  * @param   offMsr          The MSR byte offset.
    2132  * @param   iBit            The bit offset from the byte offset.
    2133  */
    2134 DECLINLINE(bool) hmR0VmxIsMsrBitSet(uint8_t const *pbMsrBitmap, uint16_t offMsr, int32_t iBit)
    2135 {
    2136     Assert(offMsr + (iBit >> 3) <= X86_PAGE_4K_SIZE);
    2137     return ASMBitTest(pbMsrBitmap + offMsr, iBit);
    2138 }
    2139 #endif
    2140 
    2141 /**
    2142  * Sets the permission bits for the specified MSR in the given MSR bitmap.
    2143  *
    2144  * If the passed VMCS is a nested-guest VMCS, this function ensures that the
    2145  * read/write intercept is cleared from the MSR bitmap used for hardware-assisted
    2146  * VMX execution of the nested-guest, only if the nested-guest is also not intercepting
    2147  * the read/write access of this MSR.
    2148  *
    2149  * @param   pVCpu           The cross context virtual CPU structure.
    2150  * @param   pVmcsInfo       The VMCS info. object.
    2151  * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
    2152  * @param   idMsr           The MSR value.
    2153  * @param   fMsrpm          The MSR permissions (see VMXMSRPM_XXX). This must
    2154  *                          include both a read -and- a write permission!
    2155  *
    2156  * @sa      CPUMGetVmxMsrPermission.
    2157  * @remarks Can be called with interrupts disabled.
    2158  */
    2159 static void hmR0VmxSetMsrPermission(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
    2160 {
    2161     uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
    2162     Assert(pbMsrBitmap);
    2163     Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm));
    2164 
    2165     /*
    2166      * MSR-bitmap Layout:
    2167      *   Byte index            MSR range            Interpreted as
    2168      * 0x000 - 0x3ff    0x00000000 - 0x00001fff    Low MSR read bits.
    2169      * 0x400 - 0x7ff    0xc0000000 - 0xc0001fff    High MSR read bits.
    2170      * 0x800 - 0xbff    0x00000000 - 0x00001fff    Low MSR write bits.
    2171      * 0xc00 - 0xfff    0xc0000000 - 0xc0001fff    High MSR write bits.
    2172      *
    2173      * A bit corresponding to an MSR within the above range causes a VM-exit
    2174      * if the bit is 1 on executions of RDMSR/WRMSR.  If an MSR falls outside
    2175      * these ranges, accessing it always causes a VM-exit.
    2176      *
    2177      * See Intel spec. 24.6.9 "MSR-Bitmap Address".
    2178      */
    2179     uint16_t const offBitmapRead  = 0;
    2180     uint16_t const offBitmapWrite = 0x800;
    2181     uint16_t       offMsr;
    2182     int32_t        iBit;
    2183     if (idMsr <= UINT32_C(0x00001fff))
    2184     {
    2185         offMsr = 0;
    2186         iBit   = idMsr;
    2187     }
    2188     else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
    2189     {
    2190         offMsr = 0x400;
    2191         iBit   = idMsr - UINT32_C(0xc0000000);
    2192     }
    2193     else
    2194         AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr));
    2195 
    2196     /*
    2197      * Set the MSR read permission.
    2198      */
    2199     uint16_t const offMsrRead = offBitmapRead + offMsr;
    2200     Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
    2201     if (fMsrpm & VMXMSRPM_ALLOW_RD)
    2202     {
    2203 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2204         bool const fClear = !fIsNstGstVmcs ? true
    2205                           : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrRead, iBit);
    2206 #else
    2207         RT_NOREF2(pVCpu, fIsNstGstVmcs);
    2208         bool const fClear = true;
    2209 #endif
    2210         if (fClear)
    2211             ASMBitClear(pbMsrBitmap + offMsrRead, iBit);
    2212     }
    2213     else
    2214         ASMBitSet(pbMsrBitmap + offMsrRead, iBit);
    2215 
    2216     /*
    2217      * Set the MSR write permission.
    2218      */
    2219     uint16_t const offMsrWrite = offBitmapWrite + offMsr;
    2220     Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
    2221     if (fMsrpm & VMXMSRPM_ALLOW_WR)
    2222     {
    2223 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2224         bool const fClear = !fIsNstGstVmcs ? true
    2225                           : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrWrite, iBit);
    2226 #else
    2227         RT_NOREF2(pVCpu, fIsNstGstVmcs);
    2228         bool const fClear = true;
    2229 #endif
    2230         if (fClear)
    2231             ASMBitClear(pbMsrBitmap + offMsrWrite, iBit);
    2232     }
    2233     else
    2234         ASMBitSet(pbMsrBitmap + offMsrWrite, iBit);
    2235 }
    2236 
    2237 
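To make the bitmap layout above concrete, here is a minimal user-mode sketch (plain C, not VirtualBox code; msrBitmapLocate is a hypothetical name) that maps an MSR index to its byte offset and bit position using the same two-range rule:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the offset math in hmR0VmxSetMsrPermission. */
    static int msrBitmapLocate(uint32_t idMsr, uint16_t *poffMsr, int32_t *piBit)
    {
        if (idMsr <= UINT32_C(0x00001fff))
        {
            *poffMsr = 0;                                       /* low MSR range */
            *piBit   = (int32_t)idMsr;
            return 0;
        }
        if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
        {
            *poffMsr = 0x400;                                   /* high MSR range */
            *piBit   = (int32_t)(idMsr - UINT32_C(0xc0000000));
            return 0;
        }
        return -1;                                              /* outside both ranges: always intercepted */
    }

    int main(void)
    {
        uint16_t offMsr; int32_t iBit;
        if (msrBitmapLocate(UINT32_C(0xc0000080) /* IA32_EFER */, &offMsr, &iBit) == 0)
            printf("read: byte %#x bit %d, write: byte %#x bit %d\n",
                   offMsr + (iBit >> 3), iBit & 7, 0x800 + offMsr + (iBit >> 3), iBit & 7);
        return 0;
    }

For IA32_EFER this yields the read bit at byte 0x410 and the write bit at byte 0xc10, matching the table in the comment above.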
    2238 /**
    2239  * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
    2240  * area.
    2241  *
    2242  * @returns VBox status code.
    2243  * @param   pVCpu       The cross context virtual CPU structure.
    2244  * @param   pVmcsInfo   The VMCS info. object.
    2245  * @param   cMsrs       The number of MSRs.
    2246  */
    2247 static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs)
    2248 {
    2249     /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
    2250     uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc);
    2251     if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
    2252     {
    2253         /* Commit the MSR counts to the VMCS and update the cache. */
    2254         if (pVmcsInfo->cEntryMsrLoad != cMsrs)
    2255         {
    2256             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);   AssertRC(rc);
    2257             rc     = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);   AssertRC(rc);
    2258             rc     = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);   AssertRC(rc);
    2259             pVmcsInfo->cEntryMsrLoad = cMsrs;
    2260             pVmcsInfo->cExitMsrStore = cMsrs;
    2261             pVmcsInfo->cExitMsrLoad  = cMsrs;
    2262         }
    2263         return VINF_SUCCESS;
    2264     }
    2265 
    2266     LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
    2267     pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
    2268     return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2269 }
    2270 
    2271 
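For context on cMaxSupportedMsrs: bits 27:25 of the IA32_VMX_MISC MSR hold a value N, and the recommended maximum auto-load/store count is 512 * (N + 1) (Intel SDM, appendix A.6); VMX_MISC_MAX_MSRS encapsulates that. A minimal sketch of the extraction, under that reading of the spec:

    #include <stdint.h>

    /* Sketch of what VMX_MISC_MAX_MSRS computes from IA32_VMX_MISC. */
    static uint32_t maxAutoMsrs(uint64_t u64Misc)
    {
        uint32_t const N = (uint32_t)((u64Misc >> 25) & 0x7);   /* bits 27:25 */
        return 512 * (N + 1);                                   /* N=0 gives 512, the common case */
    }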
    2272 /**
    2273  * Adds a new (or updates the value of an existing) guest/host MSR
    2274  * pair to be swapped during the world-switch as part of the
    2275  * auto-load/store MSR area in the VMCS.
    2276  *
    2277  * @returns VBox status code.
    2278  * @param   pVCpu           The cross context virtual CPU structure.
    2279  * @param   pVmxTransient   The VMX-transient structure.
    2280  * @param   idMsr           The MSR.
    2281  * @param   uGuestMsrValue  Value of the guest MSR.
    2282  * @param   fSetReadWrite   Whether to set the guest read/write access of this
    2283  *                          MSR (thus not causing a VM-exit).
    2284  * @param   fUpdateHostMsr  Whether to update the value of the host MSR if
    2285  *                          necessary.
    2286  */
    2287 static int hmR0VmxAddAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
    2288                                       bool fSetReadWrite, bool fUpdateHostMsr)
    2289 {
    2290     PVMXVMCSINFO    pVmcsInfo     = pVmxTransient->pVmcsInfo;
    2291     bool const      fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
    2292     PVMXAUTOMSR     pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
    2293     uint32_t        cMsrs         = pVmcsInfo->cEntryMsrLoad;
    2294     uint32_t        i;
    2295 
    2296     /* Paranoia. */
    2297     Assert(pGuestMsrLoad);
    2298 
    2299 #ifndef DEBUG_bird
    2300     LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
    2301 #endif
    2302 
    2303     /* Check if the MSR already exists in the VM-entry MSR-load area. */
    2304     for (i = 0; i < cMsrs; i++)
    2305     {
    2306         if (pGuestMsrLoad[i].u32Msr == idMsr)
    2307             break;
    2308     }
    2309 
    2310     bool fAdded = false;
    2311     if (i == cMsrs)
    2312     {
    2313         /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
    2314         ++cMsrs;
    2315         int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
    2316         AssertMsgRCReturn(rc, ("Insufficient space to add MSR to VM-entry MSR-load/store area %u\n", idMsr), rc);
    2317 
    2318         /* Set the guest to read/write this MSR without causing VM-exits. */
    2319         if (   fSetReadWrite
    2320             && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
    2321             hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD_WR);
    2322 
    2323         Log4Func(("Added MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
    2324         fAdded = true;
    2325     }
    2326 
    2327     /* Update the MSR value for the newly added or already existing MSR. */
    2328     pGuestMsrLoad[i].u32Msr   = idMsr;
    2329     pGuestMsrLoad[i].u64Value = uGuestMsrValue;
    2330 
    2331     /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
    2332     if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
    2333     {
    2334         PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
    2335         pGuestMsrStore[i].u32Msr   = idMsr;
    2336         pGuestMsrStore[i].u64Value = uGuestMsrValue;
    2337     }
    2338 
    2339     /* Update the corresponding slot in the host MSR area. */
    2340     PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
    2341     Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
    2342     Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
    2343     pHostMsr[i].u32Msr = idMsr;
    2344 
    2345     /*
    2346      * Only if the caller requests to update the host MSR value AND we've newly added the
    2347      * MSR to the host MSR area do we actually update the value. Otherwise, it will be
    2348      * updated by hmR0VmxUpdateAutoLoadHostMsrs().
    2349      *
    2350      * We do this for performance reasons since reading MSRs may be quite expensive.
    2351      */
    2352     if (fAdded)
    2353     {
    2354         if (fUpdateHostMsr)
    2355         {
    2356             Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    2357             Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2358             pHostMsr[i].u64Value = ASMRdMsr(idMsr);
    2359         }
    2360         else
    2361         {
    2362             /* Someone else can do the work. */
    2363             pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
    2364         }
    2365     }
    2366     return VINF_SUCCESS;
    2367 }
    2368 
    2369 
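The add path above boils down to a find-or-append over the VMXAUTOMSR array, with the guest-load, guest-store and host-load areas kept index-aligned so a single index addresses all three. A stripped-down sketch of just that pattern (simplified types, no VMCS writes or permission updates):

    #include <stdint.h>

    typedef struct { uint32_t u32Msr; uint64_t u64Value; } AUTOMSR;   /* simplified VMXAUTOMSR */

    /* Finds idMsr in paMsrs[0..*pcMsrs) or appends it; returns its index. */
    static uint32_t autoMsrFindOrAdd(AUTOMSR *paMsrs, uint32_t *pcMsrs, uint32_t idMsr, uint64_t uValue)
    {
        uint32_t i;
        for (i = 0; i < *pcMsrs; i++)
            if (paMsrs[i].u32Msr == idMsr)
                break;
        if (i == *pcMsrs)
            ++*pcMsrs;                  /* not found: grow by one, as the real code does first */
        paMsrs[i].u32Msr   = idMsr;
        paMsrs[i].u64Value = uValue;    /* the update covers both the new and the existing case */
        return i;
    }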
    2370 /**
    2371  * Removes a guest/host MSR pair, swapped during the world-switch, from the
    2372  * auto-load/store MSR area in the VMCS.
    2373  *
    2374  * @returns VBox status code.
    2375  * @param   pVCpu           The cross context virtual CPU structure.
    2376  * @param   pVmxTransient   The VMX-transient structure.
    2377  * @param   idMsr           The MSR.
    2378  */
    2379 static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr)
    2380 {
    2381     PVMXVMCSINFO    pVmcsInfo     = pVmxTransient->pVmcsInfo;
    2382     bool const      fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
    2383     PVMXAUTOMSR     pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
    2384     uint32_t        cMsrs         = pVmcsInfo->cEntryMsrLoad;
    2385 
    2386 #ifndef DEBUG_bird
    2387     LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
    2388 #endif
    2389 
    2390     for (uint32_t i = 0; i < cMsrs; i++)
    2391     {
    2392         /* Find the MSR. */
    2393         if (pGuestMsrLoad[i].u32Msr == idMsr)
    2394         {
    2395             /*
    2396              * If it's the last MSR, we only need to reduce the MSR count.
    2397              * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
    2398              */
    2399             if (i < cMsrs - 1)
    2400             {
    2401                 /* Remove it from the VM-entry MSR-load area. */
    2402                 pGuestMsrLoad[i].u32Msr   = pGuestMsrLoad[cMsrs - 1].u32Msr;
    2403                 pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
    2404 
    2405                 /* Remove it from the VM-exit MSR-store area if it's in a different page. */
    2406                 if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
    2407                 {
    2408                     PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
    2409                     Assert(pGuestMsrStore[i].u32Msr == idMsr);
    2410                     pGuestMsrStore[i].u32Msr   = pGuestMsrStore[cMsrs - 1].u32Msr;
    2411                     pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
    2412                 }
    2413 
    2414                 /* Remove it from the VM-exit MSR-load area. */
    2415                 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
    2416                 Assert(pHostMsr[i].u32Msr == idMsr);
    2417                 pHostMsr[i].u32Msr   = pHostMsr[cMsrs - 1].u32Msr;
    2418                 pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
    2419             }
    2420 
    2421             /* Reduce the count to reflect the removed MSR and bail. */
    2422             --cMsrs;
    2423             break;
    2424         }
    2425     }
    2426 
    2427     /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
    2428     if (cMsrs != pVmcsInfo->cEntryMsrLoad)
    2429     {
    2430         int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
    2431         AssertRCReturn(rc, rc);
    2432 
    2433         /* We're no longer swapping MSRs during the world-switch; intercept guest reads/writes to them. */
    2434         if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    2435             hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
    2436 
    2437         Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
    2438         return VINF_SUCCESS;
    2439     }
    2440 
    2441     return VERR_NOT_FOUND;
    2442 }
    2443 
    2444 
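The removal above uses the classic swap-with-last idiom: the final entry is copied over the hole and the count is decremented, which is O(1) but does not preserve order (harmless here, since the area is only ever searched linearly). The same idiom in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { uint32_t u32Msr; uint64_t u64Value; } AUTOMSR;   /* simplified VMXAUTOMSR */

    /* Removes idMsr by overwriting it with the last entry; returns false if not found. */
    static bool autoMsrRemove(AUTOMSR *paMsrs, uint32_t *pcMsrs, uint32_t idMsr)
    {
        for (uint32_t i = 0; i < *pcMsrs; i++)
            if (paMsrs[i].u32Msr == idMsr)
            {
                if (i < *pcMsrs - 1)
                    paMsrs[i] = paMsrs[*pcMsrs - 1];   /* copy the last entry into the hole */
                --*pcMsrs;
                return true;
            }
        return false;
    }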
    2445 /**
    2446  * Checks if the specified guest MSR is part of the VM-entry MSR-load area.
    2447  *
    2448  * @returns @c true if found, @c false otherwise.
    2449  * @param   pVmcsInfo   The VMCS info. object.
    2450  * @param   idMsr       The MSR to find.
    2451  */
    2452 static bool hmR0VmxIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
    2453 {
    2454     PCVMXAUTOMSR   pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
    2455     uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
    2456     Assert(pMsrs);
    2457     Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
    2458     for (uint32_t i = 0; i < cMsrs; i++)
    2459     {
    2460         if (pMsrs[i].u32Msr == idMsr)
    2461             return true;
    2462     }
    2463     return false;
    2464 }
    2465 
    2466 
    2467 /**
    2468  * Updates the value of all host MSRs in the VM-exit MSR-load area.
    2469  *
    2470  * @param   pVCpu       The cross context virtual CPU structure.
    2471  * @param   pVmcsInfo   The VMCS info. object.
    2472  *
    2473  * @remarks No-long-jump zone!!!
    2474  */
    2475 static void hmR0VmxUpdateAutoLoadHostMsrs(PCVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
    2476 {
    2477     RT_NOREF(pVCpu);
    2478     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2479 
    2480     PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
    2481     uint32_t const cMsrs     = pVmcsInfo->cExitMsrLoad;
    2482     Assert(pHostMsrLoad);
    2483     Assert(sizeof(*pHostMsrLoad) * cMsrs <= X86_PAGE_4K_SIZE);
    2484     LogFlowFunc(("pVCpu=%p cMsrs=%u\n", pVCpu, cMsrs));
    2485     for (uint32_t i = 0; i < cMsrs; i++)
    2486     {
    2487         /*
    2488          * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
    2489          * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
    2490          */
    2491         if (pHostMsrLoad[i].u32Msr == MSR_K6_EFER)
    2492             pHostMsrLoad[i].u64Value = g_uHmVmxHostMsrEfer;
    2493         else
    2494             pHostMsrLoad[i].u64Value = ASMRdMsr(pHostMsrLoad[i].u32Msr);
    2495     }
    2496 }
    2497 
    2498 
    2499 /**
    2500  * Saves a set of host MSRs to allow read/write passthru access to the guest and
    2501  * perform lazy restoration of the host MSRs while leaving VT-x.
    2502  *
    2503  * @param   pVCpu   The cross context virtual CPU structure.
    2504  *
    2505  * @remarks No-long-jump zone!!!
    2506  */
    2507 static void hmR0VmxLazySaveHostMsrs(PVMCPUCC pVCpu)
    2508 {
    2509     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2510 
    2511     /*
    2512      * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in hmR0VmxSetupVmcsProcCtls().
    2513      */
    2514     if (!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
    2515     {
    2516         Assert(!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST));  /* Guest MSRs better not be loaded now. */
    2517         if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
    2518         {
    2519             pVCpu->hmr0.s.vmx.u64HostMsrLStar        = ASMRdMsr(MSR_K8_LSTAR);
    2520             pVCpu->hmr0.s.vmx.u64HostMsrStar         = ASMRdMsr(MSR_K6_STAR);
    2521             pVCpu->hmr0.s.vmx.u64HostMsrSfMask       = ASMRdMsr(MSR_K8_SF_MASK);
    2522             pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
    2523         }
    2524         pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
    2525     }
    2526296}
    2527297
     
    2612382
    2613383/**
     384 * Checks if the specified guest MSR is part of the VM-entry MSR-load area.
     385 *
     386 * @returns @c true if found, @c false otherwise.
     387 * @param   pVmcsInfo   The VMCS info. object.
     388 * @param   idMsr       The MSR to find.
     389 */
     390static bool hmR0VmxIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
     391{
     392    PCVMXAUTOMSR   pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     393    uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
     394    Assert(pMsrs);
     395    Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
     396    for (uint32_t i = 0; i < cMsrs; i++)
     397    {
     398        if (pMsrs[i].u32Msr == idMsr)
     399            return true;
     400    }
     401    return false;
     402}
     403
     404
     405/**
    2614406 * Performs lazy restoration of the set of host MSRs if they were previously
    2615407 * loaded with guest MSR values.
     
    2642434
    2643435/**
    2644  * Verifies that our cached values of the VMCS fields are all consistent with
    2645  * what's actually present in the VMCS.
     436 * Sets pfnStartVm to the best-suited variant.
     437 *
     438 * This must be called whenever anything changes relative to the hmR0VmxStartVm
     439 * variant selection:
     440 *      - pVCpu->hm.s.fLoadSaveGuestXcr0
     441 *      - HM_WSF_IBPB_ENTRY in pVCpu->hmr0.s.fWorldSwitcher
     442 *      - HM_WSF_IBPB_EXIT  in pVCpu->hmr0.s.fWorldSwitcher
     443 *      - Perhaps: CPUMIsGuestFPUStateActive() (windows only)
     444 *      - Perhaps: CPUMCTX.fXStateMask (windows only)
     445 *
     446 * We currently ASSUME that neither HM_WSF_IBPB_ENTRY nor HM_WSF_IBPB_EXIT
     447 * can be changed at runtime.
     448 */
     449static void hmR0VmxUpdateStartVmFunction(PVMCPUCC pVCpu)
     450{
     451    static const struct CLANGWORKAROUND { PFNHMVMXSTARTVM pfn; } s_aHmR0VmxStartVmFunctions[] =
     452    {
     453        { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
     454        { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
     455        { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
     456        { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
     457        { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
     458        { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
     459        { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
     460        { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
     461        { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
     462        { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
     463        { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
     464        { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
     465        { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
     466        { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
     467        { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
     468        { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
     469        { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
     470        { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
     471        { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
     472        { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
     473        { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
     474        { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
     475        { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
     476        { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
     477        { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
     478        { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
     479        { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
     480        { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
     481        { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
     482        { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
     483        { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
     484        { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
     485    };
     486    uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0                 ?  1 : 0)
     487                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ?  2 : 0)
     488                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_L1D_ENTRY  ?  4 : 0)
     489                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_MDS_ENTRY  ?  8 : 0)
     490                        | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT  ? 16 : 0);
     491    PFNHMVMXSTARTVM const pfnStartVm = s_aHmR0VmxStartVmFunctions[idx].pfn;
     492    if (pVCpu->hmr0.s.vmx.pfnStartVm != pfnStartVm)
     493        pVCpu->hmr0.s.vmx.pfnStartVm = pfnStartVm;
     494}
     495
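The table above is a standard branch-elimination trick: each boolean condition contributes one bit of an index into a table of 2^N pre-specialized functions, so the selection happens once here rather than on every VM-entry. A tiny self-contained sketch of the same dispatch scheme (the flags and worker functions are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    typedef void (*PFNWORKER)(void);
    static void workPlain(void)    { puts("plain"); }
    static void workXcr0(void)     { puts("xcr0"); }
    static void workIbpb(void)     { puts("ibpb"); }
    static void workXcr0Ibpb(void) { puts("xcr0+ibpb"); }

    /* Index bit 0 = load/save XCR0, bit 1 = IBPB on entry (hypothetical flags). */
    static PFNWORKER const s_apfnWorkers[4] = { workPlain, workXcr0, workIbpb, workXcr0Ibpb };

    static PFNWORKER selectWorker(int fXcr0, int fIbpbEntry)
    {
        uintptr_t const idx = (fXcr0 ? 1 : 0) | (fIbpbEntry ? 2 : 0);
        return s_apfnWorkers[idx];
    }

    int main(void)
    {
        selectWorker(1, 0)();   /* prints "xcr0" */
        return 0;
    }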
     496
     497/**
     498 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
     499 * stack.
     500 *
     501 * @returns Strict VBox status code (i.e. informational status codes too).
     502 * @retval  VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
     503 * @param   pVCpu   The cross context virtual CPU structure.
     504 * @param   uValue  The value to push to the guest stack.
     505 */
     506static VBOXSTRICTRC hmR0VmxRealModeGuestStackPush(PVMCPUCC pVCpu, uint16_t uValue)
     507{
     508    /*
     509     * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
     510     * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
     511     * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
     512     */
     513    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     514    if (pCtx->sp == 1)
     515        return VINF_EM_RESET;
     516    pCtx->sp -= sizeof(uint16_t);       /* May wrap around which is expected behaviour. */
     517    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
     518    AssertRC(rc);
     519    return rc;
     520}
     521
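The unsigned 16-bit subtraction above is doing real work: a push with SP=0 legitimately wraps so the word lands at 0xfffe within the segment, whereas SP=1 would split the word across the wrap, which is why it is treated as a triple-fault condition. A quick demonstration of the wraparound arithmetic:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint16_t sp = 0;                    /* real-mode SP about to take a 2-byte push */
        sp -= (uint16_t)sizeof(uint16_t);   /* wraps: 0 - 2 == 0xfffe */
        assert(sp == 0xfffe);               /* the pushed word sits at SS:0xfffe */
        return 0;
    }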
     522
     523/*
     524 * Instantiate the code we share with the NEM darwin backend.
     525 */
     526#define VCPU_2_VMXSTATE(a_pVCpu)            (a_pVCpu)->hm.s
     527#define VCPU_2_VMXSTATS(a_pVCpu)            (a_pVCpu)->hm.s
     528
     529#define VM_IS_VMX_UNRESTRICTED_GUEST(a_pVM) (a_pVM)->hmr0.s.vmx.fUnrestrictedGuest
     530#define VM_IS_VMX_NESTED_PAGING(a_pVM)      (a_pVM)->hmr0.s.fNestedPaging
     531#define VM_IS_VMX_PREEMPT_TIMER_USED(a_pVM) (a_pVM)->hmr0.s.vmx.fUsePreemptTimer
     532#define VM_IS_VMX_LBR(a_pVM)                (a_pVM)->hmr0.s.vmx.fLbr
     533
     534#define VMX_VMCS_WRITE_16(a_pVCpu, a_FieldEnc, a_Val) VMXWriteVmcs16((a_FieldEnc), (a_Val))
     535#define VMX_VMCS_WRITE_32(a_pVCpu, a_FieldEnc, a_Val) VMXWriteVmcs32((a_FieldEnc), (a_Val))
     536#define VMX_VMCS_WRITE_64(a_pVCpu, a_FieldEnc, a_Val) VMXWriteVmcs64((a_FieldEnc), (a_Val))
     537#define VMX_VMCS_WRITE_NW(a_pVCpu, a_FieldEnc, a_Val) VMXWriteVmcsNw((a_FieldEnc), (a_Val))
     538
     539#define VMX_VMCS_READ_16(a_pVCpu, a_FieldEnc, a_pVal) VMXReadVmcs16((a_FieldEnc), (a_pVal))
     540#define VMX_VMCS_READ_32(a_pVCpu, a_FieldEnc, a_pVal) VMXReadVmcs32((a_FieldEnc), (a_pVal))
     541#define VMX_VMCS_READ_64(a_pVCpu, a_FieldEnc, a_pVal) VMXReadVmcs64((a_FieldEnc), (a_pVal))
     542#define VMX_VMCS_READ_NW(a_pVCpu, a_FieldEnc, a_pVal) VMXReadVmcsNw((a_FieldEnc), (a_pVal))
     543
     544#include "../VMMAll/VMXAllTemplate.cpp.h"
     545
     546#undef VMX_VMCS_WRITE_16
     547#undef VMX_VMCS_WRITE_32
     548#undef VMX_VMCS_WRITE_64
     549#undef VMX_VMCS_WRITE_NW
     550
     551#undef VMX_VMCS_READ_16
     552#undef VMX_VMCS_READ_32
     553#undef VMX_VMCS_READ_64
     554#undef VMX_VMCS_READ_NW
     555
     556#undef VM_IS_VMX_PREEMPT_TIMER_USED
     557#undef VM_IS_VMX_NESTED_PAGING
     558#undef VM_IS_VMX_UNRESTRICTED_GUEST
     559#undef VCPU_2_VMXSTATS
     560#undef VCPU_2_VMXSTATE
     561
     562
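The define/include/undef sandwich above is C's textual substitute for templates: the adapter macros bind the shared VMXAllTemplate.cpp.h body to the ring-0 VMCS accessors here, while the NEM/darwin backend binds the same body to its own accessors, and the #undef lines keep the adapters from leaking. A miniature of the pattern, with the "included" template body shown inline and READ_REG/backendARead as made-up names:

    /* The shared template body (imagine it living in a .cpp.h file):
       static int templateReadTwice(void) { return READ_REG(0) + READ_REG(1); } */

    static int backendARead(int iReg) { return iReg + 100; }    /* this backend's accessor */

    #define READ_REG(a_iReg) backendARead(a_iReg)
    static int templateReadTwice(void) { return READ_REG(0) + READ_REG(1); }   /* the "included" body */
    #undef  READ_REG                                            /* don't leak the adapter macro */

    int main(void) { return templateReadTwice() == 201 ? 0 : 1; }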
     563/**
     564 * Updates the VM's last error record.
     565 *
     566 * If there was a VMX instruction error, reads the error data from the VMCS and
     567 * updates the VCPU's last error record as well.
     568 *
     569 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
     570 *                  Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
     571 *                  VERR_VMX_INVALID_VMCS_FIELD.
     572 * @param   rc      The error code.
     573 */
     574static void hmR0VmxUpdateErrorRecord(PVMCPUCC pVCpu, int rc)
     575{
     576    if (   rc == VERR_VMX_INVALID_VMCS_FIELD
     577        || rc == VERR_VMX_UNABLE_TO_START_VM)
     578    {
     579        AssertPtrReturnVoid(pVCpu);
     580        VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
     581    }
     582    pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc;
     583}
     584
     585
     586/**
     587 * Enters VMX root mode operation on the current CPU.
    2646588 *
    2647589 * @returns VBox status code.
    2648  * @retval  VINF_SUCCESS if all our caches match their respective VMCS fields.
    2649  * @retval  VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
    2650  *                                            VMCS content. HMCPU error-field is
    2651  *                                            updated, see VMX_VCI_XXX.
     590 * @param   pHostCpu        The HM physical-CPU structure.
     591 * @param   pVM             The cross context VM structure. Can be
     592 *                          NULL, after a resume.
     593 * @param   HCPhysCpuPage   Physical address of the VMXON region.
     594 * @param   pvCpuPage       Pointer to the VMXON region.
     595 */
     596static int hmR0VmxEnterRootMode(PHMPHYSCPU pHostCpu, PVMCC pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
     597{
     598    Assert(pHostCpu);
     599    Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
     600    Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
     601    Assert(pvCpuPage);
     602    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     603
     604    if (pVM)
     605    {
     606        /* Write the VMCS revision identifier to the VMXON region. */
     607        *(uint32_t *)pvCpuPage = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID);
     608    }
     609
     610    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
     611    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
     612
     613    /* Enable the VMX bit in CR4 if necessary. */
     614    RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
     615
     616    /* Record whether VMXE was already enabled prior to us enabling it above. */
     617    pHostCpu->fVmxeAlreadyEnabled = RT_BOOL(uOldCr4 & X86_CR4_VMXE);
     618
     619    /* Enter VMX root mode. */
     620    int rc = VMXEnable(HCPhysCpuPage);
     621    if (RT_FAILURE(rc))
     622    {
     623        /* Restore CR4.VMXE if it was not set prior to our attempt to set it above. */
     624        if (!pHostCpu->fVmxeAlreadyEnabled)
     625            SUPR0ChangeCR4(0 /* fOrMask */, ~(uint64_t)X86_CR4_VMXE);
     626
     627        if (pVM)
     628            pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
     629    }
     630
     631    /* Restore interrupts. */
     632    ASMSetFlags(fEFlags);
     633    return rc;
     634}
     635
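Note the CR4.VMXE discipline above: the prior state of the bit is recorded so that a failed VMXON clears the bit only if this code was the one to set it, leaving a pre-existing setting untouched. The same save-test-rollback idiom in miniature (MY_FLAG and pfnTryStart are stand-ins):

    #include <stdbool.h>
    #include <stdint.h>

    #define MY_FLAG  UINT32_C(0x2000)   /* stand-in for X86_CR4_VMXE */

    /* Sets MY_FLAG, runs the start callback, and rolls back only our own change on failure. */
    static int enableWithRollback(uint32_t *puReg, bool (*pfnTryStart)(void))
    {
        bool const fWasSet = (*puReg & MY_FLAG) != 0;   /* record the prior state */
        *puReg |= MY_FLAG;
        if (pfnTryStart())
            return 0;
        if (!fWasSet)                                   /* restore only if we set it */
            *puReg &= ~MY_FLAG;
        return -1;
    }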
     636
     637/**
     638 * Exits VMX root mode operation on the current CPU.
     639 *
     640 * @returns VBox status code.
     641 * @param   pHostCpu        The HM physical-CPU structure.
     642 */
     643static int hmR0VmxLeaveRootMode(PHMPHYSCPU pHostCpu)
     644{
     645    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     646
     647    /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
     648    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
     649
     650    /* If we're for some reason not in VMX root mode, then don't leave it. */
     651    RTCCUINTREG const uHostCr4 = ASMGetCR4();
     652
     653    int rc;
     654    if (uHostCr4 & X86_CR4_VMXE)
     655    {
     656        /* Exit VMX root mode and clear the VMX bit in CR4. */
     657        VMXDisable();
     658
     659        /* Clear CR4.VMXE only if it was clear prior to us setting it. */
     660        if (!pHostCpu->fVmxeAlreadyEnabled)
     661            SUPR0ChangeCR4(0 /* fOrMask */, ~(uint64_t)X86_CR4_VMXE);
     662
     663        rc = VINF_SUCCESS;
     664    }
     665    else
     666        rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
     667
     668    /* Restore interrupts. */
     669    ASMSetFlags(fEFlags);
     670    return rc;
     671}
     672
     673
     674/**
     675 * Allocates pages as specified by an array of VMX page-allocation info
     676 * objects.
     677 *
     678 * The page contents are zeroed after allocation.
     679 *
     680 * @returns VBox status code.
     681 * @param   phMemObj        Where to return the handle to the allocation.
     682 * @param   paAllocInfo     The pointer to the first element of the VMX
     683 *                          page-allocation info object array.
     684 * @param   cEntries        The number of elements in the @a paAllocInfo array.
     685 */
     686static int hmR0VmxPagesAllocZ(PRTR0MEMOBJ phMemObj, PVMXPAGEALLOCINFO paAllocInfo, uint32_t cEntries)
     687{
     688    *phMemObj = NIL_RTR0MEMOBJ;
     689
     690    /* Figure out how many pages to allocate. */
     691    uint32_t cPages = 0;
     692    for (uint32_t iPage = 0; iPage < cEntries; iPage++)
     693        cPages += !!paAllocInfo[iPage].fValid;
     694
     695    /* Allocate the pages. */
     696    if (cPages)
     697    {
     698        size_t const cbPages = cPages << PAGE_SHIFT;
     699        int rc = RTR0MemObjAllocPage(phMemObj, cbPages, false /* fExecutable */);
     700        if (RT_FAILURE(rc))
     701            return rc;
     702
     703        /* Zero the contents and assign each page to the corresponding VMX page-allocation entry. */
     704        void *pvFirstPage = RTR0MemObjAddress(*phMemObj);
     705        RT_BZERO(pvFirstPage, cbPages);
     706
     707        uint32_t iPage = 0;
     708        for (uint32_t i = 0; i < cEntries; i++)
     709            if (paAllocInfo[i].fValid)
     710            {
     711                RTHCPHYS const HCPhysPage = RTR0MemObjGetPagePhysAddr(*phMemObj, iPage);
     712                void          *pvPage     = (void *)((uintptr_t)pvFirstPage + (iPage << X86_PAGE_4K_SHIFT));
     713                Assert(HCPhysPage && HCPhysPage != NIL_RTHCPHYS);
     714                AssertPtr(pvPage);
     715
     716                Assert(paAllocInfo[i].pHCPhys);
     717                Assert(paAllocInfo[i].ppVirt);
     718                *paAllocInfo[i].pHCPhys = HCPhysPage;   /* Index with i, not iPage: invalid entries are skipped. */
     719                *paAllocInfo[i].ppVirt  = pvPage;
     720
     721                /* Move to next page. */
     722                ++iPage;
     723            }
     724
     725        /* Make sure all valid (requested) pages have been assigned. */
     726        Assert(iPage == cPages);
     727    }
     728    return VINF_SUCCESS;
     729}
     730
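hmR0VmxPagesAllocZ trades several small ring-0 allocations for one contiguous block that is zeroed once and carved into consecutive 4 KiB pages for whichever entries are marked valid, which also makes freeing a single call. A user-mode sketch of the same carve-up (aligned_alloc standing in for RTR0MemObjAllocPage):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096u

    typedef struct { bool fValid; void **ppVirt; } PAGEALLOCINFO;   /* simplified info entry */

    /* Returns the block (for a later single free()) or NULL; assigns a page per valid entry. */
    static void *pagesAllocZ(PAGEALLOCINFO *paInfo, uint32_t cEntries)
    {
        uint32_t cPages = 0;
        for (uint32_t i = 0; i < cEntries; i++)
            cPages += paInfo[i].fValid;                 /* count only the requested pages */
        if (!cPages)
            return NULL;

        uint8_t *pbBlock = aligned_alloc(PAGE_SIZE, (size_t)cPages * PAGE_SIZE);
        if (!pbBlock)
            return NULL;
        memset(pbBlock, 0, (size_t)cPages * PAGE_SIZE); /* zero the whole block once */

        uint32_t iPage = 0;
        for (uint32_t i = 0; i < cEntries; i++)
            if (paInfo[i].fValid)
                *paInfo[i].ppVirt = pbBlock + (size_t)iPage++ * PAGE_SIZE;   /* entry i gets page iPage */
        return pbBlock;
    }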
     731
     732/**
     733 * Frees pages allocated using hmR0VmxPagesAllocZ.
     734 *
     735 * @param   phMemObj    Pointer to the memory object handle.  Will be set to
     736 *                      NIL.
     737 */
     738DECL_FORCE_INLINE(void) hmR0VmxPagesFree(PRTR0MEMOBJ phMemObj)
     739{
     740    /* We can cleanup wholesale since it's all one allocation. */
     741    if (*phMemObj != NIL_RTR0MEMOBJ)
     742    {
     743        RTR0MemObjFree(*phMemObj, true /* fFreeMappings */);
     744        *phMemObj = NIL_RTR0MEMOBJ;
     745    }
     746}
     747
     748
     749/**
     750 * Initializes a VMCS info. object.
     751 *
     752 * @param   pVmcsInfo           The VMCS info. object.
     753 * @param   pVmcsInfoShared     The VMCS info. object shared with ring-3.
     754 */
     755static void hmR0VmxVmcsInfoInit(PVMXVMCSINFO pVmcsInfo, PVMXVMCSINFOSHARED pVmcsInfoShared)
     756{
     757    RT_ZERO(*pVmcsInfo);
     758    RT_ZERO(*pVmcsInfoShared);
     759
     760    pVmcsInfo->pShared             = pVmcsInfoShared;
     761    Assert(pVmcsInfo->hMemObj == NIL_RTR0MEMOBJ);
     762    pVmcsInfo->HCPhysVmcs          = NIL_RTHCPHYS;
     763    pVmcsInfo->HCPhysShadowVmcs    = NIL_RTHCPHYS;
     764    pVmcsInfo->HCPhysMsrBitmap     = NIL_RTHCPHYS;
     765    pVmcsInfo->HCPhysGuestMsrLoad  = NIL_RTHCPHYS;
     766    pVmcsInfo->HCPhysGuestMsrStore = NIL_RTHCPHYS;
     767    pVmcsInfo->HCPhysHostMsrLoad   = NIL_RTHCPHYS;
     768    pVmcsInfo->HCPhysVirtApic      = NIL_RTHCPHYS;
     769    pVmcsInfo->HCPhysEPTP          = NIL_RTHCPHYS;
     770    pVmcsInfo->u64VmcsLinkPtr      = NIL_RTHCPHYS;
     771    pVmcsInfo->idHostCpuState      = NIL_RTCPUID;
     772    pVmcsInfo->idHostCpuExec       = NIL_RTCPUID;
     773}
     774
     775
     776/**
     777 * Frees the VT-x structures for a VMCS info. object.
     778 *
     779 * @param   pVmcsInfo           The VMCS info. object.
     780 * @param   pVmcsInfoShared     The VMCS info. object shared with ring-3.
     781 */
     782static void hmR0VmxVmcsInfoFree(PVMXVMCSINFO pVmcsInfo, PVMXVMCSINFOSHARED pVmcsInfoShared)
     783{
     784    hmR0VmxPagesFree(&pVmcsInfo->hMemObj);
     785    hmR0VmxVmcsInfoInit(pVmcsInfo, pVmcsInfoShared);
     786}
     787
     788
     789/**
     790 * Allocates the VT-x structures for a VMCS info. object.
     791 *
     792 * @returns VBox status code.
    2652793 * @param   pVCpu           The cross context virtual CPU structure.
    2653794 * @param   pVmcsInfo       The VMCS info. object.
    2654795 * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
    2655  */
    2656 static int hmR0VmxCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
    2657 {
    2658     const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
    2659 
    2660     uint32_t u32Val;
    2661     int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
    2662     AssertRC(rc);
    2663     AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
    2664                         ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
    2665                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
    2666                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    2667 
    2668     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
    2669     AssertRC(rc);
    2670     AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
    2671                         ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
    2672                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
    2673                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    2674 
    2675     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
    2676     AssertRC(rc);
    2677     AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
    2678                         ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
    2679                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
    2680                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    2681 
    2682     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
    2683     AssertRC(rc);
    2684     AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
    2685                         ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
    2686                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
    2687                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    2688 
    2689     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    2690     {
    2691         rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
    2692         AssertRC(rc);
    2693         AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
    2694                             ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
    2695                             pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
    2696                             VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    2697     }
    2698 
    2699     uint64_t u64Val;
    2700     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
    2701     {
    2702         rc = VMXReadVmcs64(VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
    2703         AssertRC(rc);
    2704         AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
    2705                             ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
    2706                             pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
    2707                             VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    2708     }
    2709 
    2710     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
    2711     AssertRC(rc);
    2712     AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
    2713                         ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
    2714                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
    2715                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    2716 
    2717     rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
    2718     AssertRC(rc);
    2719     AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
    2720                         ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
    2721                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
    2722                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    2723 
    2724     NOREF(pcszVmcs);
     796 *
     797 * @remarks The caller is expected to take care of any and all allocation failures.
     798 *          This function will not perform any cleanup for failures half-way
     799 *          through.
     800 */
     801static int hmR0VmxAllocVmcsInfo(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
     802{
     803    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     804
     805    bool const fMsrBitmaps = RT_BOOL(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS);
     806    bool const fShadowVmcs = !fIsNstGstVmcs ? pVM->hmr0.s.vmx.fUseVmcsShadowing : pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing;
     807    Assert(!pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing);  /* VMCS shadowing is not yet exposed to the guest. */
     808    VMXPAGEALLOCINFO aAllocInfo[] =
     809    {
     810        { true,        0 /* Unused */, &pVmcsInfo->HCPhysVmcs,         &pVmcsInfo->pvVmcs         },
     811        { true,        0 /* Unused */, &pVmcsInfo->HCPhysGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad },
     812        { true,        0 /* Unused */, &pVmcsInfo->HCPhysHostMsrLoad,  &pVmcsInfo->pvHostMsrLoad  },
     813        { fMsrBitmaps, 0 /* Unused */, &pVmcsInfo->HCPhysMsrBitmap,    &pVmcsInfo->pvMsrBitmap    },
     814        { fShadowVmcs, 0 /* Unused */, &pVmcsInfo->HCPhysShadowVmcs,   &pVmcsInfo->pvShadowVmcs   },
     815    };
     816
     817    int rc = hmR0VmxPagesAllocZ(&pVmcsInfo->hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
     818    if (RT_FAILURE(rc))
     819        return rc;
     820
     821    /*
     822     * We use the same page for the VM-entry MSR-load and VM-exit MSR-store areas,
     823     * because they contain a symmetric list of guest MSRs to load on VM-entry and store on VM-exit.
     824     */
     825    AssertCompile(RT_ELEMENTS(aAllocInfo) > 0);
     826    Assert(pVmcsInfo->HCPhysGuestMsrLoad != NIL_RTHCPHYS);
     827    pVmcsInfo->pvGuestMsrStore     = pVmcsInfo->pvGuestMsrLoad;
     828    pVmcsInfo->HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrLoad;
     829
     830    /*
     831     * Get the virtual-APIC page rather than allocating it again.
     832     */
     833    if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
     834    {
     835        if (!fIsNstGstVmcs)
     836        {
     837            if (PDMHasApic(pVM))
     838            {
     839                rc = APICGetApicPageForCpu(pVCpu, &pVmcsInfo->HCPhysVirtApic, (PRTR0PTR)&pVmcsInfo->pbVirtApic, NULL /*pR3Ptr*/);
     840                if (RT_FAILURE(rc))
     841                    return rc;
     842                Assert(pVmcsInfo->pbVirtApic);
     843                Assert(pVmcsInfo->HCPhysVirtApic && pVmcsInfo->HCPhysVirtApic != NIL_RTHCPHYS);
     844            }
     845        }
     846        else
     847        {
     848            pVmcsInfo->pbVirtApic     = &pVCpu->cpum.GstCtx.hwvirt.vmx.abVirtApicPage[0];
     849            pVmcsInfo->HCPhysVirtApic = GVMMR0ConvertGVMPtr2HCPhys(pVM, pVmcsInfo->pbVirtApic);
     850            Assert(pVmcsInfo->HCPhysVirtApic && pVmcsInfo->HCPhysVirtApic != NIL_RTHCPHYS);
     851        }
     852    }
     853
    2725854    return VINF_SUCCESS;
    2726855}
     856
     857
     858/**
     859 * Frees all VT-x structures for the VM.
     860 *
     861 *
     862 * @param   pVM     The cross context VM structure.
     863 */
     864static void hmR0VmxStructsFree(PVMCC pVM)
     865{
     866    hmR0VmxPagesFree(&pVM->hmr0.s.vmx.hMemObj);
     867#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     868    if (pVM->hmr0.s.vmx.fUseVmcsShadowing)
     869    {
     870        RTMemFree(pVM->hmr0.s.vmx.paShadowVmcsFields);
     871        pVM->hmr0.s.vmx.paShadowVmcsFields = NULL;
     872        RTMemFree(pVM->hmr0.s.vmx.paShadowVmcsRoFields);
     873        pVM->hmr0.s.vmx.paShadowVmcsRoFields = NULL;
     874    }
     875#endif
     876
     877    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     878    {
     879        PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
     880        hmR0VmxVmcsInfoFree(&pVCpu->hmr0.s.vmx.VmcsInfo, &pVCpu->hm.s.vmx.VmcsInfo);
     881#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     882        if (pVM->cpum.ro.GuestFeatures.fVmx)
     883            hmR0VmxVmcsInfoFree(&pVCpu->hmr0.s.vmx.VmcsInfoNstGst, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
     884#endif
     885    }
     886}
     887
     888
     889/**
     890 * Allocates all VT-x structures for the VM.
     891 *
     892 * @returns VBox status code.
     893 * @param   pVM     The cross context VM structure.
     894 *
     895 * @remarks This function cleans up on memory allocation failures.
     896 */
     897static int hmR0VmxStructsAlloc(PVMCC pVM)
     898{
     899    /*
     900     * Sanity check the VMCS size reported by the CPU as we assume 4KB allocations.
     901     * The VMCS size cannot be more than 4096 bytes.
     902     *
     903     * See Intel spec. Appendix A.1 "Basic VMX Information".
     904     */
     905    uint32_t const cbVmcs = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_SIZE);
     906    if (cbVmcs <= X86_PAGE_4K_SIZE)
     907    { /* likely */ }
     908    else
     909    {
     910        VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE;
     911        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     912    }
     913
     914    /*
     915     * Allocate per-VM VT-x structures.
     916     */
     917    bool const fVirtApicAccess   = RT_BOOL(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
     918    bool const fUseVmcsShadowing = pVM->hmr0.s.vmx.fUseVmcsShadowing;
     919    VMXPAGEALLOCINFO aAllocInfo[] =
     920    {
     921        { fVirtApicAccess,   0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysApicAccess,    (PRTR0PTR)&pVM->hmr0.s.vmx.pbApicAccess },
     922        { fUseVmcsShadowing, 0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysVmreadBitmap,  &pVM->hmr0.s.vmx.pvVmreadBitmap         },
     923        { fUseVmcsShadowing, 0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysVmwriteBitmap, &pVM->hmr0.s.vmx.pvVmwriteBitmap        },
     924#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     925        { true,              0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysScratch,       (PRTR0PTR)&pVM->hmr0.s.vmx.pbScratch    },
     926#endif
     927    };
     928
     929    int rc = hmR0VmxPagesAllocZ(&pVM->hmr0.s.vmx.hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
     930    if (RT_SUCCESS(rc))
     931    {
     932#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     933        /* Allocate the shadow VMCS-fields array. */
     934        if (fUseVmcsShadowing)
     935        {
     936            Assert(!pVM->hmr0.s.vmx.cShadowVmcsFields);
     937            Assert(!pVM->hmr0.s.vmx.cShadowVmcsRoFields);
     938            pVM->hmr0.s.vmx.paShadowVmcsFields   = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
     939            pVM->hmr0.s.vmx.paShadowVmcsRoFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
     940            if (!pVM->hmr0.s.vmx.paShadowVmcsFields || !pVM->hmr0.s.vmx.paShadowVmcsRoFields)
     941                rc = VERR_NO_MEMORY;
     942        }
     943#endif
     944
     945        /*
     946         * Allocate per-VCPU VT-x structures.
     947         */
     948        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
     949        {
     950            /* Allocate the guest VMCS structures. */
     951            PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
     952            rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
     953
     954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     955            /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */
     956            if (pVM->cpum.ro.GuestFeatures.fVmx && RT_SUCCESS(rc))
     957                rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
     958#endif
     959        }
     960        if (RT_SUCCESS(rc))
     961            return VINF_SUCCESS;
     962    }
     963    hmR0VmxStructsFree(pVM);
     964    return rc;
     965}
     966
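For reference, RT_BF_GET above pulls bit-fields out of the IA32_VMX_BASIC MSR; per the Intel SDM (appendix A.1) the VMCS revision identifier sits in bits 30:0 and the VMCS region size in bits 44:32. A sketch of the raw extraction without the RT_BF_* machinery, under that reading of the spec:

    #include <stdint.h>

    /* Field positions per Intel SDM A.1 "Basic VMX Information". */
    static uint32_t vmxBasicVmcsId(uint64_t u64Basic)   { return (uint32_t)(u64Basic & UINT64_C(0x7fffffff)); }
    static uint32_t vmxBasicVmcsSize(uint64_t u64Basic) { return (uint32_t)((u64Basic >> 32) & 0x1fff); }

    /* The sanity check in hmR0VmxStructsAlloc then amounts to: vmxBasicVmcsSize(...) <= 4096. */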
     967
     968/**
     969 * Pre-initializes non-zero fields in VMX structures that will be allocated.
     970 *
     971 * @param   pVM     The cross context VM structure.
     972 */
     973static void hmR0VmxStructsInit(PVMCC pVM)
     974{
     975    /* Paranoia. */
     976    Assert(pVM->hmr0.s.vmx.pbApicAccess == NULL);
     977#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     978    Assert(pVM->hmr0.s.vmx.pbScratch == NULL);
     979#endif
     980
     981    /*
     982     * Initialize members up-front so we can cleanup en masse on allocation failures.
     983     */
     984#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     985    pVM->hmr0.s.vmx.HCPhysScratch       = NIL_RTHCPHYS;
     986#endif
     987    pVM->hmr0.s.vmx.HCPhysApicAccess    = NIL_RTHCPHYS;
     988    pVM->hmr0.s.vmx.HCPhysVmreadBitmap  = NIL_RTHCPHYS;
     989    pVM->hmr0.s.vmx.HCPhysVmwriteBitmap = NIL_RTHCPHYS;
     990    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     991    {
     992        PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
     993        hmR0VmxVmcsInfoInit(&pVCpu->hmr0.s.vmx.VmcsInfo,       &pVCpu->hm.s.vmx.VmcsInfo);
     994        hmR0VmxVmcsInfoInit(&pVCpu->hmr0.s.vmx.VmcsInfoNstGst, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
     995    }
     996}
     997
     998#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     999/**
     1000 * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not.
     1001 *
     1002 * @returns @c true if the MSR is intercepted, @c false otherwise.
     1003 * @param   pbMsrBitmap     The MSR bitmap.
     1004 * @param   offMsr          The MSR byte offset.
     1005 * @param   iBit            The bit offset from the byte offset.
     1006 */
     1007DECLINLINE(bool) hmR0VmxIsMsrBitSet(uint8_t const *pbMsrBitmap, uint16_t offMsr, int32_t iBit)
     1008{
     1009    Assert(offMsr + (iBit >> 3) <= X86_PAGE_4K_SIZE);
     1010    return ASMBitTest(pbMsrBitmap + offMsr, iBit);
     1011}
     1012#endif
     1013
     1014/**
     1015 * Sets the permission bits for the specified MSR in the given MSR bitmap.
     1016 *
     1017 * If the passed VMCS is a nested-guest VMCS, this function ensures that the
     1018 * read/write intercept is cleared from the MSR bitmap used for hardware-assisted
     1019 * VMX execution of the nested-guest, but only if the nested-guest is not itself
     1020 * intercepting read/write access to this MSR.
     1021 *
     1022 * @param   pVCpu           The cross context virtual CPU structure.
     1023 * @param   pVmcsInfo       The VMCS info. object.
     1024 * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
     1025 * @param   idMsr           The MSR value.
     1026 * @param   fMsrpm          The MSR permissions (see VMXMSRPM_XXX). This must
     1027 *                          include both a read -and- a write permission!
     1028 *
     1029 * @sa      CPUMGetVmxMsrPermission.
     1030 * @remarks Can be called with interrupts disabled.
     1031 */
     1032static void hmR0VmxSetMsrPermission(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
     1033{
     1034    uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
     1035    Assert(pbMsrBitmap);
     1036    Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm));
     1037
     1038    /*
     1039     * MSR-bitmap Layout:
     1040     *   Byte index            MSR range            Interpreted as
     1041     * 0x000 - 0x3ff    0x00000000 - 0x00001fff    Low MSR read bits.
     1042     * 0x400 - 0x7ff    0xc0000000 - 0xc0001fff    High MSR read bits.
     1043     * 0x800 - 0xbff    0x00000000 - 0x00001fff    Low MSR write bits.
     1044     * 0xc00 - 0xfff    0xc0000000 - 0xc0001fff    High MSR write bits.
     1045     *
     1046     * A bit corresponding to an MSR within the above range causes a VM-exit
     1047     * if the bit is 1 on executions of RDMSR/WRMSR.  If an MSR falls outside
     1048     * the covered ranges, it always causes a VM-exit.
     1049     *
     1050     * See Intel spec. 24.6.9 "MSR-Bitmap Address".
     1051     */
     1052    uint16_t const offBitmapRead  = 0;
     1053    uint16_t const offBitmapWrite = 0x800;
     1054    uint16_t       offMsr;
     1055    int32_t        iBit;
     1056    if (idMsr <= UINT32_C(0x00001fff))
     1057    {
     1058        offMsr = 0;
     1059        iBit   = idMsr;
     1060    }
     1061    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
     1062    {
     1063        offMsr = 0x400;
     1064        iBit   = idMsr - UINT32_C(0xc0000000);
     1065    }
     1066    else
     1067        AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr));
     1068
     1069    /*
     1070     * Set the MSR read permission.
     1071     */
     1072    uint16_t const offMsrRead = offBitmapRead + offMsr;
     1073    Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
     1074    if (fMsrpm & VMXMSRPM_ALLOW_RD)
     1075    {
     1076#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1077        bool const fClear = !fIsNstGstVmcs ? true
     1078                          : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrRead, iBit);
     1079#else
     1080        RT_NOREF2(pVCpu, fIsNstGstVmcs);
     1081        bool const fClear = true;
     1082#endif
     1083        if (fClear)
     1084            ASMBitClear(pbMsrBitmap + offMsrRead, iBit);
     1085    }
     1086    else
     1087        ASMBitSet(pbMsrBitmap + offMsrRead, iBit);
     1088
     1089    /*
     1090     * Set the MSR write permission.
     1091     */
     1092    uint16_t const offMsrWrite = offBitmapWrite + offMsr;
     1093    Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
     1094    if (fMsrpm & VMXMSRPM_ALLOW_WR)
     1095    {
     1096#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1097        bool const fClear = !fIsNstGstVmcs ? true
     1098                          : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrWrite, iBit);
     1099#else
     1100        RT_NOREF2(pVCpu, fIsNstGstVmcs);
     1101        bool const fClear = true;
     1102#endif
     1103        if (fClear)
     1104            ASMBitClear(pbMsrBitmap + offMsrWrite, iBit);
     1105    }
     1106    else
     1107        ASMBitSet(pbMsrBitmap + offMsrWrite, iBit);
     1108}
     1109
     1110
     1111/**
     1112 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
     1113 * area.
     1114 *
     1115 * @returns VBox status code.
     1116 * @param   pVCpu       The cross context virtual CPU structure.
     1117 * @param   pVmcsInfo   The VMCS info. object.
     1118 * @param   cMsrs       The number of MSRs.
     1119 */
     1120static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs)
     1121{
     1122    /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
     1123    uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc);
     1124    if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
     1125    {
     1126        /* Commit the MSR counts to the VMCS and update the cache. */
     1127        if (pVmcsInfo->cEntryMsrLoad != cMsrs)
     1128        {
     1129            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);   AssertRC(rc);
     1130            rc     = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);   AssertRC(rc);
     1131            rc     = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);   AssertRC(rc);
     1132            pVmcsInfo->cEntryMsrLoad = cMsrs;
     1133            pVmcsInfo->cExitMsrStore = cMsrs;
     1134            pVmcsInfo->cExitMsrLoad  = cMsrs;
     1135        }
     1136        return VINF_SUCCESS;
     1137    }
     1138
     1139    LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
     1140    pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
     1141    return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     1142}
     1143
     1144
     1145/**
     1146 * Adds a new (or updates the value of an existing) guest/host MSR
     1147 * pair to be swapped during the world-switch as part of the
     1148 * auto-load/store MSR area in the VMCS.
     1149 *
     1150 * @returns VBox status code.
     1151 * @param   pVCpu           The cross context virtual CPU structure.
     1152 * @param   pVmxTransient   The VMX-transient structure.
     1153 * @param   idMsr           The MSR.
     1154 * @param   uGuestMsrValue  Value of the guest MSR.
     1155 * @param   fSetReadWrite   Whether to set the guest read/write access of this
     1156 *                          MSR (thus not causing a VM-exit).
     1157 * @param   fUpdateHostMsr  Whether to update the value of the host MSR if
     1158 *                          necessary.
     1159 */
     1160static int hmR0VmxAddAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
     1161                                      bool fSetReadWrite, bool fUpdateHostMsr)
     1162{
     1163    PVMXVMCSINFO    pVmcsInfo     = pVmxTransient->pVmcsInfo;
     1164    bool const      fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
     1165    PVMXAUTOMSR     pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     1166    uint32_t        cMsrs         = pVmcsInfo->cEntryMsrLoad;
     1167    uint32_t        i;
     1168
     1169    /* Paranoia. */
     1170    Assert(pGuestMsrLoad);
     1171
     1172#ifndef DEBUG_bird
     1173    LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
     1174#endif
     1175
     1176    /* Check if the MSR already exists in the VM-entry MSR-load area. */
     1177    for (i = 0; i < cMsrs; i++)
     1178    {
     1179        if (pGuestMsrLoad[i].u32Msr == idMsr)
     1180            break;
     1181    }
     1182
     1183    bool fAdded = false;
     1184    if (i == cMsrs)
     1185    {
     1186        /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
     1187        ++cMsrs;
     1188        int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
     1189        AssertMsgRCReturn(rc, ("Insufficient space to add MSR to VM-entry MSR-load/store area %u\n", idMsr), rc);
     1190
     1191        /* Set the guest to read/write this MSR without causing VM-exits. */
     1192        if (   fSetReadWrite
     1193            && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
     1194            hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD_WR);
     1195
     1196        Log4Func(("Added MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
     1197        fAdded = true;
     1198    }
     1199
     1200    /* Update the MSR value for the newly added or already existing MSR. */
     1201    pGuestMsrLoad[i].u32Msr   = idMsr;
     1202    pGuestMsrLoad[i].u64Value = uGuestMsrValue;
     1203
     1204    /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
     1205    if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
     1206    {
     1207        PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
     1208        pGuestMsrStore[i].u32Msr   = idMsr;
     1209        pGuestMsrStore[i].u64Value = uGuestMsrValue;
     1210    }
     1211
     1212    /* Update the corresponding slot in the host MSR area. */
     1213    PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     1214    Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
     1215    Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
     1216    pHostMsr[i].u32Msr = idMsr;
     1217
     1218    /*
     1219     * Only if the caller requests to update the host MSR value AND we've newly added the
     1220     * MSR to the host MSR area do we actually update the value. Otherwise, it will be
     1221     * updated by hmR0VmxUpdateAutoLoadHostMsrs().
     1222     *
     1223     * We do this for performance reasons since reading MSRs may be quite expensive.
     1224     */
     1225    if (fAdded)
     1226    {
     1227        if (fUpdateHostMsr)
     1228        {
     1229            Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     1230            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1231            pHostMsr[i].u64Value = ASMRdMsr(idMsr);
     1232        }
     1233        else
     1234        {
     1235            /* Someone else can do the work. */
     1236            pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
     1237        }
     1238    }
     1239    return VINF_SUCCESS;
     1240}
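
To illustrate how this helper is meant to be called, here is a hedged sketch of the EFER-swap case, used when the dedicated VMCS EFER controls are unavailable; the exact argument values are an assumption for illustration, not a verbatim quote of the caller:

    /* Sketch: swap guest/host EFER through the auto-load/store area, keeping
       EFER intercepted (fSetReadWrite=false) and deferring the host read. */
    int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER,
                                        pVCpu->cpum.GstCtx.msrEFER,
                                        false /* fSetReadWrite */,
                                        false /* fUpdateHostMsr */);
    AssertRCReturn(rc, rc);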
     1241
     1242
     1243/**
     1244 * Removes a guest/host MSR pair to be swapped during the world-switch from the
     1245 * auto-load/store MSR area in the VMCS.
     1246 *
     1247 * @returns VBox status code.
     1248 * @param   pVCpu           The cross context virtual CPU structure.
     1249 * @param   pVmxTransient   The VMX-transient structure.
     1250 * @param   idMsr           The MSR.
     1251 */
     1252static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr)
     1253{
      1254    PVMXVMCSINFO    pVmcsInfo     = pVmxTransient->pVmcsInfo;
     1255    bool const      fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
     1256    PVMXAUTOMSR     pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     1257    uint32_t        cMsrs         = pVmcsInfo->cEntryMsrLoad;
     1258
     1259#ifndef DEBUG_bird
     1260    LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
     1261#endif
     1262
     1263    for (uint32_t i = 0; i < cMsrs; i++)
     1264    {
     1265        /* Find the MSR. */
     1266        if (pGuestMsrLoad[i].u32Msr == idMsr)
     1267        {
     1268            /*
     1269             * If it's the last MSR, we only need to reduce the MSR count.
     1270             * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
     1271             */
     1272            if (i < cMsrs - 1)
     1273            {
     1274                /* Remove it from the VM-entry MSR-load area. */
     1275                pGuestMsrLoad[i].u32Msr   = pGuestMsrLoad[cMsrs - 1].u32Msr;
     1276                pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
     1277
     1278                /* Remove it from the VM-exit MSR-store area if it's in a different page. */
     1279                if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
     1280                {
     1281                    PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
     1282                    Assert(pGuestMsrStore[i].u32Msr == idMsr);
     1283                    pGuestMsrStore[i].u32Msr   = pGuestMsrStore[cMsrs - 1].u32Msr;
     1284                    pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
     1285                }
     1286
     1287                /* Remove it from the VM-exit MSR-load area. */
     1288                PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     1289                Assert(pHostMsr[i].u32Msr == idMsr);
     1290                pHostMsr[i].u32Msr   = pHostMsr[cMsrs - 1].u32Msr;
     1291                pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
     1292            }
     1293
     1294            /* Reduce the count to reflect the removed MSR and bail. */
     1295            --cMsrs;
     1296            break;
     1297        }
     1298    }
     1299
     1300    /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
     1301    if (cMsrs != pVmcsInfo->cEntryMsrLoad)
     1302    {
     1303        int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
     1304        AssertRCReturn(rc, rc);
     1305
     1306        /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
     1307        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     1308            hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
     1309
     1310        Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
     1311        return VINF_SUCCESS;
     1312    }
     1313
     1314    return VERR_NOT_FOUND;
     1315}
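
The removal above is the classic unordered swap-with-last idiom: overwrite the victim with the final element and shrink the count, trading element order for O(1) deletion. A self-contained sketch on a plain array (assumed types, not VBox code):

    #include <stdint.h>

    typedef struct { uint32_t u32Msr; uint64_t u64Value; } SKETCHENTRY;

    /* Returns the new element count; order is not preserved. */
    static uint32_t sketchRemoveEntry(SKETCHENTRY *paEntries, uint32_t cEntries, uint32_t idMsr)
    {
        for (uint32_t i = 0; i < cEntries; i++)
            if (paEntries[i].u32Msr == idMsr)
            {
                if (i < cEntries - 1)               /* Not last: overwrite with last. */
                    paEntries[i] = paEntries[cEntries - 1];
                return cEntries - 1;
            }
        return cEntries;                            /* Not found: count unchanged. */
    }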
     1316
     1317
     1318/**
     1319 * Updates the value of all host MSRs in the VM-exit MSR-load area.
     1320 *
     1321 * @param   pVCpu       The cross context virtual CPU structure.
     1322 * @param   pVmcsInfo   The VMCS info. object.
     1323 *
     1324 * @remarks No-long-jump zone!!!
     1325 */
     1326static void hmR0VmxUpdateAutoLoadHostMsrs(PCVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
     1327{
     1328    RT_NOREF(pVCpu);
     1329    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1330
     1331    PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     1332    uint32_t const cMsrs     = pVmcsInfo->cExitMsrLoad;
     1333    Assert(pHostMsrLoad);
     1334    Assert(sizeof(*pHostMsrLoad) * cMsrs <= X86_PAGE_4K_SIZE);
     1335    LogFlowFunc(("pVCpu=%p cMsrs=%u\n", pVCpu, cMsrs));
     1336    for (uint32_t i = 0; i < cMsrs; i++)
     1337    {
     1338        /*
     1339         * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
     1340         * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
     1341         */
     1342        if (pHostMsrLoad[i].u32Msr == MSR_K6_EFER)
     1343            pHostMsrLoad[i].u64Value = g_uHmVmxHostMsrEfer;
     1344        else
     1345            pHostMsrLoad[i].u64Value = ASMRdMsr(pHostMsrLoad[i].u32Msr);
     1346    }
     1347}
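
The records this loop walks follow the VM-entry/VM-exit MSR-area format in the Intel SDM: 16-byte entries of MSR index, a reserved dword, and a 64-bit value. A sketch of the shape (the struct name here is hypothetical; the type used above is VMXAUTOMSR):

    #include <stdint.h>

    typedef struct SKETCHAUTOMSR
    {
        uint32_t u32Msr;        /* MSR index to load/store. */
        uint32_t u32Reserved;   /* Must be zero. */
        uint64_t u64Value;      /* MSR value loaded/stored. */
    } SKETCHAUTOMSR;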
     1348
     1349
     1350/**
     1351 * Saves a set of host MSRs to allow read/write passthru access to the guest and
     1352 * perform lazy restoration of the host MSRs while leaving VT-x.
     1353 *
     1354 * @param   pVCpu   The cross context virtual CPU structure.
     1355 *
     1356 * @remarks No-long-jump zone!!!
     1357 */
     1358static void hmR0VmxLazySaveHostMsrs(PVMCPUCC pVCpu)
     1359{
     1360    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1361
     1362    /*
     1363     * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in hmR0VmxSetupVmcsProcCtls().
     1364     */
     1365    if (!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
     1366    {
     1367        Assert(!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST));  /* Guest MSRs better not be loaded now. */
     1368        if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
     1369        {
     1370            pVCpu->hmr0.s.vmx.u64HostMsrLStar        = ASMRdMsr(MSR_K8_LSTAR);
     1371            pVCpu->hmr0.s.vmx.u64HostMsrStar         = ASMRdMsr(MSR_K6_STAR);
     1372            pVCpu->hmr0.s.vmx.u64HostMsrSfMask       = ASMRdMsr(MSR_K8_SF_MASK);
     1373            pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
     1374        }
     1375        pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
     1376    }
     1377}
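
The restore counterpart, performed elsewhere in this file when leaving VT-x, only needs to write the saved values back if guest values were actually loaded. A hedged sketch assuming the same field and flag names:

    /* Sketch: undo lazy guest-MSR loading when leaving the VMX session. */
    if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
    {
        ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hmr0.s.vmx.u64HostMsrLStar);
        ASMWrMsr(MSR_K6_STAR,           pVCpu->hmr0.s.vmx.u64HostMsrStar);
        ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hmr0.s.vmx.u64HostMsrSfMask);
        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase);
    }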
     1378
    27271379
    27281380#ifdef VBOX_STRICT
     
    40942746    if (RT_SUCCESS(rc))
    40952747    {
    4096         uint64_t const u64Cr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
    4097         uint64_t const u64Cr4Mask = hmR0VmxGetFixedCr4Mask(pVCpu);
     2748        uint64_t const u64Cr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
     2749        uint64_t const u64Cr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
    40982750
    40992751        rc = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);    AssertRC(rc);
     
    41792831}
    41802832#endif
    4181 
    4182 
    4183 /**
    4184  * Sets pfnStartVm to the best suited variant.
    4185  *
     4186  * This must be called whenever anything changes relative to the hmR0VmxStartVm
    4187  * variant selection:
    4188  *      - pVCpu->hm.s.fLoadSaveGuestXcr0
    4189  *      - HM_WSF_IBPB_ENTRY in pVCpu->hmr0.s.fWorldSwitcher
    4190  *      - HM_WSF_IBPB_EXIT  in pVCpu->hmr0.s.fWorldSwitcher
    4191  *      - Perhaps: CPUMIsGuestFPUStateActive() (windows only)
    4192  *      - Perhaps: CPUMCTX.fXStateMask (windows only)
    4193  *
     4194  * We currently ASSUME that neither HM_WSF_IBPB_ENTRY nor HM_WSF_IBPB_EXIT
     4195  * can be changed at runtime.
    4196  */
    4197 static void hmR0VmxUpdateStartVmFunction(PVMCPUCC pVCpu)
    4198 {
    4199     static const struct CLANGWORKAROUND { PFNHMVMXSTARTVM pfn; } s_aHmR0VmxStartVmFunctions[] =
    4200     {
    4201         { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
    4202         { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
    4203         { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
    4204         { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
    4205         { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
    4206         { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
    4207         { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
    4208         { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
    4209         { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
    4210         { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
    4211         { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
    4212         { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
    4213         { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
    4214         { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
    4215         { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
    4216         { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
    4217         { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
    4218         { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
    4219         { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
    4220         { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
    4221         { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
    4222         { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
    4223         { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
    4224         { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
    4225         { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
    4226         { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
    4227         { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
    4228         { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
    4229         { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
    4230         { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
    4231         { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
    4232         { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
    4233     };
    4234     uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0                 ?  1 : 0)
    4235                         | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ?  2 : 0)
    4236                         | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_L1D_ENTRY  ?  4 : 0)
    4237                         | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_MDS_ENTRY  ?  8 : 0)
    4238                         | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT  ? 16 : 0);
    4239     PFNHMVMXSTARTVM const pfnStartVm = s_aHmR0VmxStartVmFunctions[idx].pfn;
    4240     if (pVCpu->hmr0.s.vmx.pfnStartVm != pfnStartVm)
    4241         pVCpu->hmr0.s.vmx.pfnStartVm = pfnStartVm;
    4242 }
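
A worked example of the variant selection: a VCPU that needs XCR0 switching and an IBPB on entry, but no L1D/MDS flush on entry and no IBPB on exit, computes idx = 1 | 2 = 3 and thus picks hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit. The boolean literals below stand in for the VCPU state:

    uintptr_t const idx = (true  ?  1 : 0)   /* fLoadSaveGuestXcr0 */
                        | (true  ?  2 : 0)   /* HM_WSF_IBPB_ENTRY  */
                        | (false ?  4 : 0)   /* HM_WSF_L1D_ENTRY   */
                        | (false ?  8 : 0)   /* HM_WSF_MDS_ENTRY   */
                        | (false ? 16 : 0);  /* HM_WSF_IBPB_EXIT   */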
    42432833
    42442834
     
    43122902                                VmcsRevId.n.fIsShadowVmcs = 1;
    43132903                                *(uint32_t *)pVmcsInfo->pvShadowVmcs = VmcsRevId.u;
    4314                                 rc = hmR0VmxClearShadowVmcs(pVmcsInfo);
     2904                                rc = vmxHCClearShadowVmcs(pVmcsInfo);
    43152905                                if (RT_SUCCESS(rc))
    43162906                                { /* likely */ }
     
    49773567     * import CR4 and CR0 from the VMCS here as those bits are always up to date.
    49783568     */
    4979     Assert(hmR0VmxGetFixedCr4Mask(pVCpu) & X86_CR4_PAE);
    4980     Assert(hmR0VmxGetFixedCr0Mask(pVCpu) & X86_CR0_PG);
     3569    Assert(vmxHCGetFixedCr4Mask(pVCpu) & X86_CR4_PAE);
     3570    Assert(vmxHCGetFixedCr0Mask(pVCpu) & X86_CR0_PG);
    49813571    if (   (pCtx->cr4 & X86_CR4_PAE)
    49823572        && (pCtx->cr0 & X86_CR0_PG))
     
    50073597
    50083598/**
    5009  * Exports the guest state with appropriate VM-entry and VM-exit controls in the
    5010  * VMCS.
    5011  *
    5012  * This is typically required when the guest changes paging mode.
    5013  *
    5014  * @returns VBox status code.
    5015  * @param   pVCpu           The cross context virtual CPU structure.
    5016  * @param   pVmxTransient   The VMX-transient structure.
    5017  *
    5018  * @remarks Requires EFER.
    5019  * @remarks No-long-jump zone!!!
    5020  */
    5021 static int hmR0VmxExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    5022 {
    5023     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
    5024     {
    5025         PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    5026         PVMXVMCSINFO pVmcsInfo      = pVmxTransient->pVmcsInfo;
    5027 
    5028         /*
    5029          * VM-entry controls.
    5030          */
    5031         {
    5032             uint32_t       fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0;    /* Bits set here must be set in the VMCS. */
    5033             uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
    5034 
    5035             /*
    5036              * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
    5037              * The first VT-x capable CPUs only supported the 1-setting of this bit.
    5038              *
    5039              * For nested-guests, this is a mandatory VM-entry control. It's also
    5040              * required because we do not want to leak host bits to the nested-guest.
    5041              */
    5042             fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
    5043 
    5044             /*
    5045              * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
    5046              *
     5047              * For nested-guests, we initialize the "IA-32e mode guest" control with what is
    5048              * required to get the nested-guest working with hardware-assisted VMX execution.
    5049              * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
    5050              * can skip intercepting changes to the EFER MSR. This is why it needs to be done
    5051              * here rather than while merging the guest VMCS controls.
    5052              */
    5053             if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
    5054             {
    5055                 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
    5056                 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
    5057             }
    5058             else
    5059                 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
    5060 
    5061             /*
    5062              * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
    5063              *
    5064              * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
    5065              * regardless of whether the nested-guest VMCS specifies it because we are free to
    5066              * load whatever MSRs we require and we do not need to modify the guest visible copy
    5067              * of the VM-entry MSR load area.
    5068              */
    5069             if (   g_fHmVmxSupportsVmcsEfer
    5070                 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
    5071                 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
    5072             else
    5073                 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
    5074 
    5075             /*
    5076              * The following should -not- be set (since we're not in SMM mode):
    5077              * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
    5078              * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
    5079              */
    5080 
    5081             /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
    5082              *        VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
    5083 
    5084             if ((fVal & fZap) == fVal)
    5085             { /* likely */ }
    5086             else
    5087             {
    5088                 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    5089                           g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
    5090                 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
    5091                 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    5092             }
    5093 
    5094             /* Commit it to the VMCS. */
    5095             if (pVmcsInfo->u32EntryCtls != fVal)
    5096             {
    5097                 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
    5098                 AssertRC(rc);
    5099                 pVmcsInfo->u32EntryCtls = fVal;
    5100             }
    5101         }
    5102 
    5103         /*
    5104          * VM-exit controls.
    5105          */
    5106         {
    5107             uint32_t       fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0;     /* Bits set here must be set in the VMCS. */
    5108             uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
    5109 
    5110             /*
    5111              * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
    5112              * supported the 1-setting of this bit.
    5113              *
    5114              * For nested-guests, we set the "save debug controls" as the converse
    5115              * "load debug controls" is mandatory for nested-guests anyway.
    5116              */
    5117             fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
    5118 
    5119             /*
    5120              * Set the host long mode active (EFER.LMA) bit (which Intel calls
    5121              * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
     5122              * host EFER.LMA and EFER.LME bits to this value. See assertion in
    5123              * hmR0VmxExportHostMsrs().
    5124              *
    5125              * For nested-guests, we always set this bit as we do not support 32-bit
    5126              * hosts.
    5127              */
    5128             fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
    5129 
    5130             /*
    5131              * If the VMCS EFER MSR fields are supported by the hardware, we use it.
    5132              *
    5133              * For nested-guests, we should use the "save IA32_EFER" control if we also
    5134              * used the "load IA32_EFER" control while exporting VM-entry controls.
    5135              */
    5136             if (   g_fHmVmxSupportsVmcsEfer
    5137                 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
    5138             {
    5139                 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
    5140                      |  VMX_EXIT_CTLS_LOAD_EFER_MSR;
    5141             }
    5142 
    5143             /*
    5144              * Enable saving of the VMX-preemption timer value on VM-exit.
    5145              * For nested-guests, currently not exposed/used.
    5146              */
    5147             /** @todo r=bird: Measure performance hit because of this vs. always rewriting
    5148              *        the timer value. */
    5149             if (pVM->hmr0.s.vmx.fUsePreemptTimer)
    5150             {
    5151                 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
    5152                 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
    5153             }
    5154 
    5155             /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
    5156             Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
    5157 
    5158             /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
    5159              *        VMX_EXIT_CTLS_SAVE_PAT_MSR,
    5160              *        VMX_EXIT_CTLS_LOAD_PAT_MSR. */
    5161 
    5162             if ((fVal & fZap) == fVal)
    5163             { /* likely */ }
    5164             else
    5165             {
     5166                 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    5167                           g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
    5168                 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
    5169                 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    5170             }
    5171 
    5172             /* Commit it to the VMCS. */
    5173             if (pVmcsInfo->u32ExitCtls != fVal)
    5174             {
    5175                 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
    5176                 AssertRC(rc);
    5177                 pVmcsInfo->u32ExitCtls = fVal;
    5178             }
    5179         }
    5180 
    5181         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
    5182     }
    5183     return VINF_SUCCESS;
    5184 }
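
The fVal/fZap dance in both blocks above is the standard VMX allowed0/allowed1 capability check: bits set in allowed0 must be one, bits clear in allowed1 must be zero. A minimal sketch of the same feasibility test (hypothetical helper name):

    #include <stdbool.h>
    #include <stdint.h>

    static bool sketchIsCtlComboValid(uint32_t fAllowed0, uint32_t fAllowed1, uint32_t fDesired)
    {
        uint32_t const fVal = fDesired | fAllowed0;  /* Force the must-be-one bits.       */
        uint32_t const fZap = fAllowed1;             /* Bits clear here must stay clear.  */
        return (fVal & fZap) == fVal;
    }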
    5185 
    5186 
    5187 /**
    5188  * Sets the TPR threshold in the VMCS.
    5189  *
    5190  * @param   pVmcsInfo           The VMCS info. object.
    5191  * @param   u32TprThreshold     The TPR threshold (task-priority class only).
    5192  */
    5193 DECLINLINE(void) hmR0VmxApicSetTprThreshold(PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
    5194 {
    5195     Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK));         /* Bits 31:4 MBZ. */
    5196     Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
    5197     RT_NOREF(pVmcsInfo);
    5198     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
    5199     AssertRC(rc);
    5200 }
    5201 
    5202 
    5203 /**
    5204  * Exports the guest APIC TPR state into the VMCS.
    5205  *
    5206  * @param   pVCpu           The cross context virtual CPU structure.
    5207  * @param   pVmxTransient   The VMX-transient structure.
    5208  *
    5209  * @remarks No-long-jump zone!!!
    5210  */
    5211 static void hmR0VmxExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    5212 {
    5213     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
    5214     {
    5215         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
    5216 
    5217         PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    5218         if (!pVmxTransient->fIsNestedGuest)
    5219         {
    5220             if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
    5221                 && APICIsEnabled(pVCpu))
    5222             {
    5223                 /*
    5224                  * Setup TPR shadowing.
    5225                  */
    5226                 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    5227                 {
    5228                     bool    fPendingIntr  = false;
    5229                     uint8_t u8Tpr         = 0;
    5230                     uint8_t u8PendingIntr = 0;
    5231                     int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
    5232                     AssertRC(rc);
    5233 
    5234                     /*
    5235                      * If there are interrupts pending but masked by the TPR, instruct VT-x to
    5236                      * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
    5237                      * priority of the pending interrupt so we can deliver the interrupt. If there
    5238                      * are no interrupts pending, set threshold to 0 to not cause any
    5239                      * TPR-below-threshold VM-exits.
    5240                      */
    5241                     uint32_t u32TprThreshold = 0;
    5242                     if (fPendingIntr)
    5243                     {
    5244                         /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
    5245                            (which is the Task-Priority Class). */
    5246                         const uint8_t u8PendingPriority = u8PendingIntr >> 4;
    5247                         const uint8_t u8TprPriority     = u8Tpr >> 4;
    5248                         if (u8PendingPriority <= u8TprPriority)
    5249                             u32TprThreshold = u8PendingPriority;
    5250                     }
    5251 
    5252                     hmR0VmxApicSetTprThreshold(pVmcsInfo, u32TprThreshold);
    5253                 }
    5254             }
    5255         }
    5256         /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
    5257         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
    5258     }
    5259 }
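
A worked example of the threshold logic: with TPR = 0x50 (priority class 5) and highest pending vector 0x43 (class 4), the pending class does not exceed the TPR class, so the threshold becomes 4; once the guest lowers its TPR class below 4, the TPR-below-threshold VM-exit fires and the interrupt can be delivered. The values below are illustrative only:

    uint8_t const  u8Tpr             = 0x50;                /* Guest TPR.              */
    uint8_t const  u8PendingIntr     = 0x43;                /* Highest pending vector. */
    uint8_t const  u8PendingPriority = u8PendingIntr >> 4;  /* 4 */
    uint8_t const  u8TprPriority     = u8Tpr >> 4;          /* 5 */
    uint32_t const u32TprThreshold   = u8PendingPriority <= u8TprPriority
                                     ? u8PendingPriority : 0;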
    5260 
    5261 
    5262 /**
    5263  * Gets the guest interruptibility-state and updates related force-flags.
    5264  *
    5265  * @returns Guest's interruptibility-state.
    5266  * @param   pVCpu           The cross context virtual CPU structure.
    5267  *
    5268  * @remarks No-long-jump zone!!!
    5269  */
    5270 static uint32_t hmR0VmxGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
    5271 {
    5272     /*
    5273      * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
    5274      */
    5275     uint32_t fIntrState = 0;
    5276     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    5277     {
    5278         /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
    5279         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    5280 
    5281         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    5282         if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
    5283         {
    5284             if (pCtx->eflags.Bits.u1IF)
    5285                 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
    5286             else
    5287                 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
    5288         }
     5289         else
    5290         {
    5291             /*
    5292              * We can clear the inhibit force flag as even if we go back to the recompiler
    5293              * without executing guest code in VT-x, the flag's condition to be cleared is
    5294              * met and thus the cleared state is correct.
    5295              */
    5296             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    5297         }
    5298     }
    5299 
    5300     /*
    5301      * Check if we should inhibit NMI delivery.
    5302      */
    5303     if (CPUMIsGuestNmiBlocking(pVCpu))
    5304         fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
    5305 
    5306     /*
    5307      * Validate.
    5308      */
    5309 #ifdef VBOX_STRICT
     5310     /* We don't support block-by-SMI yet. */
    5311     Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
    5312 
    5313     /* Block-by-STI must not be set when interrupts are disabled. */
    5314     if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    5315     {
    5316         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    5317         Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
    5318     }
    5319 #endif
    5320 
    5321     return fIntrState;
    5322 }
    5323 
    5324 
    5325 /**
    5326  * Exports the exception intercepts required for guest execution in the VMCS.
    5327  *
    5328  * @param   pVCpu           The cross context virtual CPU structure.
    5329  * @param   pVmxTransient   The VMX-transient structure.
    5330  *
    5331  * @remarks No-long-jump zone!!!
    5332  */
    5333 static void hmR0VmxExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    5334 {
    5335     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
    5336     {
    5337         /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
    5338         if (   !pVmxTransient->fIsNestedGuest
    5339             &&  pVCpu->hm.s.fGIMTrapXcptUD)
    5340             hmR0VmxAddXcptIntercept(pVmxTransient, X86_XCPT_UD);
    5341         else
    5342             hmR0VmxRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
    5343 
    5344         /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
    5345         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
    5346     }
    5347 }
    5348 
    5349 
    5350 /**
    5351  * Exports the guest's RIP into the guest-state area in the VMCS.
    5352  *
    5353  * @param   pVCpu   The cross context virtual CPU structure.
    5354  *
    5355  * @remarks No-long-jump zone!!!
    5356  */
    5357 static void hmR0VmxExportGuestRip(PVMCPUCC pVCpu)
    5358 {
    5359     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
    5360     {
    5361         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
    5362 
    5363         int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
    5364         AssertRC(rc);
    5365 
    5366         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
    5367         Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
    5368     }
    5369 }
    5370 
    5371 
    5372 /**
    53733599 * Exports the guest's RSP into the guest-state area in the VMCS.
    53743600 *
     
    53903616    }
    53913617}
    5392 
    5393 
    5394 /**
    5395  * Exports the guest's RFLAGS into the guest-state area in the VMCS.
    5396  *
    5397  * @param   pVCpu           The cross context virtual CPU structure.
    5398  * @param   pVmxTransient   The VMX-transient structure.
    5399  *
    5400  * @remarks No-long-jump zone!!!
    5401  */
    5402 static void hmR0VmxExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    5403 {
    5404     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
    5405     {
    5406         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    5407 
    5408         /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
    5409            Let us assert it as such and use 32-bit VMWRITE. */
    5410         Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
    5411         X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
    5412         Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
    5413         Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
    5414 
    5415         /*
    5416          * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
    5417          * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
    5418          * can run the real-mode guest code under Virtual 8086 mode.
    5419          */
    5420         PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
    5421         if (pVmcsInfo->RealMode.fRealOnV86Active)
    5422         {
    5423             Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
    5424             Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
    5425             Assert(!pVmxTransient->fIsNestedGuest);
    5426             pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32;    /* Save the original eflags of the real-mode guest. */
    5427             fEFlags.Bits.u1VM   = 1;                         /* Set the Virtual 8086 mode bit. */
    5428             fEFlags.Bits.u2IOPL = 0;                         /* Change IOPL to 0, otherwise certain instructions won't fault. */
    5429         }
    5430 
    5431         int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
    5432         AssertRC(rc);
    5433 
    5434         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
    5435         Log4Func(("eflags=%#RX32\n", fEFlags.u32));
    5436     }
    5437 }
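
To make the real-on-v86 trick above concrete: the guest's real-mode eflags are stashed so a VM-exit can restore them, EFLAGS.VM is set so the CPU runs the code as Virtual-8086, and IOPL is forced to 0 so privileged instructions fault into the monitor. A sketch with an illustrative flags value:

    X86EFLAGS fEFlags;
    fEFlags.u32 = 0x00000202;                   /* Illustrative: IF plus the always-one bit. */
    uint32_t const uSavedEflags = fEFlags.u32;  /* What RealMode.Eflags stashes above.       */
    fEFlags.Bits.u1VM   = 1;                    /* Execute as Virtual-8086 code.             */
    fEFlags.Bits.u2IOPL = 0;                    /* Privileged instructions now fault.        */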
    5438 
    5439 
    5440 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    5441 /**
    5442  * Copies the nested-guest VMCS to the shadow VMCS.
    5443  *
    5444  * @returns VBox status code.
    5445  * @param   pVCpu       The cross context virtual CPU structure.
    5446  * @param   pVmcsInfo   The VMCS info. object.
    5447  *
    5448  * @remarks No-long-jump zone!!!
    5449  */
    5450 static int hmR0VmxCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    5451 {
    5452     PVMCC      const pVM         = pVCpu->CTX_SUFF(pVM);
    5453     PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    5454 
    5455     /*
    5456      * Disable interrupts so we don't get preempted while the shadow VMCS is the
    5457      * current VMCS, as we may try saving guest lazy MSRs.
    5458      *
    5459      * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
    5460      * calling the import VMCS code which is currently performing the guest MSR reads
    5461      * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
    5462      * and the rest of the VMX leave session machinery.
    5463      */
    5464     RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    5465 
    5466     int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
    5467     if (RT_SUCCESS(rc))
    5468     {
    5469         /*
    5470          * Copy all guest read/write VMCS fields.
    5471          *
    5472          * We don't check for VMWRITE failures here for performance reasons and
    5473          * because they are not expected to fail, barring irrecoverable conditions
    5474          * like hardware errors.
    5475          */
    5476         uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
    5477         for (uint32_t i = 0; i < cShadowVmcsFields; i++)
    5478         {
    5479             uint64_t       u64Val;
    5480             uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
    5481             IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
    5482             VMXWriteVmcs64(uVmcsField, u64Val);
    5483         }
    5484 
    5485         /*
    5486          * If the host CPU supports writing all VMCS fields, copy the guest read-only
    5487          * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
    5488          */
    5489         if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
    5490         {
    5491             uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
    5492             for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
    5493             {
    5494                 uint64_t       u64Val;
    5495                 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
    5496                 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
    5497                 VMXWriteVmcs64(uVmcsField, u64Val);
    5498             }
    5499         }
    5500 
    5501         rc  = hmR0VmxClearShadowVmcs(pVmcsInfo);
    5502         rc |= hmR0VmxLoadVmcs(pVmcsInfo);
    5503     }
    5504 
    5505     ASMSetFlags(fEFlags);
    5506     return rc;
    5507 }
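
Condensed, the copy protocol above is: make the shadow VMCS current, VMWRITE every tracked field from the software VMCS image, then VMCLEAR the shadow and make the original VMCS current again, all with interrupts disabled. A hedged sketch in which the field reader and the cFields/paFields stand-ins are hypothetical (the other helpers are the ones used above):

    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
    int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < cFields; i++)   /* All tracked shadow fields. */
            VMXWriteVmcs64(paFields[i], SketchReadSwVmcsField(pVmcsNstGst, paFields[i]));
        rc  = hmR0VmxClearShadowVmcs(pVmcsInfo);
        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
    }
    ASMSetFlags(fEFlags);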
    5508 
    5509 
    5510 /**
    5511  * Copies the shadow VMCS to the nested-guest VMCS.
    5512  *
    5513  * @returns VBox status code.
    5514  * @param   pVCpu       The cross context virtual CPU structure.
    5515  * @param   pVmcsInfo   The VMCS info. object.
    5516  *
    5517  * @remarks Called with interrupts disabled.
    5518  */
    5519 static int hmR0VmxCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    5520 {
    5521     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    5522     PVMCC const     pVM         = pVCpu->CTX_SUFF(pVM);
    5523     PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    5524 
    5525     int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
    5526     if (RT_SUCCESS(rc))
    5527     {
    5528         /*
    5529          * Copy guest read/write fields from the shadow VMCS.
    5530          * Guest read-only fields cannot be modified, so no need to copy them.
    5531          *
    5532          * We don't check for VMREAD failures here for performance reasons and
    5533          * because they are not expected to fail, barring irrecoverable conditions
    5534          * like hardware errors.
    5535          */
    5536         uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
    5537         for (uint32_t i = 0; i < cShadowVmcsFields; i++)
    5538         {
    5539             uint64_t       u64Val;
    5540             uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
    5541             VMXReadVmcs64(uVmcsField, &u64Val);
    5542             IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
    5543         }
    5544 
    5545         rc  = hmR0VmxClearShadowVmcs(pVmcsInfo);
    5546         rc |= hmR0VmxLoadVmcs(pVmcsInfo);
    5547     }
    5548     return rc;
    5549 }
    5550 
    5551 
    5552 /**
    5553  * Enables VMCS shadowing for the given VMCS info. object.
    5554  *
    5555  * @param   pVmcsInfo   The VMCS info. object.
    5556  *
    5557  * @remarks No-long-jump zone!!!
    5558  */
    5559 static void hmR0VmxEnableVmcsShadowing(PVMXVMCSINFO pVmcsInfo)
    5560 {
    5561     uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
    5562     if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
    5563     {
    5564         Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
    5565         uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
    5566         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2);                            AssertRC(rc);
    5567         rc     = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs);  AssertRC(rc);
    5568         pVmcsInfo->u32ProcCtls2   = uProcCtls2;
    5569         pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
    5570         Log4Func(("Enabled\n"));
    5571     }
    5572 }
    5573 
    5574 
    5575 /**
    5576  * Disables VMCS shadowing for the given VMCS info. object.
    5577  *
    5578  * @param   pVmcsInfo   The VMCS info. object.
    5579  *
    5580  * @remarks No-long-jump zone!!!
    5581  */
    5582 static void hmR0VmxDisableVmcsShadowing(PVMXVMCSINFO pVmcsInfo)
    5583 {
    5584     /*
    5585      * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
    5586      * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
    5587      * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
    5588      * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
    5589      *
    5590      * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
    5591      * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
    5592      */
    5593     uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
    5594     if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
    5595     {
    5596         uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
    5597         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2);                AssertRC(rc);
    5598         rc     = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);     AssertRC(rc);
    5599         pVmcsInfo->u32ProcCtls2   = uProcCtls2;
    5600         pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
    5601         Log4Func(("Disabled\n"));
    5602     }
    5603 }
    5604 #endif
    56053618
    56063619
     
    56483661                if (!pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs)
    56493662                {
    5650                     int rc = hmR0VmxCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
     3663                    int rc = vmxHCCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
    56513664                    AssertRCReturn(rc, rc);
    56523665                    pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = true;
    56533666                }
    5654                 hmR0VmxEnableVmcsShadowing(pVmcsInfo);
     3667                vmxHCEnableVmcsShadowing(pVmcsInfo);
    56553668            }
    56563669            else
    5657                 hmR0VmxDisableVmcsShadowing(pVmcsInfo);
     3670                vmxHCDisableVmcsShadowing(pVmcsInfo);
    56583671        }
    56593672#else
     
    56633676    }
    56643677    return VINF_SUCCESS;
    5665 }
    5666 
    5667 
    5668 /**
    5669  * Exports the guest CR0 control register into the guest-state area in the VMCS.
    5670  *
    5671  * The guest FPU state is always pre-loaded hence we don't need to bother about
    5672  * sharing FPU related CR0 bits between the guest and host.
    5673  *
    5674  * @returns VBox status code.
    5675  * @param   pVCpu           The cross context virtual CPU structure.
    5676  * @param   pVmxTransient   The VMX-transient structure.
    5677  *
    5678  * @remarks No-long-jump zone!!!
    5679  */
    5680 static int hmR0VmxExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    5681 {
    5682     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
    5683     {
    5684         PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    5685         PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    5686 
    5687         uint64_t       fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
    5688         uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
    5689         if (pVM->hmr0.s.vmx.fUnrestrictedGuest)
    5690             fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
    5691         else
    5692             Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
    5693 
    5694         if (!pVmxTransient->fIsNestedGuest)
    5695         {
    5696             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    5697             uint64_t       u64GuestCr0  = pVCpu->cpum.GstCtx.cr0;
    5698             uint64_t const u64ShadowCr0 = u64GuestCr0;
    5699             Assert(!RT_HI_U32(u64GuestCr0));
    5700 
    5701             /*
    5702              * Setup VT-x's view of the guest CR0.
    5703              */
    5704             uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
    5705             if (pVM->hmr0.s.fNestedPaging)
    5706             {
    5707                 if (CPUMIsGuestPagingEnabled(pVCpu))
    5708                 {
    5709                     /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
    5710                     uProcCtls &= ~(  VMX_PROC_CTLS_CR3_LOAD_EXIT
    5711                                    | VMX_PROC_CTLS_CR3_STORE_EXIT);
    5712                 }
    5713                 else
    5714                 {
    5715                     /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
    5716                     uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
    5717                               |  VMX_PROC_CTLS_CR3_STORE_EXIT;
    5718                 }
    5719 
    5720                 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
    5721                 if (pVM->hmr0.s.vmx.fUnrestrictedGuest)
    5722                     uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
    5723             }
    5724             else
    5725             {
    5726                 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
    5727                 u64GuestCr0 |= X86_CR0_WP;
    5728             }
    5729 
    5730             /*
    5731              * Guest FPU bits.
    5732              *
    5733              * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
    5734              * using CR0.TS.
    5735              *
    5736              * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
     5737              * set on the first CPUs to support VT-x; nothing is said with regard to UX (unrestricted guest execution) in the VM-entry checks.
    5738              */
    5739             u64GuestCr0 |= X86_CR0_NE;
    5740 
    5741             /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
    5742             bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
    5743 
    5744             /*
    5745              * Update exception intercepts.
    5746              */
    5747             uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
    5748             if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    5749             {
    5750                 Assert(PDMVmmDevHeapIsEnabled(pVM));
    5751                 Assert(pVM->hm.s.vmx.pRealModeTSS);
    5752                 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
    5753             }
    5754             else
    5755             {
    5756                 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
    5757                 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
    5758                 if (fInterceptMF)
    5759                     uXcptBitmap |= RT_BIT(X86_XCPT_MF);
    5760             }
    5761 
    5762             /* Additional intercepts for debugging, define these yourself explicitly. */
    5763 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    5764             uXcptBitmap |= 0
    5765                         |  RT_BIT(X86_XCPT_BP)
    5766                         |  RT_BIT(X86_XCPT_DE)
    5767                         |  RT_BIT(X86_XCPT_NM)
    5768                         |  RT_BIT(X86_XCPT_TS)
    5769                         |  RT_BIT(X86_XCPT_UD)
    5770                         |  RT_BIT(X86_XCPT_NP)
    5771                         |  RT_BIT(X86_XCPT_SS)
    5772                         |  RT_BIT(X86_XCPT_GP)
    5773                         |  RT_BIT(X86_XCPT_PF)
    5774                         |  RT_BIT(X86_XCPT_MF)
    5775                         ;
    5776 #elif defined(HMVMX_ALWAYS_TRAP_PF)
    5777             uXcptBitmap |= RT_BIT(X86_XCPT_PF);
    5778 #endif
    5779             if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv)
    5780                 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
    5781             Assert(pVM->hmr0.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
    5782 
    5783             /* Apply the hardware specified CR0 fixed bits and enable caching. */
    5784             u64GuestCr0 |= fSetCr0;
    5785             u64GuestCr0 &= fZapCr0;
    5786             u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
    5787 
    5788             /* Commit the CR0 and related fields to the guest VMCS. */
    5789             int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR0, u64GuestCr0);               AssertRC(rc);
    5790             rc     = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0);   AssertRC(rc);
    5791             if (uProcCtls != pVmcsInfo->u32ProcCtls)
    5792             {
    5793                 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
    5794                 AssertRC(rc);
    5795             }
    5796             if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
    5797             {
    5798                 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    5799                 AssertRC(rc);
    5800             }
    5801 
    5802             /* Update our caches. */
    5803             pVmcsInfo->u32ProcCtls   = uProcCtls;
    5804             pVmcsInfo->u32XcptBitmap = uXcptBitmap;
    5805 
    5806             Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
    5807         }
    5808         else
    5809         {
    5810             /*
    5811              * With nested-guests, we may have extended the guest/host mask here since we
    5812              * merged in the outer guest's mask. Thus, the merged mask can include more bits
    5813              * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
    5814              * originally supplied. We must copy those bits from the nested-guest CR0 into
    5815              * the nested-guest CR0 read-shadow.
    5816              */
    5817             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    5818             uint64_t       u64GuestCr0  = pVCpu->cpum.GstCtx.cr0;
    5819             uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
    5820             Assert(!RT_HI_U32(u64GuestCr0));
    5821             Assert(u64GuestCr0 & X86_CR0_NE);
    5822 
    5823             /* Apply the hardware specified CR0 fixed bits and enable caching. */
    5824             u64GuestCr0 |= fSetCr0;
    5825             u64GuestCr0 &= fZapCr0;
    5826             u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
    5827 
    5828             /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
    5829             int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR0, u64GuestCr0);               AssertRC(rc);
    5830             rc     = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0);   AssertRC(rc);
    5831 
    5832             Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
    5833         }
    5834 
    5835         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
    5836     }
    5837 
    5838     return VINF_SUCCESS;
    5839 }
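
The fSetCr0/fZapCr0 application above follows the IA32_VMX_CR0_FIXED0/FIXED1 convention: bits set in FIXED0 must be one, bits clear in FIXED1 must be zero, so the CR0 value VT-x actually runs with is (cr0 | fixed0) & fixed1 (with PE/PG exempted under unrestricted guest execution). A minimal sketch (hypothetical helper):

    #include <stdint.h>

    static uint64_t sketchApplyCr0FixedBits(uint64_t uGuestCr0, uint64_t uFixed0, uint64_t uFixed1)
    {
        return (uGuestCr0 | uFixed0) & uFixed1;
    }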
    5840 
    5841 
    5842 /**
    5843  * Exports the guest control registers (CR3, CR4) into the guest-state area
    5844  * in the VMCS.
    5845  *
    5846  * @returns VBox strict status code.
    5847  * @retval  VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
    5848  *          without unrestricted guest access and the VMMDev is not presently
    5849  *          mapped (e.g. EFI32).
    5850  *
    5851  * @param   pVCpu           The cross context virtual CPU structure.
    5852  * @param   pVmxTransient   The VMX-transient structure.
    5853  *
    5854  * @remarks No-long-jump zone!!!
    5855  */
    5856 static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    5857 {
    5858     int rc  = VINF_SUCCESS;
    5859     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    5860 
    5861     /*
    5862      * Guest CR2.
    5863      * It's always loaded in the assembler code. Nothing to do here.
    5864      */
    5865 
    5866     /*
    5867      * Guest CR3.
    5868      */
    5869     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
    5870     {
    5871         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
    5872 
    5873         if (pVM->hmr0.s.fNestedPaging)
    5874         {
    5875             PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    5876             pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
    5877 
    5878             /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
    5879             Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
    5880             Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
    5881             Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
    5882 
    5883             /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
    5884             pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE,          VMX_EPTP_MEMTYPE_WB)
    5885                                   |  RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
    5886 
    5887             /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
    5888             AssertMsg(   ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3      /* Bits 3:5 (EPT page walk length - 1) must be 3. */
    5889                       && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0,     /* Bits 7:11 MBZ. */
    5890                          ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
    5891             AssertMsg(  !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01)           /* Bit 6 (EPT accessed & dirty bit). */
    5892                       || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
    5893                          ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
    5894 
    5895             rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
    5896             AssertRC(rc);
    5897 
    5898             uint64_t  u64GuestCr3;
    5899             PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    5900             if (   pVM->hmr0.s.vmx.fUnrestrictedGuest
    5901                 || CPUMIsGuestPagingEnabledEx(pCtx))
    5902             {
    5903                 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
    5904                 if (CPUMIsGuestInPAEModeEx(pCtx))
    5905                 {
    5906                     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u);     AssertRC(rc);
    5907                     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u);     AssertRC(rc);
    5908                     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u);     AssertRC(rc);
    5909                     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u);     AssertRC(rc);
    5910                 }
    5911 
    5912                 /*
    5913                  * With nested paging, the guest sees its own CR3 unmodified: either the
    5914                  * guest has paging enabled, or unrestricted guest execution handles the
    5915                  * guest while it runs without paging.
    5916                  */
    5917                 u64GuestCr3 = pCtx->cr3;
    5918             }
    5919             else
    5920             {
    5921                 /*
    5922                  * The guest is not using paging, but the CPU (VT-x) has to. While the guest
    5923                  * thinks it accesses physical memory directly, we use our identity-mapped
    5924                  * page table to map guest-linear to guest-physical addresses. EPT takes care
    5925                  * of translating it to host-physical addresses.
    5926                  */
    5927                 RTGCPHYS GCPhys;
    5928                 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
    5929 
    5930                 /* We obtain it here every time as the guest could have relocated this PCI region. */
    5931                 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
    5932                 if (RT_SUCCESS(rc))
    5933                 { /* likely */ }
    5934                 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
    5935                 {
    5936                     Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
    5937                     return VINF_EM_RESCHEDULE_REM;  /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
    5938                 }
    5939                 else
    5940                     AssertMsgFailedReturn(("%Rrc\n",  rc), rc);
    5941 
    5942                 u64GuestCr3 = GCPhys;
    5943             }
    5944 
    5945             Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
    5946             rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR3, u64GuestCr3);
    5947             AssertRC(rc);
    5948         }
    5949         else
    5950         {
    5951             Assert(!pVmxTransient->fIsNestedGuest);
    5952             /* Non-nested paging case, just use the hypervisor's CR3. */
    5953             RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
    5954 
    5955             Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
    5956             rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
    5957             AssertRC(rc);
    5958         }
    5959 
    5960         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);
    5961     }
    5962 
    5963     /*
    5964      * Guest CR4.
    5965      * ASSUMES this is done every time we get in from ring-3! (XCR0)
    5966      */
    5967     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
    5968     {
    5969         PCPUMCTX     pCtx      = &pVCpu->cpum.GstCtx;
    5970         PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    5971 
    5972         uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
    5973         uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
    5974 
    5975         /*
    5976          * With nested-guests, we may have extended the guest/host mask here (since we
    5977          * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means the
    5978          * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
    5979          * the nested hypervisor originally supplied. Thus, we should, in essence, copy
    5980          * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
    5981          */
    5982         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
    5983         uint64_t       u64GuestCr4  = pCtx->cr4;
    5984         uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
    5985                                     ? pCtx->cr4
    5986                                     : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
    5987         Assert(!RT_HI_U32(u64GuestCr4));
    5988 
    5989         /*
    5990          * Setup VT-x's view of the guest CR4.
    5991          *
    5992          * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
    5993          * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
    5994          * redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
    5995          *
    5996          * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
    5997          */
    5998         if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    5999         {
    6000             Assert(pVM->hm.s.vmx.pRealModeTSS);
    6001             Assert(PDMVmmDevHeapIsEnabled(pVM));
    6002             u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
    6003         }
    6004 
    6005         if (pVM->hmr0.s.fNestedPaging)
    6006         {
    6007             if (   !CPUMIsGuestPagingEnabledEx(pCtx)
    6008                 && !pVM->hmr0.s.vmx.fUnrestrictedGuest)
    6009             {
    6010                 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
    6011                 u64GuestCr4 |= X86_CR4_PSE;
    6012                 /* Our identity mapping is a 32-bit page directory. */
    6013                 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
    6014             }
    6015             /* else use guest CR4. */
    6016         }
    6017         else
    6018         {
    6019             Assert(!pVmxTransient->fIsNestedGuest);
    6020 
    6021             /*
    6022              * The shadow paging mode can differ from the guest paging mode, since the shadow follows the host
    6023              * paging mode; thus we need to adjust VT-x's view of CR4 according to our shadow page tables.
    6024              */
    6025             switch (pVCpu->hm.s.enmShadowMode)
    6026             {
    6027                 case PGMMODE_REAL:              /* Real-mode. */
    6028                 case PGMMODE_PROTECTED:         /* Protected mode without paging. */
    6029                 case PGMMODE_32_BIT:            /* 32-bit paging. */
    6030                 {
    6031                     u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
    6032                     break;
    6033                 }
    6034 
    6035                 case PGMMODE_PAE:               /* PAE paging. */
    6036                 case PGMMODE_PAE_NX:            /* PAE paging with NX. */
    6037                 {
    6038                     u64GuestCr4 |= X86_CR4_PAE;
    6039                     break;
    6040                 }
    6041 
    6042                 case PGMMODE_AMD64:             /* 64-bit AMD paging (long mode). */
    6043                 case PGMMODE_AMD64_NX:          /* 64-bit AMD paging (long mode) with NX enabled. */
    6044                 {
    6045 #ifdef VBOX_WITH_64_BITS_GUESTS
    6046                     /* For our assumption in hmR0VmxShouldSwapEferMsr. */
    6047                     Assert(u64GuestCr4 & X86_CR4_PAE);
    6048                     break;
    6049 #endif
    6050                 }
    6051                 default:
    6052                     AssertFailed();
    6053                     return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    6054             }
    6055         }
    6056 
    6057         /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
    6058         u64GuestCr4 |= fSetCr4;
    6059         u64GuestCr4 &= fZapCr4;
    6060 
    6061         /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
    6062         rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR4, u64GuestCr4);               AssertRC(rc);
    6063         rc = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4);   AssertRC(rc);
    6064 
    6065         /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
    6066         bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
    6067         if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
    6068         {
    6069             pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    6070             hmR0VmxUpdateStartVmFunction(pVCpu);
    6071         }
    6072 
    6073         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
    6074 
    6075         Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
    6076     }
    6077     return rc;
    60783678}
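
The EPTP assembly and the assertions that follow it condense into a short standalone sketch; the bit layout follows the Intel SDM EPTP format, with hand-rolled constants standing in for the RT_BF_MAKE macros (illustration, not VirtualBox code):

    #include <stdint.h>
    #include <stdio.h>

    /* EPTP layout (Intel SDM): bits 2:0 memory type (6 = write-back),
     * bits 5:3 EPT page-walk length minus one, upper bits the PML4 address. */
    static uint64_t MakeEptp(uint64_t HCPhysPml4)
    {
        uint64_t uEptp = HCPhysPml4 & ~UINT64_C(0xfff); /* 4K-aligned table address */
        uEptp |= UINT64_C(6);                           /* write-back memory type   */
        uEptp |= UINT64_C(3) << 3;                      /* 4-level walk => 4 - 1    */
        return uEptp;
    }

    int main(void)
    {
        uint64_t const uEptp = MakeEptp(UINT64_C(0x0000000123456000));
        /* The same two sanity checks the export code asserts. */
        printf("EPTP=%#llx walk-len-1=%llu (must be 3), bits 11:7=%llu (must be 0)\n",
               (unsigned long long)uEptp,
               (unsigned long long)((uEptp >> 3) & 0x7),
               (unsigned long long)((uEptp >> 7) & 0x1f));
        return 0;
    }
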
    60793679
     
    62493849            AssertRC(rc);
    62503850        }
    6251     }
    6252 
    6253     return VINF_SUCCESS;
    6254 }
    6255 
    6256 
    6257 #ifdef VBOX_STRICT
    6258 /**
    6259  * Strict function to validate segment registers.
    6260  *
    6261  * @param   pVCpu       The cross context virtual CPU structure.
    6262  * @param   pVmcsInfo   The VMCS info. object.
    6263  *
    6264  * @remarks Will import guest CR0 on strict builds during validation of
    6265  *          segments.
    6266  */
    6267 static void hmR0VmxValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    6268 {
    6269     /*
    6270      * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
    6271      *
    6272      * The reason we check for attribute value 0 in this function and not just the unusable bit is
    6273      * that hmR0VmxExportGuestSegReg() only updates the VMCS' copy of the value with the
    6274      * unusable bit and doesn't change the guest-context value.
    6275      */
    6276     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    6277     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    6278     hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
    6279     if (   !pVM->hmr0.s.vmx.fUnrestrictedGuest
    6280         && (   !CPUMIsGuestInRealModeEx(pCtx)
    6281             && !CPUMIsGuestInV86ModeEx(pCtx)))
    6282     {
    6283         /* Protected mode checks */
    6284         /* CS */
    6285         Assert(pCtx->cs.Attr.n.u1Present);
    6286         Assert(!(pCtx->cs.Attr.u & 0xf00));
    6287         Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
    6288         Assert(   (pCtx->cs.u32Limit & 0xfff) == 0xfff
    6289                || !(pCtx->cs.Attr.n.u1Granularity));
    6290         Assert(   !(pCtx->cs.u32Limit & 0xfff00000)
    6291                || (pCtx->cs.Attr.n.u1Granularity));
    6292         /* CS cannot be loaded with NULL in protected mode. */
    6293         Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
    6294         if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
    6295             Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
    6296         else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
    6297             Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
    6298         else
    6299             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
    6300         /* SS */
    6301         Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
    6302         Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
    6303         if (   !(pCtx->cr0 & X86_CR0_PE)
    6304             || pCtx->cs.Attr.n.u4Type == 3)
    6305         {
    6306             Assert(!pCtx->ss.Attr.n.u2Dpl);
    6307         }
    6308         if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
    6309         {
    6310             Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
    6311             Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
    6312             Assert(pCtx->ss.Attr.n.u1Present);
    6313             Assert(!(pCtx->ss.Attr.u & 0xf00));
    6314             Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
    6315             Assert(   (pCtx->ss.u32Limit & 0xfff) == 0xfff
    6316                    || !(pCtx->ss.Attr.n.u1Granularity));
    6317             Assert(   !(pCtx->ss.u32Limit & 0xfff00000)
    6318                    || (pCtx->ss.Attr.n.u1Granularity));
    6319         }
    6320         /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegReg(). */
    6321         if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
    6322         {
    6323             Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
    6324             Assert(pCtx->ds.Attr.n.u1Present);
    6325             Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
    6326             Assert(!(pCtx->ds.Attr.u & 0xf00));
    6327             Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
    6328             Assert(   (pCtx->ds.u32Limit & 0xfff) == 0xfff
    6329                    || !(pCtx->ds.Attr.n.u1Granularity));
    6330             Assert(   !(pCtx->ds.u32Limit & 0xfff00000)
    6331                    || (pCtx->ds.Attr.n.u1Granularity));
    6332             Assert(   !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    6333                    || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
    6334         }
    6335         if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
    6336         {
    6337             Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
    6338             Assert(pCtx->es.Attr.n.u1Present);
    6339             Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
    6340             Assert(!(pCtx->es.Attr.u & 0xf00));
    6341             Assert(!(pCtx->es.Attr.u & 0xfffe0000));
    6342             Assert(   (pCtx->es.u32Limit & 0xfff) == 0xfff
    6343                    || !(pCtx->es.Attr.n.u1Granularity));
    6344             Assert(   !(pCtx->es.u32Limit & 0xfff00000)
    6345                    || (pCtx->es.Attr.n.u1Granularity));
    6346             Assert(   !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    6347                    || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
    6348         }
    6349         if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
    6350         {
    6351             Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
    6352             Assert(pCtx->fs.Attr.n.u1Present);
    6353             Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
    6354             Assert(!(pCtx->fs.Attr.u & 0xf00));
    6355             Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
    6356             Assert(   (pCtx->fs.u32Limit & 0xfff) == 0xfff
    6357                    || !(pCtx->fs.Attr.n.u1Granularity));
    6358             Assert(   !(pCtx->fs.u32Limit & 0xfff00000)
    6359                    || (pCtx->fs.Attr.n.u1Granularity));
    6360             Assert(   !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    6361                    || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
    6362         }
    6363         if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
    6364         {
    6365             Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
    6366             Assert(pCtx->gs.Attr.n.u1Present);
    6367             Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
    6368             Assert(!(pCtx->gs.Attr.u & 0xf00));
    6369             Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
    6370             Assert(   (pCtx->gs.u32Limit & 0xfff) == 0xfff
    6371                    || !(pCtx->gs.Attr.n.u1Granularity));
    6372             Assert(   !(pCtx->gs.u32Limit & 0xfff00000)
    6373                    || (pCtx->gs.Attr.n.u1Granularity));
    6374             Assert(   !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    6375                    || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
    6376         }
    6377         /* 64-bit capable CPUs. */
    6378         Assert(!RT_HI_U32(pCtx->cs.u64Base));
    6379         Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
    6380         Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
    6381         Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
    6382     }
    6383     else if (   CPUMIsGuestInV86ModeEx(pCtx)
    6384              || (   CPUMIsGuestInRealModeEx(pCtx)
    6385                  && !pVM->hmr0.s.vmx.fUnrestrictedGuest))
    6386     {
    6387         /* Real and v86 mode checks. */
    6388         /* hmR0VmxExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
    6389         uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
    6390         if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    6391         {
    6392             u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
    6393             u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
    6394         }
    6395         else
    6396         {
    6397             u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
    6398             u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
    6399         }
    6400 
    6401         /* CS */
    6402         AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
    6403         Assert(pCtx->cs.u32Limit == 0xffff);
    6404         Assert(u32CSAttr == 0xf3);
    6405         /* SS */
    6406         Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
    6407         Assert(pCtx->ss.u32Limit == 0xffff);
    6408         Assert(u32SSAttr == 0xf3);
    6409         /* DS */
    6410         Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
    6411         Assert(pCtx->ds.u32Limit == 0xffff);
    6412         Assert(u32DSAttr == 0xf3);
    6413         /* ES */
    6414         Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
    6415         Assert(pCtx->es.u32Limit == 0xffff);
    6416         Assert(u32ESAttr == 0xf3);
    6417         /* FS */
    6418         Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
    6419         Assert(pCtx->fs.u32Limit == 0xffff);
    6420         Assert(u32FSAttr == 0xf3);
    6421         /* GS */
    6422         Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
    6423         Assert(pCtx->gs.u32Limit == 0xffff);
    6424         Assert(u32GSAttr == 0xf3);
    6425         /* 64-bit capable CPUs. */
    6426         Assert(!RT_HI_U32(pCtx->cs.u64Base));
    6427         Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
    6428         Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
    6429         Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
    6430     }
    6431 }
    6432 #endif /* VBOX_STRICT */
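
Each per-selector block above repeats the same pair of limit/granularity assertions from the Intel SDM: with G=1 the low 12 limit bits must all be ones, and a limit with any of bits 31:20 set requires G=1. A standalone sketch of that predicate (hypothetical helper, not a VBox function):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if a segment limit and granularity bit are architecturally consistent. */
    static bool IsSegLimitConsistent(uint32_t uLimit, bool fGranularity)
    {
        if (fGranularity && (uLimit & 0xfff) != 0xfff)
            return false;   /* G=1 => low 12 bits must be all ones */
        if (!fGranularity && (uLimit & UINT32_C(0xfff00000)))
            return false;   /* limit above bit 19 => G must be 1   */
        return true;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               IsSegLimitConsistent(UINT32_C(0xffffffff), true),   /* 1: flat 4G    */
               IsSegLimitConsistent(UINT32_C(0x000fffff), false),  /* 1: byte units */
               IsSegLimitConsistent(UINT32_C(0x00100000), false)); /* 0: needs G=1  */
        return 0;
    }
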
    6433 
    6434 
    6435 /**
    6436  * Exports a guest segment register into the guest-state area in the VMCS.
    6437  *
    6438  * @returns VBox status code.
    6439  * @param   pVCpu       The cross context virtual CPU structure.
    6440  * @param   pVmcsInfo   The VMCS info. object.
    6441  * @param   iSegReg     The segment register number (X86_SREG_XXX).
    6442  * @param   pSelReg     Pointer to the segment selector.
    6443  *
    6444  * @remarks No-long-jump zone!!!
    6445  */
    6446 static int hmR0VmxExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
    6447 {
    6448     Assert(iSegReg < X86_SREG_COUNT);
    6449 
    6450     uint32_t u32Access = pSelReg->Attr.u;
    6451     if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    6452     {
    6453         /*
    6454          * The way to tell whether this is really a null selector, or just a selector
    6455          * loaded with 0 in real-mode, is by the segment attributes. A selector loaded
    6456          * in real-mode with the value 0 is valid and usable in protected-mode, and we
    6457          * should -not- mark it as an unusable segment. Both the recompiler and VT-x
    6458          * ensure that NULL selectors loaded in protected-mode have their attributes set to 0.
    6459          */
    6460         if (u32Access)
    6461         { }
    6462         else
    6463             u32Access = X86DESCATTR_UNUSABLE;
    6464     }
    6465     else
    6466     {
    6467         /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
    6468         u32Access = 0xf3;
    6469         Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
    6470         Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
    6471         RT_NOREF_PV(pVCpu);
    6472     }
    6473 
    6474     /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
    6475     AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
    6476               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
    6477 
    6478     /*
    6479      * Commit it to the VMCS.
    6480      */
    6481     Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg)           == g_aVmcsSegSel[iSegReg]);
    6482     Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg)         == g_aVmcsSegLimit[iSegReg]);
    6483     Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
    6484     Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg)            == g_aVmcsSegBase[iSegReg]);
    6485     int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_SEG_SEL(iSegReg),           pSelReg->Sel);      AssertRC(rc);
    6486     rc     = VMXWriteVmcs32(VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg),         pSelReg->u32Limit); AssertRC(rc);
    6487     rc     = VMXWriteVmcsNw(VMX_VMCS_GUEST_SEG_BASE(iSegReg),            pSelReg->u64Base);  AssertRC(rc);
    6488     rc     = VMXWriteVmcs32(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access);         AssertRC(rc);
    6489     return VINF_SUCCESS;
    6490 }
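
Condensed, the access-rights selection above amounts to the following standalone sketch, where SKETCH_ATTR_UNUSABLE mirrors the X86DESCATTR_UNUSABLE bit and 0xf3 is the forced ring-3 attribute from the real-on-v86 hack (illustration, not the real helper):

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_ATTR_UNUSABLE UINT32_C(0x10000) /* VT-x "unusable" bit (bit 16) */

    /* Pick the access rights to write into the VMCS for a segment register. */
    static uint32_t PickSegAccessRights(uint32_t uAttr, int fRealOnV86Active)
    {
        if (fRealOnV86Active)
            return 0xf3;                             /* present, DPL=3, accessed r/w data */
        return uAttr ? uAttr : SKETCH_ATTR_UNUSABLE; /* attr 0 => real null selector      */
    }

    int main(void)
    {
        printf("%#x %#x %#x\n",
               PickSegAccessRights(0,                0),  /* 0x10000: null selector  */
               PickSegAccessRights(UINT32_C(0xc093), 0),  /* 0xc093:  kept unchanged */
               PickSegAccessRights(UINT32_C(0xc093), 1)); /* 0xf3: v86-mode override */
        return 0;
    }
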
    6491 
    6492 
    6493 /**
    6494  * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
    6495  * area in the VMCS.
    6496  *
    6497  * @returns VBox status code.
    6498  * @param   pVCpu           The cross context virtual CPU structure.
    6499  * @param   pVmxTransient   The VMX-transient structure.
    6500  *
    6501  * @remarks Will import guest CR0 on strict builds during validation of
    6502  *          segments.
    6503  * @remarks No-long-jump zone!!!
    6504  */
    6505 static int hmR0VmxExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    6506 {
    6507     int                 rc              = VERR_INTERNAL_ERROR_5;
    6508     PVMCC               pVM             = pVCpu->CTX_SUFF(pVM);
    6509     PCCPUMCTX           pCtx            = &pVCpu->cpum.GstCtx;
    6510     PVMXVMCSINFO        pVmcsInfo       = pVmxTransient->pVmcsInfo;
    6511     PVMXVMCSINFOSHARED  pVmcsInfoShared = pVmcsInfo->pShared;
    6512 
    6513     /*
    6514      * Guest Segment registers: CS, SS, DS, ES, FS, GS.
    6515      */
    6516     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
    6517     {
    6518         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
    6519         {
    6520             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
    6521             if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    6522                 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
    6523             rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
    6524             AssertRC(rc);
    6525             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
    6526         }
    6527 
    6528         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
    6529         {
    6530             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
    6531             if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    6532                 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
    6533             rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
    6534             AssertRC(rc);
    6535             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
    6536         }
    6537 
    6538         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
    6539         {
    6540             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
    6541             if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    6542                 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
    6543             rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
    6544             AssertRC(rc);
    6545             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
    6546         }
    6547 
    6548         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
    6549         {
    6550             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
    6551             if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    6552                 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
    6553             rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
    6554             AssertRC(rc);
    6555             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
    6556         }
    6557 
    6558         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
    6559         {
    6560             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
    6561             if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    6562                 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
    6563             rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
    6564             AssertRC(rc);
    6565             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
    6566         }
    6567 
    6568         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
    6569         {
    6570             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
    6571             if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    6572                 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
    6573             rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
    6574             AssertRC(rc);
    6575             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
    6576         }
    6577 
    6578 #ifdef VBOX_STRICT
    6579         hmR0VmxValidateSegmentRegs(pVCpu, pVmcsInfo);
    6580 #endif
    6581         Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
    6582                   pCtx->cs.Attr.u));
    6583     }
    6584 
    6585     /*
    6586      * Guest TR.
    6587      */
    6588     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
    6589     {
    6590         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
    6591 
    6592         /*
    6593          * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
    6594          * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
    6595          * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
    6596          */
    6597         uint16_t u16Sel;
    6598         uint32_t u32Limit;
    6599         uint64_t u64Base;
    6600         uint32_t u32AccessRights;
    6601         if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
    6602         {
    6603             u16Sel          = pCtx->tr.Sel;
    6604             u32Limit        = pCtx->tr.u32Limit;
    6605             u64Base         = pCtx->tr.u64Base;
    6606             u32AccessRights = pCtx->tr.Attr.u;
    6607         }
    6608         else
    6609         {
    6610             Assert(!pVmxTransient->fIsNestedGuest);
    6611             Assert(pVM->hm.s.vmx.pRealModeTSS);
    6612             Assert(PDMVmmDevHeapIsEnabled(pVM));    /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
    6613 
    6614             /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
    6615             RTGCPHYS GCPhys;
    6616             rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
    6617             AssertRCReturn(rc, rc);
    6618 
    6619             X86DESCATTR DescAttr;
    6620             DescAttr.u           = 0;
    6621             DescAttr.n.u1Present = 1;
    6622             DescAttr.n.u4Type    = X86_SEL_TYPE_SYS_386_TSS_BUSY;
    6623 
    6624             u16Sel          = 0;
    6625             u32Limit        = HM_VTX_TSS_SIZE;
    6626             u64Base         = GCPhys;
    6627             u32AccessRights = DescAttr.u;
    6628         }
    6629 
    6630         /* Validate. */
    6631         Assert(!(u16Sel & RT_BIT(2)));
    6632         AssertMsg(   (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
    6633                   || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
    6634         AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
    6635         Assert(!(u32AccessRights & RT_BIT(4)));                 /* System MBZ. */
    6636         Assert(u32AccessRights & RT_BIT(7));                    /* Present MB1. */
    6637         Assert(!(u32AccessRights & 0xf00));                     /* 11:8 MBZ. */
    6638         Assert(!(u32AccessRights & 0xfffe0000));                /* 31:17 MBZ. */
    6639         Assert(   (u32Limit & 0xfff) == 0xfff
    6640                || !(u32AccessRights & RT_BIT(15)));             /* Granularity MBZ. */
    6641         Assert(   !(pCtx->tr.u32Limit & 0xfff00000)
    6642                || (u32AccessRights & RT_BIT(15)));              /* Granularity MB1. */
    6643 
    6644         rc = VMXWriteVmcs16(VMX_VMCS16_GUEST_TR_SEL,           u16Sel);             AssertRC(rc);
    6645         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT,         u32Limit);           AssertRC(rc);
    6646         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);    AssertRC(rc);
    6647         rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_TR_BASE,            u64Base);            AssertRC(rc);
    6648 
    6649         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
    6650         Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
    6651     }
    6652 
    6653     /*
    6654      * Guest GDTR.
    6655      */
    6656     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
    6657     {
    6658         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
    6659 
    6660         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);     AssertRC(rc);
    6661         rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_GDTR_BASE,  pCtx->gdtr.pGdt);        AssertRC(rc);
    6662 
    6663         /* Validate. */
    6664         Assert(!(pCtx->gdtr.cbGdt & 0xffff0000));          /* Bits 31:16 MBZ. */
    6665 
    6666         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
    6667         Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
    6668     }
    6669 
    6670     /*
    6671      * Guest LDTR.
    6672      */
    6673     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
    6674     {
    6675         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
    6676 
    6677         /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
    6678         uint32_t u32Access;
    6679         if (   !pVmxTransient->fIsNestedGuest
    6680             && !pCtx->ldtr.Attr.u)
    6681             u32Access = X86DESCATTR_UNUSABLE;
    6682         else
    6683             u32Access = pCtx->ldtr.Attr.u;
    6684 
    6685         rc = VMXWriteVmcs16(VMX_VMCS16_GUEST_LDTR_SEL,           pCtx->ldtr.Sel);       AssertRC(rc);
    6686         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT,         pCtx->ldtr.u32Limit);  AssertRC(rc);
    6687         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);            AssertRC(rc);
    6688         rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_LDTR_BASE,            pCtx->ldtr.u64Base);   AssertRC(rc);
    6689 
    6690         /* Validate. */
    6691         if (!(u32Access & X86DESCATTR_UNUSABLE))
    6692         {
    6693             Assert(!(pCtx->ldtr.Sel & RT_BIT(2)));              /* TI MBZ. */
    6694             Assert(pCtx->ldtr.Attr.n.u4Type == 2);              /* Type MB2 (LDT). */
    6695             Assert(!pCtx->ldtr.Attr.n.u1DescType);              /* System MBZ. */
    6696             Assert(pCtx->ldtr.Attr.n.u1Present == 1);           /* Present MB1. */
    6697             Assert(!pCtx->ldtr.Attr.n.u4LimitHigh);             /* 11:8 MBZ. */
    6698             Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000));          /* 31:17 MBZ. */
    6699             Assert(   (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
    6700                    || !pCtx->ldtr.Attr.n.u1Granularity);        /* Granularity MBZ. */
    6701             Assert(   !(pCtx->ldtr.u32Limit & 0xfff00000)
    6702                    || pCtx->ldtr.Attr.n.u1Granularity);         /* Granularity MB1. */
    6703         }
    6704 
    6705         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
    6706         Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
    6707     }
    6708 
    6709     /*
    6710      * Guest IDTR.
    6711      */
    6712     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
    6713     {
    6714         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
    6715 
    6716         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);     AssertRC(rc);
    6717         rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_IDTR_BASE,  pCtx->idtr.pIdt);        AssertRC(rc);
    6718 
    6719         /* Validate. */
    6720         Assert(!(pCtx->idtr.cbIdt & 0xffff0000));          /* Bits 31:16 MBZ. */
    6721 
    6722         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
    6723         Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
    67243851    }
    67253852
     
    69674094            rc    |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
    69684095            AssertRC(rc);
    6969             hmR0VmxReadExitQualVmcs(pVmxTransient);
     4096            vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    69704097
    69714098            pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
     
    72374364
    72384365/**
    7239  * Gets the IEM exception flags for the specified vector and IDT vectoring /
    7240  * VM-exit interruption info type.
    7241  *
    7242  * @returns The IEM exception flags.
    7243  * @param   uVector         The event vector.
    7244  * @param   uVmxEventType   The VMX event type.
    7245  *
    7246  * @remarks This function currently only constructs flags required for
    7247  *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
    7248  *          and CR2 aspects of an exception are not included).
    7249  */
    7250 static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
    7251 {
    7252     uint32_t fIemXcptFlags;
    7253     switch (uVmxEventType)
    7254     {
    7255         case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
    7256         case VMX_IDT_VECTORING_INFO_TYPE_NMI:
    7257             fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
    7258             break;
    7259 
    7260         case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
    7261             fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
    7262             break;
    7263 
    7264         case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
    7265             fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
    7266             break;
    7267 
    7268         case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
    7269         {
    7270             fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
    7271             if (uVector == X86_XCPT_BP)
    7272                 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
    7273             else if (uVector == X86_XCPT_OF)
    7274                 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
    7275             else
    7276             {
    7277                 fIemXcptFlags = 0;
    7278                 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
    7279             }
    7280             break;
    7281         }
    7282 
    7283         case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
    7284             fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
    7285             break;
    7286 
    7287         default:
    7288             fIemXcptFlags = 0;
    7289             AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
    7290             break;
    7291     }
    7292     return fIemXcptFlags;
    7293 }
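
These flags feed IEMEvaluateRecursiveXcpt, which decides whether the original and the new event must be merged into a double fault. A rough standalone restatement of the mapping, using the VMX interruption-type encodings (0 = external interrupt, 2 = NMI, 3 = hardware exception, 4 = software interrupt, 6 = software exception) and placeholder flag values rather than the real IEM_XCPT_FLAGS_* constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder flag values, for illustration only. */
    #define SK_T_CPU_XCPT UINT32_C(0x01) /* hardware exception / NMI */
    #define SK_T_EXT_INT  UINT32_C(0x02) /* external interrupt       */
    #define SK_T_SOFT_INT UINT32_C(0x04) /* INT n / INT3 / INTO      */
    #define SK_BP_INSTR   UINT32_C(0x08) /* INT3                     */
    #define SK_OF_INSTR   UINT32_C(0x10) /* INTO                     */

    static uint32_t GetXcptFlagsSketch(uint8_t uVector, unsigned uType)
    {
        switch (uType)
        {
            case 3: /* hardware exception */
            case 2: /* NMI                */ return SK_T_CPU_XCPT;
            case 0: /* external interrupt */ return SK_T_EXT_INT;
            case 6: /* software exception */
                return SK_T_SOFT_INT | (uVector == 3 ? SK_BP_INSTR
                                      : uVector == 4 ? SK_OF_INSTR : 0);
            case 4: /* software interrupt */ return SK_T_SOFT_INT;
            default:                         return 0;
        }
    }

    int main(void)
    {
        printf("#BP -> %#x, ext int -> %#x\n",
               GetXcptFlagsSketch(3, 6), GetXcptFlagsSketch(32, 0));
        return 0;
    }
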
    7294 
    7295 
    7296 /**
    7297  * Sets an event as a pending event to be injected into the guest.
    7298  *
    7299  * @param   pVCpu               The cross context virtual CPU structure.
    7300  * @param   u32IntInfo          The VM-entry interruption-information field.
    7301  * @param   cbInstr             The VM-entry instruction length in bytes (for
    7302  *                              software interrupts, exceptions and privileged
    7303  *                              software exceptions).
    7304  * @param   u32ErrCode          The VM-entry exception error code.
    7305  * @param   GCPtrFaultAddress   The fault-address (CR2) in case it's a
    7306  *                              page-fault.
    7307  */
    7308 DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
    7309                                         RTGCUINTPTR GCPtrFaultAddress)
    7310 {
    7311     Assert(!pVCpu->hm.s.Event.fPending);
    7312     pVCpu->hm.s.Event.fPending          = true;
    7313     pVCpu->hm.s.Event.u64IntInfo        = u32IntInfo;
    7314     pVCpu->hm.s.Event.u32ErrCode        = u32ErrCode;
    7315     pVCpu->hm.s.Event.cbInstr           = cbInstr;
    7316     pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
    7317 }
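
Every helper below packs u32IntInfo the same way, following the VM-entry interruption-information layout from the Intel SDM: vector in bits 7:0, type in bits 10:8, error-code-valid in bit 11 and valid in bit 31. A standalone sketch that builds the word for a hypothetical page-fault (#PF) case:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a VM-entry interruption-information word (simplified, no RT_BF_MAKE). */
    static uint32_t MakeEntryIntInfo(uint8_t uVector, uint8_t uType, int fErrCodeValid)
    {
        return (uint32_t)uVector                      /* bits 7:0   vector    */
             | ((uint32_t)uType << 8)                 /* bits 10:8  type      */
             | ((uint32_t)(fErrCodeValid != 0) << 11) /* bit  11    err valid */
             | UINT32_C(0x80000000);                  /* bit  31    valid     */
    }

    int main(void)
    {
        /* #PF: vector 14, hardware exception (type 3), with an error code;
         * the fault address would go to GCPtrFaultAddress for CR2. */
        printf("u32IntInfo=%#x\n", MakeEntryIntInfo(14, 3, 1));
        return 0;
    }
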
    7318 
    7319 
    7320 /**
    7321  * Sets an external interrupt as pending-for-injection into the VM.
    7322  *
    7323  * @param   pVCpu           The cross context virtual CPU structure.
    7324  * @param   u8Interrupt     The external interrupt vector.
    7325  */
    7326 DECLINLINE(void) hmR0VmxSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
    7327 {
    7328     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR,          u8Interrupt)
    7329                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
    7330                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
    7331                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    7332     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    7333 }
    7334 
    7335 
    7336 /**
    7337  * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
    7338  *
    7339  * @param   pVCpu   The cross context virtual CPU structure.
    7340  */
    7341 DECLINLINE(void) hmR0VmxSetPendingXcptNmi(PVMCPUCC pVCpu)
    7342 {
    7343     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_NMI)
    7344                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_NMI)
    7345                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
    7346                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    7347     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    7348 }
    7349 
    7350 
    7351 /**
    7352  * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
    7353  *
    7354  * @param   pVCpu   The cross context virtual CPU structure.
    7355  */
    7356 DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPUCC pVCpu)
    7357 {
    7358     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DF)
    7359                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
    7360                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
    7361                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    7362     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    7363 }
    7364 
    7365 
    7366 /**
    7367  * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
    7368  *
    7369  * @param   pVCpu   The cross context virtual CPU structure.
    7370  */
    7371 DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPUCC pVCpu)
    7372 {
    7373     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
    7374                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
    7375                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
    7376                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    7377     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    7378 }
    7379 
    7380 
    7381 /**
    7382  * Sets a debug (\#DB) exception as pending-for-injection into the VM.
    7383  *
    7384  * @param   pVCpu   The cross context virtual CPU structure.
    7385  */
    7386 DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPUCC pVCpu)
    7387 {
    7388     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DB)
    7389                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
    7390                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
    7391                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    7392     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    7393 }
    7394 
    7395 
    7396 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    7397 /**
    7398  * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
    7399  *
    7400  * @param   pVCpu       The cross context virtual CPU structure.
    7401  * @param   u32ErrCode  The error code for the general-protection exception.
    7402  */
    7403 DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
    7404 {
    7405     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
    7406                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
    7407                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
    7408                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    7409     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
    7410 }
    7411 
    7412 
    7413 /**
    7414  * Sets a stack (\#SS) exception as pending-for-injection into the VM.
    7415  *
    7416  * @param   pVCpu       The cross context virtual CPU structure.
    7417  * @param   u32ErrCode  The error code for the stack exception.
    7418  */
    7419 DECLINLINE(void) hmR0VmxSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
    7420 {
    7421     uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_SS)
    7422                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
    7423                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
    7424                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    7425     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
    7426 }
    7427 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    7428 
    7429 
    7430 /**
    7431  * Fixes up attributes for the specified segment register.
    7432  *
    7433  * @param   pVCpu       The cross context virtual CPU structure.
    7434  * @param   pSelReg     The segment register that needs fixing.
    7435  * @param   pszRegName  The register name (for logging and assertions).
    7436  */
    7437 static void hmR0VmxFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
    7438 {
    7439     Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
    7440 
    7441     /*
    7442      * If VT-x marks the segment as unusable, most other bits remain undefined:
    7443      *   - For CS the L, D and G bits have meaning.
    7444      *   - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
    7445      *   - For the remaining data segments no bits are defined.
    7446      *
    7447      * The present bit and the unusable bit have been observed to be set at the
    7448      * same time (the selector was supposed to be invalid as we started executing
    7449      * a V8086 interrupt in ring-0).
    7450      *
    7451      * What is important for the rest of the VBox code is that the P bit is
    7452      * cleared.  Some of the other VBox code recognizes the unusable bit, but
    7453      * AMD-V certainly doesn't, and REM doesn't really either.  So, to be on the
    7454      * safe side here, we'll strip off P and other bits we don't care about.  If
    7455      * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
    7456      *
    7457      * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
    7458      */
    7459 #ifdef VBOX_STRICT
    7460     uint32_t const uAttr = pSelReg->Attr.u;
    7461 #endif
    7462 
    7463     /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
    7464     pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L    | X86DESCATTR_D  | X86DESCATTR_G
    7465                      | X86DESCATTR_DPL      | X86DESCATTR_TYPE | X86DESCATTR_DT;
    7466 
    7467 #ifdef VBOX_STRICT
    7468     VMMRZCallRing3Disable(pVCpu);
    7469     Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
    7470 # ifdef DEBUG_bird
    7471     AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
    7472               ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
    7473                pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
    7474 # endif
    7475     VMMRZCallRing3Enable(pVCpu);
    7476     NOREF(uAttr);
    7477 #endif
    7478     RT_NOREF2(pVCpu, pszRegName);
    7479 }
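
To make the mask concrete, here is a standalone sketch using raw descriptor-attribute bit positions (type in bits 3:0, S in bit 4, DPL in bits 6:5, P in bit 7, L/D/G in bits 15:13, unusable in bit 16); the input attribute value is made up:

    #include <stdint.h>
    #include <stdio.h>

    #define A_TYPE UINT32_C(0x0000f) /* descriptor type, bits 3:0  */
    #define A_S    UINT32_C(0x00010) /* code/data vs system, bit 4 */
    #define A_DPL  UINT32_C(0x00060) /* DPL, bits 6:5              */
    #define A_P    UINT32_C(0x00080) /* present, bit 7             */
    #define A_LDG  UINT32_C(0x0e000) /* L, D/B and G, bits 15:13   */
    #define A_UNUS UINT32_C(0x10000) /* VT-x unusable, bit 16      */

    int main(void)
    {
        uint32_t uAttr = UINT32_C(0x1c093); /* unusable + G + D/B + P + r/w data */
        uAttr &= A_UNUS | A_LDG | A_DPL | A_TYPE | A_S; /* the same mask as above */
        printf("stripped attr=%#x, P %s\n",
               uAttr, (uAttr & A_P) ? "still set" : "cleared");
        return 0;
    }
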
    7480 
    7481 
    7482 /**
    7483  * Imports a guest segment register from the current VMCS into the guest-CPU
    7484  * context.
    7485  *
    7486  * @param   pVCpu       The cross context virtual CPU structure.
    7487  * @param   iSegReg     The segment register number (X86_SREG_XXX).
    7488  *
    7489  * @remarks Called with interrupts and/or preemption disabled.
    7490  */
    7491 static void hmR0VmxImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
    7492 {
    7493     Assert(iSegReg < X86_SREG_COUNT);
    7494     Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg)           == g_aVmcsSegSel[iSegReg]);
    7495     Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg)         == g_aVmcsSegLimit[iSegReg]);
    7496     Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
    7497     Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg)            == g_aVmcsSegBase[iSegReg]);
    7498 
    7499     PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
    7500 
    7501     uint16_t u16Sel;
    7502     int rc = VMXReadVmcs16(VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel);   AssertRC(rc);
    7503     pSelReg->Sel      = u16Sel;
    7504     pSelReg->ValidSel = u16Sel;
    7505 
    7506     rc     = VMXReadVmcs32(VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
    7507     rc     = VMXReadVmcsNw(VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base);     AssertRC(rc);
    7508 
    7509     uint32_t u32Attr;
    7510     rc     = VMXReadVmcs32(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr);   AssertRC(rc);
    7511     pSelReg->Attr.u   = u32Attr;
    7512     if (u32Attr & X86DESCATTR_UNUSABLE)
    7513         hmR0VmxFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
    7514 
    7515     pSelReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    7516 }
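
The register-name argument passed above uses a NUL-packed string table rather than an array of pointers: each entry is two characters plus a terminator, so entry i starts at byte offset i * 3, matching the X86_SREG_XXX order ES, CS, SS, DS, FS, GS. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        /* Same table as in the call above; one 3-byte slot per register. */
        static const char s_szNames[] = "ES\0CS\0SS\0DS\0FS\0GS";
        for (unsigned iSegReg = 0; iSegReg < 6; iSegReg++)
            printf("seg %u -> %s\n", iSegReg, s_szNames + iSegReg * 3);
        return 0;
    }
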
    7517 
    7518 
    7519 /**
    7520  * Imports the guest LDTR from the current VMCS into the guest-CPU context.
    7521  *
    7522  * @param   pVCpu   The cross context virtual CPU structure.
    7523  *
    7524  * @remarks Called with interrupts and/or preemption disabled.
    7525  */
    7526 static void hmR0VmxImportGuestLdtr(PVMCPUCC pVCpu)
    7527 {
    7528     uint16_t u16Sel;
    7529     uint64_t u64Base;
    7530     uint32_t u32Limit, u32Attr;
    7531     int rc = VMXReadVmcs16(VMX_VMCS16_GUEST_LDTR_SEL,           &u16Sel);       AssertRC(rc);
    7532     rc     = VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT,         &u32Limit);     AssertRC(rc);
    7533     rc     = VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr);      AssertRC(rc);
    7534     rc     = VMXReadVmcsNw(VMX_VMCS_GUEST_LDTR_BASE,            &u64Base);      AssertRC(rc);
    7535 
    7536     pVCpu->cpum.GstCtx.ldtr.Sel      = u16Sel;
    7537     pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
    7538     pVCpu->cpum.GstCtx.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
    7539     pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
    7540     pVCpu->cpum.GstCtx.ldtr.u64Base  = u64Base;
    7541     pVCpu->cpum.GstCtx.ldtr.Attr.u   = u32Attr;
    7542     if (u32Attr & X86DESCATTR_UNUSABLE)
    7543         hmR0VmxFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
    7544 }
    7545 
    7546 
    7547 /**
    7548  * Imports the guest TR from the current VMCS into the guest-CPU context.
    7549  *
    7550  * @param   pVCpu   The cross context virtual CPU structure.
    7551  *
    7552  * @remarks Called with interrupts and/or preemption disabled.
    7553  */
    7554 static void hmR0VmxImportGuestTr(PVMCPUCC pVCpu)
    7555 {
    7556     uint16_t u16Sel;
    7557     uint64_t u64Base;
    7558     uint32_t u32Limit, u32Attr;
    7559     int rc = VMXReadVmcs16(VMX_VMCS16_GUEST_TR_SEL,           &u16Sel);     AssertRC(rc);
    7560     rc     = VMXReadVmcs32(VMX_VMCS32_GUEST_TR_LIMIT,         &u32Limit);   AssertRC(rc);
    7561     rc     = VMXReadVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr);    AssertRC(rc);
    7562     rc     = VMXReadVmcsNw(VMX_VMCS_GUEST_TR_BASE,            &u64Base);    AssertRC(rc);
    7563 
    7564     pVCpu->cpum.GstCtx.tr.Sel      = u16Sel;
    7565     pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
    7566     pVCpu->cpum.GstCtx.tr.fFlags   = CPUMSELREG_FLAGS_VALID;
    7567     pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
    7568     pVCpu->cpum.GstCtx.tr.u64Base  = u64Base;
    7569     pVCpu->cpum.GstCtx.tr.Attr.u   = u32Attr;
    7570     /* TR is the only selector that can never be unusable. */
    7571     Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
    7572 }
    7573 
    7574 
    7575 /**
    7576  * Imports the guest RIP from the VMCS back into the guest-CPU context.
    7577  *
    7578  * @param   pVCpu   The cross context virtual CPU structure.
    7579  *
    7580  * @remarks Called with interrupts and/or preemption disabled, should not assert!
    7581  * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
    7582  *          instead!!!
    7583  */
    7584 static void hmR0VmxImportGuestRip(PVMCPUCC pVCpu)
    7585 {
    7586     uint64_t u64Val;
    7587     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    7588     if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
    7589     {
    7590         int rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RIP, &u64Val);
    7591         AssertRC(rc);
    7592 
    7593         pCtx->rip = u64Val;
    7594         EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
    7595         pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
    7596     }
    7597 }
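
The CPUMCTX_EXTRN_RIP test is the lazy-import pattern used by all of these import functions: a set fExtrn bit means the field still lives only in the VMCS, and clearing the bit after the read turns repeat imports into no-ops. A toy sketch of the pattern with hypothetical names and the VMCS read replaced by a parameter:

    #include <stdint.h>
    #include <stdio.h>

    #define CTX_EXTRN_RIP UINT64_C(0x1) /* hypothetical, stands in for CPUMCTX_EXTRN_RIP */

    typedef struct SKETCHCTX { uint64_t fExtrn; uint64_t rip; } SKETCHCTX;

    static void ImportRip(SKETCHCTX *pCtx, uint64_t uVmcsRip)
    {
        if (pCtx->fExtrn & CTX_EXTRN_RIP)   /* still "external", i.e. in the VMCS? */
        {
            pCtx->rip     = uVmcsRip;       /* stands in for VMXReadVmcsNw         */
            pCtx->fExtrn &= ~CTX_EXTRN_RIP; /* context copy is now up to date      */
        }
    }

    int main(void)
    {
        SKETCHCTX Ctx = { CTX_EXTRN_RIP, 0 };
        ImportRip(&Ctx, 0x1000);
        ImportRip(&Ctx, 0x2000);            /* no-op: already imported */
        printf("rip=%#llx\n", (unsigned long long)Ctx.rip); /* prints 0x1000 */
        return 0;
    }
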
    7598 
    7599 
    7600 /**
    7601  * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
    7602  *
    7603  * @param   pVCpu       The cross context virtual CPU structure.
    7604  * @param   pVmcsInfo   The VMCS info. object.
    7605  *
    7606  * @remarks Called with interrupts and/or preemption disabled, should not assert!
    7607  * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
    7608  *          instead!!!
    7609  */
    7610 static void hmR0VmxImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
    7611 {
    7612     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    7613     if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
    7614     {
    7615         uint64_t u64Val;
    7616         int rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RFLAGS, &u64Val);
    7617         AssertRC(rc);
    7618 
    7619         pCtx->rflags.u64 = u64Val;
    7620         PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
    7621         if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    7622         {
    7623             pCtx->eflags.Bits.u1VM   = 0;
    7624             pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
    7625         }
    7626         pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
    7627     }
    7628 }
    7629 
    7630 
    7631 /**
    7632  * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
    7633  * context.
    7634  *
    7635  * @param   pVCpu       The cross context virtual CPU structure.
    7636  * @param   pVmcsInfo   The VMCS info. object.
    7637  *
    7638  * @remarks Called with interrupts and/or preemption disabled, try not to assert and
    7639  *          do not log!
    7640  * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
    7641  *          instead!!!
    7642  */
    7643 static void hmR0VmxImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
    7644 {
    7645     uint32_t u32Val;
    7646     int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32Val);    AssertRC(rc);
    7647     if (!u32Val)
    7648     {
    7649         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    7650             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    7651         CPUMSetGuestNmiBlocking(pVCpu, false);
    7652     }
    7653     else
    7654     {
    7655         /*
    7656          * We must import RIP here to set our EM interrupt-inhibited state.
    7657          * We also import RFLAGS as our code that evaluates pending interrupts
    7658          * before VM-entry requires it.
    7659          */
    7660         hmR0VmxImportGuestRip(pVCpu);
    7661         hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
    7662 
    7663         if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
    7664             EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
    7665         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    7666             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    7667 
    7668         bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
    7669         CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
    7670     }
    7671 }
    7672 
    7673 
    7674 /**
    76754366 * Worker for VMXR0ImportStateOnDemand.
    76764367 *
     
    77154406        {
    77164407            if (fWhat & CPUMCTX_EXTRN_RIP)
    7717                 hmR0VmxImportGuestRip(pVCpu);
     4408                vmxHCImportGuestRip(pVCpu);
    77184409
    77194410            if (fWhat & CPUMCTX_EXTRN_RFLAGS)
    7720                 hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
     4411                vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
    77214412
    77224413            if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
    7723                 hmR0VmxImportGuestIntrState(pVCpu, pVmcsInfo);
     4414                vmxHCImportGuestIntrState(pVCpu, pVmcsInfo);
    77244415
    77254416            if (fWhat & CPUMCTX_EXTRN_RSP)
     
    77354426                if (fWhat & CPUMCTX_EXTRN_CS)
    77364427                {
    7737                     hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_CS);
    7738                     hmR0VmxImportGuestRip(pVCpu);
     4428                    vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
     4429                    vmxHCImportGuestRip(pVCpu);
    77394430                    if (fRealOnV86Active)
    77404431                        pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
     
    77434434                if (fWhat & CPUMCTX_EXTRN_SS)
    77444435                {
    7745                     hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_SS);
     4436                    vmxHCImportGuestSegReg(pVCpu, X86_SREG_SS);
    77464437                    if (fRealOnV86Active)
    77474438                        pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
     
    77494440                if (fWhat & CPUMCTX_EXTRN_DS)
    77504441                {
    7751                     hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_DS);
     4442                    vmxHCImportGuestSegReg(pVCpu, X86_SREG_DS);
    77524443                    if (fRealOnV86Active)
    77534444                        pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
     
    77554446                if (fWhat & CPUMCTX_EXTRN_ES)
    77564447                {
    7757                     hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_ES);
     4448                    vmxHCImportGuestSegReg(pVCpu, X86_SREG_ES);
    77584449                    if (fRealOnV86Active)
    77594450                        pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
     
    77614452                if (fWhat & CPUMCTX_EXTRN_FS)
    77624453                {
    7763                     hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_FS);
     4454                    vmxHCImportGuestSegReg(pVCpu, X86_SREG_FS);
    77644455                    if (fRealOnV86Active)
    77654456                        pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
     
    77674458                if (fWhat & CPUMCTX_EXTRN_GS)
    77684459                {
    7769                     hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_GS);
     4460                    vmxHCImportGuestSegReg(pVCpu, X86_SREG_GS);
    77704461                    if (fRealOnV86Active)
    77714462                        pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
     
    77764467            {
    77774468                if (fWhat & CPUMCTX_EXTRN_LDTR)
    7778                     hmR0VmxImportGuestLdtr(pVCpu);
     4469                    vmxHCImportGuestLdtr(pVCpu);
    77794470
    77804471                if (fWhat & CPUMCTX_EXTRN_GDTR)
     
    77994490                       don't need to import that one. */
    78004491                    if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    7801                         hmR0VmxImportGuestTr(pVCpu);
     4492                        vmxHCImportGuestTr(pVCpu);
    78024493                }
    78034494            }
     
    80004691                {
    80014692                    Assert(CPUMIsGuestInVmxRootMode(pCtx));
    8002                     rc = hmR0VmxCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
     4693                    rc = vmxHCCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
    80034694                    if (RT_SUCCESS(rc))
    80044695                    { /* likely */ }
     
    80824773
    80834774/**
    8084  * Check per-VM and per-VCPU force flag actions that require us to go back to
    8085  * ring-3 for one reason or another.
    8086  *
    8087  * @returns Strict VBox status code (i.e. informational status codes too)
    8088  * @retval VINF_SUCCESS if we don't have any actions that require going back to
    8089  *         ring-3.
    8090  * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
    8091  * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
    8092  *         interrupts).
    8093  * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
    8094  *         all EMTs to be in ring-3.
    8095  * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
    8096  * @retval VINF_EM_NO_MEMORY if PGM is out of memory and we need to return
    8097  *         to the EM loop.
    8098  *
    8099  * @param   pVCpu           The cross context virtual CPU structure.
    8100  * @param   pVmxTransient   The VMX-transient structure.
    8101  * @param   fStepping       Whether we are single-stepping the guest using the
    8102  *                          hypervisor debugger.
    8103  *
    8104  * @remarks This might cause nested-guest VM-exits, caller must check if the guest
    8105  *          is no longer in VMX non-root mode.
    8106  */
    8107 static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, bool fStepping)
    8108 {
    8109     Assert(VMMRZCallRing3IsEnabled(pVCpu));
    8110 
    8111     /*
    8112      * Update pending interrupts into the APIC's IRR.
    8113      */
    8114     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
    8115         APICUpdatePendingInterrupts(pVCpu);
    8116 
    8117     /*
    8118      * Anything pending?  Should be more likely than not if we're doing a good job.
    8119      */
    8120     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    8121     if (  !fStepping
    8122         ?    !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
    8123           && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
    8124         :    !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
    8125           && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
    8126         return VINF_SUCCESS;
    8127 
    8128     /* Pending PGM CR3 sync. */
    8129     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    8130     {
    8131         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    8132         Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
    8133         VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
    8134                                            VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    8135         if (rcStrict != VINF_SUCCESS)
    8136         {
    8137             AssertRC(VBOXSTRICTRC_VAL(rcStrict));
    8138             Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
    8139             return rcStrict;
    8140         }
    8141     }
    8142 
    8143     /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
    8144     if (   VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
    8145         || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    8146     {
    8147         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
    8148         int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
    8149         Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
    8150         return rc;
    8151     }
    8152 
    8153     /* Pending VM request packets, such as hardware interrupts. */
    8154     if (   VM_FF_IS_SET(pVM, VM_FF_REQUEST)
    8155         || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
    8156     {
    8157         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchVmReq);
    8158         Log4Func(("Pending VM request forcing us back to ring-3\n"));
    8159         return VINF_EM_PENDING_REQUEST;
    8160     }
    8161 
    8162     /* Pending PGM pool flushes. */
    8163     if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
    8164     {
    8165         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPgmPoolFlush);
    8166         Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
    8167         return VINF_PGM_POOL_FLUSH_PENDING;
    8168     }
    8169 
    8170     /* Pending DMA requests. */
    8171     if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
    8172     {
    8173         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchDma);
    8174         Log4Func(("Pending DMA request forcing us back to ring-3\n"));
    8175         return VINF_EM_RAW_TO_R3;
    8176     }
    8177 
    8178 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    8179     /*
    8180      * Pending nested-guest events.
    8181      *
    8182      * Please note that the priority of these events is specified and important.
    8183      * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
    8184      * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
    8185      */
    8186     if (pVmxTransient->fIsNestedGuest)
    8187     {
    8188         /* Pending nested-guest APIC-write. */
    8189         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    8190         {
    8191             Log4Func(("Pending nested-guest APIC-write\n"));
    8192             VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
    8193             Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
    8194             return rcStrict;
    8195         }
    8196 
    8197         /* Pending nested-guest monitor-trap flag (MTF). */
    8198         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
    8199         {
    8200             Log4Func(("Pending nested-guest MTF\n"));
    8201             VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
    8202             Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
    8203             return rcStrict;
    8204         }
    8205 
    8206         /* Pending nested-guest VMX-preemption timer expired. */
    8207         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
    8208         {
    8209             Log4Func(("Pending nested-guest preempt timer\n"));
    8210             VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
    8211             Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
    8212             return rcStrict;
    8213         }
    8214     }
    8215 #else
    8216     NOREF(pVmxTransient);
    8217 #endif
    8218 
    8219     return VINF_SUCCESS;
    8220 }
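A sketch of how a run loop is expected to consume these status codes (illustrative only; the actual caller sits elsewhere in this file):

/* Pre-run check: anything other than VINF_SUCCESS unwinds towards ring-3/EM. */
VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, pVmxTransient, fStepping);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;    /* e.g. VINF_EM_RAW_TO_R3, VINF_PGM_SYNC_CR3, VINF_EM_NO_MEMORY. */
/* Otherwise it is safe to continue preparing the VM-entry. */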
    8221 
    8222 
    8223 /**
    8224  * Converts any TRPM trap into a pending HM event. This is typically used when
    8225  * entering from ring-3 (not longjmp returns).
    8226  *
    8227  * @param   pVCpu   The cross context virtual CPU structure.
    8228  */
    8229 static void hmR0VmxTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
    8230 {
    8231     Assert(TRPMHasTrap(pVCpu));
    8232     Assert(!pVCpu->hm.s.Event.fPending);
    8233 
    8234     uint8_t     uVector;
    8235     TRPMEVENT   enmTrpmEvent;
    8236     uint32_t    uErrCode;
    8237     RTGCUINTPTR GCPtrFaultAddress;
    8238     uint8_t     cbInstr;
    8239     bool        fIcebp;
    8240 
    8241     int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
    8242     AssertRC(rc);
    8243 
    8244     uint32_t u32IntInfo;
    8245     u32IntInfo  = uVector | VMX_IDT_VECTORING_INFO_VALID;
    8246     u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
    8247 
    8248     rc = TRPMResetTrap(pVCpu);
    8249     AssertRC(rc);
    8250     Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
    8251           u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
    8252 
    8253     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
    8254 }
    8255 
    8256 
    8257 /**
    8258  * Converts the pending HM event into a TRPM trap.
    8259  *
    8260  * @param   pVCpu   The cross context virtual CPU structure.
    8261  */
    8262 static void hmR0VmxPendingEventToTrpmTrap(PVMCPUCC pVCpu)
    8263 {
    8264     Assert(pVCpu->hm.s.Event.fPending);
    8265 
    8266     /* If a trap was already pending, we did something wrong! */
    8267     Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
    8268 
    8269     uint32_t const  u32IntInfo  = pVCpu->hm.s.Event.u64IntInfo;
    8270     uint32_t const  uVector     = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
    8271     TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
    8272 
    8273     Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
    8274 
    8275     int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
    8276     AssertRC(rc);
    8277 
    8278     if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
    8279         TRPMSetErrorCode(pVCpu, pVCpu->hm.s.Event.u32ErrCode);
    8280 
    8281     if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
    8282         TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
    8283     else
    8284     {
    8285         uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
    8286         switch (uVectorType)
    8287         {
    8288             case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
    8289                 TRPMSetTrapDueToIcebp(pVCpu);
    8290                 RT_FALL_THRU();
    8291             case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
    8292             case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
    8293             {
    8294                 AssertMsg(   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    8295                           || (   uVector == X86_XCPT_BP /* INT3 */
    8296                               || uVector == X86_XCPT_OF /* INTO */
    8297                               || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
    8298                           ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
    8299                 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
    8300                 break;
    8301             }
    8302         }
    8303     }
    8304 
    8305     /* We're now done converting the pending event. */
    8306     pVCpu->hm.s.Event.fPending = false;
    8307 }
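Together with hmR0VmxTrpmTrapToPendingEvent() above this forms a round-trip hand-off between TRPM and HM; roughly (illustrative, not the literal call sites):

/* Entering guest execution from ring-3: adopt any queued TRPM trap. */
if (TRPMHasTrap(pVCpu))
    hmR0VmxTrpmTrapToPendingEvent(pVCpu);
/* ... attempt to run the guest ... */
/* Returning to ring-3 with an undelivered event: hand it back to TRPM. */
if (pVCpu->hm.s.Event.fPending)
    hmR0VmxPendingEventToTrpmTrap(pVCpu);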
    8308 
    8309 
    8310 /**
    8311  * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
    8312  * cause a VM-exit as soon as the guest is in a state to receive interrupts.
    8313  *
    8314  * @param   pVmcsInfo   The VMCS info. object.
    8315  */
    8316 static void hmR0VmxSetIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
    8317 {
    8318     if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
    8319     {
    8320         if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
    8321         {
    8322             pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
    8323             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    8324             AssertRC(rc);
    8325         }
    8326     } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
    8327 }
    8328 
    8329 
    8330 /**
    8331  * Clears the interrupt-window exiting control in the VMCS.
    8332  *
    8333  * @param   pVmcsInfo   The VMCS info. object.
    8334  */
    8335 DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
    8336 {
    8337     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
    8338     {
    8339         pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
    8340         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    8341         AssertRC(rc);
    8342     }
    8343 }
    8344 
    8345 
    8346 /**
    8347  * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
    8348  * cause a VM-exit as soon as the guest is in a state to receive NMIs.
    8349  *
    8350  * @param   pVmcsInfo   The VMCS info. object.
    8351  */
    8352 static void hmR0VmxSetNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
    8353 {
    8354     if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
    8355     {
    8356         if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
    8357         {
    8358             pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
    8359             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    8360             AssertRC(rc);
    8361             Log4Func(("Setup NMI-window exiting\n"));
    8362         }
    8363     } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
    8364 }
    8365 
    8366 
    8367 /**
    8368  * Clears the NMI-window exiting control in the VMCS.
    8369  *
    8370  * @param   pVmcsInfo   The VMCS info. object.
    8371  */
    8372 DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
    8373 {
    8374     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
    8375     {
    8376         pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
    8377         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    8378         AssertRC(rc);
    8379     }
    8380 }
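These four helpers act as latches; the typical usage, mirroring the event-evaluation code further down, is to arm the matching control when an event is pending but not yet deliverable:

/* VT-x will then VM-exit the instant the guest can accept the event. */
if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
    hmR0VmxSetNmiWindowExitVmcs(pVmcsInfo);
else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
         && !pVCpu->hm.s.fSingleInstruction)
    hmR0VmxSetIntWindowExitVmcs(pVmcsInfo);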
    8381 
    8382 
    8383 /**
    83844775 * Does the necessary state syncing before returning to ring-3 for any reason
    83854776 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
     
    84854876        && pVmcsInfo->fShadowVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    84864877    {
    8487         rc = hmR0VmxClearShadowVmcs(pVmcsInfo);
     4878        rc = vmxHCClearShadowVmcs(pVmcsInfo);
    84884879        AssertRCReturn(rc, rc);
    84894880    }
     
    85844975    if (pVCpu->hm.s.Event.fPending)
    85854976    {
    8586         hmR0VmxPendingEventToTrpmTrap(pVCpu);
     4977        vmxHCPendingEventToTrpmTrap(pVCpu);
    85874978        Assert(!pVCpu->hm.s.Event.fPending);
    85884979
     
    86265017    if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    86275018    {
    8628         hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
    8629         hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
     5019        vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
     5020        vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
    86305021    }
    86315022
     
    86955086
    86965087    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    8697     hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     5088    vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    86985089    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
    86995090    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
     
    87235114    HM_RESTORE_PREEMPT();
    87245115    return VINF_SUCCESS;
    8725 }
    8726 
    8727 
    8728 /**
    8729  * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
    8730  * stack.
    8731  *
    8732  * @returns Strict VBox status code (i.e. informational status codes too).
    8733  * @retval  VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
    8734  * @param   pVCpu   The cross context virtual CPU structure.
    8735  * @param   uValue  The value to push to the guest stack.
    8736  */
    8737 static VBOXSTRICTRC hmR0VmxRealModeGuestStackPush(PVMCPUCC pVCpu, uint16_t uValue)
    8738 {
    8739     /*
    8740      * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
    8741      * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
    8742      * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
    8743      */
    8744     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    8745     if (pCtx->sp == 1)
    8746         return VINF_EM_RESET;
    8747     pCtx->sp -= sizeof(uint16_t);       /* May wrap around which is expected behaviour. */
    8748     int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
    8749     AssertRC(rc);
    8750     return rc;
    8751 }
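The wraparound relies on plain 16-bit unsigned arithmetic; a worked example with illustrative values:

uint16_t sp = 0x0000;
sp -= sizeof(uint16_t);   /* Wraps modulo 0x10000: sp == 0xFFFE, so the push
                             lands at the top of the 64 KiB segment. */
/* sp == 1 is the one value rejected above with VINF_EM_RESET, presumably
   because the 2-byte write would straddle offset 0xFFFF (see the spec
   references in the function comment). */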
    8752 
    8753 
    8754 /**
    8755  * Injects an event into the guest upon VM-entry by updating the relevant fields
    8756  * in the VM-entry area in the VMCS.
    8757  *
    8758  * @returns Strict VBox status code (i.e. informational status codes too).
    8759  * @retval  VINF_SUCCESS if the event is successfully injected into the VMCS.
    8760  * @retval  VINF_EM_RESET if event injection resulted in a triple-fault.
    8761  *
    8762  * @param   pVCpu           The cross context virtual CPU structure.
    8763  * @param   pVmxTransient   The VMX-transient structure.
    8764  * @param   pEvent          The event being injected.
    8765  * @param   pfIntrState     Pointer to the VT-x guest-interruptibility-state. This
    8766  *                          will be updated if necessary. This cannot be NULL.
    8767  * @param   fStepping       Whether we're single-stepping guest execution and should
    8768  *                          return VINF_EM_DBG_STEPPED if the event is injected
    8769  *                          directly (registers modified by us, not by hardware on
    8770  *                          VM-entry).
    8771  */
    8772 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PCHMEVENT pEvent, bool fStepping,
    8773                                            uint32_t *pfIntrState)
    8774 {
    8775     /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
    8776     AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
    8777     Assert(pfIntrState);
    8778 
    8779     PCPUMCTX          pCtx       = &pVCpu->cpum.GstCtx;
    8780     uint32_t          u32IntInfo = pEvent->u64IntInfo;
    8781     uint32_t const    u32ErrCode = pEvent->u32ErrCode;
    8782     uint32_t const    cbInstr    = pEvent->cbInstr;
    8783     RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
    8784     uint8_t const     uVector    = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
    8785     uint32_t const    uIntType   = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
    8786 
    8787 #ifdef VBOX_STRICT
    8788     /*
    8789      * Validate the error-code-valid bit for hardware exceptions.
    8790      * No error codes for exceptions in real-mode.
    8791      *
    8792      * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
    8793      */
    8794     if (   uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
    8795         && !CPUMIsGuestInRealModeEx(pCtx))
    8796     {
    8797         switch (uVector)
    8798         {
    8799             case X86_XCPT_PF:
    8800             case X86_XCPT_DF:
    8801             case X86_XCPT_TS:
    8802             case X86_XCPT_NP:
    8803             case X86_XCPT_SS:
    8804             case X86_XCPT_GP:
    8805             case X86_XCPT_AC:
    8806                 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
    8807                           ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
    8808                 RT_FALL_THRU();
    8809             default:
    8810                 break;
    8811         }
    8812     }
    8813 
    8814     /* Cannot inject an NMI when block-by-MOV SS is in effect. */
    8815     Assert(   uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
    8816            || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
    8817 #endif
    8818 
    8819     if (   uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
    8820         || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
    8821         || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
    8822         || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
    8823     {
    8824         Assert(uVector <= X86_XCPT_LAST);
    8825         Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI          || uVector == X86_XCPT_NMI);
    8826         Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
    8827         STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedXcpts[uVector]);
    8828     }
    8829     else
    8830         STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
    8831 
    8832     /*
    8833      * Hardware interrupts & exceptions cannot be delivered through the software interrupt
    8834      * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
    8835      * interrupt handler in the (real-mode) guest.
    8836      *
    8837      * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
    8838      * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
    8839      */
    8840     if (CPUMIsGuestInRealModeEx(pCtx))     /* CR0.PE bit changes are always intercepted, so it's up to date. */
    8841     {
    8842         if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
    8843         {
    8844             /*
    8845              * For CPUs with unrestricted guest execution enabled and with the guest
    8846              * in real-mode, we must not set the deliver-error-code bit.
    8847              *
    8848              * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
    8849              */
    8850             u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
    8851         }
    8852         else
    8853         {
    8854             PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    8855             Assert(PDMVmmDevHeapIsEnabled(pVM));
    8856             Assert(pVM->hm.s.vmx.pRealModeTSS);
    8857             Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
    8858 
    8859             /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
    8860             PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    8861             int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
    8862                                                               | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
    8863             AssertRCReturn(rc2, rc2);
    8864 
    8865             /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
    8866             size_t const cbIdtEntry = sizeof(X86IDTR16);
    8867             if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
    8868             {
    8869                 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
    8870                 if (uVector == X86_XCPT_DF)
    8871                     return VINF_EM_RESET;
    8872 
    8873                 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
    8874                    No error codes for exceptions in real-mode. */
    8875                 if (uVector == X86_XCPT_GP)
    8876                 {
    8877                     uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DF)
    8878                                                | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
    8879                                                | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
    8880                                                | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    8881                     HMEVENT EventXcptDf;
    8882                     RT_ZERO(EventXcptDf);
    8883                     EventXcptDf.u64IntInfo = uXcptDfInfo;
    8884                     return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptDf, fStepping, pfIntrState);
    8885                 }
    8886 
    8887                 /*
    8888                  * If we're injecting an event with no valid IDT entry, inject a #GP.
    8889                  * No error codes for exceptions in real-mode.
    8890                  *
    8891                  * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
    8892                  */
    8893                 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
    8894                                            | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
    8895                                            | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
    8896                                            | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    8897                 HMEVENT EventXcptGp;
    8898                 RT_ZERO(EventXcptGp);
    8899                 EventXcptGp.u64IntInfo = uXcptGpInfo;
    8900                 return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptGp, fStepping, pfIntrState);
    8901             }
    8902 
    8903             /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
    8904             uint16_t uGuestIp = pCtx->ip;
    8905             if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
    8906             {
    8907                 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
    8908                 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
    8909                 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
    8910             }
    8911             else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
    8912                 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
    8913 
    8914             /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
    8915             X86IDTR16 IdtEntry;
    8916             RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
    8917             rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
    8918             AssertRCReturn(rc2, rc2);
    8919 
    8920             /* Construct the stack frame for the interrupt/exception handler. */
    8921             VBOXSTRICTRC rcStrict;
    8922             rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
    8923             if (rcStrict == VINF_SUCCESS)
    8924             {
    8925                 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
    8926                 if (rcStrict == VINF_SUCCESS)
    8927                     rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
    8928             }
    8929 
    8930             /* Clear the required eflag bits and jump to the interrupt/exception handler. */
    8931             if (rcStrict == VINF_SUCCESS)
    8932             {
    8933                 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
    8934                 pCtx->rip         = IdtEntry.offSel;
    8935                 pCtx->cs.Sel      = IdtEntry.uSel;
    8936                 pCtx->cs.ValidSel = IdtEntry.uSel;
    8937                 pCtx->cs.u64Base  = IdtEntry.uSel << cbIdtEntry;
    8938                 if (   uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
    8939                     && uVector  == X86_XCPT_PF)
    8940                     pCtx->cr2 = GCPtrFault;
    8941 
    8942                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS  | HM_CHANGED_GUEST_CR2
    8943                                                          | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
    8944                                                          | HM_CHANGED_GUEST_RSP);
    8945 
    8946                 /*
    8947                  * If we delivered a hardware exception (other than an NMI) and if there was
    8948                  * block-by-STI in effect, we should clear it.
    8949                  */
    8950                 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    8951                 {
    8952                     Assert(   uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
    8953                            && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
    8954                     Log4Func(("Clearing inhibition due to STI\n"));
    8955                     *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
    8956                 }
    8957 
    8958                 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
    8959                       u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
    8960 
    8961                 /*
    8962                  * The event has been truly dispatched to the guest. Mark it as no longer pending so
    8963                  * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
    8964                  */
    8965                 pVCpu->hm.s.Event.fPending = false;
    8966 
    8967                 /*
    8968                  * If we eventually support nested-guest execution without unrestricted guest execution,
    8969                  * we should set fInterceptEvents here.
    8970                  */
    8971                 Assert(!pVmxTransient->fIsNestedGuest);
    8972 
    8973                 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
    8974                 if (fStepping)
    8975                     rcStrict = VINF_EM_DBG_STEPPED;
    8976             }
    8977             AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
    8978                       ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    8979             return rcStrict;
    8980         }
    8981     }
    8982 
    8983     /*
    8984      * Validate.
    8985      */
    8986     Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo));                     /* Bit 31 (Valid bit) must be set by caller. */
    8987     Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK));       /* Bits 30:12 MBZ. */
    8988 
    8989     /*
    8990      * Inject the event into the VMCS.
    8991      */
    8992     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
    8993     if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
    8994         rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
    8995     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
    8996     AssertRC(rc);
    8997 
    8998     /*
    8999      * Update guest CR2 if this is a page-fault.
    9000      */
    9001     if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
    9002         pCtx->cr2 = GCPtrFault;
    9003 
    9004     Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
    9005     return VINF_SUCCESS;
    9006 }
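For reference, callers build the event with the same RT_BF_MAKE pattern used for the #DF/#GP fallbacks above; a sketch for an external interrupt, where u8Vector and fIntrState are stand-in variables:

HMEVENT Event;
RT_ZERO(Event);
Event.u64IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, u8Vector)
                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,   VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
                 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,  1);
VBOXSTRICTRC rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &Event,
                                               false /* fStepping */, &fIntrState);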
    9007 
    9008 
    9009 /**
    9010  * Evaluates the event to be delivered to the guest and sets it as the pending
    9011  * event.
    9012  *
    9013  * Toggling of interrupt force-flags here is safe since we update TRPM on premature
    9014  * exits to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must
    9015  * NOT restore these force-flags.
    9016  *
    9017  * @returns Strict VBox status code (i.e. informational status codes too).
    9018  * @param   pVCpu           The cross context virtual CPU structure.
    9019  * @param   pVmxTransient   The VMX-transient structure.
    9020  * @param   pfIntrState     Where to store the VT-x guest-interruptibility state.
    9021  */
    9022 static VBOXSTRICTRC hmR0VmxEvaluatePendingEvent(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t *pfIntrState)
    9023 {
    9024     Assert(pfIntrState);
    9025     Assert(!TRPMHasTrap(pVCpu));
    9026 
    9027     /*
    9028      * Compute/update guest-interruptibility state related FFs.
    9029      * The FFs will be used below while evaluating events to be injected.
    9030      */
    9031     *pfIntrState = hmR0VmxGetGuestIntrStateAndUpdateFFs(pVCpu);
    9032 
    9033     /*
    9034      * Evaluate if a new event needs to be injected.
    9035      * An event that's already pending has already performed all necessary checks.
    9036      */
    9037     PVMXVMCSINFO pVmcsInfo      = pVmxTransient->pVmcsInfo;
    9038     bool const   fIsNestedGuest = pVmxTransient->fIsNestedGuest;
    9039     if (   !pVCpu->hm.s.Event.fPending
    9040         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    9041     {
    9042         /** @todo SMI. SMIs take priority over NMIs. */
    9043 
    9044         /*
    9045          * NMIs.
    9046          * NMIs take priority over external interrupts.
    9047          */
    9048         PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    9049         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
    9050         {
    9051             /*
    9052              * For a guest, the FF always indicates the guest's ability to receive an NMI.
    9053              *
    9054              * For a nested-guest, the FF always indicates the outer guest's ability to
    9055              * receive an NMI while the guest-interruptibility state bit depends on whether
    9056              * the nested-hypervisor is using virtual-NMIs.
    9057              */
    9058             if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    9059             {
    9060 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    9061                 if (   fIsNestedGuest
    9062                     && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
    9063                     return IEMExecVmxVmexitXcptNmi(pVCpu);
    9064 #endif
    9065                 hmR0VmxSetPendingXcptNmi(pVCpu);
    9066                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
    9067                 Log4Func(("NMI pending injection\n"));
    9068 
    9069                 /* We've injected the NMI, bail. */
    9070                 return VINF_SUCCESS;
    9071             }
    9072             else if (!fIsNestedGuest)
    9073                 hmR0VmxSetNmiWindowExitVmcs(pVmcsInfo);
    9074         }
    9075 
    9076         /*
    9077          * External interrupts (PIC/APIC).
    9078          * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
    9079          * We cannot re-request the interrupt from the controller again.
    9080          */
    9081         if (    VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    9082             && !pVCpu->hm.s.fSingleInstruction)
    9083         {
    9084             Assert(!DBGFIsStepping(pVCpu));
    9085             int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
    9086             AssertRC(rc);
    9087 
    9088             /*
    9089              * We must not check EFLAGS directly when executing a nested-guest, use
    9090              * CPUMIsGuestPhysIntrEnabled() instead as EFLAGS.IF does not control the blocking of
    9091              * external interrupts when "External interrupt exiting" is set. This fixes a nasty
    9092              * SMP hang while executing nested-guest VCPUs on spinlocks which aren't rescued by
    9093              * other VM-exits (like a preemption timer), see @bugref{9562#c18}.
    9094              *
    9095              * See Intel spec. 25.4.1 "Event Blocking".
    9096              */
    9097             if (CPUMIsGuestPhysIntrEnabled(pVCpu))
    9098             {
    9099 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    9100                 if (    fIsNestedGuest
    9101                     &&  CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
    9102                 {
    9103                     VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
    9104                     if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
    9105                         return rcStrict;
    9106                 }
    9107 #endif
    9108                 uint8_t u8Interrupt;
    9109                 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    9110                 if (RT_SUCCESS(rc))
    9111                 {
    9112 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    9113                     if (   fIsNestedGuest
    9114                         && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
    9115                     {
    9116                         VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
    9117                         Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
    9118                         return rcStrict;
    9119                     }
    9120 #endif
    9121                     hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt);
    9122                     Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
    9123                 }
    9124                 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
    9125                 {
    9126                     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
    9127 
    9128                     if (   !fIsNestedGuest
    9129                         && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
    9130                         hmR0VmxApicSetTprThreshold(pVmcsInfo, u8Interrupt >> 4);
    9131                     /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
    9132 
    9133                     /*
    9134                      * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
    9135                      * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
    9136                      * need to re-set this force-flag here.
    9137                      */
    9138                 }
    9139                 else
    9140                     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
    9141 
    9142                 /* We've injected the interrupt or taken necessary action, bail. */
    9143                 return VINF_SUCCESS;
    9144             }
    9145             if (!fIsNestedGuest)
    9146                 hmR0VmxSetIntWindowExitVmcs(pVmcsInfo);
    9147         }
    9148     }
    9149     else if (!fIsNestedGuest)
    9150     {
    9151         /*
    9152          * An event is being injected or we are in an interrupt shadow. Check if another event is
    9153          * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
    9154          * the pending event.
    9155          */
    9156         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
    9157             hmR0VmxSetNmiWindowExitVmcs(pVmcsInfo);
    9158         else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    9159                  && !pVCpu->hm.s.fSingleInstruction)
    9160             hmR0VmxSetIntWindowExitVmcs(pVmcsInfo);
    9161     }
    9162     /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
    9163 
    9164     return VINF_SUCCESS;
    9165 }
    9166 
    9167 
    9168 /**
    9169  * Injects any pending events into the guest if the guest is in a state to
    9170  * receive them.
    9171  *
    9172  * @returns Strict VBox status code (i.e. informational status codes too).
    9173  * @param   pVCpu           The cross context virtual CPU structure.
    9174  * @param   pVmxTransient   The VMX-transient structure.
    9175  * @param   fIntrState      The VT-x guest-interruptibility state.
    9176  * @param   fStepping       Whether we are single-stepping the guest using the
    9177  *                          hypervisor debugger and should return
    9178  *                          VINF_EM_DBG_STEPPED if the event was dispatched
    9179  *                          directly.
    9180  */
    9181 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t fIntrState, bool fStepping)
    9182 {
    9183     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    9184     Assert(VMMRZCallRing3IsEnabled(pVCpu));
    9185 
    9186 #ifdef VBOX_STRICT
    9187     /*
    9188      * Verify guest-interruptibility state.
    9189      *
    9190      * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
    9191      * since injecting an event may modify the interruptibility state and we must thus always
    9192      * use fIntrState.
    9193      */
    9194     {
    9195         bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
    9196         bool const fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
    9197         Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
    9198         Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF);     /* Cannot set block-by-STI when interrupts are disabled. */
    9199         Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet. */
    9200         Assert(!TRPMHasTrap(pVCpu));
    9201         NOREF(fBlockMovSS); NOREF(fBlockSti);
    9202     }
    9203 #endif
    9204 
    9205     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    9206     if (pVCpu->hm.s.Event.fPending)
    9207     {
    9208         /*
    9209          * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
    9210          * pending even while injecting an event and in this case, we want a VM-exit as soon as
    9211          * the guest is ready for the next interrupt, see @bugref{6208#c45}.
    9212          *
    9213          * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
    9214          */
    9215         uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
    9216 #ifdef VBOX_STRICT
    9217         if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
    9218         {
    9219             Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
    9220             Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
    9221             Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
    9222         }
    9223         else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
    9224         {
    9225             Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
    9226             Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
    9227             Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
    9228         }
    9229 #endif
    9230         Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
    9231               uIntType));
    9232 
    9233         /*
    9234          * Inject the event and get any changes to the guest-interruptibility state.
    9235          *
    9236          * The guest-interruptibility state may need to be updated if we inject the event
    9237          * into the guest IDT ourselves (for a real-on-v86 guest injecting software interrupts).
    9238          */
    9239         rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &pVCpu->hm.s.Event, fStepping, &fIntrState);
    9240         AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
    9241 
    9242         if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
    9243             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
    9244         else
    9245             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
    9246     }
    9247 
    9248     /*
    9249      * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
    9250      * is an interrupt shadow (block-by-STI or block-by-MOV SS).
    9251      */
    9252     if (   (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
    9253         && !pVmxTransient->fIsNestedGuest)
    9254     {
    9255         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    9256 
    9257         if (!pVCpu->hm.s.fSingleInstruction)
    9258         {
    9259             /*
    9260              * Set or clear the BS bit depending on whether the trap flag is active or not. We need
    9261              * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
    9262              */
    9263             Assert(!DBGFIsStepping(pVCpu));
    9264             uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
    9265             int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
    9266             AssertRC(rc);
    9267         }
    9268         else
    9269         {
    9270             /*
    9271              * We must not deliver a debug exception when single-stepping over STI/Mov-SS in the
    9272              * hypervisor debugger using EFLAGS.TF but rather clear interrupt inhibition. However,
    9273              * we take care of this case in hmR0VmxExportSharedDebugState, as well as the
    9274              * case where we use MTF; just make sure it's called before executing guest code.
    9275              */
    9276             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
    9277         }
    9278     }
    9279     /* else: for nested-guests this is currently handled while merging controls. */
    9280 
    9281     /*
    9282      * Finally, update the guest-interruptibility state.
    9283      *
    9284      * This is required for real-on-v86 software interrupt injection, for pending
    9285      * debug exceptions, as well as for updates to the guest state from ring-3 (IEM).
    9286      */
    9287     int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
    9288     AssertRC(rc);
    9289 
    9290     /*
    9291      * There's no need to clear the VM-entry interruption-information field here if we're not
    9292      * injecting anything. VT-x clears the valid bit on every VM-exit.
    9293      *
    9294      * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
    9295      */
    9296 
    9297     Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
    9298     return rcStrict;
    92995116}
    93005117
     
    95445361     * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
    95455362     */
    9546     int rc = hmR0VmxExportGuestEntryExitCtls(pVCpu, pVmxTransient);
     5363    int rc = vmxHCExportGuestEntryExitCtls(pVCpu, pVmxTransient);
    95475364    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    95485365
    9549     rc = hmR0VmxExportGuestCR0(pVCpu, pVmxTransient);
     5366    rc = vmxHCExportGuestCR0(pVCpu, pVmxTransient);
    95505367    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    95515368
    9552     VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pVmxTransient);
     5369    VBOXSTRICTRC rcStrict = vmxHCExportGuestCR3AndCR4(pVCpu, pVmxTransient);
    95535370    if (rcStrict == VINF_SUCCESS)
    95545371    { /* likely */ }
     
    95595376    }
    95605377
    9561     rc = hmR0VmxExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
     5378    rc = vmxHCExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
    95625379    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    95635380
     
    95655382    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    95665383
    9567     hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient);
    9568     hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient);
    9569     hmR0VmxExportGuestRip(pVCpu);
     5384    vmxHCExportGuestApicTpr(pVCpu, pVmxTransient);
     5385    vmxHCExportGuestXcptIntercepts(pVCpu, pVmxTransient);
     5386    vmxHCExportGuestRip(pVCpu);
    95705387    hmR0VmxExportGuestRsp(pVCpu);
    9571     hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
     5388    vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    95725389
    95735390    rc = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);
     
    96145431        /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
    96155432        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
    9616             hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
     5433            vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    96175434    }
    96185435
     
    96635480        && !(fCtxChanged & (fCtxMask & ~fMinimalMask)))
    96645481    {
    9665         hmR0VmxExportGuestRip(pVCpu);
     5482        vmxHCExportGuestRip(pVCpu);
    96665483        hmR0VmxExportGuestRsp(pVCpu);
    9667         hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
     5484        vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    96685485        rcStrict = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);
    96695486        STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
     
    96945511#endif
    96955512    return rcStrict;
    9696 }
    9697 
    9698 
    9699 /**
    9700  * Tries to determine what part of the guest-state VT-x has deemed as invalid
    9701  * and update error record fields accordingly.
    9702  *
    9703  * @returns VMX_IGS_* error codes.
    9704  * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
    9705  *         wrong with the guest state.
    9706  *
    9707  * @param   pVCpu       The cross context virtual CPU structure.
    9708  * @param   pVmcsInfo   The VMCS info. object.
    9709  *
    9710  * @remarks This function assumes our cache of the VMCS controls
    9711  *          are valid, i.e. hmR0VmxCheckCachedVmcsCtls() succeeded.
    9712  */
    9713 static uint32_t hmR0VmxCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
    9714 {
    9715 #define HMVMX_ERROR_BREAK(err)              { uError = (err); break; }
    9716 #define HMVMX_CHECK_BREAK(expr, err)        do { \
    9717                                                 if (!(expr)) { uError = (err); break; } \
    9718                                             } while (0)
    9719 
    9720     PVMCC    pVM    = pVCpu->CTX_SUFF(pVM);
    9721     PCPUMCTX pCtx   = &pVCpu->cpum.GstCtx;
    9722     uint32_t uError = VMX_IGS_ERROR;
    9723     uint32_t u32IntrState = 0;
    9724     bool const fUnrestrictedGuest = pVM->hmr0.s.vmx.fUnrestrictedGuest;
    9725     do
    9726     {
    9727         int rc;
    9728 
    9729         /*
    9730          * Guest-interruptibility state.
    9731          *
    9732          * Read this first so that any check that fails before the ones actually
    9733          * requiring the guest-interruptibility state still reflects the correct
    9734          * VMCS value, avoiding further confusion.
    9735          */
    9736         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
    9737         AssertRC(rc);
    9738 
    9739         uint32_t u32Val;
    9740         uint64_t u64Val;
    9741 
    9742         /*
    9743          * CR0.
    9744          */
    9745         /* Fixed-0 has a 1 for each CR0 bit that must be 1, fixed-1 a 0 for each bit that must be 0; since must-be-one bits are also allowed-to-be-one, the AND/OR below are defensive no-ops on sane hardware. */
    9746         uint64_t       fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
    9747         uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
    9748         /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
    9749            See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
    9750         if (fUnrestrictedGuest)
    9751             fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
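        /* Worked example with typical (illustrative only) hardware values: Fixed0=0x80000021
           (PE|NE|PG) and Fixed1=0xffffffff give fSetCr0=0x80000021 and fZapCr0=0xffffffff;
           the statement above then clears PE and PG from fSetCr0 for unrestricted guests. */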
    9752 
    9753         uint64_t u64GuestCr0;
    9754         rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR0, &u64GuestCr0);
    9755         AssertRC(rc);
    9756         HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
    9757         HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
    9758         if (   !fUnrestrictedGuest
    9759             &&  (u64GuestCr0 & X86_CR0_PG)
    9760             && !(u64GuestCr0 & X86_CR0_PE))
    9761             HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
    9762 
    9763         /*
    9764          * CR4.
    9765          */
    9766         /* As for CR0 above: AND yields the CR4 bits that must be 1, OR the bits that may be 1. */
    9767         uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
    9768         uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
    9769 
    9770         uint64_t u64GuestCr4;
    9771         rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR4, &u64GuestCr4);
    9772         AssertRC(rc);
    9773         HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
    9774         HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
    9775 
    9776         /*
    9777          * IA32_DEBUGCTL MSR.
    9778          */
    9779         rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
    9780         AssertRC(rc);
    9781         if (   (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
    9782             && (u64Val & 0xfffffe3c))                           /* Bits 31:9, bits 5:2 MBZ. */
    9783         {
    9784             HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
    9785         }
    9786         uint64_t u64DebugCtlMsr = u64Val;
    9787 
    9788 #ifdef VBOX_STRICT
    9789         rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
    9790         AssertRC(rc);
    9791         Assert(u32Val == pVmcsInfo->u32EntryCtls);
    9792 #endif
    9793         bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
    9794 
    9795         /*
    9796          * RIP and RFLAGS.
    9797          */
    9798         rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RIP, &u64Val);
    9799         AssertRC(rc);
    9800         /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code with VM-exits that don't update it). */
    9801         if (   !fLongModeGuest
    9802             || !pCtx->cs.Attr.n.u1Long)
    9803             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
    9804         /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
    9805          *        must be identical if the "IA-32e mode guest" VM-entry
    9806          *        control is 1 and CS.L is 1. No check applies if the
    9807          *        CPU supports 64 linear-address bits. */
    9808 
    9809         /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
    9810         rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RFLAGS, &u64Val);
    9811         AssertRC(rc);
    9812         HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)),                     /* Bits 63:22, bits 15, 5 and 3 MBZ. */
    9813                           VMX_IGS_RFLAGS_RESERVED);
    9814         HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1);       /* Bit 1 MB1. */
    9815         uint32_t const u32Eflags = u64Val;
    9816 
    9817         if (   fLongModeGuest
    9818             || (   fUnrestrictedGuest
    9819                 && !(u64GuestCr0 & X86_CR0_PE)))
    9820         {
    9821             HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
    9822         }
    9823 
    9824         uint32_t u32EntryInfo;
    9825         rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
    9826         AssertRC(rc);
    9827         if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
    9828             HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
    9829 
    9830         /*
    9831          * 64-bit checks.
    9832          */
    9833         if (fLongModeGuest)
    9834         {
    9835             HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG,  VMX_IGS_CR0_PG_LONGMODE);
    9836             HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
    9837         }
    9838 
    9839         if (   !fLongModeGuest
    9840             && (u64GuestCr4 & X86_CR4_PCIDE))
    9841             HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
    9842 
    9843         /** @todo CR3 field must be such that bits 63:52 and bits in the range
    9844          *        51:32 beyond the processor's physical-address width are 0. */
    9845 
    9846         if (   (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
    9847             && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
    9848             HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
    9849 
    9850         rc = VMXReadVmcsNw(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
    9851         AssertRC(rc);
    9852         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
    9853 
    9854         rc = VMXReadVmcsNw(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
    9855         AssertRC(rc);
    9856         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
    9857 
    9858         /*
    9859          * PERF_GLOBAL MSR.
    9860          */
    9861         if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
    9862         {
    9863             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
    9864             AssertRC(rc);
    9865             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
    9866                               VMX_IGS_PERF_GLOBAL_MSR_RESERVED);        /* Bits 63:35, bits 31:2 MBZ. */
    9867         }
    9868 
    9869         /*
    9870          * PAT MSR.
    9871          */
    9872         if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
    9873         {
    9874             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
    9875             AssertRC(rc);
    9876             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each entry MBZ. */
    9877             for (unsigned i = 0; i < 8; i++)
    9878             {
    9879                 uint8_t u8Val = (u64Val & 0xff);
    9880                 if (   u8Val != 0 /* UC */
    9881                     && u8Val != 1 /* WC */
    9882                     && u8Val != 4 /* WT */
    9883                     && u8Val != 5 /* WP */
    9884                     && u8Val != 6 /* WB */
    9885                     && u8Val != 7 /* UC- */)
    9886                     HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
    9887                 u64Val >>= 8;
    9888             }
    9889         }
    9890 
    9891         /*
    9892          * EFER MSR.
    9893          */
    9894         if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
    9895         {
    9896             Assert(g_fHmVmxSupportsVmcsEfer);
    9897             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
    9898             AssertRC(rc);
    9899             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
    9900                               VMX_IGS_EFER_MSR_RESERVED);               /* Bits 63:12, bit 9, bits 7:1 MBZ. */
    9901             HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(  pVmcsInfo->u32EntryCtls
    9902                                                                            & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
    9903                               VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
    9904             /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
    9905              *        iemVmxVmentryCheckGuestState(). */
    9906             HMVMX_CHECK_BREAK(   fUnrestrictedGuest
    9907                               || !(u64GuestCr0 & X86_CR0_PG)
    9908                               || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
    9909                               VMX_IGS_EFER_LMA_LME_MISMATCH);
    9910         }
    9911 
    9912         /*
    9913          * Segment registers.
    9914          */
    9915         HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    9916                           || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
    9917         if (!(u32Eflags & X86_EFL_VM))
    9918         {
    9919             /* CS */
    9920             HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
    9921             HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
    9922             HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
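            /* Granularity rule used by the pairs of checks below: with G=1 the limit is in
               4K units, so its low 12 bits must read as all ones; with G=0 the limit is in
               bytes, so its top 12 bits (31:20) must be zero. */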
    9923             HMVMX_CHECK_BREAK(   (pCtx->cs.u32Limit & 0xfff) == 0xfff
    9924                               || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
    9925             HMVMX_CHECK_BREAK(   !(pCtx->cs.u32Limit & 0xfff00000)
    9926                               || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
    9927             /* CS cannot be loaded with NULL in protected mode. */
    9928             HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
    9929             HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
    9930             if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
    9931                 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
    9932             else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
    9933                 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
    9934             else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
    9935                 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
    9936             else
    9937                 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
    9938 
    9939             /* SS */
    9940             HMVMX_CHECK_BREAK(   fUnrestrictedGuest
    9941                               || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
    9942             HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
    9943             if (   !(pCtx->cr0 & X86_CR0_PE)
    9944                 || pCtx->cs.Attr.n.u4Type == 3)
    9945                 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
    9946 
    9947             if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
    9948             {
    9949                 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
    9950                 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
    9951                 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
    9952                 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
    9953                 HMVMX_CHECK_BREAK(   (pCtx->ss.u32Limit & 0xfff) == 0xfff
    9954                                   || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
    9955                 HMVMX_CHECK_BREAK(   !(pCtx->ss.u32Limit & 0xfff00000)
    9956                                   || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
    9957             }
    9958 
    9959             /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSReg(). */
    9960             if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
    9961             {
    9962                 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
    9963                 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
    9964                 HMVMX_CHECK_BREAK(   fUnrestrictedGuest
    9965                                   || pCtx->ds.Attr.n.u4Type > 11
    9966                                   || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
    9967                 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
    9968                 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
    9969                 HMVMX_CHECK_BREAK(   (pCtx->ds.u32Limit & 0xfff) == 0xfff
    9970                                   || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
    9971                 HMVMX_CHECK_BREAK(   !(pCtx->ds.u32Limit & 0xfff00000)
    9972                                   || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
    9973                 HMVMX_CHECK_BREAK(   !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    9974                                   || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
    9975             }
    9976             if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
    9977             {
    9978                 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
    9979                 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
    9980                 HMVMX_CHECK_BREAK(   fUnrestrictedGuest
    9981                                   || pCtx->es.Attr.n.u4Type > 11
    9982                                   || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
    9983                 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
    9984                 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
    9985                 HMVMX_CHECK_BREAK(   (pCtx->es.u32Limit & 0xfff) == 0xfff
    9986                                   || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
    9987                 HMVMX_CHECK_BREAK(   !(pCtx->es.u32Limit & 0xfff00000)
    9988                                   || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
    9989                 HMVMX_CHECK_BREAK(   !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    9990                                   || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
    9991             }
    9992             if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
    9993             {
    9994                 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
    9995                 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
    9996                 HMVMX_CHECK_BREAK(   fUnrestrictedGuest
    9997                                   || pCtx->fs.Attr.n.u4Type > 11
    9998                                   || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
    9999                 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
    10000                 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
    10001                 HMVMX_CHECK_BREAK(   (pCtx->fs.u32Limit & 0xfff) == 0xfff
    10002                                   || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
    10003                 HMVMX_CHECK_BREAK(   !(pCtx->fs.u32Limit & 0xfff00000)
    10004                                   || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
    10005                 HMVMX_CHECK_BREAK(   !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    10006                                   || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
    10007             }
    10008             if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
    10009             {
    10010                 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
    10011                 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
    10012                 HMVMX_CHECK_BREAK(   fUnrestrictedGuest
    10013                                   || pCtx->gs.Attr.n.u4Type > 11
    10014                                   || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
    10015                 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
    10016                 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
    10017                 HMVMX_CHECK_BREAK(   (pCtx->gs.u32Limit & 0xfff) == 0xfff
    10018                                   || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
    10019                 HMVMX_CHECK_BREAK(   !(pCtx->gs.u32Limit & 0xfff00000)
    10020                                   || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
    10021                 HMVMX_CHECK_BREAK(   !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    10022                                   || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
    10023             }
    10024             /* 64-bit capable CPUs. */
    10025             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
    10026             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
    10027             HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    10028                               || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
    10029             HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
    10030             HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
    10031                               VMX_IGS_LONGMODE_SS_BASE_INVALID);
    10032             HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
    10033                               VMX_IGS_LONGMODE_DS_BASE_INVALID);
    10034             HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
    10035                               VMX_IGS_LONGMODE_ES_BASE_INVALID);
    10036         }
    10037         else
    10038         {
    10039             /* V86 mode checks. */
    10040             uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
    10041             if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
    10042             {
    10043                 u32CSAttr = 0xf3;   u32SSAttr = 0xf3;
    10044                 u32DSAttr = 0xf3;   u32ESAttr = 0xf3;
    10045                 u32FSAttr = 0xf3;   u32GSAttr = 0xf3;
    10046             }
    10047             else
    10048             {
    10049                 u32CSAttr = pCtx->cs.Attr.u;   u32SSAttr = pCtx->ss.Attr.u;
    10050                 u32DSAttr = pCtx->ds.Attr.u;   u32ESAttr = pCtx->es.Attr.u;
    10051                 u32FSAttr = pCtx->fs.Attr.u;   u32GSAttr = pCtx->gs.Attr.u;
    10052             }
    10053 
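            /* 0xf3 decodes as type 3 (accessed read/write data), S=1, DPL=3, P=1, i.e. the
               attributes the real-on-v86 code forces into all six segment registers. */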
    10054             /* CS */
    10055             HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
    10056             HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
    10057             HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
    10058             /* SS */
    10059             HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
    10060             HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
    10061             HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
    10062             /* DS */
    10063             HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
    10064             HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
    10065             HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
    10066             /* ES */
    10067             HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
    10068             HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
    10069             HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
    10070             /* FS */
    10071             HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
    10072             HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
    10073             HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
    10074             /* GS */
    10075             HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
    10076             HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
    10077             HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
    10078             /* 64-bit capable CPUs. */
    10079             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
    10080             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
    10081             HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    10082                               || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
    10083             HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
    10084             HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
    10085                               VMX_IGS_LONGMODE_SS_BASE_INVALID);
    10086             HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
    10087                               VMX_IGS_LONGMODE_DS_BASE_INVALID);
    10088             HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
    10089                               VMX_IGS_LONGMODE_ES_BASE_INVALID);
    10090         }
    10091 
    10092         /*
    10093          * TR.
    10094          */
    10095         HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
    10096         /* 64-bit capable CPUs. */
    10097         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
    10098         if (fLongModeGuest)
    10099             HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11,           /* 64-bit busy TSS. */
    10100                               VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
    10101         else
    10102             HMVMX_CHECK_BREAK(   pCtx->tr.Attr.n.u4Type == 3          /* 16-bit busy TSS. */
    10103                           || pCtx->tr.Attr.n.u4Type == 11,        /* 32-bit busy TSS. */
    10104                               VMX_IGS_TR_ATTR_TYPE_INVALID);
    10105         HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
    10106         HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
    10107         HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED);   /* Bits 11:8 MBZ. */
    10108         HMVMX_CHECK_BREAK(   (pCtx->tr.u32Limit & 0xfff) == 0xfff
    10109                           || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
    10110         HMVMX_CHECK_BREAK(   !(pCtx->tr.u32Limit & 0xfff00000)
    10111                           || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
    10112         HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
    10113 
    10114         /*
    10115          * GDTR and IDTR (64-bit capable checks).
    10116          */
    10117         rc = VMXReadVmcsNw(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
    10118         AssertRC(rc);
    10119         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
    10120 
    10121         rc = VMXReadVmcsNw(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
    10122         AssertRC(rc);
    10123         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
    10124 
    10125         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
    10126         AssertRC(rc);
    10127         HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID);      /* Bits 31:16 MBZ. */
    10128 
    10129         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
    10130         AssertRC(rc);
    10131         HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID);      /* Bits 31:16 MBZ. */
    10132 
    10133         /*
    10134          * Guest Non-Register State.
    10135          */
    10136         /* Activity State. */
    10137         uint32_t u32ActivityState;
    10138         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
    10139         AssertRC(rc);
    10140         HMVMX_CHECK_BREAK(   !u32ActivityState
    10141                           || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
    10142                              VMX_IGS_ACTIVITY_STATE_INVALID);
    10143         HMVMX_CHECK_BREAK(   !(pCtx->ss.Attr.n.u2Dpl)
    10144                           || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
    10145 
    10146         if (   u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
    10147             || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    10148             HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
    10149 
    10150         /** @todo Activity state and injecting interrupts. Left as a todo since we
    10151  *        currently don't use any activity state other than ACTIVE. */
    10152 
    10153         HMVMX_CHECK_BREAK(   !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
    10154                           || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
    10155 
    10156         /* Guest interruptibility-state. */
    10157         HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
    10158         HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
    10159                                        != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
    10160                           VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
    10161         HMVMX_CHECK_BREAK(   (u32Eflags & X86_EFL_IF)
    10162                           || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
    10163                           VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
    10164         if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
    10165         {
    10166             HMVMX_CHECK_BREAK(   !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    10167                               && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
    10168                               VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
    10169         }
    10170         else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
    10171         {
    10172             HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
    10173                               VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
    10174             HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
    10175                               VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
    10176         }
    10177         /** @todo Assumes the processor is not in SMM. */
    10178         HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
    10179                           VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
    10180         HMVMX_CHECK_BREAK(   !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
    10181                           || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
    10182                              VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
    10183         if (   (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
    10184             && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
    10185             HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
    10186 
    10187         /* Pending debug exceptions. */
    10188         rc = VMXReadVmcsNw(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
    10189         AssertRC(rc);
    10190         /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
    10191         HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
    10192         u32Val = u64Val;    /* For pending debug exceptions checks below. */
    10193 
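        /* Consistency rule being checked: while interrupts are inhibited by STI/MOV SS or
           the guest is halted, single-stepping (EFLAGS.TF=1 with DEBUGCTL.BTF=0) requires
           the BS bit to be pending, and conversely BS must be clear when not single-stepping. */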
    10194         if (   (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    10195             || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
    10196             || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
    10197         {
    10198             if (   (u32Eflags & X86_EFL_TF)
    10199                 && !(u64DebugCtlMsr & RT_BIT_64(1)))    /* Bit 1 is IA32_DEBUGCTL.BTF. */
    10200             {
    10201                 /* Bit 14 is PendingDebug.BS. */
    10202                 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
    10203             }
    10204             if (   !(u32Eflags & X86_EFL_TF)
    10205                 || (u64DebugCtlMsr & RT_BIT_64(1)))     /* Bit 1 is IA32_DEBUGCTL.BTF. */
    10206             {
    10207                 /* Bit 14 is PendingDebug.BS. */
    10208                 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
    10209             }
    10210         }
    10211 
    10212         /* VMCS link pointer. */
    10213         rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
    10214         AssertRC(rc);
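        /* A link pointer of all ones marks the field as unused; any other value must point
           at a valid VMCS (here the shadow VMCS) with a matching revision identifier. */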
    10215         if (u64Val != UINT64_C(0xffffffffffffffff))
    10216         {
    10217             HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
    10218             /** @todo Bits beyond the processor's physical-address width MBZ. */
    10219             /** @todo SMM checks. */
    10220             Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
    10221             Assert(pVmcsInfo->pvShadowVmcs);
    10222             VMXVMCSREVID VmcsRevId;
    10223             VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
    10224             HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
    10225                               VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
    10226             HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
    10227                               VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
    10228         }
    10229 
    10230         /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
    10231          *        not using nested paging? */
    10232         if (   pVM->hmr0.s.fNestedPaging
    10233             && !fLongModeGuest
    10234             && CPUMIsGuestInPAEModeEx(pCtx))
    10235         {
    10236             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
    10237             AssertRC(rc);
    10238             HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
    10239 
    10240             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
    10241             AssertRC(rc);
    10242             HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
    10243 
    10244             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
    10245             AssertRC(rc);
    10246             HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
    10247 
    10248             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
    10249             AssertRC(rc);
    10250             HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
    10251         }
    10252 
    10253         /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
    10254         if (uError == VMX_IGS_ERROR)
    10255             uError = VMX_IGS_REASON_NOT_FOUND;
    10256     } while (0);
    10257 
    10258     pVCpu->hm.s.u32HMError = uError;
    10259     pVCpu->hm.s.vmx.LastError.u32GuestIntrState = u32IntrState;
    10260     return uError;
    10261 
    10262 #undef HMVMX_ERROR_BREAK
    10263 #undef HMVMX_CHECK_BREAK
    102645513}
    102655514
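/*
 * Illustrative sketch (not part of the changeset): one way a caller could use
 * hmR0VmxCheckGuestState() after a VM-entry fails with VMX_EXIT_ERR_INVALID_GUEST_STATE.
 * The wrapper name exampleDiagnoseInvalidGuestState is hypothetical; the function, the
 * VMX_IGS_REASON_NOT_FOUND code, Log() and VERR_VMX_INVALID_GUEST_STATE are not.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleDiagnoseInvalidGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
{
    /* Walk the guest-state checks to pinpoint what VT-x objected to. */
    uint32_t const uDiag = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
    if (uDiag == VMX_IGS_REASON_NOT_FOUND)
        Log(("Invalid guest state, but none of the known checks failed\n"));
    else
        Log(("Invalid guest state, diagnostic %#x\n", uDiag));
    return VERR_VMX_INVALID_GUEST_STATE;
}
#endif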
     
    107726021     * Check and process force flag actions, some of which might require us to go back to ring-3.
    107736022     */
    10774     VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, pVmxTransient, fStepping);
     6023    VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, pVmxTransient, fStepping);
    107756024    if (rcStrict == VINF_SUCCESS)
    107766025    {
     
    108256074     */
    108266075    if (TRPMHasTrap(pVCpu))
    10827         hmR0VmxTrpmTrapToPendingEvent(pVCpu);
     6076        vmxHCTrpmTrapToPendingEvent(pVCpu);
    108286077
    108296078    uint32_t fIntrState;
    10830     rcStrict = hmR0VmxEvaluatePendingEvent(pVCpu, pVmxTransient, &fIntrState);
     6079    rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->fIsNestedGuest,
     6080                                         &fIntrState);
    108316081
    108326082#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     
    108556105     * requirement. Regardless, we do this here to avoid duplicating code elsewhere.
    108566106     */
    10857     rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping);
     6107    rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->fIsNestedGuest,
     6108                                       fIntrState, fStepping);
    108586109    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    108596110    { /* likely */ }
     
    111006351        && !fIsRdtscIntercepted)
    111016352    {
    11102         hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_TSC_AUX);
     6353        vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_TSC_AUX);
    111036354
    111046355        /* NB: Because we call hmR0VmxAddAutoLoadStoreMsr with fUpdateHostMsr=true,
     
    111156366    hmR0VmxCheckAutoLoadStoreMsrs(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
    111166367    hmR0VmxCheckHostEferMsr(pVmcsInfo);
    11117     AssertRC(hmR0VmxCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest));
     6368    AssertRC(vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest));
    111186369#endif
    111196370
     
    112736524#endif
    112746525                                       ;
    11275             rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImportMask);
     6526            rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImportMask);
    112766527            AssertRC(rc);
    112776528
     
    113306581    if (pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
    113316582    {
    11332         int rc = hmR0VmxSwitchToGstOrNstGstVmcs(pVCpu, false /* fSwitchToNstGstVmcs */);
     6583        int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, false /* fSwitchToNstGstVmcs */);
    113336584        if (RT_SUCCESS(rc))
    113346585        { /* likely */ }
     
    114426693    if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
    114436694    {
    11444         int rc = hmR0VmxSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
     6695        int rc = vmxHCSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
    114456696        if (RT_SUCCESS(rc))
    114466697        { /* likely */ }
     
    115106761         * Handle the VM-exit.
    115116762         */
    11512         rcStrict = hmR0VmxHandleExitNested(pVCpu, &VmxTransient);
     6763        rcStrict = vmxHCHandleExitNested(pVCpu, &VmxTransient);
    115136764        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
    115146765        if (rcStrict == VINF_SUCCESS)
     
    120847335    {
    120857336        case VMX_EXIT_MTF:
    12086             return hmR0VmxExitMtf(pVCpu, pVmxTransient);
     7337            return vmxHCExitMtf(pVCpu, pVmxTransient);
    120877338
    120887339        case VMX_EXIT_XCPT_OR_NMI:
     
    120987349                        if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
    120997350                        {
    12100                             hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     7351                            vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
    121017352                            uEventArg = pVmxTransient->uExitIntErrorCode;
    121027353                        }
     
    121687419        case VMX_EXIT_VMXON:            SET_BOTH(VMX_VMXON); break;
    121697420        case VMX_EXIT_MOV_CRX:
    12170             hmR0VmxReadExitQualVmcs(pVmxTransient);
     7421            vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    121717422            if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
    121727423                SET_BOTH(CRX_READ);
     
    121767427            break;
    121777428        case VMX_EXIT_MOV_DRX:
    12178             hmR0VmxReadExitQualVmcs(pVmxTransient);
     7429            vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    121797430            if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
    121807431                == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
     
    121907441        case VMX_EXIT_PAUSE:            SET_BOTH(PAUSE); break;
    121917442        case VMX_EXIT_GDTR_IDTR_ACCESS:
    12192             hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     7443            vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
    121937444            switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
    121947445            {
     
    122017452
    122027453        case VMX_EXIT_LDTR_TR_ACCESS:
    12203             hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     7454            vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
    122047455            switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
    122057456            {
     
    122597510    if (fDtrace1 || fDtrace2)
    122607511    {
    12261         hmR0VmxReadExitQualVmcs(pVmxTransient);
    12262         hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     7512        vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
     7513        vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    122637514        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    122647515        switch (enmEvent1)
     
    124067657        && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
    124077658    {
    12408         hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     7659        vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    124097660        VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
    124107661        if (rcStrict != VINF_SUCCESS)
     
    124147665             && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
    124157666    {
    12416         hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     7667        vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    124177668        VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
    124187669        if (rcStrict != VINF_SUCCESS)
     
    124467697    else
    124477698    {
    12448         hmR0VmxReadExitQualVmcs(pVmxTransient);
    12449         int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     7699        vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
     7700        int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    124507701        AssertRC(rc);
    124517702        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
     
    124597710    else
    124607711    {
    12461         hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     7712        vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
    124627713        uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
    124637714        if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
     
    124737724        {
    124747725            case VMX_EXIT_MTF:
    12475                 return hmR0VmxExitMtf(pVCpu, pVmxTransient);
     7726                return vmxHCExitMtf(pVCpu, pVmxTransient);
    124767727
    124777728            /* Various events: */
     
    125297780            case VMX_EXIT_XRSTORS:
    125307781            {
    12531                 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     7782                int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    125327783                AssertRCReturn(rc, rc);
    125337784                if (   pVCpu->cpum.GstCtx.rip    != pDbgState->uRipStart
     
    125737824    return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
    125747825#else
    12575     return hmR0VmxHandleExit(pVCpu, pVmxTransient, uExitReason);
     7826    return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);
    125767827#endif
    125777828}
     
    129438194    return rcStrict;
    129448195}
    12945 
    12946 
    12947 #ifndef HMVMX_USE_FUNCTION_TABLE
    12948 /**
    12949  * Handles a guest VM-exit from hardware-assisted VMX execution.
    12950  *
    12951  * @returns Strict VBox status code (i.e. informational status codes too).
    12952  * @param   pVCpu           The cross context virtual CPU structure.
    12953  * @param   pVmxTransient   The VMX-transient structure.
    12954  */
    12955 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    12956 {
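/* Under DEBUG_ramshankar the VMEXIT_CALL_RET wrapper below optionally imports the entire
   guest state before invoking the exit handler and marks all guest state as changed
   afterwards, so each handler runs on a fully synced context; in normal builds it expands
   to a plain tail call. */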
    12957 #ifdef DEBUG_ramshankar
    12958 # define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
    12959        do { \
    12960             if (a_fSave != 0) \
    12961                 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
    12962             VBOXSTRICTRC rcStrict = a_CallExpr; \
    12963             if (a_fSave != 0) \
    12964                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
    12965             return rcStrict; \
    12966         } while (0)
    12967 #else
    12968 # define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
    12969 #endif
    12970     uint32_t const uExitReason = pVmxTransient->uExitReason;
    12971     switch (uExitReason)
    12972     {
    12973         case VMX_EXIT_EPT_MISCONFIG:           VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient));
    12974         case VMX_EXIT_EPT_VIOLATION:           VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pVmxTransient));
    12975         case VMX_EXIT_IO_INSTR:                VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pVmxTransient));
    12976         case VMX_EXIT_CPUID:                   VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pVmxTransient));
    12977         case VMX_EXIT_RDTSC:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pVmxTransient));
    12978         case VMX_EXIT_RDTSCP:                  VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pVmxTransient));
    12979         case VMX_EXIT_APIC_ACCESS:             VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pVmxTransient));
    12980         case VMX_EXIT_XCPT_OR_NMI:             VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient));
    12981         case VMX_EXIT_MOV_CRX:                 VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pVmxTransient));
    12982         case VMX_EXIT_EXT_INT:                 VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pVmxTransient));
    12983         case VMX_EXIT_INT_WINDOW:              VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pVmxTransient));
    12984         case VMX_EXIT_TPR_BELOW_THRESHOLD:     VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient));
    12985         case VMX_EXIT_MWAIT:                   VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pVmxTransient));
    12986         case VMX_EXIT_MONITOR:                 VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pVmxTransient));
    12987         case VMX_EXIT_TASK_SWITCH:             VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pVmxTransient));
    12988         case VMX_EXIT_PREEMPT_TIMER:           VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pVmxTransient));
    12989         case VMX_EXIT_RDMSR:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pVmxTransient));
    12990         case VMX_EXIT_WRMSR:                   VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pVmxTransient));
    12991         case VMX_EXIT_VMCALL:                  VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pVmxTransient));
    12992         case VMX_EXIT_MOV_DRX:                 VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pVmxTransient));
    12993         case VMX_EXIT_HLT:                     VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pVmxTransient));
    12994         case VMX_EXIT_INVD:                    VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pVmxTransient));
    12995         case VMX_EXIT_INVLPG:                  VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pVmxTransient));
    12996         case VMX_EXIT_MTF:                     VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pVmxTransient));
    12997         case VMX_EXIT_PAUSE:                   VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pVmxTransient));
    12998         case VMX_EXIT_WBINVD:                  VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pVmxTransient));
    12999         case VMX_EXIT_XSETBV:                  VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pVmxTransient));
    13000         case VMX_EXIT_INVPCID:                 VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pVmxTransient));
    13001         case VMX_EXIT_GETSEC:                  VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pVmxTransient));
    13002         case VMX_EXIT_RDPMC:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pVmxTransient));
    13003 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    13004         case VMX_EXIT_VMCLEAR:                 VMEXIT_CALL_RET(0, hmR0VmxExitVmclear(pVCpu, pVmxTransient));
    13005         case VMX_EXIT_VMLAUNCH:                VMEXIT_CALL_RET(0, hmR0VmxExitVmlaunch(pVCpu, pVmxTransient));
    13006         case VMX_EXIT_VMPTRLD:                 VMEXIT_CALL_RET(0, hmR0VmxExitVmptrld(pVCpu, pVmxTransient));
    13007         case VMX_EXIT_VMPTRST:                 VMEXIT_CALL_RET(0, hmR0VmxExitVmptrst(pVCpu, pVmxTransient));
    13008         case VMX_EXIT_VMREAD:                  VMEXIT_CALL_RET(0, hmR0VmxExitVmread(pVCpu, pVmxTransient));
    13009         case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, hmR0VmxExitVmresume(pVCpu, pVmxTransient));
    13010         case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, hmR0VmxExitVmwrite(pVCpu, pVmxTransient));
    13011         case VMX_EXIT_VMXOFF:                  VMEXIT_CALL_RET(0, hmR0VmxExitVmxoff(pVCpu, pVmxTransient));
    13012         case VMX_EXIT_VMXON:                   VMEXIT_CALL_RET(0, hmR0VmxExitVmxon(pVCpu, pVmxTransient));
    13013         case VMX_EXIT_INVVPID:                 VMEXIT_CALL_RET(0, hmR0VmxExitInvvpid(pVCpu, pVmxTransient));
    13014         case VMX_EXIT_INVEPT:                  VMEXIT_CALL_RET(0, hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient));
    13015 #else
    13016         case VMX_EXIT_VMCLEAR:
    13017         case VMX_EXIT_VMLAUNCH:
    13018         case VMX_EXIT_VMPTRLD:
    13019         case VMX_EXIT_VMPTRST:
    13020         case VMX_EXIT_VMREAD:
    13021         case VMX_EXIT_VMRESUME:
    13022         case VMX_EXIT_VMWRITE:
    13023         case VMX_EXIT_VMXOFF:
    13024         case VMX_EXIT_VMXON:
    13025         case VMX_EXIT_INVVPID:
    13026         case VMX_EXIT_INVEPT:
    13027             return hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient);
    13028 #endif
    13029 
    13030         case VMX_EXIT_TRIPLE_FAULT:            return hmR0VmxExitTripleFault(pVCpu, pVmxTransient);
    13031         case VMX_EXIT_NMI_WINDOW:              return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient);
    13032         case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient);
    13033 
    13034         case VMX_EXIT_INIT_SIGNAL:
    13035         case VMX_EXIT_SIPI:
    13036         case VMX_EXIT_IO_SMI:
    13037         case VMX_EXIT_SMI:
    13038         case VMX_EXIT_ERR_MSR_LOAD:
    13039         case VMX_EXIT_ERR_MACHINE_CHECK:
    13040         case VMX_EXIT_PML_FULL:
    13041         case VMX_EXIT_VIRTUALIZED_EOI:
    13042         case VMX_EXIT_GDTR_IDTR_ACCESS:
    13043         case VMX_EXIT_LDTR_TR_ACCESS:
    13044         case VMX_EXIT_APIC_WRITE:
    13045         case VMX_EXIT_RDRAND:
    13046         case VMX_EXIT_RSM:
    13047         case VMX_EXIT_VMFUNC:
    13048         case VMX_EXIT_ENCLS:
    13049         case VMX_EXIT_RDSEED:
    13050         case VMX_EXIT_XSAVES:
    13051         case VMX_EXIT_XRSTORS:
    13052         case VMX_EXIT_UMWAIT:
    13053         case VMX_EXIT_TPAUSE:
    13054         case VMX_EXIT_LOADIWKEY:
    13055         default:
    13056             return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient);
    13057     }
    13058 #undef VMEXIT_CALL_RET
    13059 }
    13060 #endif /* !HMVMX_USE_FUNCTION_TABLE */
    13061 
    13062 
    13063 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    13064 /**
    13065  * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
    13066  *
    13067  * @returns Strict VBox status code (i.e. informational status codes too).
    13068  * @param   pVCpu           The cross context virtual CPU structure.
    13069  * @param   pVmxTransient   The VMX-transient structure.
    13070  */
    13071 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    13072 {
    13073     uint32_t const uExitReason = pVmxTransient->uExitReason;
    13074     switch (uExitReason)
    13075     {
    13076         case VMX_EXIT_EPT_MISCONFIG:            return hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient);
    13077         case VMX_EXIT_EPT_VIOLATION:            return hmR0VmxExitEptViolation(pVCpu, pVmxTransient);
    13078         case VMX_EXIT_XCPT_OR_NMI:              return hmR0VmxExitXcptOrNmiNested(pVCpu, pVmxTransient);
    13079         case VMX_EXIT_IO_INSTR:                 return hmR0VmxExitIoInstrNested(pVCpu, pVmxTransient);
    13080         case VMX_EXIT_HLT:                      return hmR0VmxExitHltNested(pVCpu, pVmxTransient);
    13081 
    13082         /*
    13083          * We shouldn't direct host physical interrupts to the nested-guest.
    13084          */
    13085         case VMX_EXIT_EXT_INT:
    13086             return hmR0VmxExitExtInt(pVCpu, pVmxTransient);
    13087 
    13088         /*
    13089          * Instructions that cause VM-exits unconditionally or whose exit condition
    13090          * is taken solely from the nested hypervisor (meaning that if the VM-exit
    13091          * happens, it's guaranteed to be a nested-guest VM-exit).
    13092          *
    13093          *   - Provides VM-exit instruction length ONLY.
    13094          */
    13095         case VMX_EXIT_CPUID:              /* Unconditional. */
    13096         case VMX_EXIT_VMCALL:
    13097         case VMX_EXIT_GETSEC:
    13098         case VMX_EXIT_INVD:
    13099         case VMX_EXIT_XSETBV:
    13100         case VMX_EXIT_VMLAUNCH:
    13101         case VMX_EXIT_VMRESUME:
    13102         case VMX_EXIT_VMXOFF:
    13103         case VMX_EXIT_ENCLS:              /* Condition specified solely by nested hypervisor. */
    13104         case VMX_EXIT_VMFUNC:
    13105             return hmR0VmxExitInstrNested(pVCpu, pVmxTransient);
    13106 
    13107         /*
    13108          * Instructions that cause VM-exits unconditionally or whose exit condition
    13109          * is taken solely from the nested hypervisor (meaning that if the VM-exit
    13110          * happens, it's guaranteed to be a nested-guest VM-exit).
    13111          *
    13112          *   - Provides VM-exit instruction length.
    13113          *   - Provides VM-exit information.
    13114          *   - Optionally provides Exit qualification.
    13115          *
    13116          * Since Exit qualification is 0 for all VM-exits where it is not
    13117          * applicable, reading and passing it to the guest should produce
    13118          * defined behavior.
    13119          *
    13120          * See Intel spec. 27.2.1 "Basic VM-Exit Information".
    13121          */
    13122         case VMX_EXIT_INVEPT:             /* Unconditional. */
    13123         case VMX_EXIT_INVVPID:
    13124         case VMX_EXIT_VMCLEAR:
    13125         case VMX_EXIT_VMPTRLD:
    13126         case VMX_EXIT_VMPTRST:
    13127         case VMX_EXIT_VMXON:
    13128         case VMX_EXIT_GDTR_IDTR_ACCESS:   /* Condition specified solely by nested hypervisor. */
    13129         case VMX_EXIT_LDTR_TR_ACCESS:
    13130         case VMX_EXIT_RDRAND:
    13131         case VMX_EXIT_RDSEED:
    13132         case VMX_EXIT_XSAVES:
    13133         case VMX_EXIT_XRSTORS:
    13134         case VMX_EXIT_UMWAIT:
    13135         case VMX_EXIT_TPAUSE:
    13136             return hmR0VmxExitInstrWithInfoNested(pVCpu, pVmxTransient);
    13137 
    13138         case VMX_EXIT_RDTSC:                    return hmR0VmxExitRdtscNested(pVCpu, pVmxTransient);
    13139         case VMX_EXIT_RDTSCP:                   return hmR0VmxExitRdtscpNested(pVCpu, pVmxTransient);
    13140         case VMX_EXIT_RDMSR:                    return hmR0VmxExitRdmsrNested(pVCpu, pVmxTransient);
    13141         case VMX_EXIT_WRMSR:                    return hmR0VmxExitWrmsrNested(pVCpu, pVmxTransient);
    13142         case VMX_EXIT_INVLPG:                   return hmR0VmxExitInvlpgNested(pVCpu, pVmxTransient);
    13143         case VMX_EXIT_INVPCID:                  return hmR0VmxExitInvpcidNested(pVCpu, pVmxTransient);
    13144         case VMX_EXIT_TASK_SWITCH:              return hmR0VmxExitTaskSwitchNested(pVCpu, pVmxTransient);
    13145         case VMX_EXIT_WBINVD:                   return hmR0VmxExitWbinvdNested(pVCpu, pVmxTransient);
    13146         case VMX_EXIT_MTF:                      return hmR0VmxExitMtfNested(pVCpu, pVmxTransient);
    13147         case VMX_EXIT_APIC_ACCESS:              return hmR0VmxExitApicAccessNested(pVCpu, pVmxTransient);
    13148         case VMX_EXIT_APIC_WRITE:               return hmR0VmxExitApicWriteNested(pVCpu, pVmxTransient);
    13149         case VMX_EXIT_VIRTUALIZED_EOI:          return hmR0VmxExitVirtEoiNested(pVCpu, pVmxTransient);
    13150         case VMX_EXIT_MOV_CRX:                  return hmR0VmxExitMovCRxNested(pVCpu, pVmxTransient);
    13151         case VMX_EXIT_INT_WINDOW:               return hmR0VmxExitIntWindowNested(pVCpu, pVmxTransient);
    13152         case VMX_EXIT_NMI_WINDOW:               return hmR0VmxExitNmiWindowNested(pVCpu, pVmxTransient);
    13153         case VMX_EXIT_TPR_BELOW_THRESHOLD:      return hmR0VmxExitTprBelowThresholdNested(pVCpu, pVmxTransient);
    13154         case VMX_EXIT_MWAIT:                    return hmR0VmxExitMwaitNested(pVCpu, pVmxTransient);
    13155         case VMX_EXIT_MONITOR:                  return hmR0VmxExitMonitorNested(pVCpu, pVmxTransient);
    13156         case VMX_EXIT_PAUSE:                    return hmR0VmxExitPauseNested(pVCpu, pVmxTransient);
    13157 
    13158         case VMX_EXIT_PREEMPT_TIMER:
    13159         {
    13160             /** @todo NSTVMX: Preempt timer. */
    13161             return hmR0VmxExitPreemptTimer(pVCpu, pVmxTransient);
    13162         }
    13163 
    13164         case VMX_EXIT_MOV_DRX:                  return hmR0VmxExitMovDRxNested(pVCpu, pVmxTransient);
    13165         case VMX_EXIT_RDPMC:                    return hmR0VmxExitRdpmcNested(pVCpu, pVmxTransient);
    13166 
    13167         case VMX_EXIT_VMREAD:
    13168         case VMX_EXIT_VMWRITE:                  return hmR0VmxExitVmreadVmwriteNested(pVCpu, pVmxTransient);
    13169 
    13170         case VMX_EXIT_TRIPLE_FAULT:             return hmR0VmxExitTripleFaultNested(pVCpu, pVmxTransient);
    13171         case VMX_EXIT_ERR_INVALID_GUEST_STATE:  return hmR0VmxExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
    13172 
    13173         case VMX_EXIT_INIT_SIGNAL:
    13174         case VMX_EXIT_SIPI:
    13175         case VMX_EXIT_IO_SMI:
    13176         case VMX_EXIT_SMI:
    13177         case VMX_EXIT_ERR_MSR_LOAD:
    13178         case VMX_EXIT_ERR_MACHINE_CHECK:
    13179         case VMX_EXIT_PML_FULL:
    13180         case VMX_EXIT_RSM:
    13181         default:
    13182             return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient);
    13183     }
    13184 }
    13185 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    13186 
    13187 
    13188 /** @name VM-exit helpers.
    13189  * @{
    13190  */
    13191 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    13192 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
    13193 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    13194 
    13195 /** Macro for VM-exits called unexpectedly. */
    13196 #define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
    13197     do { \
    13198         (a_pVCpu)->hm.s.u32HMError = (a_HmError); \
    13199         return VERR_VMX_UNEXPECTED_EXIT; \
    13200     } while (0)
    13201 
    13202 #ifdef VBOX_STRICT
    13203 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
    13204 # define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
    13205     RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
    13206 
    13207 # define HMVMX_ASSERT_PREEMPT_CPUID() \
    13208     do { \
    13209          RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
    13210          AssertMsg(idAssertCpu == idAssertCpuNow,  ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
    13211     } while (0)
    13212 
    13213 # define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    13214     do { \
    13215         AssertPtr((a_pVCpu)); \
    13216         AssertPtr((a_pVmxTransient)); \
    13217         Assert((a_pVmxTransient)->fVMEntryFailed == false); \
    13218         Assert((a_pVmxTransient)->pVmcsInfo); \
    13219         Assert(ASMIntAreEnabled()); \
    13220         HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
    13221         HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
    13222         Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
    13223         HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
    13224         if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
    13225             HMVMX_ASSERT_PREEMPT_CPUID(); \
    13226         HMVMX_STOP_EXIT_DISPATCH_PROF(); \
    13227     } while (0)
    13228 
    13229 # define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    13230     do { \
    13231         HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
    13232         Assert((a_pVmxTransient)->fIsNestedGuest); \
    13233     } while (0)
    13234 
    13235 # define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    13236     do { \
    13237         Log4Func(("\n")); \
    13238     } while (0)
    13239 #else
    13240 # define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    13241     do { \
    13242         HMVMX_STOP_EXIT_DISPATCH_PROF(); \
    13243         NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
    13244     } while (0)
    13245 
    13246 # define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
    13247     do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
    13248 
    13249 # define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient)      do { } while (0)
    13250 #endif
    13251 
    13252 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    13253 /** Macro that does the necessary privilege checks for VM-exits caused by the
    13254  *  guest attempting to execute a VMX instruction. */
    13255 # define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
    13256     do \
    13257     { \
    13258         VBOXSTRICTRC rcStrictTmp = hmR0VmxCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
    13259         if (rcStrictTmp == VINF_SUCCESS) \
    13260         { /* likely */ } \
    13261         else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
    13262         { \
    13263             Assert((a_pVCpu)->hm.s.Event.fPending); \
    13264             Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
    13265             return VINF_SUCCESS; \
    13266         } \
    13267         else \
    13268         { \
    13269             int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
    13270             AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
    13271         } \
    13272     } while (0)
    13273 
    13274 /** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
    13275 # define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
    13276     do \
    13277     { \
    13278         VBOXSTRICTRC rcStrictTmp = hmR0VmxDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
    13279                                                            (a_pGCPtrEffAddr)); \
    13280         if (rcStrictTmp == VINF_SUCCESS) \
    13281         { /* likely */ } \
    13282         else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
    13283         { \
    13284             uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
    13285             Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
    13286             NOREF(uXcptTmp); \
    13287             return VINF_SUCCESS; \
    13288         } \
    13289         else \
    13290         { \
    13291             Log4Func(("hmR0VmxDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
    13292             return rcStrictTmp; \
    13293         } \
    13294     } while (0)
    13295 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
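
/*
 * A minimal sketch (illustrative only, hypothetical handler name) of how the two
 * macros above compose in a nested VMX-instruction exit handler: privilege checks
 * first, then memory-operand decoding; both return to the caller when an exception
 * is pending.  The real handlers appear further below in this file.
 */
#if 0
static VBOXSTRICTRC hmR0VmxExitVmxInstrExample(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    /* (Reading ExitInstrInfo / uExitQual from the VMCS omitted for brevity.) */

    /* Raises #UD (and returns VINF_SUCCESS) if the guest may not execute this. */
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);

    /* Decodes the memory operand; raises #GP/#SS (and returns) on failure. */
    RTGCPTR GCPtrEffAddr;
    HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
                             VMXMEMACCESS_READ, &GCPtrEffAddr);

    /* ... hand the decoded operand to the IEM worker for the instruction ... */
    return VINF_SUCCESS;
}
#endif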
    13296 
    13297 
    13298 /**
    13299  * Advances the guest RIP by the specified number of bytes.
    13300  *
    13301  * @param   pVCpu       The cross context virtual CPU structure.
    13302  * @param   cbInstr     Number of bytes to advance the RIP by.
    13303  *
    13304  * @remarks No-long-jump zone!!!
    13305  */
    13306 DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
    13307 {
    13308     /* Advance the RIP. */
    13309     pVCpu->cpum.GstCtx.rip += cbInstr;
    13310     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
    13311 
    13312     /* Update interrupt inhibition. */
    13313     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    13314         && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
    13315         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    13316 }
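
/*
 * Note on the inhibition update above (condensed restatement, illustrative only):
 * the STI / MOV SS interrupt shadow only covers the instruction at the PC recorded
 * when the shadow was set, so once RIP has moved past it the force-flag is cleared:
 */
#if 0
    bool const fInhibited =    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                            && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu);
#endif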
    13317 
    13318 
    13319 /**
    13320  * Advances the guest RIP after reading it from the VMCS.
    13321  *
    13322  * @returns VBox status code, no informational status codes.
    13323  * @param   pVCpu           The cross context virtual CPU structure.
    13324  * @param   pVmxTransient   The VMX-transient structure.
    13325  *
    13326  * @remarks No-long-jump zone!!!
    13327  */
    13328 static int hmR0VmxAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    13329 {
    13330     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13331     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    13332     AssertRCReturn(rc, rc);
    13333 
    13334     hmR0VmxAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
    13335     return VINF_SUCCESS;
    13336 }
    13337 
    13338 
    13339 /**
    13340  * Handle a condition that occurred while delivering an event through the guest or
    13341  * nested-guest IDT.
    13342  *
    13343  * @returns Strict VBox status code (i.e. informational status codes too).
    13344  * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
    13345  * @retval  VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
    13346  *          to continue execution of the guest which will deliver the \#DF.
    13347  * @retval  VINF_EM_RESET if we detected a triple-fault condition.
    13348  * @retval  VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
    13349  *
    13350  * @param   pVCpu           The cross context virtual CPU structure.
    13351  * @param   pVmxTransient   The VMX-transient structure.
    13352  *
    13353  * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    13354  *          Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
    13355  *          is due to an EPT violation, PML full or SPP-related event.
    13356  *
    13357  * @remarks No-long-jump zone!!!
    13358  */
    13359 static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    13360 {
    13361     Assert(!pVCpu->hm.s.Event.fPending);
    13362     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
    13363     if (   pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
    13364         || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
    13365         || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
    13366         HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
    13367 
    13368     VBOXSTRICTRC   rcStrict       = VINF_SUCCESS;
    13369     PCVMXVMCSINFO  pVmcsInfo      = pVmxTransient->pVmcsInfo;
    13370     uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
    13371     uint32_t const uExitIntInfo   = pVmxTransient->uExitIntInfo;
    13372     if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
    13373     {
    13374         uint32_t const uIdtVector     = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
    13375         uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
    13376 
    13377         /*
    13378          * If the event was a software interrupt (generated with INT n) or a software exception
    13379          * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
    13380          * can handle the VM-exit and continue guest execution, which will re-execute the
    13381          * instruction rather than re-inject the exception. Re-injection can cause premature
    13382          * trips to ring-3 before injection and involves TRPM, which currently has no way of
    13383          * recording that these exceptions were caused by these instructions (ICEBP's #DB poses
    13384          * the problem).
    13385          */
    13386         IEMXCPTRAISE     enmRaise;
    13387         IEMXCPTRAISEINFO fRaiseInfo;
    13388         if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    13389             || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
    13390             || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
    13391         {
    13392             enmRaise   = IEMXCPTRAISE_REEXEC_INSTR;
    13393             fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    13394         }
    13395         else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
    13396         {
    13397             uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
    13398             uint8_t const  uExitVector     = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
    13399             Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
    13400 
    13401             uint32_t const fIdtVectorFlags  = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
    13402             uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
    13403 
    13404             enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
    13405 
    13406             /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */
    13407             if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
    13408             {
    13409                 pVmxTransient->fVectoringPF = true;
    13410                 enmRaise = IEMXCPTRAISE_PREV_EVENT;
    13411             }
    13412         }
    13413         else
    13414         {
    13415             /*
    13416              * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
    13417              * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
    13418              * It is sufficient to reflect the original event to the guest after handling the VM-exit.
    13419              */
    13420             Assert(   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
    13421                    || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
    13422                    || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
    13423             enmRaise   = IEMXCPTRAISE_PREV_EVENT;
    13424             fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    13425         }
    13426 
    13427         /*
    13428          * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
    13429          * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
    13430          * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
    13431          * subsequent VM-entry would fail, see @bugref{7445}.
    13432          *
    13433          * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
    13434          */
    13435         if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
    13436             && enmRaise == IEMXCPTRAISE_PREV_EVENT
    13437             && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
    13438             && CPUMIsGuestNmiBlocking(pVCpu))
    13439         {
    13440             CPUMSetGuestNmiBlocking(pVCpu, false);
    13441         }
    13442 
    13443         switch (enmRaise)
    13444         {
    13445             case IEMXCPTRAISE_CURRENT_XCPT:
    13446             {
    13447                 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
    13448                 Assert(rcStrict == VINF_SUCCESS);
    13449                 break;
    13450             }
    13451 
    13452             case IEMXCPTRAISE_PREV_EVENT:
    13453             {
    13454                 uint32_t u32ErrCode;
    13455                 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
    13456                     u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
    13457                 else
    13458                     u32ErrCode = 0;
    13459 
    13460                 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */
    13461                 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflect);
    13462                 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
    13463                                        u32ErrCode, pVCpu->cpum.GstCtx.cr2);
    13464 
    13465                 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
    13466                           pVCpu->hm.s.Event.u32ErrCode));
    13467                 Assert(rcStrict == VINF_SUCCESS);
    13468                 break;
    13469             }
    13470 
    13471             case IEMXCPTRAISE_REEXEC_INSTR:
    13472                 Assert(rcStrict == VINF_SUCCESS);
    13473                 break;
    13474 
    13475             case IEMXCPTRAISE_DOUBLE_FAULT:
    13476             {
    13477                 /*
    13478                  * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
    13479                  * second #PF as a guest #PF (and not a shadow #PF) that then needs to be converted into a #DF.
    13480                  */
    13481                 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
    13482                 {
    13483                     pVmxTransient->fVectoringDoublePF = true;
    13484                     Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
    13485                               pVCpu->cpum.GstCtx.cr2));
    13486                     rcStrict = VINF_SUCCESS;
    13487                 }
    13488                 else
    13489                 {
    13490                     STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectConvertDF);
    13491                     hmR0VmxSetPendingXcptDF(pVCpu);
    13492                     Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
    13493                               uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
    13494                     rcStrict = VINF_HM_DOUBLE_FAULT;
    13495                 }
    13496                 break;
    13497             }
    13498 
    13499             case IEMXCPTRAISE_TRIPLE_FAULT:
    13500             {
    13501                 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
    13502                           VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
    13503                 rcStrict = VINF_EM_RESET;
    13504                 break;
    13505             }
    13506 
    13507             case IEMXCPTRAISE_CPU_HANG:
    13508             {
    13509                 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
    13510                 rcStrict = VERR_EM_GUEST_CPU_HANG;
    13511                 break;
    13512             }
    13513 
    13514             default:
    13515             {
    13516                 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
    13517                 rcStrict = VERR_VMX_IPE_2;
    13518                 break;
    13519             }
    13520         }
    13521     }
    13522     else if (   (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
    13523              && !CPUMIsGuestNmiBlocking(pVCpu))
    13524     {
    13525         if (    VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
    13526              && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
    13527              && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
    13528         {
    13529             /*
    13530              * Execution of IRET caused a fault when NMI blocking was in effect (i.e. we're in
    13531              * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
    13532              * that virtual NMIs remain blocked until the IRET execution is completed.
    13533              *
    13534              * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
    13535              */
    13536             CPUMSetGuestNmiBlocking(pVCpu, true);
    13537             Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
    13538         }
    13539         else if (   pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
    13540                  || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
    13541                  || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
    13542         {
    13543             /*
    13544              * Execution of IRET caused an EPT violation, page-modification log-full event or
    13545              * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
    13546              * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
    13547              * that virtual NMIs remain blocked until the IRET execution is completed.
    13548              *
    13549              * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
    13550              */
    13551             if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
    13552             {
    13553                 CPUMSetGuestNmiBlocking(pVCpu, true);
    13554                 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
    13555             }
    13556         }
    13557     }
    13558 
    13559     Assert(   rcStrict == VINF_SUCCESS  || rcStrict == VINF_HM_DOUBLE_FAULT
    13560            || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
    13561     return rcStrict;
    13562 }
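
/*
 * Condensed, non-exhaustive summary of the IDT-vectoring decisions above
 * (illustrative only; IEMEvaluateRecursiveXcpt() is the real authority):
 *
 *   IDT-vectoring event             VM-exit event           Action
 *   ------------------------------  ----------------------  --------------------------------
 *   INT n / INT3 / INTO / INT1      (any)                   Re-execute the instruction.
 *   #PF                             #PF                     Raise #DF (vectoring double #PF).
 *   #DF                             hardware exception      Triple fault -> VINF_EM_RESET.
 *   hw int / NMI / hw exception     EPT/APIC-access exit    Re-inject the original event.
 */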
    13563 
    13564 
    13565 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    13566 /**
    13567  * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
    13568  * guest attempting to execute a VMX instruction.
    13569  *
    13570  * @returns Strict VBox status code (i.e. informational status codes too).
    13571  * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
    13572  * @retval  VINF_HM_PENDING_XCPT if an exception was raised.
    13573  *
    13574  * @param   pVCpu           The cross context virtual CPU structure.
    13575  * @param   uExitReason     The VM-exit reason.
    13576  *
    13577  * @todo    NSTVMX: Document other error codes when VM-exit is implemented.
    13578  * @remarks No-long-jump zone!!!
    13579  */
    13580 static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
    13581 {
    13582     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
    13583                               | CPUMCTX_EXTRN_CS  | CPUMCTX_EXTRN_EFER);
    13584 
    13585     /*
    13586      * The physical CPU would have already checked the CPU mode/code segment.
    13587      * We shall just assert here for paranoia.
    13588      * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
    13589      */
    13590     Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
    13591     Assert(   !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
    13592            ||  CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
    13593 
    13594     if (uExitReason == VMX_EXIT_VMXON)
    13595     {
    13596         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
    13597 
    13598         /*
    13599          * We check CR4.VMXE because it is required to always be set while in VMX operation
    13600          * on physical CPUs, and our CR4 read-shadow is only consulted when executing specific
    13601          * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
    13602          * otherwise (i.e. the physical CPU won't automatically raise #UD if Cr4Shadow.VMXE is 0).
    13603          */
    13604         if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
    13605         {
    13606             Log4Func(("CR4.VMXE is not set -> #UD\n"));
    13607             hmR0VmxSetPendingXcptUD(pVCpu);
    13608             return VINF_HM_PENDING_XCPT;
    13609         }
    13610     }
    13611     else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
    13612     {
    13613         /*
    13614          * The guest has not entered VMX operation but attempted to execute a VMX instruction
    13615          * (other than VMXON), so we need to raise a #UD.
    13616          */
    13617         Log4Func(("Not in VMX root mode -> #UD\n"));
    13618         hmR0VmxSetPendingXcptUD(pVCpu);
    13619         return VINF_HM_PENDING_XCPT;
    13620     }
    13621 
    13622     /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
    13623     return VINF_SUCCESS;
    13624 }
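
/*
 * The checks above in condensed form (illustrative only): VMXON requires only
 * CR4.VMXE here, while every other VMX instruction requires the guest to already
 * be in VMX root operation; anything else is a #UD.  Remaining checks, including
 * nested VM-exit intercepts, are left to IEM.
 */
#if 0
    if (uExitReason == VMX_EXIT_VMXON ? !CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx)
                                      : !CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
    { /* raise #UD and return VINF_HM_PENDING_XCPT */ }
#endif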
    13625 
    13626 
    13627 /**
    13628  * Decodes the memory operand of an instruction that caused a VM-exit.
    13629  *
    13630  * The Exit qualification field provides the displacement field for memory
    13631  * operand instructions, if any.
    13632  *
    13633  * @returns Strict VBox status code (i.e. informational status codes too).
    13634  * @retval  VINF_SUCCESS if the operand was successfully decoded.
    13635  * @retval  VINF_HM_PENDING_XCPT if an exception was raised while decoding the
    13636  *          operand.
    13637  * @param   pVCpu           The cross context virtual CPU structure.
    13638  * @param   uExitInstrInfo  The VM-exit instruction information field.
    13639  * @param   enmMemAccess    The memory operand's access type (read or write).
    13640  * @param   GCPtrDisp       The instruction displacement field, if any. For
    13641  *                          RIP-relative addressing pass RIP + displacement here.
    13642  * @param   pGCPtrMem       Where to store the effective destination memory address.
    13643  *
    13644  * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
    13645  *          virtual-8086 mode and hence skips those checks when verifying that the
    13646  *          segment is valid.
    13647  */
    13648 static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
    13649                                             PRTGCPTR pGCPtrMem)
    13650 {
    13651     Assert(pGCPtrMem);
    13652     Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
    13653     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
    13654                               | CPUMCTX_EXTRN_CR0);
    13655 
    13656     static uint64_t const s_auAddrSizeMasks[]   = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    13657     static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
    13658     AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
    13659 
    13660     VMXEXITINSTRINFO ExitInstrInfo;
    13661     ExitInstrInfo.u = uExitInstrInfo;
    13662     uint8_t const   uAddrSize     =  ExitInstrInfo.All.u3AddrSize;
    13663     uint8_t const   iSegReg       =  ExitInstrInfo.All.iSegReg;
    13664     bool const      fIdxRegValid  = !ExitInstrInfo.All.fIdxRegInvalid;
    13665     uint8_t const   iIdxReg       =  ExitInstrInfo.All.iIdxReg;
    13666     uint8_t const   uScale        =  ExitInstrInfo.All.u2Scaling;
    13667     bool const      fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
    13668     uint8_t const   iBaseReg      =  ExitInstrInfo.All.iBaseReg;
    13669     bool const      fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
    13670     bool const      fIsLongMode   =  CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
    13671 
    13672     /*
    13673      * Validate instruction information.
    13674      * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
    13675      */
    13676     AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
    13677                           ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
    13678     AssertLogRelMsgReturn(iSegReg  < X86_SREG_COUNT,
    13679                           ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
    13680     AssertLogRelMsgReturn(fIsMemOperand,
    13681                           ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
    13682 
    13683     /*
    13684      * Compute the complete effective address.
    13685      *
    13686      * See AMD instruction spec. 1.4.2 "SIB Byte Format"
    13687      * See AMD spec. 4.5.2 "Segment Registers".
    13688      */
    13689     RTGCPTR GCPtrMem = GCPtrDisp;
    13690     if (fBaseRegValid)
    13691         GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
    13692     if (fIdxRegValid)
    13693         GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
    13694 
    13695     RTGCPTR const GCPtrOff = GCPtrMem;
    13696     if (   !fIsLongMode
    13697         || iSegReg >= X86_SREG_FS)
    13698         GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
    13699     GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
    13700 
    13701     /*
    13702      * Validate effective address.
    13703      * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
    13704      */
    13705     uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
    13706     Assert(cbAccess > 0);
    13707     if (fIsLongMode)
    13708     {
    13709         if (X86_IS_CANONICAL(GCPtrMem))
    13710         {
    13711             *pGCPtrMem = GCPtrMem;
    13712             return VINF_SUCCESS;
    13713         }
    13714 
    13715         /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
    13716          *        "Data Limit Checks in 64-bit Mode". */
    13717         Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
    13718         hmR0VmxSetPendingXcptGP(pVCpu, 0);
    13719         return VINF_HM_PENDING_XCPT;
    13720     }
    13721 
    13722     /*
    13723      * This is a watered down version of iemMemApplySegment().
    13724      * Parts that are not applicable for VMX instructions like real-or-v8086 mode
    13725      * and segment CPL/DPL checks are skipped.
    13726      */
    13727     RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
    13728     RTGCPTR32 const GCPtrLast32  = GCPtrFirst32 + cbAccess - 1;
    13729     PCCPUMSELREG    pSel         = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
    13730 
    13731     /* Check if the segment is present and usable. */
    13732     if (    pSel->Attr.n.u1Present
    13733         && !pSel->Attr.n.u1Unusable)
    13734     {
    13735         Assert(pSel->Attr.n.u1DescType);
    13736         if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
    13737         {
    13738             /* Check permissions for the data segment. */
    13739             if (   enmMemAccess == VMXMEMACCESS_WRITE
    13740                 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
    13741             {
    13742                 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
    13743                 hmR0VmxSetPendingXcptGP(pVCpu, iSegReg);
    13744                 return VINF_HM_PENDING_XCPT;
    13745             }
    13746 
    13747             /* Check limits if it's a normal data segment. */
    13748             if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
    13749             {
    13750                 if (   GCPtrFirst32 > pSel->u32Limit
    13751                     || GCPtrLast32  > pSel->u32Limit)
    13752                 {
    13753                     Log4Func(("Data segment limit exceeded. "
    13754                               "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
    13755                               GCPtrLast32, pSel->u32Limit));
    13756                     if (iSegReg == X86_SREG_SS)
    13757                         hmR0VmxSetPendingXcptSS(pVCpu, 0);
    13758                     else
    13759                         hmR0VmxSetPendingXcptGP(pVCpu, 0);
    13760                     return VINF_HM_PENDING_XCPT;
    13761                 }
    13762             }
    13763             else
    13764             {
    13765                /* Check limits if it's an expand-down data segment.
    13766                   Note! The upper boundary is defined by the B bit, not the G bit! */
    13767                if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
    13768                    || GCPtrLast32  > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
    13769                {
    13770                    Log4Func(("Expand-down data segment limit exceeded. "
    13771                              "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
    13772                              GCPtrLast32, pSel->u32Limit));
    13773                    if (iSegReg == X86_SREG_SS)
    13774                        hmR0VmxSetPendingXcptSS(pVCpu, 0);
    13775                    else
    13776                        hmR0VmxSetPendingXcptGP(pVCpu, 0);
    13777                    return VINF_HM_PENDING_XCPT;
    13778                }
    13779             }
    13780         }
    13781         else
    13782         {
    13783             /* Check permissions for the code segment. */
    13784             if (   enmMemAccess == VMXMEMACCESS_WRITE
    13785                 || (   enmMemAccess == VMXMEMACCESS_READ
    13786                     && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
    13787             {
    13788                 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
    13789                 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
    13790                 hmR0VmxSetPendingXcptGP(pVCpu, 0);
    13791                 return VINF_HM_PENDING_XCPT;
    13792             }
    13793 
    13794             /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
    13795             if (   GCPtrFirst32 > pSel->u32Limit
    13796                 || GCPtrLast32  > pSel->u32Limit)
    13797             {
    13798                 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
    13799                           GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
    13800                 if (iSegReg == X86_SREG_SS)
    13801                     hmR0VmxSetPendingXcptSS(pVCpu, 0);
    13802                 else
    13803                     hmR0VmxSetPendingXcptGP(pVCpu, 0);
    13804                 return VINF_HM_PENDING_XCPT;
    13805             }
    13806         }
    13807     }
    13808     else
    13809     {
    13810         Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
    13811         hmR0VmxSetPendingXcptGP(pVCpu, 0);
    13812         return VINF_HM_PENDING_XCPT;
    13813     }
    13814 
    13815     *pGCPtrMem = GCPtrMem;
    13816     return VINF_SUCCESS;
    13817 }
    13818 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
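
/*
 * The effective-address computation implemented above, restated as a hypothetical
 * stand-alone helper (illustrative only; segment and limit checks omitted):
 */
#if 0
static RTGCPTR hmR0VmxEffAddrExample(uint64_t uBase, uint64_t uIdx, uint8_t uScale,
                                     uint64_t uDisp, uint64_t uSegBase, uint64_t fAddrMask)
{
    RTGCPTR GCPtrMem = uDisp + uBase + (uIdx << uScale);    /* SIB-style: disp + base + index * 2^scale. */
    GCPtrMem += uSegBase;                                   /* Segment base (outside long mode, or FS/GS). */
    return GCPtrMem & fAddrMask;                            /* Truncate to the 16/32/64-bit address size. */
}
#endif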
    13819 
    13820 
    13821 /**
    13822  * VM-exit helper for LMSW.
    13823  */
    13824 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
    13825 {
    13826     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    13827     AssertRCReturn(rc, rc);
    13828 
    13829     VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
    13830     AssertMsg(   rcStrict == VINF_SUCCESS
    13831               || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13832 
    13833     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    13834     if (rcStrict == VINF_IEM_RAISED_XCPT)
    13835     {
    13836         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    13837         rcStrict = VINF_SUCCESS;
    13838     }
    13839 
    13840     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
    13841     Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13842     return rcStrict;
    13843 }
    13844 
    13845 
    13846 /**
    13847  * VM-exit helper for CLTS.
    13848  */
    13849 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
    13850 {
    13851     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    13852     AssertRCReturn(rc, rc);
    13853 
    13854     VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
    13855     AssertMsg(   rcStrict == VINF_SUCCESS
    13856               || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13857 
    13858     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    13859     if (rcStrict == VINF_IEM_RAISED_XCPT)
    13860     {
    13861         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    13862         rcStrict = VINF_SUCCESS;
    13863     }
    13864 
    13865     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
    13866     Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13867     return rcStrict;
    13868 }
    13869 
    13870 
    13871 /**
    13872  * VM-exit helper for MOV from CRx (CRx read).
    13873  */
    13874 static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
    13875 {
    13876     Assert(iCrReg < 16);
    13877     Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
    13878 
    13879     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    13880     AssertRCReturn(rc, rc);
    13881 
    13882     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
    13883     AssertMsg(   rcStrict == VINF_SUCCESS
    13884               || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13885 
    13886     if (iGReg == X86_GREG_xSP)
    13887         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
    13888     else
    13889         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    13890 #ifdef VBOX_WITH_STATISTICS
    13891     switch (iCrReg)
    13892     {
    13893         case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
    13894         case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
    13895         case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
    13896         case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
    13897         case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
    13898     }
    13899 #endif
    13900     Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
    13901     return rcStrict;
    13902 }
    13903 
    13904 
    13905 /**
    13906  * VM-exit helper for MOV to CRx (CRx write).
    13907  */
    13908 static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
    13909 {
    13910     HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    13911 
    13912     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
    13913     AssertMsg(   rcStrict == VINF_SUCCESS
    13914               || rcStrict == VINF_IEM_RAISED_XCPT
    13915               || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13916 
    13917     switch (iCrReg)
    13918     {
    13919         case 0:
    13920             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
    13921                                                      | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
    13922             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
    13923             Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
    13924             break;
    13925 
    13926         case 2:
    13927             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
    13928             /* Nothing to do here; CR2 is not part of the VMCS. */
    13929             break;
    13930 
    13931         case 3:
    13932             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
    13933             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
    13934             Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
    13935             break;
    13936 
    13937         case 4:
    13938             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
    13939             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
    13940             Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
    13941                       pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
    13942             break;
    13943 
    13944         case 8:
    13945             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    13946                              HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
    13947             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
    13948             break;
    13949 
    13950         default:
    13951             AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
    13952             break;
    13953     }
    13954 
    13955     if (rcStrict == VINF_IEM_RAISED_XCPT)
    13956     {
    13957         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    13958         rcStrict = VINF_SUCCESS;
    13959     }
    13960     return rcStrict;
    13961 }
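
/*
 * The helpers above share one pattern (illustrative summary): import the guest
 * state IEM needs, run the IEMExecDecoded* worker, mark the context bits the
 * instruction may have dirtied, and fold VINF_IEM_RAISED_XCPT back into
 * VINF_SUCCESS since the pending exception is injected on the next VM-entry.
 */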
    13962 
    13963 
    13964 /**
    13965  * VM-exit exception handler for \#PF (Page-fault exception).
    13966  *
    13967  * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    13968  */
    13969 static VBOXSTRICTRC hmR0VmxExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    13970 {
    13971     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    13972     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    13973     hmR0VmxReadExitQualVmcs(pVmxTransient);
    13974 
    13975     if (!pVM->hmr0.s.fNestedPaging)
    13976     { /* likely */ }
    13977     else
    13978     {
    13979 #if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
    13980         Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
    13981 #endif
    13982         pVCpu->hm.s.Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */
    13983         if (!pVmxTransient->fVectoringDoublePF)
    13984         {
    13985             hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
    13986                                    pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
    13987         }
    13988         else
    13989         {
    13990             /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
    13991             Assert(!pVmxTransient->fIsNestedGuest);
    13992             hmR0VmxSetPendingXcptDF(pVCpu);
    13993             Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
    13994         }
    13995         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    13996         return VINF_SUCCESS;
    13997     }
    13998 
    13999     Assert(!pVmxTransient->fIsNestedGuest);
    14000 
    14001     /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
    14002        of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
    14003     if (pVmxTransient->fVectoringPF)
    14004     {
    14005         Assert(pVCpu->hm.s.Event.fPending);
    14006         return VINF_EM_RAW_INJECT_TRPM_EVENT;
    14007     }
    14008 
    14009     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    14010     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    14011     AssertRCReturn(rc, rc);
    14012 
    14013     Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
    14014               pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
    14015 
    14016     TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
    14017     rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
    14018 
    14019     Log4Func(("#PF: rc=%Rrc\n", rc));
    14020     if (rc == VINF_SUCCESS)
    14021     {
    14022         /*
    14023          * This is typically a shadow page table sync or an MMIO instruction. But we may have
    14024          * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
    14025          */
    14026         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    14027         TRPMResetTrap(pVCpu);
    14028         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    14029         return rc;
    14030     }
    14031 
    14032     if (rc == VINF_EM_RAW_GUEST_TRAP)
    14033     {
    14034         if (!pVmxTransient->fVectoringDoublePF)
    14035         {
    14036             /* It's a guest page fault and needs to be reflected to the guest. */
    14037             uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
    14038             TRPMResetTrap(pVCpu);
    14039             pVCpu->hm.s.Event.fPending = false;                 /* In case it's a contributory #PF. */
    14040             hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
    14041                                    uGstErrorCode, pVmxTransient->uExitQual);
    14042         }
    14043         else
    14044         {
    14045             /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
    14046             TRPMResetTrap(pVCpu);
    14047             pVCpu->hm.s.Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
    14048             hmR0VmxSetPendingXcptDF(pVCpu);
    14049             Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
    14050         }
    14051 
    14052         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    14053         return VINF_SUCCESS;
    14054     }
    14055 
    14056     TRPMResetTrap(pVCpu);
    14057     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
    14058     return rc;
    14059 }
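
/*
 * Outcome summary for the #PF path above (illustrative only):
 *   VINF_SUCCESS           -> PGM resolved it (shadow page-table sync, MMIO, emulation);
 *                             everything may have changed, so mark all guest state dirty.
 *   VINF_EM_RAW_GUEST_TRAP -> genuine guest #PF: reflect it, or convert to #DF when it
 *                             occurred while delivering a #PF.
 *   anything else          -> let EM handle it (e.g. ring-3 page-table sync).
 */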
    14060 
    14061 
    14062 /**
    14063  * VM-exit exception handler for \#MF (Math Fault: floating point exception).
    14064  *
    14065  * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    14066  */
    14067 static VBOXSTRICTRC hmR0VmxExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14068 {
    14069     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14070     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
    14071 
    14072     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
    14073     AssertRCReturn(rc, rc);
    14074 
    14075     if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
    14076     {
    14077         /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
    14078         rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
    14079 
    14080         /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
    14081          *        provides VM-exit instruction length. If this causes problem later,
    14082          *        disassemble the instruction like it's done on AMD-V. */
    14083         int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    14084         AssertRCReturn(rc2, rc2);
    14085         return rc;
    14086     }
    14087 
    14088     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
    14089                            pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    14090     return VINF_SUCCESS;
    14091 }
    14092 
    14093 
    14094 /**
    14095  * VM-exit exception handler for \#BP (Breakpoint exception).
    14096  *
    14097  * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    14098  */
    14099 static VBOXSTRICTRC hmR0VmxExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14100 {
    14101     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14102     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
    14103 
    14104     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    14105     AssertRCReturn(rc, rc);
    14106 
    14107     VBOXSTRICTRC rcStrict;
    14108     if (!pVmxTransient->fIsNestedGuest)
    14109         rcStrict = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
    14110     else
    14111         rcStrict = VINF_EM_RAW_GUEST_TRAP;
    14112 
    14113     if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    14114     {
    14115         hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    14116                                pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    14117         rcStrict = VINF_SUCCESS;
    14118     }
    14119 
    14120     Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_DBG_BREAKPOINT);
    14121     return rcStrict;
    14122 }
    14123 
    14124 
    14125 /**
    14126  * VM-exit exception handler for \#AC (Alignment-check exception).
    14127  *
    14128  * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    14129  */
    14130 static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14131 {
    14132     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14133 
    14134     /*
    14135      * Detect #ACs caused by the host having enabled split-lock detection.
    14136      * Emulate such instructions.
    14137      */
    14138     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
    14139                                      CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
    14140     AssertRCReturn(rc, rc);
    14141     /** @todo detect split lock in cpu feature?   */
    14142     if (   /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
    14143            !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
    14144            /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
    14145         || CPUMGetGuestCPL(pVCpu) != 3
    14146            /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
    14147         || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
    14148     {
    14149         /*
    14150          * Check for debug/trace events and import state accordingly.
    14151          */
    14152         STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestACSplitLock);
    14153         PVMCC pVM = pVCpu->pVMR0;
    14154         if (   !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
    14155             && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED())
    14156         {
    14157             if (pVM->cCpus == 1)
    14158             {
    14159 #if 0 /** @todo r=bird: This is potentially wrong.  Might have to just do a whole state sync above and mark everything changed to be safe... */
    14160                 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    14161 #else
    14162                 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    14163 #endif
    14164                 AssertRCReturn(rc, rc);
    14165             }
    14166         }
    14167         else
    14168         {
    14169             rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    14170             AssertRCReturn(rc, rc);
    14171 
    14172             VBOXVMM_XCPT_AC(pVCpu, &pVCpu->cpum.GstCtx);
    14173 
    14174             if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
    14175             {
    14176                 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
    14177                 if (rcStrict != VINF_SUCCESS)
    14178                     return rcStrict;
    14179             }
    14180         }
    14181 
    14182         /*
    14183          * Emulate the instruction.
    14184          *
    14185          * We have to ignore the LOCK prefix here as we must not retrigger the
    14186          * detection on the host.  This isn't all that satisfactory, though...
    14187          */
    14188         if (pVM->cCpus == 1)
    14189         {
    14190             Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
    14191                       pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
    14192 
    14193             /** @todo For SMP configs we should do a rendezvous here. */
    14194             VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
    14195             if (rcStrict == VINF_SUCCESS)
    14196 #if 0 /** @todo r=bird: This is potentially wrong.  Might have to just do a whole state sync above and mark everything changed to be safe... */
    14197                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    14198                                    HM_CHANGED_GUEST_RIP
    14199                                  | HM_CHANGED_GUEST_RFLAGS
    14200                                  | HM_CHANGED_GUEST_GPRS_MASK
    14201                                  | HM_CHANGED_GUEST_CS
    14202                                  | HM_CHANGED_GUEST_SS);
    14203 #else
    14204                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    14205 #endif
    14206             else if (rcStrict == VINF_IEM_RAISED_XCPT)
    14207             {
    14208                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    14209                 rcStrict = VINF_SUCCESS;
    14210             }
    14211             return rcStrict;
    14212         }
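                  /* For SMP configurations, defer to ring-3 where the instruction can be emulated
                     safely for all VCpus (e.g. via a rendezvous; see the @todo above). */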
    14213         Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
    14214                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
    14215         return VINF_EM_EMULATE_SPLIT_LOCK;
    14216     }
    14217 
    14218     STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC);
    14219     Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    14220               pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
    14221 
    14222     /* Re-inject it. We'll detect any nesting before getting here. */
    14223     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    14224                            pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    14225     return VINF_SUCCESS;
    14226 }
    14227 
    14228 
    14229 /**
    14230  * VM-exit exception handler for \#DB (Debug exception).
    14231  *
    14232  * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    14233  */
    14234 static VBOXSTRICTRC hmR0VmxExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14235 {
    14236     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14237     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    14238 
    14239     /*
    14240      * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
    14241      */
    14242     hmR0VmxReadExitQualVmcs(pVmxTransient);
    14243 
    14244     /* See Intel spec. Table 27-1 "Exit Qualifications for Debug Exceptions" for the format. */
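              /* The low bits of the Exit qualification mirror the DR6 bit positions of B0-B3/BD/BS,
                 while X86_DR6_INIT_VAL supplies the DR6 bits that always read as ones. */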
    14245     uint64_t const uDR6 = X86_DR6_INIT_VAL
    14246                         | (pVmxTransient->uExitQual & (  X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
    14247                                                        | X86_DR6_BD | X86_DR6_BS));
    14248 
    14249     int rc;
    14250     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    14251     if (!pVmxTransient->fIsNestedGuest)
    14252     {
    14253         rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
    14254 
    14255         /*
    14256          * Prevents stepping twice over the same instruction when the guest is stepping using
    14257          * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
    14258          * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
    14259          */
    14260         if (   rc == VINF_EM_DBG_STEPPED
    14261             && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
    14262         {
    14263             Assert(pVCpu->hm.s.fSingleInstruction);
    14264             rc = VINF_EM_RAW_GUEST_TRAP;
    14265         }
    14266     }
    14267     else
    14268         rc = VINF_EM_RAW_GUEST_TRAP;
    14269     Log6Func(("rc=%Rrc\n", rc));
    14270     if (rc == VINF_EM_RAW_GUEST_TRAP)
    14271     {
    14272         /*
    14273          * The exception was for the guest.  Update DR6, DR7.GD and
    14274          * IA32_DEBUGCTL.LBR before forwarding it.
    14275          * See Intel spec. 27.1 "Architectural State before a VM-Exit".
    14276          */
    14277         VMMRZCallRing3Disable(pVCpu);
    14278         HM_DISABLE_PREEMPT(pVCpu);
    14279 
    14280         pCtx->dr[6] &= ~X86_DR6_B_MASK;
    14281         pCtx->dr[6] |= uDR6;
    14282         if (CPUMIsGuestDebugStateActive(pVCpu))
    14283             ASMSetDR6(pCtx->dr[6]);
    14284 
    14285         HM_RESTORE_PREEMPT();
    14286         VMMRZCallRing3Enable(pVCpu);
    14287 
    14288         rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
    14289         AssertRCReturn(rc, rc);
    14290 
    14291         /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
    14292         pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
    14293 
    14294         /* Paranoia. */
    14295         pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
    14296         pCtx->dr[7] |= X86_DR7_RA1_MASK;
    14297 
    14298         rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
    14299         AssertRC(rc);
    14300 
    14301         /*
    14302          * Raise #DB in the guest.
    14303          *
    14304          * It is important to reflect exactly what the VM-exit gave us (preserving the
    14305          * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
    14306          * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
    14307          * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
    14308          *
    14309          * Intel re-documented ICEBP/INT1 in May 2018 (it was previously documented only
    14310          * as part of the Intel 386), see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
    14311          */
    14312         hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    14313                                pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    14314         return VINF_SUCCESS;
    14315     }
    14316 
    14317     /*
    14318      * Not a guest trap; must be a hypervisor-related debug event then.
    14319      * Update DR6 in case someone is interested in it.
    14320      */
    14321     AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
    14322     AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
    14323     CPUMSetHyperDR6(pVCpu, uDR6);
    14324 
    14325     return rc;
    14326 }
    14327 
    14328 
    14329 /**
    14330  * Hacks its way around the lovely mesa driver's backdoor accesses.
    14331  *
    14332  * @sa hmR0SvmHandleMesaDrvGp.
    14333  */
    14334 static int hmR0VmxHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
    14335 {
    14336     LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
    14337     RT_NOREF(pCtx);
    14338 
    14339     /* For now we'll just skip the instruction. */
    14340     return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    14341 }
    14342 
    14343 
    14344 /**
    14345  * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
    14346  * backdoor logging w/o checking what it is running inside.
    14347  *
    14348  * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
    14349  * backdoor port and magic numbers loaded in registers.
    14350  *
    14351  * @returns true if it is, false if it isn't.
    14352  * @sa      hmR0SvmIsMesaDrvGp.
    14353  */
    14354 DECLINLINE(bool) hmR0VmxIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
    14355 {
    14356     /* 0xed:  IN eAX,dx */
    14357     uint8_t abInstr[1];
    14358     if (pVmxTransient->cbExitInstr != sizeof(abInstr))
    14359         return false;
    14360 
    14361     /* Check that it is #GP(0). */
    14362     if (pVmxTransient->uExitIntErrorCode != 0)
    14363         return false;
    14364 
    14365     /* Check magic and port. */
    14366     Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
    14367     /*Log(("hmR0VmxIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
    14368     if (pCtx->rax != UINT32_C(0x564d5868))
    14369         return false;
    14370     if (pCtx->dx != UINT32_C(0x5658))
    14371         return false;
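              /* 0x564d5868 is 'VMXh' and port 0x5658 is 'VX': the classic VMware backdoor interface
                 which the Mesa driver pokes for its logging. */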
    14372 
    14373     /* Flat ring-3 CS. */
    14374     AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
    14375     Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
    14376     /*Log(("hmR0VmxIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
    14377     if (pCtx->cs.Attr.n.u2Dpl != 3)
    14378         return false;
    14379     if (pCtx->cs.u64Base != 0)
    14380         return false;
    14381 
    14382     /* Check opcode. */
    14383     AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
    14384     Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
    14385     int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
    14386     /*Log(("hmR0VmxIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
    14387     if (RT_FAILURE(rc))
    14388         return false;
    14389     if (abInstr[0] != 0xed)
    14390         return false;
    14391 
    14392     return true;
    14393 }
    14394 
    14395 
    14396 /**
    14397  * VM-exit exception handler for \#GP (General-protection exception).
    14398  *
    14399  * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    14400  */
    14401 static VBOXSTRICTRC hmR0VmxExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14402 {
    14403     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14404     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
    14405 
    14406     PCPUMCTX            pCtx            = &pVCpu->cpum.GstCtx;
    14407     PVMXVMCSINFO        pVmcsInfo       = pVmxTransient->pVmcsInfo;
    14408     PVMXVMCSINFOSHARED  pVmcsInfoShared = pVmcsInfo->pShared;
    14409     if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    14410     { /* likely */ }
    14411     else
    14412     {
    14413 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    14414         Assert(pVCpu->hmr0.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
    14415 #endif
    14416         /*
    14417          * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
    14418          * executing a nested-guest, reflect #GP to the guest or nested-guest.
    14419          */
    14420         int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    14421         AssertRCReturn(rc, rc);
    14422         Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
    14423                   pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
    14424 
    14425         if (    pVmxTransient->fIsNestedGuest
    14426             || !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
    14427             || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
    14428             hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    14429                                    pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    14430         else
    14431             rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
    14432         return rc;
    14433     }
    14434 
    14435     Assert(CPUMIsGuestInRealModeEx(pCtx));
    14436     Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
    14437     Assert(!pVmxTransient->fIsNestedGuest);
    14438 
    14439     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    14440     AssertRCReturn(rc, rc);
    14441 
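              /* With real-on-v86 the guest's real-mode code runs inside a virtual-8086 container,
                 so instructions that V86 mode cannot express trap with #GP and are emulated here;
                 whether the guest switched out of real mode is then checked below. */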
    14442     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    14443     if (rcStrict == VINF_SUCCESS)
    14444     {
    14445         if (!CPUMIsGuestInRealModeEx(pCtx))
    14446         {
    14447             /*
    14448              * The guest is no longer in real-mode, check if we can continue executing the
    14449              * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
    14450              */
    14451             pVmcsInfoShared->RealMode.fRealOnV86Active = false;
    14452             if (HMCanExecuteVmxGuest(pVCpu->pVMR0, pVCpu, pCtx))
    14453             {
    14454                 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
    14455                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    14456             }
    14457             else
    14458             {
    14459                 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
    14460                 rcStrict = VINF_EM_RESCHEDULE;
    14461             }
    14462         }
    14463         else
    14464             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    14465     }
    14466     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    14467     {
    14468         rcStrict = VINF_SUCCESS;
    14469         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    14470     }
    14471     return VBOXSTRICTRC_VAL(rcStrict);
    14472 }
    14473 
    14474 
    14475 /**
    14476  * VM-exit exception handler wrapper for all other exceptions that are not handled
    14477  * by a specific handler.
    14478  *
    14479  * This simply re-injects the exception back into the VM without any special
    14480  * processing.
    14481  *
    14482  * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
    14483  */
    14484 static VBOXSTRICTRC hmR0VmxExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14485 {
    14486     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14487 
    14488 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    14489     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14490     AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
    14491               ("uVector=%#x u32XcptBitmap=%#X32\n",
    14492                VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
    14493     NOREF(pVmcsInfo);
    14494 #endif
    14495 
    14496     /*
    14497      * Re-inject the exception into the guest. This cannot be a double-fault condition,
    14498      * as that would have been handled while checking VM-exits due to event delivery.
    14499      */
    14500     uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    14501 
    14502 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    14503     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    14504     AssertRCReturn(rc, rc);
    14505     Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    14506 #endif
    14507 
    14508 #ifdef VBOX_WITH_STATISTICS
    14509     switch (uVector)
    14510     {
    14511         case X86_XCPT_DE:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);     break;
    14512         case X86_XCPT_DB:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);     break;
    14513         case X86_XCPT_BP:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);     break;
    14514         case X86_XCPT_OF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestOF);     break;
    14515         case X86_XCPT_BR:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBR);     break;
    14516         case X86_XCPT_UD:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);     break;
    14517         case X86_XCPT_NM:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);     break;
    14518         case X86_XCPT_DF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDF);     break;
    14519         case X86_XCPT_TS:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);     break;
    14520         case X86_XCPT_NP:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);     break;
    14521         case X86_XCPT_SS:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);     break;
    14522         case X86_XCPT_GP:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);     break;
    14523         case X86_XCPT_PF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);     break;
    14524         case X86_XCPT_MF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);     break;
    14525         case X86_XCPT_AC:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC);     break;
    14526         case X86_XCPT_XF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);     break;
    14527         default:
    14528             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
    14529             break;
    14530     }
    14531 #endif
    14532 
    14533     /* We should never call this function for a page-fault; we'd otherwise need to pass on the fault address below. */
    14534     Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
    14535     NOREF(uVector);
    14536 
    14537     /* Re-inject the original exception into the guest. */
    14538     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    14539                            pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    14540     return VINF_SUCCESS;
    14541 }
    14542 
    14543 
    14544 /**
    14545  * VM-exit exception handler for all exceptions (except NMIs!).
    14546  *
    14547  * @remarks This may be called for both guests and nested-guests. Take care to not
    14548  *          make assumptions and avoid doing anything that is not relevant when
    14549  *          executing a nested-guest (e.g., Mesa driver hacks).
    14550  */
    14551 static VBOXSTRICTRC hmR0VmxExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14552 {
    14553     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
    14554 
    14555     /*
    14556      * If this VM-exit occurred while delivering an event through the guest IDT, take
    14557      * action based on the return code and additional hints (e.g. for page-faults)
    14558      * that will be updated in the VMX transient structure.
    14559      */
    14560     VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    14561     if (rcStrict == VINF_SUCCESS)
    14562     {
    14563         /*
    14564          * If an exception caused a VM-exit due to delivery of an event, the original
    14565          * event may have to be re-injected into the guest. We shall reinject it and
    14566          * continue guest execution. However, page-fault is a complicated case and
    14567          * needs additional processing done in hmR0VmxExitXcptPF().
    14568          */
    14569         Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
    14570         uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    14571         if (   !pVCpu->hm.s.Event.fPending
    14572             || uVector == X86_XCPT_PF)
    14573         {
    14574             switch (uVector)
    14575             {
    14576                 case X86_XCPT_PF: return hmR0VmxExitXcptPF(pVCpu, pVmxTransient);
    14577                 case X86_XCPT_GP: return hmR0VmxExitXcptGP(pVCpu, pVmxTransient);
    14578                 case X86_XCPT_MF: return hmR0VmxExitXcptMF(pVCpu, pVmxTransient);
    14579                 case X86_XCPT_DB: return hmR0VmxExitXcptDB(pVCpu, pVmxTransient);
    14580                 case X86_XCPT_BP: return hmR0VmxExitXcptBP(pVCpu, pVmxTransient);
    14581                 case X86_XCPT_AC: return hmR0VmxExitXcptAC(pVCpu, pVmxTransient);
    14582                 default:
    14583                     return hmR0VmxExitXcptOthers(pVCpu, pVmxTransient);
    14584             }
    14585         }
    14586         /* else: inject pending event before resuming guest execution. */
    14587     }
    14588     else if (rcStrict == VINF_HM_DOUBLE_FAULT)
    14589     {
    14590         Assert(pVCpu->hm.s.Event.fPending);
    14591         rcStrict = VINF_SUCCESS;
    14592     }
    14593 
    14594     return rcStrict;
    14595 }
    14596 /** @} */
    14597 
    14598 
    14599 /** @name VM-exit handlers.
    14600  * @{
    14601  */
    14602 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    14603 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
    14604 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    14605 
    14606 /**
    14607  * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
    14608  */
    14609 HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14610 {
    14611     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14612     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
    14613     /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
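              /* The host interrupt is taken once interrupts are re-enabled after the VM-exit; with
                 thread-context hooks we can then simply resume the guest from ring-0 instead of
                 making the costly round trip to ring-3. */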
    14614     if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
    14615         return VINF_SUCCESS;
    14616     return VINF_EM_RAW_INTERRUPT;
    14617 }
    14618 
    14619 
    14620 /**
    14621  * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
    14622  * VM-exit.
    14623  */
    14624 HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14625 {
    14626     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14627     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
    14628 
    14629     hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    14630 
    14631     uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
    14632     uint8_t const  uVector      = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    14633     Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
    14634 
    14635     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14636     Assert(   !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
    14637            && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
    14638     NOREF(pVmcsInfo);
    14639 
    14640     VBOXSTRICTRC rcStrict;
    14641     switch (uExitIntType)
    14642     {
    14643         /*
    14644          * Host physical NMIs:
    14645          *     This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
    14646          *     injected it ourselves and anything we inject is not going to cause a VM-exit directly
    14647          *     for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
    14648          *
    14649          *     See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
    14650          *     See Intel spec. 27.5.5 "Updating Non-Register State".
    14651          */
    14652         case VMX_EXIT_INT_INFO_TYPE_NMI:
    14653         {
    14654             rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
    14655             break;
    14656         }
    14657 
    14658         /*
    14659          * Privileged software exceptions (#DB from ICEBP),
    14660          * Software exceptions (#BP and #OF),
    14661          * Hardware exceptions:
    14662          *     Process the required exceptions and resume guest execution if possible.
    14663          */
    14664         case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
    14665             Assert(uVector == X86_XCPT_DB);
    14666             RT_FALL_THRU();
    14667         case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
    14668             Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
    14669             RT_FALL_THRU();
    14670         case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
    14671         {
    14672             NOREF(uVector);
    14673             hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    14674             hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    14675             hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    14676             hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    14677 
    14678             rcStrict = hmR0VmxExitXcpt(pVCpu, pVmxTransient);
    14679             break;
    14680         }
    14681 
    14682         default:
    14683         {
    14684             pVCpu->hm.s.u32HMError = pVmxTransient->uExitIntInfo;
    14685             rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
    14686             AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
    14687             break;
    14688         }
    14689     }
    14690 
    14691     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
    14692     return rcStrict;
    14693 }
    14694 
    14695 
    14696 /**
    14697  * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
    14698  */
    14699 HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14700 {
    14701     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14702 
    14703     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
    14704     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14705     hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
    14706 
    14707     /* Evaluate and deliver pending events and resume guest execution. */
    14708     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
    14709     return VINF_SUCCESS;
    14710 }
    14711 
    14712 
    14713 /**
    14714  * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
    14715  */
    14716 HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14717 {
    14718     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14719 
    14720     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14721     if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
    14722     {
    14723         AssertMsgFailed(("Unexpected NMI-window exit.\n"));
    14724         HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
    14725     }
    14726 
    14727     Assert(!CPUMIsGuestNmiBlocking(pVCpu));
    14728 
    14729     /*
    14730      * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
    14731      * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
    14732      */
    14733     uint32_t fIntrState;
    14734     int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
    14735     AssertRC(rc);
    14736     Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
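              /* Per the Intel spec, an NMI-window VM-exit cannot occur while blocking by MOV SS is
                 in effect, hence the assertion above; blocking by STI does not prevent the VM-exit,
                 which is why it may still be pending here and is dealt with below. */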
    14737     if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    14738     {
    14739         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    14740             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    14741 
    14742         fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
    14743         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
    14744         AssertRC(rc);
    14745     }
    14746 
    14747     /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
    14748     hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
    14749 
    14750     /* Evaluate and deliver pending events and resume guest execution. */
    14751     return VINF_SUCCESS;
    14752 }
    14753 
    14754 
    14755 /**
    14756  * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
    14757  */
    14758 HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14759 {
    14760     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14761     return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    14762 }
    14763 
    14764 
    14765 /**
    14766  * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
    14767  */
    14768 HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14769 {
    14770     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14771     return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    14772 }
    14773 
    14774 
    14775 /**
    14776  * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
    14777  */
    14778 HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14779 {
    14780     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14781 
    14782     /*
    14783      * Get the state we need and update the exit history entry.
    14784      */
    14785     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14786     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    14787 
    14788     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    14789     AssertRCReturn(rc, rc);
    14790 
    14791     VBOXSTRICTRC rcStrict;
    14792     PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
    14793                                                             EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
    14794                                                             pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
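              /* EMHistoryUpdateFlagsAndTypeAndPC returns an exit record only when this RIP exits
                 frequently enough to be worth probing, in which case EMHistoryExec is used below to
                 execute a batch of instructions and save on VM-exit overhead. */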
    14795     if (!pExitRec)
    14796     {
    14797         /*
    14798          * Regular CPUID instruction execution.
    14799          */
    14800         rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
    14801         if (rcStrict == VINF_SUCCESS)
    14802             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    14803         else if (rcStrict == VINF_IEM_RAISED_XCPT)
    14804         {
    14805             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    14806             rcStrict = VINF_SUCCESS;
    14807         }
    14808     }
    14809     else
    14810     {
    14811         /*
    14812          * Frequent exit or something needing probing.  Get state and call EMHistoryExec.
    14813          */
    14814         int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    14815         AssertRCReturn(rc2, rc2);
    14816 
    14817         Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
    14818               pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
    14819 
    14820         rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    14821         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    14822 
    14823         Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
    14824               pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    14825               VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    14826     }
    14827     return rcStrict;
    14828 }
    14829 
    14830 
    14831 /**
    14832  * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
    14833  */
    14834 HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14835 {
    14836     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14837 
    14838     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14839     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
    14840     AssertRCReturn(rc, rc);
    14841 
    14842     if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
    14843         return VINF_EM_RAW_EMULATE_INSTR;
    14844 
    14845     AssertMsgFailed(("hmR0VmxExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
    14846     HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
    14847 }
    14848 
    14849 
    14850 /**
    14851  * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
    14852  */
    14853 HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14854 {
    14855     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14856 
    14857     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14858     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    14859     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    14860     AssertRCReturn(rc, rc);
    14861 
    14862     VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
    14863     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    14864     {
    14865         /* If we get a spurious VM-exit when TSC offsetting is enabled,
    14866            we must reset offsetting on VM-entry. See @bugref{6634}. */
    14867         if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
    14868             pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    14869         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    14870     }
    14871     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    14872     {
    14873         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    14874         rcStrict = VINF_SUCCESS;
    14875     }
    14876     return rcStrict;
    14877 }
    14878 
    14879 
    14880 /**
    14881  * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
    14882  */
    14883 HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14884 {
    14885     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14886 
    14887     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14888     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    14889     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
    14890     AssertRCReturn(rc, rc);
    14891 
    14892     VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
    14893     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    14894     {
    14895         /* If we get a spurious VM-exit when TSC offsetting is enabled,
    14896            we must reset offsetting on VM-reentry. See @bugref{6634}. */
    14897         if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
    14898             pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    14899         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    14900     }
    14901     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    14902     {
    14903         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    14904         rcStrict = VINF_SUCCESS;
    14905     }
    14906     return rcStrict;
    14907 }
    14908 
    14909 
    14910 /**
    14911  * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
    14912  */
    14913 HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14914 {
    14915     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14916 
    14917     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14918     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4    | CPUMCTX_EXTRN_CR0
    14919                                                      | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
    14920     AssertRCReturn(rc, rc);
    14921 
    14922     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    14923     rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    14924     if (RT_LIKELY(rc == VINF_SUCCESS))
    14925     {
    14926         rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    14927         Assert(pVmxTransient->cbExitInstr == 2);
    14928     }
    14929     else
    14930     {
    14931         AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
    14932         rc = VERR_EM_INTERPRETER;
    14933     }
    14934     return rc;
    14935 }
    14936 
    14937 
    14938 /**
    14939  * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
    14940  */
    14941 HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14942 {
    14943     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14944 
    14945     VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
    14946     if (EMAreHypercallInstructionsEnabled(pVCpu))
    14947     {
    14948         PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14949         int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
    14950                                                          | CPUMCTX_EXTRN_SS  | CPUMCTX_EXTRN_CS     | CPUMCTX_EXTRN_EFER);
    14951         AssertRCReturn(rc, rc);
    14952 
    14953         /* Perform the hypercall. */
    14954         rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
    14955         if (rcStrict == VINF_SUCCESS)
    14956         {
    14957             rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    14958             AssertRCReturn(rc, rc);
    14959         }
    14960         else
    14961             Assert(   rcStrict == VINF_GIM_R3_HYPERCALL
    14962                    || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
    14963                    || RT_FAILURE(rcStrict));
    14964 
    14965         /* If the hypercall changes anything other than the guest's general-purpose
    14966            registers, we would need to reload the guest-changed bits here before VM-entry. */
    14967     }
    14968     else
    14969         Log4Func(("Hypercalls not enabled\n"));
    14970 
    14971     /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
    14972     if (RT_FAILURE(rcStrict))
    14973     {
    14974         hmR0VmxSetPendingXcptUD(pVCpu);
    14975         rcStrict = VINF_SUCCESS;
    14976     }
    14977 
    14978     return rcStrict;
    14979 }
    14980 
    14981 
    14982 /**
    14983  * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
    14984  */
    14985 HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    14986 {
    14987     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14988     Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
    14989 
    14990     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14991     hmR0VmxReadExitQualVmcs(pVmxTransient);
    14992     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    14993     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    14994     AssertRCReturn(rc, rc);
    14995 
    14996     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
    14997 
    14998     if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
    14999         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    15000     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    15001     {
    15002         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    15003         rcStrict = VINF_SUCCESS;
    15004     }
    15005     else
    15006         AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
    15007                          VBOXSTRICTRC_VAL(rcStrict)));
    15008     return rcStrict;
    15009 }
    15010 
    15011 
    15012 /**
    15013  * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
    15014  */
    15015 HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15016 {
    15017     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15018 
    15019     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    15020     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    15021     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
    15022     AssertRCReturn(rc, rc);
    15023 
    15024     VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
    15025     if (rcStrict == VINF_SUCCESS)
    15026         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    15027     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    15028     {
    15029         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    15030         rcStrict = VINF_SUCCESS;
    15031     }
    15032 
    15033     return rcStrict;
    15034 }
    15035 
    15036 
    15037 /**
    15038  * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
    15039  */
    15040 HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15041 {
    15042     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15043 
    15044     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    15045     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    15046     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    15047     AssertRCReturn(rc, rc);
    15048 
    15049     VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
    15050     if (RT_SUCCESS(rcStrict))
    15051     {
    15052         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    15053         if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
    15054             rcStrict = VINF_SUCCESS;
    15055     }
    15056 
    15057     return rcStrict;
    15058 }
    15059 
    15060 
    15061 /**
    15062  * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
    15063  * VM-exit.
    15064  */
    15065 HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15066 {
    15067     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15068     return VINF_EM_RESET;
    15069 }
    15070 
    15071 
    15072 /**
    15073  * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
    15074  */
    15075 HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15076 {
    15077     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15078 
    15079     int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    15080     AssertRCReturn(rc, rc);
    15081 
    15082     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);            /* Advancing the RIP above should've imported eflags. */
    15083     if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx))    /* Requires eflags. */
    15084         rc = VINF_SUCCESS;
    15085     else
    15086         rc = VINF_EM_HALT;
    15087 
    15088     if (rc != VINF_SUCCESS)
    15089         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
    15090     return rc;
    15091 }
    15092 
    15093 
    15094 /**
    15095  * VM-exit handler for instructions that result in a \#UD exception delivered to
    15096  * the guest.
    15097  */
    15098 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15099 {
    15100     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15101     hmR0VmxSetPendingXcptUD(pVCpu);
    15102     return VINF_SUCCESS;
    15103 }
    15104 
    15105 
    15106 /**
    15107  * VM-exit handler for expiry of the VMX-preemption timer.
    15108  */
    15109 HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15110 {
    15111     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15112 
    15113     /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
    15114     pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    15115     Log12(("hmR0VmxExitPreemptTimer:\n"));
    15116 
    15117     /* If there are any timer events pending, fall back to ring-3; otherwise resume guest execution. */
    15118     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    15119     bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
    15120     STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
    15121     return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
    15122 }
    15123 
    15124 
    15125 /**
    15126  * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
    15127  */
    15128 HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15129 {
    15130     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15131 
    15132     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    15133     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    15134     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
    15135     AssertRCReturn(rc, rc);
    15136 
    15137     VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
    15138     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
    15139                                                                                 : HM_CHANGED_RAISED_XCPT_MASK);
    15140 
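              /* After the guest has written XCR0, re-evaluate whether host and guest XCR0 must be
                 swapped around VM-entry/VM-exit and, if that changed, switch to the matching
                 VM-entry worker function. */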
    15141     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    15142     bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
    15143     if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
    15144     {
    15145         pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
    15146         hmR0VmxUpdateStartVmFunction(pVCpu);
    15147     }
    15148 
    15149     return rcStrict;
    15150 }
    15151 
    15152 
    15153 /**
    15154  * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
    15155  */
    15156 HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15157 {
    15158     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15159 
    15160     /** @todo Enable the new code after finding a reliable guest test-case. */
    15161 #if 1
    15162     return VERR_EM_INTERPRETER;
    15163 #else
    15164     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    15165     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    15166     hmR0VmxReadExitQualVmcs(pVmxTransient);
    15167     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    15168                                                                     | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    15169     AssertRCReturn(rc, rc);
    15170 
    15171     /* Paranoia. Ensure this has a memory operand. */
    15172     Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
    15173 
    15174     uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
    15175     Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
    15176     uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
    15177                                                          : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
    15178 
    15179     RTGCPTR GCPtrDesc;
    15180     HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
    15181 
    15182     VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
    15183                                                   GCPtrDesc, uType);
    15184     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    15185         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    15186     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    15187     {
    15188         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    15189         rcStrict = VINF_SUCCESS;
    15190     }
    15191     return rcStrict;
    15192 #endif
    15193 }
    15194 
    15195 
    15196 /**
    15197  * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
    15198  * VM-exit.
    15199  */
    15200 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15201 {
    15202     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    15203     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    15204     AssertRCReturn(rc, rc);
    15205 
    15206     rc = hmR0VmxCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
    15207     if (RT_FAILURE(rc))
    15208         return rc;
    15209 
    15210     uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
    15211     NOREF(uInvalidReason);
    15212 
    15213 #ifdef VBOX_STRICT
    15214     uint32_t fIntrState;
    15215     uint64_t u64Val;
    15216     hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
    15217     hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
    15218     hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
    15219 
    15220     Log4(("uInvalidReason                             %u\n",     uInvalidReason));
    15221     Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO    %#RX32\n", pVmxTransient->uEntryIntInfo));
    15222     Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE    %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
    15223     Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH         %#RX32\n", pVmxTransient->cbEntryInstr));
    15224 
    15225     rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);            AssertRC(rc);
    15226     Log4(("VMX_VMCS32_GUEST_INT_STATE                 %#RX32\n", fIntrState));
    15227     rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR0, &u64Val);                        AssertRC(rc);
    15228     Log4(("VMX_VMCS_GUEST_CR0                         %#RX64\n", u64Val));
    15229     rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR0_MASK, &u64Val);                    AssertRC(rc);
    15230     Log4(("VMX_VMCS_CTRL_CR0_MASK                     %#RX64\n", u64Val));
    15231     rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val);             AssertRC(rc);
    15232     Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW              %#RX64\n", u64Val));
    15233     rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR4_MASK, &u64Val);                    AssertRC(rc);
    15234     Log4(("VMX_VMCS_CTRL_CR4_MASK                     %#RX64\n", u64Val));
    15235     rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val);             AssertRC(rc);
    15236     Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW              %#RX64\n", u64Val));
    15237     if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
    15238     {
    15239         rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val);             AssertRC(rc);
    15240         Log4(("VMX_VMCS64_CTRL_EPTP_FULL                  %#RX64\n", u64Val));
    15241     }
    15242     hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
    15243 #endif
    15244 
    15245     return VERR_VMX_INVALID_GUEST_STATE;
    15246 }
    15247 
    15248 /**
    15249  * VM-exit handler for all undefined/unexpected reasons. Should never happen.
    15250  */
    15251 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15252 {
    15253     /*
    15254      * Cumulative notes of all recognized but unexpected VM-exits.
    15255      *
    15256      * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
    15257      *    nested-paging is used.
    15258      *
    15259      * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
    15260      *    emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
    15261      *    this function (and thereby stop VM execution) for handling such instructions.
    15262      *
    15263      *
    15264      * VMX_EXIT_INIT_SIGNAL:
    15265      *    INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
    15266      *    It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these
    15267      *    VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
    15268      *
    15269      *    See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
    15270      *    See Intel spec. 29.3 "VMX Instructions" for "VMXON".
    15271      *    See Intel spec. 23.8 "Restrictions on VMX operation".
    15272      *
    15273      * VMX_EXIT_SIPI:
    15274      *    SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
    15275      *    activity state is used. We don't make use of it as our guests don't have direct
    15276      *    access to the host local APIC.
    15277      *
    15278      *    See Intel spec. 25.3 "Other Causes of VM-exits".
    15279      *
    15280      * VMX_EXIT_IO_SMI:
    15281      * VMX_EXIT_SMI:
    15282      *    This can only happen if we support dual-monitor treatment of SMI, which can be
    15283      *    activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
    15284      *    monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
    15285      *    VMX root mode or receive an SMI. If we get here, something funny is going on.
    15286      *
    15287      *    See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
    15288      *    See Intel spec. 25.3 "Other Causes of VM-Exits"
    15289      *
    15290      * VMX_EXIT_ERR_MSR_LOAD:
    15291      *    Failures while loading MSRs that are part of the VM-entry MSR-load area are
    15292      *    unexpected and typically indicate a bug in the hypervisor code. We thus cannot
    15293      *    resume execution.
    15294      *
    15295      *    See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
    15296      *
    15297      * VMX_EXIT_ERR_MACHINE_CHECK:
    15298      *    Machine check exceptions indicate a fatal/unrecoverable hardware condition
    15299      *    including but not limited to system bus, ECC, parity, cache and TLB errors. An
    15300      *    abort-class #MC exception is raised, so we cannot assume a reasonable chance of
    15301      *    continuing any sort of execution and we bail.
    15302      *
    15303      *    See Intel spec. 15.1 "Machine-check Architecture".
    15304      *    See Intel spec. 27.1 "Architectural State Before A VM Exit".
    15305      *
    15306      * VMX_EXIT_PML_FULL:
    15307      * VMX_EXIT_VIRTUALIZED_EOI:
    15308      * VMX_EXIT_APIC_WRITE:
    15309      *    We do not currently support any of these features and thus they are all unexpected
    15310      *    VM-exits.
    15311      *
    15312      * VMX_EXIT_GDTR_IDTR_ACCESS:
    15313      * VMX_EXIT_LDTR_TR_ACCESS:
    15314      * VMX_EXIT_RDRAND:
    15315      * VMX_EXIT_RSM:
    15316      * VMX_EXIT_VMFUNC:
    15317      * VMX_EXIT_ENCLS:
    15318      * VMX_EXIT_RDSEED:
    15319      * VMX_EXIT_XSAVES:
    15320      * VMX_EXIT_XRSTORS:
    15321      * VMX_EXIT_UMWAIT:
    15322      * VMX_EXIT_TPAUSE:
    15323      * VMX_EXIT_LOADIWKEY:
    15324      *    These VM-exits are -not- caused unconditionally by execution of the corresponding
    15325      *    instruction. Any VM-exit for these instructions indicates a hardware problem,
    15326      *    unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
    15327      *
    15328      *    See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
    15329      */
    15330     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15331     AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
    15332     HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
    15333 }
    15334 
    15335 
    15336 /**
    15337  * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
    15338  */
    15339 HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15340 {
    15341     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15342 
    15343     /** @todo Optimize this: We currently drag in the whole MSR state
    15344      * (CPUMCTX_EXTRN_ALL_MSRS) here.  We should optimize this to only get the
    15345      * MSRs required.  That would require changes to IEM and possibly CPUM too.
    15346      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
    15347     PVMXVMCSINFO   pVmcsInfo = pVmxTransient->pVmcsInfo;
    15348     uint32_t const idMsr     = pVCpu->cpum.GstCtx.ecx;
    15349     uint64_t       fImport   = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
    15350     switch (idMsr)
    15351     {
    15352         case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
    15353         case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
    15354     }
    15355 
    15356     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    15357     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
    15358     AssertRCReturn(rc, rc);
    15359 
    15360     Log4Func(("ecx=%#RX32\n", idMsr));
    15361 
    15362 #ifdef VBOX_STRICT
    15363     Assert(!pVmxTransient->fIsNestedGuest);
    15364     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    15365     {
    15366         if (   hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
    15367             && idMsr != MSR_K6_EFER)
    15368         {
    15369             AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
    15370             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
    15371         }
    15372         if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    15373         {
    15374             Assert(pVmcsInfo->pvMsrBitmap);
    15375             uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
    15376             if (fMsrpm & VMXMSRPM_ALLOW_RD)
    15377             {
    15378                 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
    15379                 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
    15380             }
    15381         }
    15382     }
    15383 #endif
    15384 
    15385     VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
    15386     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
    15387     if (rcStrict == VINF_SUCCESS)
    15388         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    15389     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    15390     {
    15391         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    15392         rcStrict = VINF_SUCCESS;
    15393     }
    15394     else
    15395         AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
    15396                   ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
    15397 
    15398     return rcStrict;
    15399 }
    15400 
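/*
 * Illustrative sketch, not part of the changeset: the VBOX_STRICT checks above rely on
 * the 4 KB MSR-bitmap layout from the Intel SDM: read bits for MSRs 0x00000000..0x00001FFF
 * occupy bytes 0..1023, read bits for MSRs 0xC0000000..0xC0001FFF occupy bytes 1024..2047,
 * and the corresponding write bitmaps follow at offsets 2048 and 3072. A hypothetical,
 * self-contained lookup (assuming only <stdint.h>/<stdbool.h>) could look like this:
 */
#if 0 /* illustrative only */
static bool IsMsrReadIntercepted(uint8_t const *pbMsrBitmap, uint32_t idMsr)
{
    uint32_t offBase;
    if (idMsr <= UINT32_C(0x00001fff))
        offBase = 0;                                /* read bitmap for low MSRs */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        offBase = 1024;                             /* read bitmap for high MSRs */
    else
        return true;                                /* out-of-range MSRs always cause VM-exits */
    uint32_t const iBit = idMsr & UINT32_C(0x1fff); /* low 13 bits index the bitmap */
    return (pbMsrBitmap[offBase + iBit / 8] >> (iBit % 8)) & 1;
}
#endif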
    15401 
    15402 /**
    15403  * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
    15404  */
    15405 HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15406 {
    15407     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15408 
    15409     /** @todo Optimize this: We currently drag in the whole MSR state
    15410      * (CPUMCTX_EXTRN_ALL_MSRS) here.  We should optimize this to only get the
    15411      * MSRs required.  That would require changes to IEM and possibly CPUM too.
    15412      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
    15413     uint32_t const idMsr    = pVCpu->cpum.GstCtx.ecx;
    15414     uint64_t       fImport  = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
    15415 
    15416     /*
    15417      * The FS and GS base MSRs are not part of the above all-MSRs mask.
    15418      * Although we don't need to fetch the base itself (it will be overwritten shortly),
    15419      * loading the guest state would also load the entire segment register, including
    15420      * the limit and attributes, so we need to import them here.
    15421      */
    15422     switch (idMsr)
    15423     {
    15424         case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
    15425         case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
    15426     }
    15427 
    15428     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    15429     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    15430     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
    15431     AssertRCReturn(rc, rc);
    15432 
    15433     Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
    15434 
    15435     VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
    15436     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
    15437 
    15438     if (rcStrict == VINF_SUCCESS)
    15439     {
    15440         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    15441 
    15442         /* If this is an X2APIC WRMSR access, update the APIC state as well. */
    15443         if (    idMsr == MSR_IA32_APICBASE
    15444             || (   idMsr >= MSR_IA32_X2APIC_START
    15445                 && idMsr <= MSR_IA32_X2APIC_END))
    15446         {
    15447             /*
    15448              * We've already saved the APIC-related guest state (TPR) in the post-run phase.
    15449              * When full APIC register virtualization is implemented we'll have to make
    15450              * sure APIC state is saved from the VMCS before IEM changes it.
    15451              */
    15452             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
    15453         }
    15454         else if (idMsr == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
    15455             pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    15456         else if (idMsr == MSR_K6_EFER)
    15457         {
    15458             /*
    15459              * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
    15460              * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
    15461              * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
    15462              */
    15463             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
    15464         }
    15465 
    15466         /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
    15467         if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
    15468         {
    15469             switch (idMsr)
    15470             {
    15471                 case MSR_IA32_SYSENTER_CS:  ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
    15472                 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
    15473                 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
    15474                 case MSR_K8_FS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS);               break;
    15475                 case MSR_K8_GS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS);               break;
    15476                 case MSR_K6_EFER:           /* Nothing to do, already handled above. */                                    break;
    15477                 default:
    15478                 {
    15479                     if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    15480                         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
    15481                     else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
    15482                         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    15483                     break;
    15484                 }
    15485             }
    15486         }
    15487 #ifdef VBOX_STRICT
    15488         else
    15489         {
    15490             /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
    15491             switch (idMsr)
    15492             {
    15493                 case MSR_IA32_SYSENTER_CS:
    15494                 case MSR_IA32_SYSENTER_EIP:
    15495                 case MSR_IA32_SYSENTER_ESP:
    15496                 case MSR_K8_FS_BASE:
    15497                 case MSR_K8_GS_BASE:
    15498                 {
    15499                     AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
    15500                     HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
    15501                 }
    15502 
    15503                 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
    15504                 default:
    15505                 {
    15506                     if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
    15507                     {
    15508                         /* EFER MSR writes are always intercepted. */
    15509                         if (idMsr != MSR_K6_EFER)
    15510                         {
    15511                             AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
    15512                                              idMsr));
    15513                             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
    15514                         }
    15515                     }
    15516 
    15517                     if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    15518                     {
    15519                         Assert(pVmcsInfo->pvMsrBitmap);
    15520                         uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
    15521                         if (fMsrpm & VMXMSRPM_ALLOW_WR)
    15522                         {
    15523                             AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
    15524                             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
    15525                         }
    15526                     }
    15527                     break;
    15528                 }
    15529             }
    15530         }
    15531 #endif  /* VBOX_STRICT */
    15532     }
    15533     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    15534     {
    15535         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    15536         rcStrict = VINF_SUCCESS;
    15537     }
    15538     else
    15539         AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
    15540                   ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
    15541 
    15542     return rcStrict;
    15543 }
    15544 
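/*
 * Illustrative sketch, not part of the changeset: the x2APIC range check above follows the
 * Intel SDM mapping where MSR 0x800 + n corresponds to the legacy xAPIC register at MMIO
 * offset n * 16 (e.g. the TPR at offset 0x80 becomes MSR 0x808). A hypothetical helper:
 */
#if 0 /* illustrative only */
static bool X2ApicMsrToXApicOffset(uint32_t idMsr, uint32_t *poffReg)
{
    if (idMsr - UINT32_C(0x800) <= UINT32_C(0xff))  /* x2APIC MSRs span 0x800..0x8ff */
    {
        *poffReg = (idMsr - UINT32_C(0x800)) << 4;  /* TPR: MSR 0x808 -> offset 0x80 */
        return true;
    }
    return false;
}
#endif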
    15545 
    15546 /**
    15547  * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
    15548  */
    15549 HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15550 {
    15551     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15552 
    15553     /** @todo The guest has likely hit a contended spinlock. We might want to
    15554      *        poke or schedule a different guest VCPU. */
    15555     int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    15556     if (RT_SUCCESS(rc))
    15557         return VINF_EM_RAW_INTERRUPT;
    15558 
    15559     AssertMsgFailed(("hmR0VmxExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
    15560     return rc;
    15561 }
    15562 
    15563 
    15564 /**
    15565  * VM-exit handler for when the TPR value is lowered below the specified
    15566  * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
    15567  */
    15568 HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15569 {
    15570     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15571     Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
    15572 
    15573     /*
    15574      * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
    15575      * We'll re-evaluate pending interrupts and inject them before the next VM
    15576      * entry so we can just continue execution here.
    15577      */
    15578     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
    15579     return VINF_SUCCESS;
    15580 }
    15581 
    15582 
    15583 /**
    15584  * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
    15585  * VM-exit.
    15586  *
    15587  * @retval VINF_SUCCESS when guest execution can continue.
    15588  * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
    15589  * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
    15590  *         incompatible guest state for VMX execution (real-on-v86 case).
    15591  */
    15592 HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15593 {
    15594     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15595     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
    15596 
    15597     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    15598     hmR0VmxReadExitQualVmcs(pVmxTransient);
    15599     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    15600 
    15601     VBOXSTRICTRC rcStrict;
    15602     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    15603     uint64_t const uExitQual   = pVmxTransient->uExitQual;
    15604     uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
    15605     switch (uAccessType)
    15606     {
    15607         /*
    15608          * MOV to CRx.
    15609          */
    15610         case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
    15611         {
    15612             /*
    15613              * When PAE paging is used, the CPU will reload the PAE PDPTEs from CR3 when the
    15614              * guest changes certain bits in CR0 or CR4 (and not just CR3). We are currently
    15615              * fine since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3, which will
    15616              * import the PAE PDPTEs as well.
    15617              */
    15618             int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    15619             AssertRCReturn(rc, rc);
    15620 
    15621             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    15622             uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
    15623             uint8_t const  iGReg   = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
    15624             uint8_t const  iCrReg  = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
    15625 
    15626             /*
    15627              * MOV to CR3 only causes a VM-exit when one or more of the following are true:
    15628              *   - When nested paging isn't used.
    15629              *   - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
    15630              *   - We are executing in the VM debug loop.
    15631              */
    15632             Assert(   iCrReg != 3
    15633                    || !pVM->hmr0.s.fNestedPaging
    15634                    || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
    15635                    || pVCpu->hmr0.s.fUsingDebugLoop);
    15636 
    15637             /* Writes to CR8 only cause VM-exits when the TPR shadow is not used. */
    15638             Assert(   iCrReg != 8
    15639                    || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
    15640 
    15641             rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
    15642             AssertMsg(   rcStrict == VINF_SUCCESS
    15643                       || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    15644 
    15645             /*
    15646              * This is a kludge for handling switches back to real mode when we try to use
    15647              * V86 mode to run real mode code directly.  Problem is that V86 mode cannot
    15648              * deal with special selector values, so we have to return to ring-3 and run
    15649              * there till the selector values are V86 mode compatible.
    15650              *
    15651              * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
    15652              *       latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
    15653              *       this function.
    15654              */
    15655             if (   iCrReg == 0
    15656                 && rcStrict == VINF_SUCCESS
    15657                 && !pVM->hmr0.s.vmx.fUnrestrictedGuest
    15658                 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
    15659                 && (uOldCr0 & X86_CR0_PE)
    15660                 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
    15661             {
    15662                 /** @todo Check selectors rather than returning all the time.  */
    15663                 Assert(!pVmxTransient->fIsNestedGuest);
    15664                 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
    15665                 rcStrict = VINF_EM_RESCHEDULE_REM;
    15666             }
    15667             break;
    15668         }
    15669 
    15670         /*
    15671          * MOV from CRx.
    15672          */
    15673         case VMX_EXIT_QUAL_CRX_ACCESS_READ:
    15674         {
    15675             uint8_t const iGReg  = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
    15676             uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
    15677 
    15678             /*
    15679              * MOV from CR3 only causes a VM-exit when one or more of the following are true:
    15680              *   - When nested paging isn't used.
    15681              *   - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
    15682              *   - We are executing in the VM debug loop.
    15683              */
    15684             Assert(   iCrReg != 3
    15685                    || !pVM->hmr0.s.fNestedPaging
    15686                    || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
    15687                    || pVCpu->hmr0.s.fLeaveDone);
    15688 
    15689             /* Reads from CR8 only cause a VM-exit when the TPR shadow feature isn't enabled. */
    15690             Assert(   iCrReg != 8
    15691                    || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
    15692 
    15693             rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
    15694             break;
    15695         }
    15696 
    15697         /*
    15698          * CLTS (Clear Task-Switch Flag in CR0).
    15699          */
    15700         case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
    15701         {
    15702             rcStrict = hmR0VmxExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
    15703             break;
    15704         }
    15705 
    15706         /*
    15707          * LMSW (Load Machine-Status Word into CR0).
    15708          * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
    15709          */
    15710         case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
    15711         {
    15712             RTGCPTR        GCPtrEffDst;
    15713             uint8_t const  cbInstr     = pVmxTransient->cbExitInstr;
    15714             uint16_t const uMsw        = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
    15715             bool const     fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
    15716             if (fMemOperand)
    15717             {
    15718                 hmR0VmxReadGuestLinearAddrVmcs(pVmxTransient);
    15719                 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
    15720             }
    15721             else
    15722                 GCPtrEffDst = NIL_RTGCPTR;
    15723             rcStrict = hmR0VmxExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
    15724             break;
    15725         }
    15726 
    15727         default:
    15728         {
    15729             AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
    15730             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
    15731         }
    15732     }
    15733 
    15734     Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
    15735                                    == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
    15736     Assert(rcStrict != VINF_IEM_RAISED_XCPT);
    15737 
    15738     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
    15739     NOREF(pVM);
    15740     return rcStrict;
    15741 }
    15742 
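/*
 * Illustrative sketch, not part of the changeset: the VMX_EXIT_QUAL_CRX_* accessors used
 * above decode the control-register exit qualification per Intel SDM Table 27-3:
 * bits 3:0 = CR number, bits 5:4 = access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS,
 * 3 = LMSW), bit 6 = LMSW memory operand, bits 11:8 = GPR, bits 31:16 = LMSW source data.
 * A hypothetical standalone decode:
 */
#if 0 /* illustrative only */
static void DecodeCrxExitQual(uint64_t uExitQual)
{
    unsigned const iCrReg      = uExitQual & 0xf;             /* bits 3:0   */
    unsigned const uAccessType = (uExitQual >> 4) & 0x3;      /* bits 5:4   */
    unsigned const fLmswMem    = (uExitQual >> 6) & 0x1;      /* bit 6      */
    unsigned const iGReg       = (uExitQual >> 8) & 0xf;      /* bits 11:8  */
    unsigned const uLmswData   = (uExitQual >> 16) & 0xffff;  /* bits 31:16 */
    (void)iCrReg; (void)uAccessType; (void)fLmswMem; (void)iGReg; (void)uLmswData;
}
#endif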
    15743 
    15744 /**
    15745  * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
    15746  * VM-exit.
    15747  */
    15748 HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15749 {
    15750     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15751     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
    15752 
    15753     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    15754     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    15755     hmR0VmxReadExitQualVmcs(pVmxTransient);
    15756     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    15757     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
    15758                                                      | CPUMCTX_EXTRN_EFER);
    15759     /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
    15760     AssertRCReturn(rc, rc);
    15761 
    15762     /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
    15763     uint32_t const uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
    15764     uint8_t  const uIOSize      = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
    15765     bool     const fIOWrite     = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
    15766     bool     const fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
    15767     bool     const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
    15768     bool     const fDbgStepping = pVCpu->hm.s.fSingleInstruction;
    15769     AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
    15770 
    15771     /*
    15772      * Update exit history to see if this exit can be optimized.
    15773      */
    15774     VBOXSTRICTRC rcStrict;
    15775     PCEMEXITREC  pExitRec = NULL;
    15776     if (   !fGstStepping
    15777         && !fDbgStepping)
    15778         pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
    15779                                                     !fIOString
    15780                                                     ? !fIOWrite
    15781                                                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
    15782                                                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
    15783                                                     : !fIOWrite
    15784                                                     ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
    15785                                                     : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
    15786                                                     pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    15787     if (!pExitRec)
    15788     {
    15789         static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 };                    /* Size of the I/O accesses in bytes. */
    15790         static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff };   /* AND masks for saving result in AL/AX/EAX. */
    15791 
    15792         uint32_t const cbValue  = s_aIOSizes[uIOSize];
    15793         uint32_t const cbInstr  = pVmxTransient->cbExitInstr;
    15794         bool  fUpdateRipAlready = false; /* ugly hack, should be temporary. */
    15795         PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    15796         if (fIOString)
    15797         {
    15798             /*
    15799              * INS/OUTS - I/O String instruction.
    15800              *
    15801              * Use instruction-information if available, otherwise fall back on
    15802              * interpreting the instruction.
    15803              */
    15804             Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    15805             AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
    15806             bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
    15807             if (fInsOutsInfo)
    15808             {
    15809                 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    15810                 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
    15811                 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
    15812                 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
    15813                 bool const fRep           = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
    15814                 if (fIOWrite)
    15815                     rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
    15816                                                     pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
    15817                 else
    15818                 {
    15819                     /*
    15820                      * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
    15821                      * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
    15822                      * See Intel Instruction spec. for "INS".
    15823                      * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
    15824                      */
    15825                     rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
    15826                 }
    15827             }
    15828             else
    15829                 rcStrict = IEMExecOne(pVCpu);
    15830 
    15831             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
    15832             fUpdateRipAlready = true;
    15833         }
    15834         else
    15835         {
    15836             /*
    15837              * IN/OUT - I/O instruction.
    15838              */
    15839             Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    15840             uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
    15841             Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
    15842             if (fIOWrite)
    15843             {
    15844                 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
    15845                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
    15846                 if (    rcStrict == VINF_IOM_R3_IOPORT_WRITE
    15847                     && !pCtx->eflags.Bits.u1TF)
    15848                     rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
    15849             }
    15850             else
    15851             {
    15852                 uint32_t u32Result = 0;
    15853                 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
    15854                 if (IOM_SUCCESS(rcStrict))
    15855                 {
    15856                     /* Save result of I/O IN instr. in AL/AX/EAX. */
    15857                     pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
    15858                 }
    15859                 if (    rcStrict == VINF_IOM_R3_IOPORT_READ
    15860                     && !pCtx->eflags.Bits.u1TF)
    15861                     rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
    15862                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
    15863             }
    15864         }
    15865 
    15866         if (IOM_SUCCESS(rcStrict))
    15867         {
    15868             if (!fUpdateRipAlready)
    15869             {
    15870                 hmR0VmxAdvanceGuestRipBy(pVCpu, cbInstr);
    15871                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
    15872             }
    15873 
    15874             /*
    15875              * INS/OUTS with a REP prefix updates RFLAGS; this was observed as a triple-fault
    15876              * guru meditation while booting a Fedora 17 64-bit guest.
    15877              *
    15878              * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
    15879              */
    15880             if (fIOString)
    15881                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
    15882 
    15883             /*
    15884              * If any I/O breakpoints are armed, we need to check if one triggered
    15885              * and take appropriate action.
    15886              * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
    15887              */
    15888             rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
    15889             AssertRCReturn(rc, rc);
    15890 
    15891             /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
    15892              *  execution engines about whether hyper BPs and such are pending. */
    15893             uint32_t const uDr7 = pCtx->dr[7];
    15894             if (RT_UNLIKELY(   (   (uDr7 & X86_DR7_ENABLED_MASK)
    15895                                 && X86_DR7_ANY_RW_IO(uDr7)
    15896                                 && (pCtx->cr4 & X86_CR4_DE))
    15897                             || DBGFBpIsHwIoArmed(pVM)))
    15898             {
    15899                 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
    15900 
    15901                 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
    15902                 VMMRZCallRing3Disable(pVCpu);
    15903                 HM_DISABLE_PREEMPT(pVCpu);
    15904 
    15905                 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
    15906 
    15907                 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
    15908                 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
    15909                 {
    15910                     /* Raise #DB. */
    15911                     if (fIsGuestDbgActive)
    15912                         ASMSetDR6(pCtx->dr[6]);
    15913                     if (pCtx->dr[7] != uDr7)
    15914                         pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
    15915 
    15916                     hmR0VmxSetPendingXcptDB(pVCpu);
    15917                 }
    15918                 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
    15919                    however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
    15920                 else if (   rcStrict2 != VINF_SUCCESS
    15921                          && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
    15922                     rcStrict = rcStrict2;
    15923                 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
    15924 
    15925                 HM_RESTORE_PREEMPT();
    15926                 VMMRZCallRing3Enable(pVCpu);
    15927             }
    15928         }
    15929 
    15930 #ifdef VBOX_STRICT
    15931         if (   rcStrict == VINF_IOM_R3_IOPORT_READ
    15932             || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
    15933             Assert(!fIOWrite);
    15934         else if (   rcStrict == VINF_IOM_R3_IOPORT_WRITE
    15935                  || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
    15936                  || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
    15937             Assert(fIOWrite);
    15938         else
    15939         {
    15940 # if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
    15941            *        statuses, that the VMM device and some others may return. See
    15942            *        IOM_SUCCESS() for guidance. */
    15943             AssertMsg(   RT_FAILURE(rcStrict)
    15944                       || rcStrict == VINF_SUCCESS
    15945                       || rcStrict == VINF_EM_RAW_EMULATE_INSTR
    15946                       || rcStrict == VINF_EM_DBG_BREAKPOINT
    15947                       || rcStrict == VINF_EM_RAW_GUEST_TRAP
    15948                       || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    15949 # endif
    15950         }
    15951 #endif
    15952         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
    15953     }
    15954     else
    15955     {
    15956         /*
    15957          * Frequent exit or something needing probing.  Get state and call EMHistoryExec.
    15958          */
    15959         int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    15960         AssertRCReturn(rc2, rc2);
    15961         STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
    15962                          : fIOWrite ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
    15963         Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
    15964               pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    15965               VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
    15966               fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
    15967 
    15968         rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    15969         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    15970 
    15971         Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
    15972               pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    15973               VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    15974     }
    15975     return rcStrict;
    15976 }
    15977 
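/*
 * Illustrative sketch, not part of the changeset: the I/O exit qualification decoded above
 * follows Intel SDM Table 27-5: bits 2:0 = access size minus one (only 0, 1 and 3 are
 * valid, hence the unused index 2 in s_aIOSizes[]), bit 3 = direction (0 = OUT, 1 = IN),
 * bit 4 = string instruction, bit 5 = REP prefix, bit 6 = operand encoding (0 = DX,
 * 1 = immediate), bits 31:16 = port number. A hypothetical standalone decode:
 */
#if 0 /* illustrative only */
static void DecodeIoExitQual(uint64_t uExitQual)
{
    uint32_t const uSizeField = uExitQual & 0x7;  /* 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes */
    uint32_t const cbAccess   = uSizeField == 3 ? 4 : uSizeField + 1;
    bool     const fIn        = (uExitQual >> 3) & 0x1;
    bool     const fString    = (uExitQual >> 4) & 0x1;
    bool     const fRep       = (uExitQual >> 5) & 0x1;
    uint16_t const uPort      = (uint16_t)(uExitQual >> 16);
    (void)cbAccess; (void)fIn; (void)fString; (void)fRep; (void)uPort;
}
#endif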
    15978 
    15979 /**
    15980  * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
    15981  * VM-exit.
    15982  */
    15983 HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    15984 {
    15985     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15986 
    15987     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
    15988     hmR0VmxReadExitQualVmcs(pVmxTransient);
    15989     if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
    15990     {
    15991         hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    15992         if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
    15993         {
    15994             uint32_t uErrCode;
    15995             if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
    15996             {
    15997                 hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    15998                 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
    15999             }
    16000             else
    16001                 uErrCode = 0;
    16002 
    16003             RTGCUINTPTR GCPtrFaultAddress;
    16004             if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
    16005                 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
    16006             else
    16007                 GCPtrFaultAddress = 0;
    16008 
    16009             hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16010 
    16011             hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
    16012                                    pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
    16013 
    16014             Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
    16015                       VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
    16016             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
    16017             return VINF_EM_RAW_INJECT_TRPM_EVENT;
    16018         }
    16019     }
    16020 
    16021     /* Fall back to the interpreter to emulate the task-switch. */
    16022     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
    16023     return VERR_EM_INTERPRETER;
    16024 }
    16025 
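/*
 * Illustrative sketch, not part of the changeset: the IDT-vectoring information field
 * tested above is formatted per the Intel SDM: bits 7:0 = vector, bits 10:8 = interruption
 * type (external interrupt, NMI, hardware exception, software interrupt, ...), bit 11 =
 * error-code valid, bit 31 = valid. A hypothetical standalone decode:
 */
#if 0 /* illustrative only */
static bool DecodeIdtVectoringInfo(uint32_t uInfo, uint8_t *puVector, uint8_t *puType, bool *pfErrCd)
{
    if (!(uInfo & UINT32_C(0x80000000)))          /* bit 31: valid */
        return false;
    *puVector = (uint8_t)(uInfo & 0xff);          /* bits 7:0  */
    *puType   = (uint8_t)((uInfo >> 8) & 0x7);    /* bits 10:8 */
    *pfErrCd  = ((uInfo >> 11) & 1) != 0;         /* bit 11    */
    return true;
}
#endif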
    16026 
    16027 /**
    16028  * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
    16029  */
    16030 HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16031 {
    16032     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16033 
    16034     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    16035     pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
    16036     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    16037     AssertRC(rc);
    16038     return VINF_EM_DBG_STEPPED;
    16039 }
    16040 
    16041 
    16042 /**
    16043  * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
    16044  */
    16045 HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16046 {
    16047     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16048     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
    16049 
    16050     hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    16051     hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    16052     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16053     hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    16054     hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    16055 
    16056     /*
    16057      * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
    16058      */
    16059     VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    16060     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16061     {
    16062         /* For some crazy guests, if event delivery causes an APIC-access VM-exit, fall back to instruction emulation. */
    16063         if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
    16064         {
    16065             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
    16066             return VINF_EM_RAW_INJECT_TRPM_EVENT;
    16067         }
    16068     }
    16069     else
    16070     {
    16071         Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
    16072         return rcStrict;
    16073     }
    16074 
    16075     /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
    16076     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    16077     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16078     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    16079     AssertRCReturn(rc, rc);
    16080 
    16081     /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
    16082     uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
    16083     switch (uAccessType)
    16084     {
    16085         case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
    16086         case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
    16087         {
    16088             AssertMsg(   !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    16089                       || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
    16090                       ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
    16091 
    16092             RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64GstMsrApicBase;    /* Always up-to-date, as it is not part of the VMCS. */
    16093             GCPhys &= PAGE_BASE_GC_MASK;
    16094             GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
    16095             Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
    16096                  VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
    16097 
    16098             rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
    16099                                             uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
    16100             Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    16101             if (   rcStrict == VINF_SUCCESS
    16102                 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
    16103                 || rcStrict == VERR_PAGE_NOT_PRESENT)
    16104             {
    16105                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
    16106                                                          | HM_CHANGED_GUEST_APIC_TPR);
    16107                 rcStrict = VINF_SUCCESS;
    16108             }
    16109             break;
    16110         }
    16111 
    16112         default:
    16113         {
    16114             Log4Func(("uAccessType=%#x\n", uAccessType));
    16115             rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    16116             break;
    16117         }
    16118     }
    16119 
    16120     if (rcStrict != VINF_SUCCESS)
    16121         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
    16122     return rcStrict;
    16123 }
    16124 
    16125 
    16126 /**
    16127  * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
    16128  * VM-exit.
    16129  */
    16130 HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16131 {
    16132     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16133     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    16134 
    16135     /*
    16136      * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
    16137      * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
    16138      * must emulate the MOV DRx access.
    16139      */
    16140     if (!pVmxTransient->fIsNestedGuest)
    16141     {
    16142         /* We should -not- get this VM-exit if the guest's debug registers were active. */
    16143         if (pVmxTransient->fWasGuestDebugStateActive)
    16144         {
    16145             AssertMsgFailed(("Unexpected MOV DRx exit\n"));
    16146             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
    16147         }
    16148 
    16149         if (   !pVCpu->hm.s.fSingleInstruction
    16150             && !pVmxTransient->fWasHyperDebugStateActive)
    16151         {
    16152             Assert(!DBGFIsStepping(pVCpu));
    16153             Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
    16154 
    16155             /* Don't intercept MOV DRx any more. */
    16156             pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
    16157             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    16158             AssertRC(rc);
    16159 
    16160             /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
    16161             VMMRZCallRing3Disable(pVCpu);
    16162             HM_DISABLE_PREEMPT(pVCpu);
    16163 
    16164             /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
    16165             CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
    16166             Assert(CPUMIsGuestDebugStateActive(pVCpu));
    16167 
    16168             HM_RESTORE_PREEMPT();
    16169             VMMRZCallRing3Enable(pVCpu);
    16170 
    16171 #ifdef VBOX_WITH_STATISTICS
    16172             hmR0VmxReadExitQualVmcs(pVmxTransient);
    16173             if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    16174                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    16175             else
    16176                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    16177 #endif
    16178             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
    16179             return VINF_SUCCESS;
    16180         }
    16181     }
    16182 
    16183     /*
    16184      * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode(), which requires the EFER MSR and CS.
    16185      * The EFER MSR is always up-to-date.
    16186      * Update the segment registers and DR7 from the CPU.
    16187      */
    16188     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    16189     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16190     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
    16191     AssertRCReturn(rc, rc);
    16192     Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
    16193 
    16194     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    16195     if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    16196     {
    16197         rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    16198                                  VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
    16199                                  VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
    16200         if (RT_SUCCESS(rc))
    16201             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
    16202         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    16203     }
    16204     else
    16205     {
    16206         rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    16207                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
    16208                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
    16209         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    16210     }
    16211 
    16212     Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
    16213     if (RT_SUCCESS(rc))
    16214     {
    16215         int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    16216         AssertRCReturn(rc2, rc2);
    16217         return VINF_SUCCESS;
    16218     }
    16219     return rc;
    16220 }
    16221 
    16222 
    16223 /**
    16224  * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
    16225  * Conditional VM-exit.
    16226  */
    16227 HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16228 {
    16229     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16230     Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
    16231 
    16232     hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    16233     hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    16234     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16235     hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    16236     hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    16237 
    16238     /*
    16239      * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
    16240      */
    16241     VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    16242     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16243     {
    16244         /*
    16245          * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
    16246          * instruction emulation to inject the original event. Otherwise, injecting the original event
    16247          * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
    16248          */
    16249         if (!pVCpu->hm.s.Event.fPending)
    16250         { /* likely */ }
    16251         else
    16252         {
    16253             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
    16254 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    16255             /** @todo NSTVMX: Think about how this should be handled. */
    16256             if (pVmxTransient->fIsNestedGuest)
    16257                 return VERR_VMX_IPE_3;
    16258 #endif
    16259             return VINF_EM_RAW_INJECT_TRPM_EVENT;
    16260         }
    16261     }
    16262     else
    16263     {
    16264         Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
    16265         return rcStrict;
    16266     }
    16267 
    16268     /*
    16269      * Get sufficient state and update the exit history entry.
    16270      */
    16271     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    16272     hmR0VmxReadGuestPhysicalAddrVmcs(pVmxTransient);
    16273     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    16274     AssertRCReturn(rc, rc);
    16275 
    16276     RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
    16277     PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
    16278                                                             EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
    16279                                                             pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    16280     if (!pExitRec)
    16281     {
    16282         /*
    16283          * If we succeed, resume guest execution.
    16284          * If we fail to interpret the instruction because we couldn't get the guest physical address
    16285          * of the page containing the instruction via the guest's page tables (we would invalidate the
    16286          * guest page in the host TLB), resume execution; this causes a guest page fault that lets the
    16287          * guest handle this weird case. See @bugref{6043}.
    16288          */
    16289         PVMCC    pVM  = pVCpu->CTX_SUFF(pVM);
    16290         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    16291 /** @todo bird: We can probably just go straight to IOM here and assume that
    16292  *        it's MMIO, then fall back on PGM if that hunch didn't work out so
    16293  *        well.  However, we need to address that aliasing workarounds that
    16294  *        PGMR0Trap0eHandlerNPMisconfig implements.  So, some care is needed.
    16295  *
    16296  *        Might also be interesting to see if we can get this done more or
    16297  *        less locklessly inside IOM.  Need to consider the lookup table
    16298  *        updating and use a bit more carefully first (or do all updates via
    16299  *        rendezvous) */
    16300         rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
    16301         Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
    16302         if (   rcStrict == VINF_SUCCESS
    16303             || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
    16304             || rcStrict == VERR_PAGE_NOT_PRESENT)
    16305         {
    16306             /* Successfully handled MMIO operation. */
    16307             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
    16308                                                      | HM_CHANGED_GUEST_APIC_TPR);
    16309             rcStrict = VINF_SUCCESS;
    16310         }
    16311     }
    16312     else
    16313     {
    16314         /*
    16315          * Frequent exit or something needing probing. Call EMHistoryExec.
    16316          */
    16317         Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
    16318               pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
    16319 
    16320         rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    16321         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    16322 
    16323         Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
    16324               pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    16325               VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    16326     }
    16327     return rcStrict;
    16328 }
    16329 
    16330 
    16331 /**
    16332  * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
    16333  * VM-exit.
    16334  */
    16335 HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16336 {
    16337     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16338     Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
    16339 
    16340     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16341     hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    16342     hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    16343     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16344     hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    16345     hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    16346 
    16347     /*
    16348      * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
    16349      */
    16350     VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    16351     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16352     {
    16353         /*
    16354          * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
    16355          * we shall resolve the nested #PF and re-inject the original event.
    16356          */
    16357         if (pVCpu->hm.s.Event.fPending)
    16358             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflectNPF);
    16359     }
    16360     else
    16361     {
    16362         Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
    16363         return rcStrict;
    16364     }
    16365 
    16366     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    16367     hmR0VmxReadGuestPhysicalAddrVmcs(pVmxTransient);
    16368     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    16369     AssertRCReturn(rc, rc);
    16370 
    16371     RTGCPHYS const GCPhys    = pVmxTransient->uGuestPhysicalAddr;
    16372     uint64_t const uExitQual = pVmxTransient->uExitQual;
    16373     AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
    16374 
    16375     RTGCUINT uErrorCode = 0;
    16376     if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
    16377         uErrorCode |= X86_TRAP_PF_ID;
    16378     if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
    16379         uErrorCode |= X86_TRAP_PF_RW;
    16380     if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
    16381         uErrorCode |= X86_TRAP_PF_P;
    16382 
    16383     PVMCC    pVM  = pVCpu->CTX_SUFF(pVM);
    16384     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    16385     Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
    16386 
    16387     /*
    16388      * Handle the pagefault trap for the nested shadow table.
    16389      */
    16390     TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
    16391     rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
    16392     TRPMResetTrap(pVCpu);
    16393 
    16394     /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
    16395     if (   rcStrict == VINF_SUCCESS
    16396         || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
    16397         || rcStrict == VERR_PAGE_NOT_PRESENT)
    16398     {
    16399         /* Successfully synced our nested page tables. */
    16400         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
    16401         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
    16402         return VINF_SUCCESS;
    16403     }
    16404 
    16405     Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    16406     return rcStrict;
    16407 }
    16408 
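/*
 * Illustrative sketch (hypothetical helper, not part of this changeset): the
 * exit-qualification to #PF error-code mapping performed above, restated as a
 * self-contained function.  The bit layout follows the Intel SDM: bit 0 = read
 * access, bit 1 = write access, bit 2 = instruction fetch, bits 5:3 = the
 * permissions of the offending EPT entry.
 */
static RTGCUINT vmxSketchEptQualToPfErrCode(uint64_t uExitQual)
{
    RTGCUINT uErrCode = 0;
    if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)   /* Instruction fetch. */
        uErrCode |= X86_TRAP_PF_ID;
    if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)         /* Write access. */
        uErrCode |= X86_TRAP_PF_RW;
    if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
        uErrCode |= X86_TRAP_PF_P;                          /* Some access was permitted: protection violation. */
    return uErrCode;
}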
    16409 
    16410 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    16411 /**
    16412  * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
    16413  */
    16414 HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16415 {
    16416     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16417 
    16418     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16419     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    16420     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16421     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    16422                                                                     | CPUMCTX_EXTRN_HWVIRT
    16423                                                                     | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    16424     AssertRCReturn(rc, rc);
    16425 
    16426     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16427 
    16428     VMXVEXITINFO ExitInfo;
    16429     RT_ZERO(ExitInfo);
    16430     ExitInfo.uReason     = pVmxTransient->uExitReason;
    16431     ExitInfo.u64Qual     = pVmxTransient->uExitQual;
    16432     ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    16433     ExitInfo.cbInstr     = pVmxTransient->cbExitInstr;
    16434     HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
    16435 
    16436     VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
    16437     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16438         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
    16439     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    16440     {
    16441         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16442         rcStrict = VINF_SUCCESS;
    16443     }
    16444     return rcStrict;
    16445 }
    16446 
    16447 
    16448 /**
    16449  * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
    16450  */
    16451 HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16452 {
    16453     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16454 
    16455     /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH;
    16456        otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
    16457     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16458     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    16459     AssertRCReturn(rc, rc);
    16460 
    16461     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16462 
    16463     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
    16464     VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
    16465     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
    16466     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16467     {
    16468         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    16469         if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    16470             rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
    16471     }
    16472     Assert(rcStrict != VINF_IEM_RAISED_XCPT);
    16473     return rcStrict;
    16474 }
    16475 
    16476 
    16477 /**
    16478  * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
    16479  */
    16480 HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16481 {
    16482     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16483 
    16484     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16485     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    16486     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16487     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    16488                                                                     | CPUMCTX_EXTRN_HWVIRT
    16489                                                                     | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    16490     AssertRCReturn(rc, rc);
    16491 
    16492     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16493 
    16494     VMXVEXITINFO ExitInfo;
    16495     RT_ZERO(ExitInfo);
    16496     ExitInfo.uReason     = pVmxTransient->uExitReason;
    16497     ExitInfo.u64Qual     = pVmxTransient->uExitQual;
    16498     ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    16499     ExitInfo.cbInstr     = pVmxTransient->cbExitInstr;
    16500     HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
    16501 
    16502     VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
    16503     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16504         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
    16505     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    16506     {
    16507         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16508         rcStrict = VINF_SUCCESS;
    16509     }
    16510     return rcStrict;
    16511 }
    16512 
    16513 
    16514 /**
    16515  * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
    16516  */
    16517 HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16518 {
    16519     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16520 
    16521     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16522     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    16523     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16524     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    16525                                                                     | CPUMCTX_EXTRN_HWVIRT
    16526                                                                     | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    16527     AssertRCReturn(rc, rc);
    16528 
    16529     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16530 
    16531     VMXVEXITINFO ExitInfo;
    16532     RT_ZERO(ExitInfo);
    16533     ExitInfo.uReason     = pVmxTransient->uExitReason;
    16534     ExitInfo.u64Qual     = pVmxTransient->uExitQual;
    16535     ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    16536     ExitInfo.cbInstr     = pVmxTransient->cbExitInstr;
    16537     HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
    16538 
    16539     VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
    16540     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16541         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    16542     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    16543     {
    16544         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16545         rcStrict = VINF_SUCCESS;
    16546     }
    16547     return rcStrict;
    16548 }
    16549 
    16550 
    16551 /**
    16552  * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
    16553  */
    16554 HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16555 {
    16556     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16557 
    16558     /*
    16559      * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and
    16560      * thus might not need to import the shadow VMCS state, but it's safer to do so
    16561      * just in case code elsewhere dares look at unsynced VMCS fields.
    16562      */
    16563     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16564     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    16565     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16566     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    16567                                                                     | CPUMCTX_EXTRN_HWVIRT
    16568                                                                     | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    16569     AssertRCReturn(rc, rc);
    16570 
    16571     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16572 
    16573     VMXVEXITINFO ExitInfo;
    16574     RT_ZERO(ExitInfo);
    16575     ExitInfo.uReason     = pVmxTransient->uExitReason;
    16576     ExitInfo.u64Qual     = pVmxTransient->uExitQual;
    16577     ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    16578     ExitInfo.cbInstr     = pVmxTransient->cbExitInstr;
    16579     if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
    16580         HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
    16581 
    16582     VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
    16583     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16584         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    16585     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    16586     {
    16587         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16588         rcStrict = VINF_SUCCESS;
    16589     }
    16590     return rcStrict;
    16591 }
    16592 
    16593 
    16594 /**
    16595  * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
    16596  */
    16597 HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16598 {
    16599     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16600 
    16601     /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME;
    16602        otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
    16603     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16604     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    16605     AssertRCReturn(rc, rc);
    16606 
    16607     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16608 
    16609     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
    16610     VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
    16611     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
    16612     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16613     {
    16614         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    16615         if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    16616             rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
    16617     }
    16618     Assert(rcStrict != VINF_IEM_RAISED_XCPT);
    16619     return rcStrict;
    16620 }
    16621 
    16622 
    16623 /**
    16624  * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
    16625  */
    16626 HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16627 {
    16628     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16629 
    16630     /*
    16631      * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook
    16632      * gets invoked when IEM's VMWRITE instruction emulation modifies the current VMCS
    16633      * and flags re-loading of the entire shadow VMCS, so we save the entire shadow VMCS here.
    16634      */
    16635     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16636     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    16637     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16638     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    16639                                                                     | CPUMCTX_EXTRN_HWVIRT
    16640                                                                     | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    16641     AssertRCReturn(rc, rc);
    16642 
    16643     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16644 
    16645     VMXVEXITINFO ExitInfo;
    16646     RT_ZERO(ExitInfo);
    16647     ExitInfo.uReason     = pVmxTransient->uExitReason;
    16648     ExitInfo.u64Qual     = pVmxTransient->uExitQual;
    16649     ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    16650     ExitInfo.cbInstr     = pVmxTransient->cbExitInstr;
    16651     if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
    16652         HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
    16653 
    16654     VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
    16655     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16656         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
    16657     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    16658     {
    16659         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16660         rcStrict = VINF_SUCCESS;
    16661     }
    16662     return rcStrict;
    16663 }
    16664 
    16665 
    16666 /**
    16667  * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
    16668  */
    16669 HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16670 {
    16671     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16672 
    16673     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16674     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
    16675                                                                     | CPUMCTX_EXTRN_HWVIRT
    16676                                                                     | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    16677     AssertRCReturn(rc, rc);
    16678 
    16679     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16680 
    16681     VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
    16682     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16683         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
    16684     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    16685     {
    16686         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16687         rcStrict = VINF_SUCCESS;
    16688     }
    16689     return rcStrict;
    16690 }
    16691 
    16692 
    16693 /**
    16694  * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
    16695  */
    16696 HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16697 {
    16698     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16699 
    16700     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16701     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    16702     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16703     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    16704                                                                     | CPUMCTX_EXTRN_HWVIRT
    16705                                                                     | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    16706     AssertRCReturn(rc, rc);
    16707 
    16708     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16709 
    16710     VMXVEXITINFO ExitInfo;
    16711     RT_ZERO(ExitInfo);
    16712     ExitInfo.uReason     = pVmxTransient->uExitReason;
    16713     ExitInfo.u64Qual     = pVmxTransient->uExitQual;
    16714     ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    16715     ExitInfo.cbInstr     = pVmxTransient->cbExitInstr;
    16716     HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
    16717 
    16718     VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
    16719     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16720         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
    16721     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    16722     {
    16723         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16724         rcStrict = VINF_SUCCESS;
    16725     }
    16726     return rcStrict;
    16727 }
    16728 
    16729 
    16730 /**
    16731  * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
    16732  */
    16733 HMVMX_EXIT_DECL hmR0VmxExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16734 {
    16735     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16736 
    16737     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16738     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    16739     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16740     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    16741                                                                     | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    16742     AssertRCReturn(rc, rc);
    16743 
    16744     HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    16745 
    16746     VMXVEXITINFO ExitInfo;
    16747     RT_ZERO(ExitInfo);
    16748     ExitInfo.uReason     = pVmxTransient->uExitReason;
    16749     ExitInfo.u64Qual     = pVmxTransient->uExitQual;
    16750     ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    16751     ExitInfo.cbInstr     = pVmxTransient->cbExitInstr;
    16752     HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
    16753 
    16754     VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
    16755     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    16756         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    16757     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    16758     {
    16759         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16760         rcStrict = VINF_SUCCESS;
    16761     }
    16762     return rcStrict;
    16763 }
    16764 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    16765 /** @} */
    16766 
    16767 
    16768 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    16769 /** @name Nested-guest VM-exit handlers.
    16770  * @{
    16771  */
    16772 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    16773 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    16774 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    16775 
    16776 /**
    16777  * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
    16778  * Conditional VM-exit.
    16779  */
    16780 HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16781 {
    16782     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16783 
    16784     hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    16785 
    16786     uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
    16787     uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
    16788     Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
    16789 
    16790     switch (uExitIntType)
    16791     {
    16792         /*
    16793          * Physical NMIs:
    16794          *     We shouldn't direct host physical NMIs to the nested-guest; dispatch them to the host.
    16795          */
    16796         case VMX_EXIT_INT_INFO_TYPE_NMI:
    16797             return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
    16798 
    16799         /*
    16800          * Hardware exceptions,
    16801          * Software exceptions,
    16802          * Privileged software exceptions:
    16803          *     Figure out if the exception must be delivered to the guest or the nested-guest.
    16804          */
    16805         case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
    16806         case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
    16807         case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
    16808         {
    16809             hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    16810             hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16811             hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    16812             hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    16813 
    16814             PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    16815             bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
    16816                                                                    pVmxTransient->uExitIntErrorCode);
    16817             if (fIntercept)
    16818             {
    16819                 /* Exit qualification is required for debug and page-fault exceptions. */
    16820                 hmR0VmxReadExitQualVmcs(pVmxTransient);
    16821 
    16822                 /*
    16823                  * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
    16824                  * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
    16825                  * length. However, if delivery of a software interrupt, software exception or privileged
    16826                  * software exception causes a VM-exit, that too provides the VM-exit instruction length.
    16827                  */
    16828                 VMXVEXITINFO ExitInfo;
    16829                 RT_ZERO(ExitInfo);
    16830                 ExitInfo.uReason = pVmxTransient->uExitReason;
    16831                 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
    16832                 ExitInfo.u64Qual = pVmxTransient->uExitQual;
    16833 
    16834                 VMXVEXITEVENTINFO ExitEventInfo;
    16835                 RT_ZERO(ExitEventInfo);
    16836                 ExitEventInfo.uExitIntInfo         = pVmxTransient->uExitIntInfo;
    16837                 ExitEventInfo.uExitIntErrCode      = pVmxTransient->uExitIntErrorCode;
    16838                 ExitEventInfo.uIdtVectoringInfo    = pVmxTransient->uIdtVectoringInfo;
    16839                 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
    16840 
    16841 #ifdef DEBUG_ramshankar
    16842                 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    16843                 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
    16844                           pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
    16845                 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
    16846                 {
    16847                     Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
    16848                               pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
    16849                 }
    16850 #endif
    16851                 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
    16852             }
    16853 
    16854             /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in hmR0VmxExitXcptPF. */
    16855             Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
    16856             return hmR0VmxExitXcpt(pVCpu, pVmxTransient);
    16857         }
    16858 
    16859         /*
    16860          * Software interrupts:
    16861          *    VM-exits cannot be caused by software interrupts.
    16862          *
    16863          * External interrupts:
    16864          *    This should only happen when the "acknowledge external interrupts on VM-exit"
    16865          *    control is set. However, we never set this control when executing a guest or
    16866          *    nested-guest. For nested-guests, it is emulated while injecting interrupts into
    16867          *    the guest.
    16868          */
    16869         case VMX_EXIT_INT_INFO_TYPE_SW_INT:
    16870         case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
    16871         default:
    16872         {
    16873             pVCpu->hm.s.u32HMError = pVmxTransient->uExitIntInfo;
    16874             return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
    16875         }
    16876     }
    16877 }
    16878 
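/*
 * Illustrative sketch (hypothetical helper, not part of this changeset) of the
 * decision CPUMIsGuestVmxXcptInterceptSet makes above, per the Intel SDM: an
 * exception is intercepted when its vector's bit is set in the nested VMCS
 * exception bitmap; #PF is special-cased via the page-fault error-code
 * mask/match fields, which can invert the meaning of the bitmap bit.
 */
static bool vmxSketchIsXcptIntercepted(uint32_t uXcptBitmap, uint32_t uPfecMask, uint32_t uPfecMatch,
                                       uint8_t uVector, uint32_t uErrCode)
{
    bool const fBitmapBit = RT_BOOL(uXcptBitmap & RT_BIT_32(uVector));
    if (uVector != X86_XCPT_PF)
        return fBitmapBit;
    /* #PF: if (PFEC & PFEC_MASK) == PFEC_MATCH the bitmap bit is honoured as-is,
       otherwise its meaning is reversed. */
    bool const fPfecMatch = (uErrCode & uPfecMask) == uPfecMatch;
    return fBitmapBit == fPfecMatch;
}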
    16879 
    16880 /**
    16881  * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
    16882  * Unconditional VM-exit.
    16883  */
    16884 HMVMX_EXIT_DECL hmR0VmxExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16885 {
    16886     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16887     return IEMExecVmxVmexitTripleFault(pVCpu);
    16888 }
    16889 
    16890 
    16891 /**
    16892  * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
    16893  */
    16894 HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16895 {
    16896     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16897 
    16898     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
    16899         return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
    16900     return hmR0VmxExitIntWindow(pVCpu, pVmxTransient);
    16901 }
    16902 
    16903 
    16904 /**
    16905  * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
    16906  */
    16907 HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16908 {
    16909     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16910 
    16911     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
    16912         return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
    16913     return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient);
    16914 }
    16915 
    16916 
    16917 /**
    16918  * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
    16919  * Unconditional VM-exit.
    16920  */
    16921 HMVMX_EXIT_DECL hmR0VmxExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16922 {
    16923     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16924 
    16925     hmR0VmxReadExitQualVmcs(pVmxTransient);
    16926     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16927     hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    16928     hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    16929 
    16930     VMXVEXITINFO ExitInfo;
    16931     RT_ZERO(ExitInfo);
    16932     ExitInfo.uReason = pVmxTransient->uExitReason;
    16933     ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
    16934     ExitInfo.u64Qual = pVmxTransient->uExitQual;
    16935 
    16936     VMXVEXITEVENTINFO ExitEventInfo;
    16937     RT_ZERO(ExitEventInfo);
    16938     ExitEventInfo.uIdtVectoringInfo    = pVmxTransient->uIdtVectoringInfo;
    16939     ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
    16940     return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
    16941 }
    16942 
    16943 
    16944 /**
    16945  * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
    16946  */
    16947 HMVMX_EXIT_DECL hmR0VmxExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16948 {
    16949     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16950 
    16951     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
    16952     {
    16953         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16954         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    16955     }
    16956     return hmR0VmxExitHlt(pVCpu, pVmxTransient);
    16957 }
    16958 
    16959 
    16960 /**
    16961  * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
    16962  */
    16963 HMVMX_EXIT_DECL hmR0VmxExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16964 {
    16965     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16966 
    16967     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
    16968     {
    16969         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16970         hmR0VmxReadExitQualVmcs(pVmxTransient);
    16971 
    16972         VMXVEXITINFO ExitInfo;
    16973         RT_ZERO(ExitInfo);
    16974         ExitInfo.uReason = pVmxTransient->uExitReason;
    16975         ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
    16976         ExitInfo.u64Qual = pVmxTransient->uExitQual;
    16977         return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    16978     }
    16979     return hmR0VmxExitInvlpg(pVCpu, pVmxTransient);
    16980 }
    16981 
    16982 
    16983 /**
    16984  * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
    16985  */
    16986 HMVMX_EXIT_DECL hmR0VmxExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    16987 {
    16988     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16989 
    16990     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
    16991     {
    16992         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16993         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    16994     }
    16995     return hmR0VmxExitRdpmc(pVCpu, pVmxTransient);
    16996 }
    16997 
    16998 
    16999 /**
    17000  * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
    17001  * (VMX_EXIT_VMWRITE). Conditional VM-exit.
    17002  */
    17003 HMVMX_EXIT_DECL hmR0VmxExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17004 {
    17005     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17006 
    17007     Assert(   pVmxTransient->uExitReason == VMX_EXIT_VMREAD
    17008            || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
    17009 
    17010     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    17011 
    17012     uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
    17013     Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
    17014     uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
    17015 
    17016     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
    17017     if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
    17018         u64VmcsField &= UINT64_C(0xffffffff);
    17019 
    17020     if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
    17021     {
    17022         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17023         hmR0VmxReadExitQualVmcs(pVmxTransient);
    17024 
    17025         VMXVEXITINFO ExitInfo;
    17026         RT_ZERO(ExitInfo);
    17027         ExitInfo.uReason   = pVmxTransient->uExitReason;
    17028         ExitInfo.cbInstr   = pVmxTransient->cbExitInstr;
    17029         ExitInfo.u64Qual   = pVmxTransient->uExitQual;
    17030         ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
    17031         return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    17032     }
    17033 
    17034     if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
    17035         return hmR0VmxExitVmread(pVCpu, pVmxTransient);
    17036     return hmR0VmxExitVmwrite(pVCpu, pVmxTransient);
    17037 }
    17038 
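/*
 * Illustrative sketch (hypothetical helper, not part of this changeset) of the
 * bitmap lookup behind CPUMIsGuestVmxVmreadVmwriteInterceptSet, per the Intel
 * SDM: without "VMCS shadowing" every VMREAD/VMWRITE causes a VM-exit; with
 * it, a field encoding with any of bits 63:15 set always VM-exits, otherwise
 * the low 15 bits index a single bit in the 4 KB VMREAD/VMWRITE bitmap.
 */
static bool vmxSketchIsVmreadVmwriteIntercepted(bool fVmcsShadowing, uint8_t const *pabBitmap /* 4 KB */,
                                                uint64_t u64FieldEnc)
{
    if (!fVmcsShadowing)
        return true;
    if (u64FieldEnc & UINT64_C(0xffffffffffff8000))     /* Bits 63:15 set: always VM-exits. */
        return true;
    uint32_t const idxBit = (uint32_t)u64FieldEnc & 0x7fff;
    return RT_BOOL(pabBitmap[idxBit >> 3] & RT_BIT_32(idxBit & 7));
}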
    17039 
    17040 /**
    17041  * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
    17042  */
    17043 HMVMX_EXIT_DECL hmR0VmxExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17044 {
    17045     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17046 
    17047     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
    17048     {
    17049         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17050         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    17051     }
    17052 
    17053     return hmR0VmxExitRdtsc(pVCpu, pVmxTransient);
    17054 }
    17055 
    17056 
    17057 /**
    17058  * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
    17059  * Conditional VM-exit.
    17060  */
    17061 HMVMX_EXIT_DECL hmR0VmxExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17062 {
    17063     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17064 
    17065     hmR0VmxReadExitQualVmcs(pVmxTransient);
    17066     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17067 
    17068     VBOXSTRICTRC rcStrict;
    17069     uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
    17070     switch (uAccessType)
    17071     {
    17072         case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
    17073         {
    17074             uint8_t const iCrReg   = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
    17075             uint8_t const iGReg    = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
    17076             Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
    17077             uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
    17078 
    17079             bool fIntercept;
    17080             switch (iCrReg)
    17081             {
    17082                 case 0:
    17083                 case 4:
    17084                     fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
    17085                     break;
    17086 
    17087                 case 3:
    17088                     fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
    17089                     break;
    17090 
    17091                 case 8:
    17092                     fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
    17093                     break;
    17094 
    17095                 default:
    17096                     fIntercept = false;
    17097                     break;
    17098             }
    17099             if (fIntercept)
    17100             {
    17101                 VMXVEXITINFO ExitInfo;
    17102                 RT_ZERO(ExitInfo);
    17103                 ExitInfo.uReason = pVmxTransient->uExitReason;
    17104                 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
    17105                 ExitInfo.u64Qual = pVmxTransient->uExitQual;
    17106                 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    17107             }
    17108             else
    17109             {
    17110                 int const rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    17111                 AssertRCReturn(rc, rc);
    17112                 rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
    17113             }
    17114             break;
    17115         }
    17116 
    17117         case VMX_EXIT_QUAL_CRX_ACCESS_READ:
    17118         {
    17119             /*
    17120              * CR0/CR4 reads do not cause VM-exits; the read shadow is used (subject to masking).
    17121              * CR2 reads do not cause a VM-exit.
    17122              * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
    17123              * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
    17124              */
    17125             uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
    17126             if (   iCrReg == 3
    17127                 || iCrReg == 8)
    17128             {
    17129                 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
    17130                                                                   0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
    17131                 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
    17132                 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
    17133                 {
    17134                     VMXVEXITINFO ExitInfo;
    17135                     RT_ZERO(ExitInfo);
    17136                     ExitInfo.uReason = pVmxTransient->uExitReason;
    17137                     ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
    17138                     ExitInfo.u64Qual = pVmxTransient->uExitQual;
    17139                     rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    17140                 }
    17141                 else
    17142                 {
    17143                     uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
    17144                     rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
    17145                 }
    17146             }
    17147             else
    17148             {
    17149                 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
    17150                 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
    17151             }
    17152             break;
    17153         }
    17154 
    17155         case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
    17156         {
    17157             PCVMXVVMCS const pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    17158             uint64_t const   uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
    17159             uint64_t const   uReadShadow  = pVmcsNstGst->u64Cr0ReadShadow.u;
    17160             if (   (uGstHostMask & X86_CR0_TS)
    17161                 && (uReadShadow  & X86_CR0_TS))
    17162             {
    17163                 VMXVEXITINFO ExitInfo;
    17164                 RT_ZERO(ExitInfo);
    17165                 ExitInfo.uReason = pVmxTransient->uExitReason;
    17166                 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
    17167                 ExitInfo.u64Qual = pVmxTransient->uExitQual;
    17168                 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    17169             }
    17170             else
    17171                 rcStrict = hmR0VmxExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
    17172             break;
    17173         }
    17174 
    17175         case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:        /* LMSW (Load Machine-Status Word into CR0) */
    17176         {
    17177             RTGCPTR        GCPtrEffDst;
    17178             uint16_t const uNewMsw     = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
    17179             bool const     fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
    17180             if (fMemOperand)
    17181             {
    17182                 hmR0VmxReadGuestLinearAddrVmcs(pVmxTransient);
    17183                 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
    17184             }
    17185             else
    17186                 GCPtrEffDst = NIL_RTGCPTR;
    17187 
    17188             if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
    17189             {
    17190                 VMXVEXITINFO ExitInfo;
    17191                 RT_ZERO(ExitInfo);
    17192                 ExitInfo.uReason            = pVmxTransient->uExitReason;
    17193                 ExitInfo.cbInstr            = pVmxTransient->cbExitInstr;
    17194                 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
    17195                 ExitInfo.u64Qual            = pVmxTransient->uExitQual;
    17196                 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    17197             }
    17198             else
    17199                 rcStrict = hmR0VmxExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
    17200             break;
    17201         }
    17202 
    17203         default:
    17204         {
    17205             AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
    17206             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
    17207         }
    17208     }
    17209 
    17210     if (rcStrict == VINF_IEM_RAISED_XCPT)
    17211     {
    17212         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    17213         rcStrict = VINF_SUCCESS;
    17214     }
    17215     return rcStrict;
    17216 }
    17217 
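/*
 * Illustrative sketch (hypothetical helper, not part of this changeset) of the
 * core rule behind the MOV-to-CR0/CR4 intercept checks above, per the Intel
 * SDM: the write causes a VM-exit when a host-owned bit (one that is set in
 * the CR0/CR4 guest/host mask) would receive a value different from the one
 * in the corresponding read shadow.
 */
static bool vmxSketchIsMovToCrXIntercepted(uint64_t uGstHostMask, uint64_t uReadShadow, uint64_t uNewCrX)
{
    return RT_BOOL((uNewCrX ^ uReadShadow) & uGstHostMask);
}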
    17218 
    17219 /**
    17220  * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
    17221  * Conditional VM-exit.
    17222  */
    17223 HMVMX_EXIT_DECL hmR0VmxExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17224 {
    17225     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17226 
    17227     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
    17228     {
    17229         hmR0VmxReadExitQualVmcs(pVmxTransient);
    17230         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17231 
    17232         VMXVEXITINFO ExitInfo;
    17233         RT_ZERO(ExitInfo);
    17234         ExitInfo.uReason = pVmxTransient->uExitReason;
    17235         ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
    17236         ExitInfo.u64Qual = pVmxTransient->uExitQual;
    17237         return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    17238     }
    17239     return hmR0VmxExitMovDRx(pVCpu, pVmxTransient);
    17240 }
    17241 
    17242 
    17243 /**
    17244  * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
    17245  * Conditional VM-exit.
    17246  */
    17247 HMVMX_EXIT_DECL hmR0VmxExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17248 {
    17249     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17250 
    17251     hmR0VmxReadExitQualVmcs(pVmxTransient);
    17252 
    17253     uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
    17254     uint8_t  const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
    17255     AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
    17256 
    17257     static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 };   /* Size of the I/O accesses in bytes. */
    17258     uint8_t const cbAccess = s_aIOSizes[uIOSize];
    17259     if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
    17260     {
    17261         /*
    17262          * IN/OUT instruction:
    17263          *   - Provides VM-exit instruction length.
    17264          *
    17265          * INS/OUTS instruction:
    17266          *   - Provides VM-exit instruction length.
    17267          *   - Provides Guest-linear address.
    17268          *   - Optionally provides VM-exit instruction info (depends on CPU feature).
    17269          */
    17270         PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    17271         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17272 
    17273         /* Make sure we don't use stale/uninitialized VMX-transient info below. */
    17274         pVmxTransient->ExitInstrInfo.u  = 0;
    17275         pVmxTransient->uGuestLinearAddr = 0;
    17276 
    17277         bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
    17278         bool const fIOString       = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
    17279         if (fIOString)
    17280         {
    17281             hmR0VmxReadGuestLinearAddrVmcs(pVmxTransient);
    17282             if (fVmxInsOutsInfo)
    17283             {
    17284                 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
    17285                 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    17286             }
    17287         }
    17288 
    17289         VMXVEXITINFO ExitInfo;
    17290         RT_ZERO(ExitInfo);
    17291         ExitInfo.uReason            = pVmxTransient->uExitReason;
    17292         ExitInfo.cbInstr            = pVmxTransient->cbExitInstr;
    17293         ExitInfo.u64Qual            = pVmxTransient->uExitQual;
    17294         ExitInfo.InstrInfo          = pVmxTransient->ExitInstrInfo;
    17295         ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
    17296         return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    17297     }
    17298     return hmR0VmxExitIoInstr(pVCpu, pVmxTransient);
    17299 }
    17300 
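/*
 * Illustrative sketch (hypothetical helper, not part of this changeset) of the
 * I/O-bitmap consultation behind CPUMIsGuestVmxIoInterceptSet, per the Intel
 * SDM: bitmap A covers ports 0x0000-0x7fff and bitmap B ports 0x8000-0xffff,
 * one bit per port; every byte of a multi-byte access is checked and an
 * access wrapping around port 0xffff always VM-exits.
 */
static bool vmxSketchIsIoIntercepted(bool fUncondIoExit, bool fUseIoBitmaps,
                                     uint8_t const *pabBitmapA /* 4 KB */, uint8_t const *pabBitmapB /* 4 KB */,
                                     uint16_t uPort, uint8_t cbAccess)
{
    if (!fUseIoBitmaps)
        return fUncondIoExit;                           /* "Unconditional I/O exiting" control. */
    if ((uint32_t)uPort + cbAccess > _64K)              /* Wrap-around at port 0xffff. */
        return true;
    for (uint8_t off = 0; off < cbAccess; off++)
    {
        uint16_t const        uThisPort = uPort + off;
        uint8_t const * const pabBitmap = uThisPort < 0x8000 ? pabBitmapA : pabBitmapB;
        uint16_t const        idxBit    = uThisPort & 0x7fff;
        if (pabBitmap[idxBit >> 3] & RT_BIT_32(idxBit & 7))
            return true;
    }
    return false;
}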
    17301 
    17302 /**
    17303  * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
    17304  */
    17305 HMVMX_EXIT_DECL hmR0VmxExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17306 {
    17307     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17308 
    17309     uint32_t fMsrpm;
    17310     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
    17311         fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
    17312     else
    17313         fMsrpm = VMXMSRPM_EXIT_RD;
    17314 
    17315     if (fMsrpm & VMXMSRPM_EXIT_RD)
    17316     {
    17317         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17318         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    17319     }
    17320     return hmR0VmxExitRdmsr(pVCpu, pVmxTransient);
    17321 }
    17322 
    17323 
    17324 /**
    17325  * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
    17326  */
    17327 HMVMX_EXIT_DECL hmR0VmxExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17328 {
    17329     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17330 
    17331     uint32_t fMsrpm;
    17332     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
    17333         fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
    17334     else
    17335         fMsrpm = VMXMSRPM_EXIT_WR;
    17336 
    17337     if (fMsrpm & VMXMSRPM_EXIT_WR)
    17338     {
    17339         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17340         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    17341     }
    17342     return hmR0VmxExitWrmsr(pVCpu, pVmxTransient);
    17343 }
    17344 
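/*
 * Illustrative sketch (hypothetical helper, not part of this changeset) of the
 * MSR-bitmap lookup performed by CPUMGetVmxMsrPermission above, per the Intel
 * SDM layout: one 4 KB page holding four 1 KB bitmaps, i.e. read-low (MSRs
 * 0x00000000..0x00001fff), read-high (0xc0000000..0xc0001fff), write-low and
 * write-high, with one bit per MSR; MSRs outside both ranges always VM-exit.
 */
static bool vmxSketchIsMsrIntercepted(uint8_t const *pabMsrBitmap /* 4 KB */, uint32_t idMsr, bool fWrite)
{
    uint32_t offBase;
    if (idMsr <= UINT32_C(0x00001fff))
        offBase = 0;                                    /* Low MSR range. */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        offBase = 0x400;                                /* High MSR range. */
    else
        return true;                                    /* Outside both ranges: always VM-exits. */
    if (fWrite)
        offBase += 0x800;                               /* The write bitmaps follow the read bitmaps. */
    uint32_t const idxBit = idMsr & UINT32_C(0x1fff);
    return RT_BOOL(pabMsrBitmap[offBase + (idxBit >> 3)] & RT_BIT_32(idxBit & 7));
}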
    17345 
    17346 /**
    17347  * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
    17348  */
    17349 HMVMX_EXIT_DECL hmR0VmxExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17350 {
    17351     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17352 
    17353     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
    17354     {
    17355         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17356         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    17357     }
    17358     return hmR0VmxExitMwait(pVCpu, pVmxTransient);
    17359 }
    17360 
    17361 
    17362 /**
    17363  * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
    17364  * VM-exit.
    17365  */
    17366 HMVMX_EXIT_DECL hmR0VmxExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17367 {
    17368     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17369 
    17370     /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
    17371     hmR0VmxReadGuestPendingDbgXctps(pVmxTransient);
    17372     VMXVEXITINFO ExitInfo;
    17373     RT_ZERO(ExitInfo);
    17374     ExitInfo.uReason                 = pVmxTransient->uExitReason;
    17375     ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
    17376     return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
    17377 }
    17378 
    17379 
    17380 /**
    17381  * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
    17382  */
    17383 HMVMX_EXIT_DECL hmR0VmxExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17384 {
    17385     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17386 
    17387     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
    17388     {
    17389         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17390         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    17391     }
    17392     return hmR0VmxExitMonitor(pVCpu, pVmxTransient);
    17393 }
    17394 
    17395 
    17396 /**
    17397  * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
    17398  */
    17399 HMVMX_EXIT_DECL hmR0VmxExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17400 {
    17401     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17402 
    17403     /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
    17404      *        PAUSE when executing a nested-guest? If it does not, we would not need
    17405      *        to check for the intercepts here. Just call VM-exit... */
    17406 
    17407     /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
    17408     if (   CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
    17409         || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
    17410     {
    17411         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17412         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    17413     }
    17414     return hmR0VmxExitPause(pVCpu, pVmxTransient);
    17415 }
    17416 
    17417 
    17418 /**
    17419  * Nested-guest VM-exit handler for when the TPR value is lowered below the
    17420  * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
    17421  */
    17422 HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17423 {
    17424     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17425 
    17426     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
    17427     {
    17428         hmR0VmxReadGuestPendingDbgXctps(pVmxTransient);
    17429         VMXVEXITINFO ExitInfo;
    17430         RT_ZERO(ExitInfo);
    17431         ExitInfo.uReason                 = pVmxTransient->uExitReason;
    17432         ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
    17433         return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
    17434     }
    17435     return hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient);
    17436 }
    17437 
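/*
 * Illustrative sketch (hypothetical helper, not part of this changeset) of the
 * architectural trigger for this VM-exit, per the Intel SDM: with the "use TPR
 * shadow" control set, a write that lowers bits 7:4 of the virtual TPR below
 * bits 3:0 of the TPR-threshold control causes a TPR-below-threshold VM-exit.
 */
static bool vmxSketchIsTprBelowThreshold(uint8_t bVTpr, uint32_t uTprThreshold)
{
    return (uint32_t)(bVTpr >> 4) < (uTprThreshold & 0xf);
}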
    17438 
    17439 /**
    17440  * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
    17441  * VM-exit.
    17442  */
    17443 HMVMX_EXIT_DECL hmR0VmxExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17444 {
    17445     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17446 
    17447     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17448     hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    17449     hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    17450     hmR0VmxReadExitQualVmcs(pVmxTransient);
    17451 
    17452     Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
    17453 
    17454     Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
    17455               VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
    17456 
    17457     VMXVEXITINFO ExitInfo;
    17458     RT_ZERO(ExitInfo);
    17459     ExitInfo.uReason = pVmxTransient->uExitReason;
    17460     ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
    17461     ExitInfo.u64Qual = pVmxTransient->uExitQual;
    17462 
    17463     VMXVEXITEVENTINFO ExitEventInfo;
    17464     RT_ZERO(ExitEventInfo);
    17465     ExitEventInfo.uIdtVectoringInfo    = pVmxTransient->uIdtVectoringInfo;
    17466     ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
    17467     return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
    17468 }
    17469 
    17470 
    17471 /**
    17472  * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
    17473  * Conditional VM-exit.
    17474  */
    17475 HMVMX_EXIT_DECL hmR0VmxExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17476 {
    17477     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17478 
    17479     Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
    17480     hmR0VmxReadExitQualVmcs(pVmxTransient);
    17481     return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
    17482 }
    17483 
    17484 
    17485 /**
    17486  * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
    17487  * Conditional VM-exit.
    17488  */
    17489 HMVMX_EXIT_DECL hmR0VmxExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17490 {
    17491     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17492 
    17493     Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
    17494     hmR0VmxReadExitQualVmcs(pVmxTransient);
    17495     return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
    17496 }
    17497 
    17498 
    17499 /**
    17500  * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
    17501  */
    17502 HMVMX_EXIT_DECL hmR0VmxExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17503 {
    17504     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17505 
    17506     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
    17507     {
    17508         Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
    17509         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17510         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    17511     }
    17512     return hmR0VmxExitRdtscp(pVCpu, pVmxTransient);
    17513 }
    17514 
    17515 
    17516 /**
    17517  * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
    17518  */
    17519 HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17520 {
    17521     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17522 
    17523     if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
    17524     {
    17525         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17526         return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    17527     }
    17528     return hmR0VmxExitWbinvd(pVCpu, pVmxTransient);
    17529 }
    17530 
    17531 
    17532 /**
    17533  * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
    17534  */
    17535 HMVMX_EXIT_DECL hmR0VmxExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17536 {
    17537     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17538 
    17539     if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
    17540     {
    17541         Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
    17542         hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17543         hmR0VmxReadExitQualVmcs(pVmxTransient);
    17544         hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    17545 
    17546         VMXVEXITINFO ExitInfo;
    17547         RT_ZERO(ExitInfo);
    17548         ExitInfo.uReason   = pVmxTransient->uExitReason;
    17549         ExitInfo.cbInstr   = pVmxTransient->cbExitInstr;
    17550         ExitInfo.u64Qual   = pVmxTransient->uExitQual;
    17551         ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
    17552         return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    17553     }
    17554     return hmR0VmxExitInvpcid(pVCpu, pVmxTransient);
    17555 }
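
The RDTSCP, WBINVD and INVPCID handlers above share one conditional shape:
reflect the exit to the nested guest only when the nested hypervisor enabled
the corresponding intercept in its VMCS; otherwise the exit presumably stems
from VirtualBox's own intercepts on the hardware VMCS, so the regular
outer-guest handler runs. The WBINVD instance, with generalizing comments
(the comments are an assumption drawn from the three handlers, not source
text):

    if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
    {
        /* The nested hypervisor asked for this intercept: gather what the
           nested VM-exit needs and have IEM inject it into the nested guest. */
        hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
        return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    }
    /* Not intercepted by the nested hypervisor: an ordinary outer-guest exit. */
    return hmR0VmxExitWbinvd(pVCpu, pVmxTransient);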
    17556 
    17557 
    17558 /**
    17559  * Nested-guest VM-exit handler for invalid guest state
    17560  * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
    17561  */
    17562 HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17563 {
    17564     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17565 
    17566     /*
    17567      * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
    17568      * If it does happen nonetheless, it most likely indicates a bug in the hardware-assisted VMX code.
    17569      * Handle it as if the outer guest were in an invalid guest state.
    17570      *
    17571      * When the fast path is implemented, this should be changed to cause the corresponding
    17572      * nested-guest VM-exit.
    17573      */
    17574     return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient);
    17575 }
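
The to-do in the comment above hints at the eventual behavior: once a
hardware-assisted fast path runs nested guests directly, this handler would
reflect the error exit instead of treating it as an outer-guest failure. A
speculative sketch modeled on the reflecting handlers in this file (not part
of this changeset):

    /* Speculative: reflect VMX_EXIT_ERR_INVALID_GUEST_STATE into the nested guest. */
    hmR0VmxReadExitQualVmcs(pVmxTransient);
    return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);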
    17576 
    17577 
    17578 /**
    17579  * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
    17580  * and only provide the instruction length.
    17581  *
    17582  * Unconditional VM-exit.
    17583  */
    17584 HMVMX_EXIT_DECL hmR0VmxExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17585 {
    17586     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17587 
    17588 #ifdef VBOX_STRICT
    17589     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    17590     switch (pVmxTransient->uExitReason)
    17591     {
    17592         case VMX_EXIT_ENCLS:
    17593             Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
    17594             break;
    17595 
    17596         case VMX_EXIT_VMFUNC:
    17597             Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
    17598             break;
    17599     }
    17600 #endif
    17601 
    17602     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17603     return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
    17604 }
    17605 
    17606 
    17607 /**
    17608  * Nested-guest VM-exit handler for instructions that provide the instruction length as
    17609  * well as additional exit information.
    17610  *
    17611  * Unconditional VM-exit.
    17612  */
    17613 HMVMX_EXIT_DECL hmR0VmxExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    17614 {
    17615     HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    17616 
    17617 #ifdef VBOX_STRICT
    17618     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    17619     switch (pVmxTransient->uExitReason)
    17620     {
    17621         case VMX_EXIT_GDTR_IDTR_ACCESS:
    17622         case VMX_EXIT_LDTR_TR_ACCESS:
    17623             Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
    17624             break;
    17625 
    17626         case VMX_EXIT_RDRAND:
    17627             Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
    17628             break;
    17629 
    17630         case VMX_EXIT_RDSEED:
    17631             Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
    17632             break;
    17633 
    17634         case VMX_EXIT_XSAVES:
    17635         case VMX_EXIT_XRSTORS:
    17636             /** @todo NSTVMX: Verify XSS-bitmap. */
    17637             Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
    17638             break;
    17639 
    17640         case VMX_EXIT_UMWAIT:
    17641         case VMX_EXIT_TPAUSE:
    17642             Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
    17643             Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
    17644             break;
    17645 
    17646         case VMX_EXIT_LOADIWKEY:
    17647             Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
    17648             break;
    17649     }
    17650 #endif
    17651 
    17652     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    17653     hmR0VmxReadExitQualVmcs(pVmxTransient);
    17654     hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    17655 
    17656     VMXVEXITINFO ExitInfo;
    17657     RT_ZERO(ExitInfo);
    17658     ExitInfo.uReason   = pVmxTransient->uExitReason;
    17659     ExitInfo.cbInstr   = pVmxTransient->cbExitInstr;
    17660     ExitInfo.u64Qual   = pVmxTransient->uExitQual;
    17661     ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
    17662     return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    17663 }
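
The RT_ZERO-plus-assignment block above recurs in several handlers in this
section (compare hmR0VmxExitApicAccessNested and hmR0VmxExitInvpcidNested).
A hypothetical inline helper could factor it out (vmxHCInitExitInfo does not
exist in the source; it is a refactoring sketch only):

    /* Hypothetical helper, a refactoring sketch rather than changeset code. */
    DECL_FORCE_INLINE(void) vmxHCInitExitInfo(PVMXVEXITINFO pExitInfo, PVMXTRANSIENT pVmxTransient)
    {
        RT_ZERO(*pExitInfo);
        pExitInfo->uReason   = pVmxTransient->uExitReason;
        pExitInfo->cbInstr   = pVmxTransient->cbExitInstr;
        pExitInfo->u64Qual   = pVmxTransient->uExitQual;
        pExitInfo->InstrInfo = pVmxTransient->ExitInstrInfo; /* Only valid for exits that provide instruction info. */
    }

Each call site would then reduce to vmxHCInitExitInfo(&ExitInfo, pVmxTransient)
followed by the IEMExecVmxVmexitInstrWithInfo() call.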
    17664 
    17665 /** @} */
    17666 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    17667 