VirtualBox

Changeset 79345 in vbox


Ignore:
Timestamp:
Jun 26, 2019 9:09:46 AM (6 years ago)
Author:
vboxsync
Message:

VMM/HM: Nested VMX: bugref:9180 Implemented VMCS shadowing.

Location:
trunk
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/hm.h

    r78869 r79345  
    182182VMM_INT_DECL(bool)              HMIsNestedPagingActive(PVM pVM);
    183183VMM_INT_DECL(bool)              HMIsMsrBitmapActive(PVM pVM);
    184 # ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    185 VMM_INT_DECL(void)              HMNotifyVmxNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx);
     184# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     185VMM_INT_DECL(void)              HMNotifyVmxNstGstVmexit(PVMCPU pVCpu);
     186VMM_INT_DECL(void)              HMNotifyVmxNstGstCurrentVmcsChanged(PVMCPU pVCpu);
    186187# endif
    187188/** @} */
  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r79166 r79345  
    12321232 *
    12331233 * @param   pVCpu   The cross context virtual CPU structure.
    1234  * @param   pCtx    Pointer to the guest-CPU context.
    12351234 *
    12361235 * @remarks Can be called from ring-0 as well as ring-3.
    12371236 */
    1238 VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
    1239 {
    1240     NOREF(pCtx);
    1241 
     1237VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu)
     1238{
    12421239    /*
    12431240     * Transitions to ring-3 flag a full CPU-state change except if we transition to ring-3
     
    12491246     * for the nested-guest from ring-3.
    12501247     *
    1251      * Flag reloading of just the guest-CPU state is -not- sufficient since HM also needs
    1252      * to reload related state with VM-entry/VM-exit controls and so on. Flag reloading
    1253      * the entire state.
     1248     * Signalling reload of just the guest-CPU state that changed with the VM-exit is -not-
     1249     * sufficient since HM also needs to reload state related to VM-entry/VM-exit controls
     1250     * etc. So signal reloading of the entire state. It does not seem worth making this any
     1251     * more fine grained at the moment.
    12541252     */
    12551253    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_ALL);
     
    12581256    /*
    12591257     * Make sure we need to merge the guest VMCS controls with the nested-guest
    1260      * VMCS controls on the next nested-guest VM entry.
     1258     * VMCS controls on the next nested-guest VM-entry.
    12611259     */
    12621260    pVCpu->hm.s.vmx.fMergedNstGstCtls = false;
    12631261}
     1262
     1263
     1264/**
     1265 * Notification callback for when the guest hypervisor's current VMCS is loaded or
     1266 * changed outside VMX R0 code (e.g. in IEM).
     1267 *
     1268 * This need -not- be called for modifications to the guest hypervisor's current
     1269 * VMCS when the guest is in VMX non-root mode as VMCS shadowing is not applicable
     1270 * there.
     1271 *
     1272 * @param   pVCpu   The cross context virtual CPU structure.
     1273 *
     1274 * @remarks Can be called from ring-0 as well as ring-3.
     1275 */
     1276VMM_INT_DECL(void) HMNotifyVmxNstGstCurrentVmcsChanged(PVMCPU pVCpu)
     1277{
     1278    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_HWVIRT);
     1279    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, CPUMCTX_EXTRN_HWVIRT);
     1280
     1281    /*
     1282     * Make sure we need to copy the guest hypervisor's current VMCS into the shadow VMCS
     1283     * on the next guest VM-entry.
     1284     */
     1285    pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = false;
     1286}
     1287
    12641288# endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    12651289#endif /* IN_RC */
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r79197 r79345  
    11721172    if (pVM->hm.s.vmx.fSupported)
    11731173    {
    1174         pVM->hm.s.vmx.fUsePreemptTimer     &= g_HmR0.hwvirt.u.vmx.fUsePreemptTimer; /* Can be overridden by CFGM see HMR3Init(). */
     1174        pVM->hm.s.vmx.fUsePreemptTimer     &= g_HmR0.hwvirt.u.vmx.fUsePreemptTimer; /* Can be overridden by CFGM in HMR3Init(). */
    11751175        pVM->hm.s.vmx.cPreemptTimerShift    = g_HmR0.hwvirt.u.vmx.cPreemptTimerShift;
    11761176        pVM->hm.s.vmx.u64HostCr4            = g_HmR0.hwvirt.u.vmx.u64HostCr4;
     
    11781178        pVM->hm.s.vmx.u64HostSmmMonitorCtl  = g_HmR0.hwvirt.u.vmx.u64HostSmmMonitorCtl;
    11791179        HMGetVmxMsrsFromHwvirtMsrs(&g_HmR0.hwvirt.Msrs, &pVM->hm.s.vmx.Msrs);
     1180
     1181        /* Enable VPID if supported and configured. */
     1182        if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VPID)
     1183            pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid; /* Can be overridden by CFGM in HMR3Init(). */
     1184
     1185        /* Use VMCS shadowing if supported. */
     1186        Assert(!pVM->hm.s.vmx.fUseVmcsShadowing);
     1187        if (   pVM->cpum.ro.GuestFeatures.fVmx
     1188            && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
     1189            pVM->hm.s.vmx.fUseVmcsShadowing = true;
     1190
     1191        /* Use the VMCS controls for swapping the EFER MSR if supported. */
     1192        Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
     1193#if HC_ARCH_BITS == 64
     1194        if (   (pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
     1195            && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_LOAD_EFER_MSR)
     1196            && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_SAVE_EFER_MSR))
     1197            pVM->hm.s.vmx.fSupportsVmcsEfer = true;
     1198#endif
     1199
     1200#if 0
     1201        /* Enable APIC register virtualization and virtual-interrupt delivery if supported. */
     1202        if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT)
     1203            && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY))
     1204            pVM->hm.s.fVirtApicRegs = true;
     1205
     1206        /* Enable posted-interrupt processing if supported. */
     1207        /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
     1208         *        here. */
     1209        if (   (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1  & VMX_PIN_CTLS_POSTED_INT)
     1210            && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT))
     1211            pVM->hm.s.fPostedIntrs = true;
     1212#endif
    11801213    }
    11811214    else if (pVM->hm.s.svm.fSupported)
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r79232 r79345  
    105105                                      | CPUMCTX_EXTRN_CR4             \
    106106                                      | CPUMCTX_EXTRN_DR7             \
     107                                      | CPUMCTX_EXTRN_HWVIRT          \
    107108                                      | CPUMCTX_EXTRN_HM_VMX_MASK)
    108109
     
    355356#endif
    356357
    357 static int  hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
     358static int  hmR0VmxImportGuestState(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
    358359#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    359360static void hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
     
    479480static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    480481static VBOXSTRICTRC hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    481 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst);
    482 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr);
    483 static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
    484 static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
     482static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst);
     483static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr);
     484static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
     485static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
    485486static VBOXSTRICTRC hmR0VmxExitHostNmi(PVMCPU pVCpu);
    486487/** @} */
     
    12171218 * @returns VBox status code.
    12181219 * @param   pVmcsInfo       The VMCS info. object.
     1220 *
     1221 * @remarks Can be called with interrupts disabled.
    12191222 */
    12201223static int hmR0VmxLoadVmcs(PVMXVMCSINFO pVmcsInfo)
    12211224{
    1222     Assert(pVmcsInfo->HCPhysVmcs);
     1225    Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);
    12231226    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    12241227
    1225     if (pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    1226     {
    1227         int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);
    1228         if (RT_SUCCESS(rc))
    1229         {
    1230             pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
    1231             return VINF_SUCCESS;
    1232         }
    1233         return rc;
    1234     }
    1235     return VERR_VMX_INVALID_VMCS_LAUNCH_STATE;
     1228    int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);
     1229    if (RT_SUCCESS(rc))
     1230        pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
     1231    return rc;
    12361232}
    12371233
     
    12421238 * @returns VBox status code.
    12431239 * @param   pVmcsInfo       The VMCS info. object.
     1240 *
     1241 * @remarks Can be called with interrupts disabled.
    12441242 */
    12451243static int hmR0VmxClearVmcs(PVMXVMCSINFO pVmcsInfo)
    12461244{
    1247     Assert(pVmcsInfo->HCPhysVmcs);
     1245    Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);
    12481246    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    12491247
     
    12561254
    12571255#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1258 #if 0
    12591256/**
    12601257 * Loads the shadow VMCS specified by the VMCS info. object.
     
    12621259 * @returns VBox status code.
    12631260 * @param   pVmcsInfo       The VMCS info. object.
     1261 *
     1262 * @remarks Can be called with interrupts disabled.
    12641263 */
    12651264static int hmR0VmxLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
    12661265{
    1267     Assert(pVmcsInfo->HCPhysShadowVmcs);
    12681266    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1269 
    1270     if (pVmcsInfo->fShadowVmcsState & VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    1271     {
    1272         int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
    1273         if (RT_SUCCESS(rc))
    1274         {
    1275             pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_ACTIVE;
    1276             return VINF_SUCCESS;
    1277         }
    1278         return rc;
    1279     }
    1280     return VERR_VMX_INVALID_VMCS_LAUNCH_STATE;
    1281 }
    1282 #endif
     1267    Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
     1268
     1269    int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
     1270    if (RT_SUCCESS(rc))
     1271        pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
     1272    return rc;
     1273}
    12831274
    12841275
     
    12881279 * @returns VBox status code.
    12891280 * @param   pVmcsInfo       The VMCS info. object.
     1281 *
     1282 * @remarks Can be called with interrupts disabled.
    12901283 */
    12911284static int hmR0VmxClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
    12921285{
    1293     Assert(pVmcsInfo->HCPhysShadowVmcs);
    12941286    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1287    Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
    12951288
    12961289    int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
     
    13201313        int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
    13211314        if (RT_SUCCESS(rc))
    1322         { /* likely */ }
     1315        {
     1316            /*
     1317             * The shadow VMCS, if any, would not be active at this point since we
     1318             * would have cleared it while importing the virtual hardware-virtualization
     1319             * state as part the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
     1320             * clear the shadow VMCS here, just assert for safety.
     1321             */
     1322            Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
     1323        }
    13231324        else
    13241325            return rc;
     
    16551656    return rc;
    16561657}
    1657 
    1658 
    1659 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1660 /**
    1661  * Initializes the shadow VMCS.
    1662  *
    1663  * This builds an array (for use later while executing a nested-guest) of VMCS
    1664  * fields to copy into the shadow VMCS.
    1665  *
    1666  * @param   pVM     The cross context VM structure.
    1667  */
    1668 static void hmR0VmxInitShadowVmcsFieldsArray(PVM pVM)
    1669 {
    1670     uint32_t const cVmcsFields = RT_ELEMENTS(g_aVmcsFields);
    1671     for (uint32_t i = 0; i < cVmcsFields; i++)
    1672     {
    1673         /*
    1674          * If the VMCS field depends on a CPU feature that is not exposed to the guest,
    1675          * we must not include it in the shadow VMCS fields array. Guests attempting to
    1676          * VMREAD/VMWRITE such VMCS fields would cause a VM-exit and we shall emulate
    1677          * the required behavior.
    1678          */
    1679         uint32_t const uVmcsField      = g_aVmcsFields[i];
    1680         bool const     fVmcsFieldValid = CPUMIsGuestVmxVmcsFieldValid(pVM, uVmcsField);
    1681         if (fVmcsFieldValid)
    1682         {
    1683             pVM->hm.s.vmx.paShadowVmcsFields[i] = uVmcsField;
    1684             ++pVM->hm.s.vmx.cShadowVmcsFields;
    1685         }
    1686     }
    1687 }
    1688 
    1689 
    1690 /**
    1691  * Initializes the VMREAD/VMWRITE bitmaps.
    1692  *
    1693  * @param   pVM                 The cross context VM structure.
    1694  */
    1695 static void hmR0VmxInitVmreadVmwriteBitmaps(PVM pVM)
    1696 {
    1697     /*
     1698      * By default, ensure guest attempts to access any VMCS fields cause VM-exits.
    1699      */
    1700     uint32_t const  cbBitmap        = X86_PAGE_4K_SIZE;
    1701     uint8_t        *pbVmreadBitmap  = (uint8_t *)pVM->hm.s.vmx.pvVmreadBitmap;
    1702     uint8_t        *pbVmwriteBitmap = (uint8_t *)pVM->hm.s.vmx.pvVmwriteBitmap;
    1703     ASMMemFill32(pbVmreadBitmap,  cbBitmap, UINT32_C(0xffffffff));
    1704     ASMMemFill32(pbVmwriteBitmap, cbBitmap, UINT32_C(0xffffffff));
    1705 
    1706     uint32_t const *paShadowVmcsFields = pVM->hm.s.vmx.paShadowVmcsFields;
    1707     uint32_t const  cShadowVmcsFields  = pVM->hm.s.vmx.cShadowVmcsFields;
    1708 
    1709     /*
    1710      * Initialize the VMREAD bitmap.
    1711      * All valid guest VMCS fields (read-only and read-write) can be accessed
    1712      * using VMREAD without causing a VM-exit.
    1713      */
    1714     for (uint32_t i = 0; i < cShadowVmcsFields; i++)
    1715     {
    1716         uint32_t const uVmcsField = paShadowVmcsFields[i];
    1717         Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
    1718         uint8_t *pbField = pbVmreadBitmap + (uVmcsField >> 3);
    1719         ASMBitClear(pbField, uVmcsField & 7);
    1720     }
    1721 
    1722     /*
    1723      * Initialize the VMWRITE bitmap.
    1724      * Allow the guest to write to read-only guest VMCS fields only if the
    1725      * host CPU supports it, otherwise it would cause a VMWRITE instruction error.
    1726      */
    1727     bool const fHasVmwriteAll = RT_BOOL(pVM->hm.s.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL);
    1728     for (uint32_t i = 0; i < cShadowVmcsFields; i++)
    1729     {
    1730         uint32_t const uVmcsField = paShadowVmcsFields[i];
    1731         if (   fHasVmwriteAll
    1732             || !HMVmxIsVmcsFieldReadOnly(uVmcsField))
    1733         {
    1734             Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
    1735             uint8_t *pbField = pbVmwriteBitmap + (uVmcsField >> 3);
    1736             ASMBitClear(pbField, uVmcsField & 7);
    1737         }
    1738     }
    1739 }
    1740 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    17411658
    17421659
     
    18271744
    18281745#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1829     if (   pVM->cpum.ro.GuestFeatures.fVmx
    1830         && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
     1746    if (pVM->hm.s.vmx.fUseVmcsShadowing)
    18311747        hmR0VmxPageFree(&pVmcsInfo->hMemObjShadowVmcs, &pVmcsInfo->pvShadowVmcs, &pVmcsInfo->HCPhysShadowVmcs);
    18321748#endif
     
    18621778        {
    18631779#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1864             /* Allocate the shadow VMCS if supported by the CPU. */
    1865             if (   pVM->cpum.ro.GuestFeatures.fVmx
    1866                 && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
     1780            if (pVM->hm.s.vmx.fUseVmcsShadowing)
    18671781                rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjShadowVmcs, &pVmcsInfo->pvShadowVmcs, &pVmcsInfo->HCPhysShadowVmcs);
    18681782#endif
     
    19591873
    19601874#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1961     if (   pVM->cpum.ro.GuestFeatures.fVmx
    1962         && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
     1875    if (pVM->hm.s.vmx.fUseVmcsShadowing)
    19631876    {
    19641877        RTMemFree(pVM->hm.s.vmx.paShadowVmcsFields);
     1878        RTMemFree(pVM->hm.s.vmx.paShadowVmcsRoFields);
    19651879        hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjVmreadBitmap,  &pVM->hm.s.vmx.pvVmreadBitmap,  &pVM->hm.s.vmx.HCPhysVmreadBitmap);
    19661880        hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjVmwriteBitmap, &pVM->hm.s.vmx.pvVmwriteBitmap, &pVM->hm.s.vmx.HCPhysVmwriteBitmap);
     
    20561970
    20571971#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2058     /* Allocate the shadow VMCS fields array, VMREAD, VMWRITE bitmaps if VMCS shadowing supported by the CPU. */
    2059     if (   pVM->cpum.ro.GuestFeatures.fVmx
    2060         && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
    2061     {
    2062         pVM->hm.s.vmx.paShadowVmcsFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
    2063         if (RT_LIKELY(pVM->hm.s.vmx.paShadowVmcsFields))
     1972    /* Allocate the shadow VMCS fields array, VMREAD, VMWRITE bitmaps. */
     1973    if (pVM->hm.s.vmx.fUseVmcsShadowing)
     1974    {
     1975        Assert(!pVM->hm.s.vmx.cShadowVmcsFields);
     1976        Assert(!pVM->hm.s.vmx.cShadowVmcsRoFields);
     1977        pVM->hm.s.vmx.paShadowVmcsFields   = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
     1978        pVM->hm.s.vmx.paShadowVmcsRoFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
     1979        if (RT_LIKELY(   pVM->hm.s.vmx.paShadowVmcsFields
     1980                      && pVM->hm.s.vmx.paShadowVmcsRoFields))
    20641981        {
    20651982            rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjVmreadBitmap, &pVM->hm.s.vmx.pvVmreadBitmap,
     
    20691986                rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjVmwriteBitmap, &pVM->hm.s.vmx.pvVmwriteBitmap,
    20701987                                       &pVM->hm.s.vmx.HCPhysVmwriteBitmap);
    2071                 if (RT_SUCCESS(rc))
    2072                 {
    2073                     hmR0VmxInitShadowVmcsFieldsArray(pVM);
    2074                     hmR0VmxInitVmreadVmwriteBitmaps(pVM);
    2075                 }
    20761988            }
    20771989        }
     
    33763288
    33773289
     3290#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     3291/**
     3292 * Sets up the shadow VMCS fields arrays.
     3293 *
     3294 * This function builds arrays of VMCS fields to sync the shadow VMCS later while
     3295 * executing the guest.
     3296 *
     3297 * @returns VBox status code.
     3298 * @param   pVM     The cross context VM structure.
     3299 */
     3300static int hmR0VmxSetupShadowVmcsFieldsArrays(PVM pVM)
     3301{
     3302    /*
     3303     * Paranoia. Ensure we haven't exposed the VMWRITE-All VMX feature to the guest
     3304     * when the host does not support it.
     3305     */
     3306    bool const fGstVmwriteAll = pVM->cpum.ro.GuestFeatures.fVmxVmwriteAll;
     3307    if (   !fGstVmwriteAll
     3308        || (pVM->hm.s.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL))
     3309    { /* likely. */ }
     3310    else
     3311    {
     3312        LogRelFunc(("VMX VMWRITE-All feature exposed to the guest but host CPU does not support it!\n"));
     3313        pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_GST_HOST_VMWRITE_ALL;
     3314        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     3315    }
     3316
     3317    uint32_t const cVmcsFields = RT_ELEMENTS(g_aVmcsFields);
     3318    uint32_t       cRwFields   = 0;
     3319    uint32_t       cRoFields   = 0;
     3320    for (uint32_t i = 0; i < cVmcsFields; i++)
     3321    {
     3322        VMXVMCSFIELD VmcsField;
     3323        VmcsField.u = g_aVmcsFields[i];
     3324
     3325        /*
     3326         * We will be writing "FULL" (64-bit) fields while syncing the shadow VMCS.
     3327         * Therefore, "HIGH" (32-bit portion of 64-bit) fields must not be included
     3328         * in the shadow VMCS fields array as they would be redundant.
     3329         *
     3330         * If the VMCS field depends on a CPU feature that is not exposed to the guest,
     3331         * we must not include it in the shadow VMCS fields array. Guests attempting to
     3332         * VMREAD/VMWRITE such VMCS fields would cause a VM-exit and we shall emulate
     3333         * the required behavior.
     3334         */
     3335        if (   VmcsField.n.fAccessType == VMX_VMCSFIELD_ACCESS_FULL
     3336            && CPUMIsGuestVmxVmcsFieldValid(pVM, VmcsField.u))
     3337        {
     3338            /*
     3339             * Read-only fields are placed in a separate array so that while syncing shadow
     3340             * VMCS fields later (which is more performance critical) we can avoid branches.
     3341             *
     3342             * However, if the guest can write to all fields (including read-only fields),
     3343             * we treat it as a read/write field. Otherwise, writing to these fields would
     3344             * cause a VMWRITE instruction error while syncing the shadow VMCS.
     3345             */
     3346            if (   fGstVmwriteAll
     3347                || !HMVmxIsVmcsFieldReadOnly(VmcsField.u))
     3348                pVM->hm.s.vmx.paShadowVmcsFields[cRwFields++] = VmcsField.u;
     3349            else
     3350                pVM->hm.s.vmx.paShadowVmcsRoFields[cRoFields++] = VmcsField.u;
     3351        }
     3352    }
     3353
     3354    /* Update the counts. */
     3355    pVM->hm.s.vmx.cShadowVmcsFields   = cRwFields;
     3356    pVM->hm.s.vmx.cShadowVmcsRoFields = cRoFields;
     3357    return VINF_SUCCESS;
     3358}
     3359
     3360
     3361/**
     3362 * Sets up the VMREAD and VMWRITE bitmaps.
     3363 *
     3364 * @param   pVM     The cross context VM structure.
     3365 */
     3366static void hmR0VmxSetupVmreadVmwriteBitmaps(PVM pVM)
     3367{
     3368    /*
      3369     * By default, ensure guest attempts to access any VMCS fields cause VM-exits.
     3370     */
     3371    uint32_t const cbBitmap        = X86_PAGE_4K_SIZE;
     3372    uint8_t       *pbVmreadBitmap  = (uint8_t *)pVM->hm.s.vmx.pvVmreadBitmap;
     3373    uint8_t       *pbVmwriteBitmap = (uint8_t *)pVM->hm.s.vmx.pvVmwriteBitmap;
     3374    ASMMemFill32(pbVmreadBitmap,  cbBitmap, UINT32_C(0xffffffff));
     3375    ASMMemFill32(pbVmwriteBitmap, cbBitmap, UINT32_C(0xffffffff));
     3376
     3377    /*
     3378     * Skip intercepting VMREAD/VMWRITE to guest read/write fields in the
     3379     * VMREAD and VMWRITE bitmaps.
     3380     */
     3381    {
     3382        uint32_t const *paShadowVmcsFields = pVM->hm.s.vmx.paShadowVmcsFields;
     3383        uint32_t const  cShadowVmcsFields  = pVM->hm.s.vmx.cShadowVmcsFields;
     3384        for (uint32_t i = 0; i < cShadowVmcsFields; i++)
     3385        {
     3386            uint32_t const uVmcsField = paShadowVmcsFields[i];
     3387            Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
     3388            Assert(uVmcsField >> 3 < cbBitmap);
     3389            ASMBitClear(pbVmreadBitmap  + (uVmcsField >> 3), uVmcsField & 7);
     3390            ASMBitClear(pbVmwriteBitmap + (uVmcsField >> 3), uVmcsField & 7);
     3391        }
     3392    }
     3393
     3394    /*
     3395     * Skip intercepting VMREAD for guest read-only fields in the VMREAD bitmap
     3396     * if the host supports VMWRITE to all supported VMCS fields.
     3397     */
     3398    if (pVM->hm.s.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL)
     3399    {
     3400        uint32_t const *paShadowVmcsRoFields = pVM->hm.s.vmx.paShadowVmcsRoFields;
     3401        uint32_t const  cShadowVmcsRoFields  = pVM->hm.s.vmx.cShadowVmcsRoFields;
     3402        for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
     3403        {
     3404            uint32_t const uVmcsField = paShadowVmcsRoFields[i];
     3405            Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
     3406            Assert(uVmcsField >> 3 < cbBitmap);
     3407            ASMBitClear(pbVmreadBitmap + (uVmcsField >> 3), uVmcsField & 7);
     3408        }
     3409    }
     3410}
     3411#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
     3412
     3413
    33783414/**
    33793415 * Sets up the virtual-APIC page address for the VMCS.
     
    34253461
    34263462
    3427 /**
    3428  * Sets up the VMCS link pointer for the VMCS.
     3463#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     3464/**
     3465 * Sets up the VMREAD bitmap address for the VMCS.
    34293466 *
    34303467 * @returns VBox status code.
    34313468 * @param   pVCpu       The cross context virtual CPU structure.
    3432  * @param   pVmcsInfo   The VMCS info. object.
    3433  */
    3434 DECLINLINE(int) hmR0VmxSetupVmcsLinkPtr(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
    3435 {
    3436     NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
    3437     uint64_t const u64VmcsLinkPtr = pVmcsInfo->u64VmcsLinkPtr;
    3438     Assert(u64VmcsLinkPtr == UINT64_C(0xffffffffffffffff));  /* Bits 63:0 MB1. */
    3439     return VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, u64VmcsLinkPtr);
    3440 }
     3469 */
     3470DECLINLINE(int) hmR0VmxSetupVmcsVmreadBitmapAddr(PVMCPU pVCpu)
     3471{
     3472    RTHCPHYS const HCPhysVmreadBitmap = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.HCPhysVmreadBitmap;
     3473    Assert(HCPhysVmreadBitmap != NIL_RTHCPHYS);
     3474    Assert(!(HCPhysVmreadBitmap & 0xfff));                     /* Bits 11:0 MBZ. */
     3475    return VMXWriteVmcs64(VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL, HCPhysVmreadBitmap);
     3476}
     3477
     3478
     3479/**
     3480 * Sets up the VMWRITE bitmap address for the VMCS.
     3481 *
     3482 * @returns VBox status code.
     3483 * @param   pVCpu       The cross context virtual CPU structure.
     3484 */
     3485DECLINLINE(int) hmR0VmxSetupVmcsVmwriteBitmapAddr(PVMCPU pVCpu)
     3486{
     3487    RTHCPHYS const HCPhysVmwriteBitmap = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.HCPhysVmwriteBitmap;
     3488    Assert(HCPhysVmwriteBitmap != NIL_RTHCPHYS);
     3489    Assert(!(HCPhysVmwriteBitmap & 0xfff));                     /* Bits 11:0 MBZ. */
     3490    return VMXWriteVmcs64(VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL, HCPhysVmwriteBitmap);
     3491}
     3492#endif
    34413493
    34423494
     
    36243676        fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
    36253677
    3626 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3627 #if 0
    3628     /* Enable VMCS shadowing if supported by the hardware and VMX is exposed to the guest. */
    3629     if (   pVM->cpum.ro.GuestFeatures.fVmx
    3630         && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING))
    3631         fVal |= VMX_PROC_CTLS2_VMCS_SHADOWING;
    3632 #endif
    3633 #endif
    3634 
    36353678#if 0
    36363679    if (pVM->hm.s.fVirtApicRegs)
     
    38073850 * @param   pVCpu           The cross context virtual CPU structure.
    38083851 * @param   pVmcsInfo       The VMCS info. object.
    3809  *
    3810  * @remarks Must be called after secondary processor-based VM-execution controls
    3811  *          have been initialized!
    38123852 */
    38133853static int hmR0VmxSetupVmcsMiscCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
    38143854{
    3815     /* Set the VMCS link pointer in the VMCS. */
    3816     int rc = hmR0VmxSetupVmcsLinkPtr(pVCpu, pVmcsInfo);
     3855#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     3856    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUseVmcsShadowing)
     3857    {
     3858        int rc = hmR0VmxSetupVmcsVmreadBitmapAddr(pVCpu);
     3859        rc    |= hmR0VmxSetupVmcsVmwriteBitmapAddr(pVCpu);
     3860        if (RT_SUCCESS(rc))
     3861        { /* likely */ }
     3862        else
     3863        {
     3864            LogRelFunc(("Failed to setup VMREAD/VMWRITE bitmap addresses. rc=%Rrc\n", rc));
     3865            return rc;
     3866        }
     3867    }
     3868#endif
     3869
     3870    int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
    38173871    if (RT_SUCCESS(rc))
    38183872    {
    3819         /* Set the auto-load/store MSR area addresses in the VMCS. */
    38203873        rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVCpu, pVmcsInfo);
    38213874        if (RT_SUCCESS(rc))
    38223875        {
    3823             /* Set the CR0/CR4 guest/host mask. */
    38243876            uint64_t const u64Cr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
    38253877            uint64_t const u64Cr4Mask = hmR0VmxGetFixedCr4Mask(pVCpu);
     
    38903942{
    38913943    PVM pVM = pVCpu->CTX_SUFF(pVM);
    3892     int rc = hmR0VmxSetupVmcsLinkPtr(pVCpu, pVmcsInfo);
     3944    int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
    38933945    if (RT_SUCCESS(rc))
    38943946    {
     
    39674019                            {
    39684020#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3969                                 /* If VMCS shadowing is used, initialize the shadow VMCS. */
    3970                                 if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
     4021                                /*
     4022                                 * If a shadow VMCS is allocated for the VMCS info. object, initialize the
     4023                                 * VMCS revision ID and shadow VMCS indicator bit. Also, clear the VMCS
     4024                                 * making it fit for use when VMCS shadowing is later enabled.
     4025                                 */
     4026                                if (pVmcsInfo->pvShadowVmcs)
    39714027                                {
    3972                                     Assert(pVmcsInfo->pvShadowVmcs);
    39734028                                    VMXVMCSREVID VmcsRevId;
    39744029                                    VmcsRevId.u = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
     
    42204275    if (RT_FAILURE(rc))
    42214276    {
    4222         LogRelFunc(("hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
     4277        LogRelFunc(("Failed to setup tagged TLB. rc=%Rrc\n", rc));
    42234278        return rc;
    42244279    }
    42254280
    4226     /* Check if we can use the VMCS controls for swapping the EFER MSR. */
    4227     Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
    4228 #if HC_ARCH_BITS == 64
    4229     if (   (pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
    4230         && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_LOAD_EFER_MSR)
    4231         && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_SAVE_EFER_MSR))
    4232         pVM->hm.s.vmx.fSupportsVmcsEfer = true;
     4281#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     4282    /* Setup the shadow VMCS fields array and VMREAD/VMWRITE bitmaps. */
     4283    if (pVM->hm.s.vmx.fUseVmcsShadowing)
     4284    {
     4285        rc = hmR0VmxSetupShadowVmcsFieldsArrays(pVM);
     4286        if (RT_SUCCESS(rc))
     4287            hmR0VmxSetupVmreadVmwriteBitmaps(pVM);
     4288        else
     4289        {
     4290            LogRelFunc(("Failed to setup shadow VMCS fields arrays. rc=%Rrc\n", rc));
     4291            return rc;
     4292        }
     4293    }
    42334294#endif
    42344295
     
    53455406
    53465407
     5408#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     5409/**
     5410 * Copies the nested-guest VMCS to the shadow VMCS.
     5411 *
     5412 * @returns VBox status code.
     5413 * @param   pVCpu       The cross context virtual CPU structure.
     5414 * @param   pVmcsInfo   The VMCS info. object.
     5415 *
     5416 * @remarks No-long-jump zone!!!
     5417 */
     5418static int hmR0VmxCopyNstGstToShadowVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
     5419{
     5420    PVM pVM = pVCpu->CTX_SUFF(pVM);
     5421    PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5422
     5423    /*
     5424     * Disable interrupts so we don't get preempted while the shadow VMCS is the
     5425     * current VMCS, as we may try saving guest lazy MSRs.
     5426     *
     5427     * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
     5428     * calling the import VMCS code which is currently performing the guest MSR reads
     5429     * (on 64-bit hosts) and accessing the auto-load/store MSR area on 32-bit hosts
     5430     * and the rest of the VMX leave session machinery.
     5431     */
     5432    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
     5433
     5434    int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
     5435    if (RT_SUCCESS(rc))
     5436    {
     5437        /*
     5438         * Copy all guest read/write VMCS fields.
     5439         *
     5440         * We don't check for VMWRITE failures here for performance reasons and
     5441         * because they are not expected to fail, barring irrecoverable conditions
     5442         * like hardware errors.
     5443         */
     5444        uint32_t const cShadowVmcsFields = pVM->hm.s.vmx.cShadowVmcsFields;
     5445        for (uint32_t i = 0; i < cShadowVmcsFields; i++)
     5446        {
     5447            uint64_t       u64Val;
     5448            uint32_t const uVmcsField = pVM->hm.s.vmx.paShadowVmcsFields[i];
     5449            IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
     5450            VMXWriteVmcs64(uVmcsField, u64Val);
     5451        }
     5452
     5453        /*
     5454         * If the host CPU supports writing all VMCS fields, copy the guest read-only
     5455         * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
     5456         */
     5457        if (pVM->hm.s.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL)
     5458        {
     5459            uint32_t const cShadowVmcsRoFields = pVM->hm.s.vmx.cShadowVmcsRoFields;
     5460            for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
     5461            {
     5462                uint64_t       u64Val;
     5463                uint32_t const uVmcsField = pVM->hm.s.vmx.paShadowVmcsRoFields[i];
     5464                IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
     5465                VMXWriteVmcs64(uVmcsField, u64Val);
     5466            }
     5467        }
     5468
     5469        rc  = hmR0VmxClearShadowVmcs(pVmcsInfo);
     5470        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
     5471    }
     5472
     5473    ASMSetFlags(fEFlags);
     5474    return rc;
     5475}
     5476
     5477
     5478/**
     5479 * Copies the shadow VMCS to the nested-guest VMCS.
     5480 *
     5481 * @returns VBox status code.
     5482 * @param   pVCpu       The cross context virtual CPU structure.
     5483 * @param   pVmcsInfo   The VMCS info. object.
     5484 *
     5485 * @remarks Called with interrupts disabled.
     5486 */
     5487static int hmR0VmxCopyShadowToNstGstVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
     5488{
     5489    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     5490    PVM pVM = pVCpu->CTX_SUFF(pVM);
     5491    PVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5492
     5493    int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
     5494    if (RT_SUCCESS(rc))
     5495    {
     5496        /*
     5497         * Copy guest read/write fields from the shadow VMCS.
     5498         * Guest read-only fields cannot be modified, so no need to copy them.
     5499         *
     5500         * We don't check for VMREAD failures here for performance reasons and
     5501         * because they are not expected to fail, barring irrecoverable conditions
     5502         * like hardware errors.
     5503         */
     5504        uint32_t const cShadowVmcsFields = pVM->hm.s.vmx.cShadowVmcsFields;
     5505        for (uint32_t i = 0; i < cShadowVmcsFields; i++)
     5506        {
     5507            uint64_t       u64Val;
     5508            uint32_t const uVmcsField = pVM->hm.s.vmx.paShadowVmcsFields[i];
     5509            VMXReadVmcs64(uVmcsField, &u64Val);
     5510            IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
     5511        }
     5512
     5513        rc  = hmR0VmxClearShadowVmcs(pVmcsInfo);
     5514        rc |= hmR0VmxLoadVmcs(pVmcsInfo);
     5515    }
     5516    return rc;
     5517}
     5518
     5519
     5520/**
     5521 * Enables VMCS shadowing for the given VMCS info. object.
     5522 *
     5523 * @param   pVCpu           The cross context virtual CPU structure.
     5524 * @param   pVmcsInfo       The VMCS info. object.
     5525 *
     5526 * @remarks No-long-jump zone!!!
     5527 */
     5528static void hmR0VmxEnableVmcsShadowing(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
     5529{
     5530    NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
     5531
     5532    uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
     5533    if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
     5534    {
     5535        Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
     5536        uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
     5537        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2);
     5538        rc    |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs);
     5539        AssertRC(rc);
     5540
     5541        pVmcsInfo->u32ProcCtls2   = uProcCtls2;
     5542        pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
     5543        Log4Func(("Enabled\n"));
     5544    }
     5545}
     5546
     5547
     5548/**
     5549 * Disables VMCS shadowing for the given VMCS info. object.
     5550 *
     5551 * @param   pVCpu           The cross context virtual CPU structure.
     5552 * @param   pVmcsInfo       The VMCS info. object.
     5553 *
     5554 * @remarks No-long-jump zone!!!
     5555 */
     5556static void hmR0VmxDisableVmcsShadowing(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
     5557{
     5558    NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
     5559
     5560    /*
     5561     * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
     5562     * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
     5563     * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
     5564     * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
     5565     *
     5566     * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
     5567     * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
     5568     */
     5569    uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
     5570    if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
     5571    {
     5572        uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
     5573        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2);
     5574        rc    |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
     5575        AssertRC(rc);
     5576
     5577        pVmcsInfo->u32ProcCtls2   = uProcCtls2;
     5578        pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
     5579        Log4Func(("Disabled\n"));
     5580    }
     5581}
     5582#endif
     5583
     5584
     5585/**
     5586 * Exports the guest hardware-virtualization state.
     5587 *
     5588 * @returns VBox status code.
     5589 * @param   pVCpu           The cross context virtual CPU structure.
     5590 * @param   pVmxTransient   The VMX-transient structure.
     5591 *
     5592 * @remarks No-long-jump zone!!!
     5593 */
     5594static int hmR0VmxExportGuestHwvirtState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     5595{
     5596    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_HWVIRT)
     5597    {
     5598#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     5599        /*
     5600         * Check if the VMX feature is exposed to the guest and if the host CPU supports
     5601         * VMCS shadowing.
     5602         */
     5603        if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUseVmcsShadowing)
     5604        {
     5605            /*
     5606             * If the guest hypervisor has loaded a current VMCS and is in VMX root mode,
     5607             * copy the guest hypervisor's current VMCS into the shadow VMCS and enable
     5608             * VMCS shadowing to skip intercepting some or all VMREAD/VMWRITE VM-exits.
     5609             *
     5610             * We check for VMX root mode here in case the guest executes VMXOFF without
     5611             * clearing the current VMCS pointer and our VMXOFF instruction emulation does
     5612             * not clear the current VMCS pointer.
     5613             */
     5614            PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     5615            if (   CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx)
     5616                && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
     5617                && CPUMIsGuestVmxCurrentVmcsValid(pVCpu, &pVCpu->cpum.GstCtx))
     5618            {
     5619                /* Paranoia. */
     5620                Assert(!pVmxTransient->fIsNestedGuest);
     5621
     5622                /*
     5623                 * For performance reasons, also check if the guest hypervisor's current VMCS
     5624                 * was newly loaded or modified before copying it to the shadow VMCS.
     5625                 */
     5626                if (!pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs)
     5627                {
     5628                    int rc = hmR0VmxCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
     5629                    AssertRCReturn(rc, rc);
     5630                    pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = true;
     5631                }
     5632                hmR0VmxEnableVmcsShadowing(pVCpu, pVmcsInfo);
     5633            }
     5634            else
     5635                hmR0VmxDisableVmcsShadowing(pVCpu, pVmcsInfo);
     5636        }
     5637#else
     5638        NOREF(pVmxTransient);
     5639#endif
     5640        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT);
     5641    }
     5642    return VINF_SUCCESS;
     5643}
     5644
     5645
    53475646/**
    53485647 * Exports the guest CR0 control register into the guest-state area in the VMCS.
     
    59646263 *          segments.
    59656264 */
    5966 static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
     6265static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
    59676266{
    59686267    /*
     
    79858284 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
    79868285 */
    7987 static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
     8286static int hmR0VmxImportGuestState(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
    79888287{
    79898288#define VMXLOCAL_BREAK_RC(a_rc) \
     
    83008599                    }
    83018600                }
     8601            }
    83028602
    83038603#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     8604            if (fWhat & CPUMCTX_EXTRN_HWVIRT)
     8605            {
     8606                if (   (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
     8607                    && !CPUMIsGuestInVmxNonRootMode(pCtx))
     8608                {
     8609                    Assert(CPUMIsGuestInVmxRootMode(pCtx));
     8610                    rc = hmR0VmxCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
     8611                    VMXLOCAL_BREAK_RC(rc);
     8612                }
     8613
    83048614# if 0
    83058615                /** @todo NSTVMX: We handle most of these fields individually by passing it to IEM
     
    83128622                 * the guest-CPU state when executing a nested-guest.
    83138623                 */
    8314                 if (   (fWhat & CPUMCTX_EXTRN_HWVIRT)
    8315                     && CPUMIsGuestInVmxNonRootMode(pCtx))
     8624                if (CPUMIsGuestInVmxNonRootMode(pCtx))
    83168625                {
    83178626                    PVMXVVMCS pGstVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
     
    83618670                }
    83628671# endif
     8672            }
    83638673#endif
    8364             }
    83658674        } while (0);
    83668675
     
    84348743{
    84358744    AssertPtr(pVCpu);
    8436     PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     8745    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    84378746    return hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fWhat);
    84388747}
     
    88489157    int rc = hmR0VmxClearVmcs(pVmcsInfo);
    88499158    AssertRCReturn(rc, rc);
     9159
     9160#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     9161    /*
     9162     * A valid shadow VMCS is made active as part of VM-entry. It is necessary to
     9163     * clear a shadow VMCS before allowing that VMCS to become active on another
     9164     * logical processor. We may or may not be importing guest state which clears
     9165     * it, so cover for it here.
     9166     *
     9167     * See Intel spec. 24.11.1 "Software Use of Virtual-Machine Control Structures".
     9168     */
     9169    if (   pVmcsInfo->pvShadowVmcs
     9170        && pVmcsInfo->fShadowVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
     9171    {
     9172        rc = hmR0VmxClearShadowVmcs(pVmcsInfo);
     9173        AssertRCReturn(rc, rc);
     9174    }
     9175#endif
    88509176
    88519177    Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
     
    90839409        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    90849410
    9085         /* Clear the current VMCS data back to memory. */
      9411        /* Clear the current VMCS data back to memory (the shadow VMCS, if any, would have
      9412           been cleared as part of importing the guest state above). */
    90869413        hmR0VmxClearVmcs(pVmcsInfo);
    90879414
     
    92339560
    92349561            /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
    9235             PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     9562            PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    92369563            int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
    92379564                                                              | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
     
    987610203    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    987710204
     10205    rc = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);
     10206    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     10207
    987810208    /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
    987910209    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(  (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
     
    988810218                                                  |  HM_CHANGED_GUEST_TSC_AUX
    988910219                                                  |  HM_CHANGED_GUEST_OTHER_MSRS
    9890                                                   |  HM_CHANGED_GUEST_HWVIRT         /* More accurate PLE handling someday? */
    989110220                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
    989210221
     
    1108511414     * If any new events (interrupts/NMI) are pending currently, we try to set up the
    1108611415     * guest to cause a VM-exit the next time they are ready to receive the event.
     11416     *
     11417     * With nested-guests, evaluating pending events may cause VM-exits.
    1108711418     */
    1108811419    if (TRPMHasTrap(pVCpu))
     
    1111111442     * also result in triple-faulting the VM.
    1111211443     *
    11113      * The above does not apply when executing a nested-guest (since unrestricted guest execution
    11114      * is a requirement) regardless doing it avoid duplicating code elsewhere.
     11444     * With nested-guests, the above does not apply since unrestricted guest execution is a
     11445     * requirement. Regardless, we do this here to avoid duplicating code elsewhere.
    1111511446     */
    1111611447    rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping);
     
    1116411495     *
    1116511496     * If we are injecting events to a real-on-v86 mode guest, we would have updated RIP and some segment
    11166      * registers. Hence, loading of the guest state needs to be done -after- injection of events.
     11497     * registers. Hence, exporting of the guest state needs to be done -after- injection of events.
    1116711498     */
    1116811499    rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pVmxTransient);
     
    1165711988    Assert(pcLoops);
    1165811989    Assert(*pcLoops <= cMaxResumeLoops);
     11990    Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
    1165911991
    1166011992    VMXTRANSIENT VmxTransient;
     
    1314513477       do { \
    1314613478            if (a_fSave != 0) \
    13147                 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); \
     13479                hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
    1314813480            VBOXSTRICTRC rcStrict = a_CallExpr; \
    1314913481            if (a_fSave != 0) \
     
    1369714029    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
    1369814030
    13699     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14031    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1370014032    int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    1370114033    AssertRCReturn(rc, rc);
     
    1391414246     * Get the state we need and update the exit history entry.
    1391514247     */
    13916     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14248    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1391714249    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1391814250    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
     
    1396614298    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1396714299
    13968     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14300    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1396914301    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
    1397014302    AssertRCReturn(rc, rc);
     
    1398514317    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1398614318
    13987     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14319    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1398814320    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1398914321    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     
    1401514347    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1401614348
    14017     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14349    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1401814350    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
    1401914351    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     
    1404514377    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1404614378
    14047     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14379    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1404814380    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4    | CPUMCTX_EXTRN_CR0
    1404914381                                                     | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
     
    1407614408    if (EMAreHypercallInstructionsEnabled(pVCpu))
    1407714409    {
    14078         PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14410        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1407914411        int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
    1408014412                                                         | CPUMCTX_EXTRN_SS  | CPUMCTX_EXTRN_CS     | CPUMCTX_EXTRN_EFER);
     
    1411814450    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
    1411914451
    14120     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14452    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1412114453    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1412214454    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     
    1414714479    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1414814480
    14149     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14481    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1415014482    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
    1415114483    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     
    1417214504    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1417314505
    14174     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14506    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1417514507    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    1417614508    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     
    1425914591    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1426014592
    14261     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14593    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1426214594    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1426314595    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
     
    1429214624HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1429314625{
    14294     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14626    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1429514627    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1429614628    AssertRCReturn(rc, rc);
     
    1443714769     * MSRs required.  That would require changes to IEM and possibly CPUM too.
    1443814770     * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
    14439     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    14440     uint32_t const idMsr    = pVCpu->cpum.GstCtx.ecx;
    14441     uint64_t       fImport  = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
     14771    PVMXVMCSINFO  pVmcsInfo = pVmxTransient->pVmcsInfo;
     14772    uint32_t const idMsr     = pVCpu->cpum.GstCtx.ecx;
     14773    uint64_t       fImport   = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
    1444214774    switch (idMsr)
    1444314775    {
     
    1451714849    }
    1451814850
    14519     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14851    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1452014852    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1452114853    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
     
    1468515017    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
    1468615018
    14687     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     15019    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1468815020    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1468915021    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     
    1483815170
    1483915171    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    14840     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     15172    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1484115173    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1484215174    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     
    1533615668     */
    1533715669    RTGCPHYS GCPhys;
    15338     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     15670    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1533915671    int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
    1534015672    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     
    1541515747
    1541615748    RTGCPHYS GCPhys;
    15417     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     15749    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1541815750    int rc  = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
    1541915751    rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     
    1591316245 * VM-exit helper for LMSW.
    1591416246 */
    15915 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw,
    15916                                     RTGCPTR GCPtrEffDst)
     16247static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
    1591716248{
    1591816249    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     
    1593916270 * VM-exit helper for CLTS.
    1594016271 */
    15941 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
     16272static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
    1594216273{
    1594316274    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     
    1596416295 * VM-exit helper for MOV from CRx (CRx read).
    1596516296 */
    15966 static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
     16297static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
    1596716298{
    1596816299    Assert(iCrReg < 16);
     
    1599816329 * VM-exit helper for MOV to CRx (CRx write).
    1599916330 */
    16000 static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
     16331static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
    1600116332{
    1600216333    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     
    1609416425    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1609516426    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     16427                                                                    | CPUMCTX_EXTRN_HWVIRT
    1609616428                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1609716429    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     
    1613916471    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1614016472    {
    16141         rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
    1614216473        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     16474        if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
     16475            rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
    1614316476    }
    1614416477    Assert(rcStrict != VINF_IEM_RAISED_XCPT);
     
    1615616489    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1615716490    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     16491                                                                    | CPUMCTX_EXTRN_HWVIRT
    1615816492                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1615916493    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     
    1619216526    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1619316527    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     16528                                                                    | CPUMCTX_EXTRN_HWVIRT
    1619416529                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1619516530    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     
    1622816563    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1622916564    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     16565                                                                    | CPUMCTX_EXTRN_HWVIRT
    1623016566                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1623116567    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     
    1627416610    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1627516611    {
    16276         rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
    1627716612        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     16613        if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
     16614            rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
    1627816615    }
    1627916616    Assert(rcStrict != VINF_IEM_RAISED_XCPT);
     
    16289 16626    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16290 16627
          16628    /*
          16629     * Although we should not get VMWRITE VM-exits for shadow VMCS fields, since
          16630     * our HM hook that gets invoked when IEM's VMWRITE instruction emulation
          16631     * modifies the current VMCS signals re-loading the entire shadow VMCS, we
          16632     * should also save the entire shadow VMCS here.
          16633     */
    16291 16634    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16292 16635    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
          16636                                                                    | CPUMCTX_EXTRN_HWVIRT
    16293 16637                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    16294 16638    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     
    16307 16651        HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
    16308 16652
          16653    /** @todo NSTVMX: Remove later. */
          16654    Log4Func(("VMWRITE: %#x\n", pVCpu->cpum.GstCtx.aGRegs[ExitInfo.InstrInfo.VmreadVmwrite.iReg2].u32));
          16655
    16309 16656    VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
    16310 16657    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     
    16328 16675    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16329 16676    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
          16677                                                                    | CPUMCTX_EXTRN_HWVIRT
    16330 16678                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    16331 16679    AssertRCReturn(rc, rc);
     
    16354 16702    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16355 16703    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
          16704                                                                    | CPUMCTX_EXTRN_HWVIRT
    16356 16705                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    16357 16706    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r79125 r79345  
    1597 1597
    1598 1598    /*
    1599      * Enable VPID if configured and supported.
    1600      */
    1601     if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VPID)
    1602         pVM->hm.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid;
    1603 
    1604 #if 0
    1605     /*
    1606      * Enable APIC register virtualization and virtual-interrupt delivery if supported.
    1607      */
    1608     if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT)
    1609         && (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY))
    1610         pVM->hm.s.fVirtApicRegs = true;
    1611 
    1612     /*
    1613      * Enable posted-interrupt processing if supported.
    1614      */
    1615     /** @todo Add and query IPRT API for host OS support for posted-interrupt IPI
    1616      *        here. */
    1617     if (   (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1  & VMX_PIN_CTLS_POSTED_INT)
    1618         && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT))
    1619         pVM->hm.s.fPostedIntrs = true;
    1620 #endif
    1621 
    1622     /*
    1623 1599     * Disallow RDTSCP in the guest if there is no secondary process-based VM execution controls as otherwise
    1624 1600     * RDTSCP would cause a #UD. There might be no CPUs out there where this happens, as RDTSCP was introduced
     
    1771 1747        Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
    1772 1748
    1773     if (pVM->hm.s.fVirtApicRegs)
    1774         LogRel(("HM:   Enabled APIC-register virtualization support\n"));
    1775 
    1776     if (pVM->hm.s.fPostedIntrs)
    1777         LogRel(("HM:   Enabled posted-interrupt processing support\n"));
    1778 
    1779 1749    if (pVM->hm.s.vmx.fVpid)
    1780 1750    {
     
    1798 1768    else
    1799 1769        LogRel(("HM: Disabled VMX-preemption timer\n"));
     1770
     1771    if (pVM->hm.s.fVirtApicRegs)
     1772        LogRel(("HM: Enabled APIC-register virtualization support\n"));
     1773
     1774    if (pVM->hm.s.fPostedIntrs)
     1775        LogRel(("HM: Enabled posted-interrupt processing support\n"));
     1776
     1777    if (pVM->hm.s.vmx.fUseVmcsShadowing)
     1778        LogRel(("HM: Enabled VMCS shadowing support\n"));
    1800 1779
    1801 1780    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/include/HMInternal.h

    r79222 r79345  
    595 595        /** Whether the CPU supports VMCS fields for swapping EFER. */
    596 596        bool                        fSupportsVmcsEfer;
    597         uint8_t                     u8Alignment2[7];
     597        /** Whether to use VMCS shadowing. */
     598        bool                        fUseVmcsShadowing;
     599        uint8_t                     u8Alignment2[6];
    598 600
    599 601        /** VMX MSR values. */
     
    603 605        RTHCPHYS                    HCPhysVmxEnableError;
    604 606
    605         /** Pointer to the shadow VMCS fields array. */
     607        /** Pointer to the shadow VMCS read-only fields array. */
     608        R0PTRTYPE(uint32_t *)       paShadowVmcsRoFields;
     609        /** Pointer to the shadow VMCS read/write fields array. */
    606 610        R0PTRTYPE(uint32_t *)       paShadowVmcsFields;
    607         RTR0PTR                     pvR0Alignment1;
    608         /** Number of elements in the shadow VMCS fields array. */
     611        /** Number of elements in the shadow VMCS read-only fields array. */
     612        uint32_t                    cShadowVmcsRoFields;
     613        /** Number of elements in the shadow VMCS read-write fields array. */
    609 614        uint32_t                    cShadowVmcsFields;
    610         uint32_t                    u32Alignemnt0;
    611 615    } vmx;
    612 616
     
    984 988             *  nested-guest VMCS controls. */
    985 989            bool                        fMergedNstGstCtls;
     990            /** Whether the nested-guest VMCS has been copied to the shadow VMCS. */
     991            bool                        fCopiedNstGstToShadowVmcs;
    986 992            /** Alignment. */
    987             bool                        afAlignment0[6];
     993            bool                        afAlignment0[5];
    988 994            /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
    989 995            uint64_t                    u64GstMsrApicBase;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette