VirtualBox

Changeset 78220 in vbox for trunk/src/VBox


Timestamp: Apr 20, 2019 4:08:44 AM
Author: vboxsync
Message: VMM: Nested VMX: bugref:9180 Hardware-assisted nested VT-x infrastructure changes and VM-entry implementation.
Location: trunk/src/VBox/VMM
Files: 19 edited

  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r77712 r78220  
    30103010
    30113011/**
    3012  * Applies the TSC offset of a nested-guest if any and returns the new TSC
    3013  * value for the guest (or nested-guest).
     3012 * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
     3013 * nested-guest.
    30143014 *
    30153015 * @returns The TSC offset after applying any nested-guest TSC offset.
     
    30173017 * @param   uTicks      The guest TSC.
    30183018 *
    3019  * @sa      HMApplySvmNstGstTscOffset.
     3019 * @sa      CPUMRemoveNestedGuestTscOffset.
    30203020 */
    30213021VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
     
    30333033    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    30343034    {
     3035        /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMHasGuestSvmVmcbCached to save a call. */
    30353036        if (!HMHasGuestSvmVmcbCached(pVCpu))
    30363037        {
     
    30393040        }
    30403041        return HMApplySvmNstGstTscOffset(pVCpu, uTicks);
     3042    }
     3043#else
     3044    RT_NOREF(pVCpu);
     3045#endif
     3046    return uTicks;
     3047}
     3048
     3049
     3050/**
     3051 * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
     3052 * guest.
     3053 *
     3054 * @returns The TSC offset after removing any nested-guest TSC offset.
     3055 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     3056 * @param   uTicks      The nested-guest TSC.
     3057 *
     3058 * @sa      CPUMApplyNestedGuestTscOffset.
     3059 */
     3060VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
     3061{
     3062#ifndef IN_RC
     3063    PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
     3064    if (CPUMIsGuestInVmxNonRootMode(pCtx))
     3065    {
     3066        PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
     3067        if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
     3068            return uTicks - pVmcs->u64TscOffset.u;
     3069        return uTicks;
     3070    }
     3071
     3072    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     3073    {
     3074        /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMRemoveSvmNstGstTscOffset to save a call. */
     3075        if (!HMHasGuestSvmVmcbCached(pVCpu))
     3076        {
     3077            PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     3078            return uTicks - pVmcb->ctrl.u64TSCOffset;
     3079        }
     3080        return HMRemoveSvmNstGstTscOffset(pVCpu, uTicks);
    30413081    }
    30423082#else
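
    Note: the CPUM-level pair above is symmetric. CPUMApplyNestedGuestTscOffset adds the
    VMCS TSC offset (when VMX_PROC_CTLS_USE_TSC_OFFSETTING is set) to produce the
    nested-guest TSC, and the new CPUMRemoveNestedGuestTscOffset subtracts it again.
    A minimal standalone C model of that round trip (illustrative types and names,
    not the VBox code):

        #include <stdint.h>
        #include <assert.h>

        #define PROC_CTLS_USE_TSC_OFFSETTING  UINT32_C(0x00000008)  /* bit 3, per Intel SDM */

        typedef struct VMCSMODEL
        {
            uint32_t u32ProcCtls;   /* primary processor-based VM-execution controls */
            uint64_t u64TscOffset;  /* TSC offset; typically "negative" (two's complement) */
        } VMCSMODEL;

        static uint64_t ApplyTscOffset(VMCSMODEL const *pVmcs, uint64_t uGuestTsc)
        {
            if (pVmcs->u32ProcCtls & PROC_CTLS_USE_TSC_OFFSETTING)
                return uGuestTsc + pVmcs->u64TscOffset;     /* nested-guest TSC */
            return uGuestTsc;
        }

        static uint64_t RemoveTscOffset(VMCSMODEL const *pVmcs, uint64_t uNstGstTsc)
        {
            if (pVmcs->u32ProcCtls & PROC_CTLS_USE_TSC_OFFSETTING)
                return uNstGstTsc - pVmcs->u64TscOffset;    /* back to the guest TSC */
            return uNstGstTsc;
        }

        int main(void)
        {
            VMCSMODEL Vmcs = { PROC_CTLS_USE_TSC_OFFSETTING, UINT64_C(0xffffffffffff8000) };
            assert(RemoveTscOffset(&Vmcs, ApplyTscOffset(&Vmcs, 100000)) == 100000);
            return 0;
        }

    Unsigned wrap-around makes the round trip exact even for "negative" offsets.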
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r77589 r78220  
    799799     */
    800800    if (enmGuestMode == PGMMODE_REAL)
    801         pVCpu->hm.s.vmx.fWasInRealMode = true;
     801    {
     802        PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     803        pVmcsInfo->fWasInRealMode = true;
     804    }
    802805
    803806# ifdef IN_RING0
     
    814817            fChanged |= HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS;
    815818        else
    816             fChanged |= HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS;
     819            fChanged |= HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_EXIT_CTLS;
    817820        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fChanged);
    818821    }
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r77902 r78220  
    204204 *          using hardware-assisted SVM.
    205205 *
    206  * @note    If you make any changes to this function, please check if
    207  *          hmR0SvmNstGstUndoTscOffset() needs adjusting.
    208  *
    209  * @sa      CPUMApplyNestedGuestTscOffset(), hmR0SvmNstGstUndoTscOffset().
     206 * @sa      CPUMRemoveNestedGuestTscOffset, HMRemoveSvmNstGstTscOffset.
    210207 */
    211208VMM_INT_DECL(uint64_t) HMApplySvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks)
     
    216213    Assert(pVmcbNstGstCache->fCacheValid);
    217214    return uTicks + pVmcbNstGstCache->u64TSCOffset;
     215}
     216
     217
     218/**
     219 * Removes the TSC offset of an SVM nested-guest if any and returns the new TSC
     220 * value for the guest.
     221 *
     222 * @returns The TSC offset after removing any nested-guest TSC offset.
     223 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     224 * @param   uTicks      The nested-guest TSC.
     225 *
     226 * @remarks This function looks at the VMCB cache rather than directly at the
     227 *          nested-guest VMCB. The latter may have been modified for executing
     228 *          using hardware-assisted SVM.
     229 *
     230 * @sa      CPUMApplyNestedGuestTscOffset, HMApplySvmNstGstTscOffset.
     231 */
     232VMM_INT_DECL(uint64_t) HMRemoveSvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks)
     233{
     234    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     235    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
     236    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     237    Assert(pVmcbNstGstCache->fCacheValid);
     238    return uTicks - pVmcbNstGstCache->u64TSCOffset;
    218239}
    219240
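
    Note: HMRemoveSvmNstGstTscOffset added here is the exact inverse of
    HMApplySvmNstGstTscOffset, and both deliberately read u64TSCOffset from the nested
    VMCB cache rather than the live VMCB, since the latter may have been rewritten for
    hardware-assisted execution. A short worked example of the arithmetic (hypothetical
    numbers):

        #include <stdint.h>
        #include <assert.h>

        int main(void)
        {
            uint64_t const uOffset    = (uint64_t)-500;       /* cached u64TSCOffset set up by the L1 hypervisor */
            uint64_t const uGuestTsc  = 10500;                /* guest TSC at some instant */
            uint64_t const uNstGstTsc = uGuestTsc + uOffset;  /* apply: 10000, what the nested-guest reads */
            assert(uNstGstTsc - uOffset == uGuestTsc);        /* remove: undoes the apply exactly */
            return 0;
        }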
  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r77745 r78220  
    716716                 * (all sorts of RPL & DPL assumptions).
    717717                 */
    718                 if (pVCpu->hm.s.vmx.fWasInRealMode)
     718                PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     719                if (pVmcsInfo->fWasInRealMode)
    719720                {
    720721                    if (!CPUMIsGuestInV86ModeEx(pCtx))
     
    858859
    859860/**
    860  * Gets the permission bits for the specified MSR in the specified MSR bitmap.
    861  *
    862  * @returns VBox status code.
     861 * Gets the read and write permission bits for an MSR in an MSR bitmap.
     862 *
     863 * @returns VMXMSRPM_XXX - the MSR permission.
    863864 * @param   pvMsrBitmap     Pointer to the MSR bitmap.
    864  * @param   idMsr           The MSR.
    865  * @param   penmRead        Where to store the read permissions. Optional, can be
    866  *                          NULL.
    867  * @param   penmWrite       Where to store the write permissions. Optional, can be
    868  *                          NULL.
    869  */
    870 VMM_INT_DECL(int) HMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr, PVMXMSREXITREAD penmRead,
    871                                         PVMXMSREXITWRITE penmWrite)
    872 {
    873     AssertPtrReturn(pvMsrBitmap, VERR_INVALID_PARAMETER);
    874 
    875     int32_t iBit;
    876     uint8_t const *pbMsrBitmap = (uint8_t *)pvMsrBitmap;
     865 * @param   idMsr           The MSR to get permissions for.
     866 *
     867 * @sa      hmR0VmxSetMsrPermission.
     868 */
     869VMM_INT_DECL(uint32_t) HMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
     870{
     871    AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
     872
     873    uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
    877874
    878875    /*
     
    885882     *
    886883     * A bit corresponding to an MSR within the above range causes a VM-exit
    887      * if the bit is 1 on executions of RDMSR/WRMSR.
    888      *
    889      * If an MSR falls out of the MSR range, it always cause a VM-exit.
     884     * if the bit is 1 on executions of RDMSR/WRMSR.  If an MSR falls out of
     885     * the MSR range, it always cause a VM-exit.
    890886     *
    891887     * See Intel spec. 24.6.9 "MSR-Bitmap Address".
    892888     */
    893     if (idMsr <= 0x00001fff)
    894         iBit = idMsr;
    895     else if (   idMsr >= 0xc0000000
    896              && idMsr <= 0xc0001fff)
    897     {
    898         iBit = (idMsr - 0xc0000000);
    899         pbMsrBitmap += 0x400;
     889    uint32_t const offBitmapRead  = 0;
     890    uint32_t const offBitmapWrite = 0x800;
     891    uint32_t       offMsr;
     892    uint32_t       iBit;
     893    if (idMsr <= UINT32_C(0x00001fff))
     894    {
     895        offMsr = 0;
     896        iBit   = idMsr;
     897    }
     898    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
     899    {
     900        offMsr = 0x400;
     901        iBit   = idMsr - UINT32_C(0xc0000000);
    900902    }
    901903    else
    902904    {
    903         if (penmRead)
    904             *penmRead = VMXMSREXIT_INTERCEPT_READ;
    905         if (penmWrite)
    906             *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
    907         Log(("CPUMVmxGetMsrPermission: Warning! Out of range MSR %#RX32\n", idMsr));
    908         return VINF_SUCCESS;
    909     }
    910 
    911     /* Validate the MSR bit position. */
    912     Assert(iBit <= 0x1fff);
    913 
    914     /* Get the MSR read permissions. */
    915     if (penmRead)
    916     {
    917         if (ASMBitTest(pbMsrBitmap, iBit))
    918             *penmRead = VMXMSREXIT_INTERCEPT_READ;
    919         else
    920             *penmRead = VMXMSREXIT_PASSTHRU_READ;
    921     }
    922 
    923     /* Get the MSR write permissions. */
    924     if (penmWrite)
    925     {
    926         if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
    927             *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
    928         else
    929             *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
    930     }
    931 
    932     return VINF_SUCCESS;
     905        LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
     906        return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
     907    }
     908
     909    /*
     910     * Get the MSR read permissions.
     911     */
     912    uint32_t fRet;
     913    uint32_t const offMsrRead = offBitmapRead + offMsr;
     914    Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
     915    if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
     916        fRet = VMXMSRPM_EXIT_RD;
     917    else
     918        fRet = VMXMSRPM_ALLOW_RD;
     919
     920    /*
     921     * Get the MSR write permissions.
     922     */
     923    uint32_t const offMsrWrite = offBitmapWrite + offMsr;
     924    Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
     925    if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
     926        fRet |= VMXMSRPM_EXIT_WR;
     927    else
     928        fRet |= VMXMSRPM_ALLOW_WR;
     929
     930    Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
     931    return fRet;
    933932}
    934933
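
    Note: the rewritten HMGetVmxMsrPermission folds the two read/write out-parameters into
    a single VMXMSRPM_XXX mask and indexes the 4 KB MSR bitmap directly. A self-contained
    sketch of the bitmap layout and lookup it implements (per Intel SDM 24.6.9; the helper
    name is illustrative):

        #include <stdint.h>

        /* 4 KB MSR bitmap layout (Intel SDM 24.6.9):
         *   0x000-0x3ff: read bitmap,  MSRs 0x00000000-0x00001fff
         *   0x400-0x7ff: read bitmap,  MSRs 0xc0000000-0xc0001fff
         *   0x800-0xbff: write bitmap, MSRs 0x00000000-0x00001fff
         *   0xc00-0xfff: write bitmap, MSRs 0xc0000000-0xc0001fff
         * A bit set to 1 makes RDMSR/WRMSR of that MSR cause a VM-exit. */
        static int MsrBitmapCausesExit(uint8_t const *pbMsrBitmap, uint32_t idMsr, int fWrite)
        {
            uint32_t offMsr;
            if (idMsr <= UINT32_C(0x00001fff))
                offMsr = 0;
            else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
                offMsr = 0x400;
            else
                return 1;   /* MSRs outside both ranges always cause a VM-exit */
            uint32_t const iBit = idMsr & UINT32_C(0x1fff); /* low 13 bits index both ranges */
            uint8_t const *pb   = pbMsrBitmap + (fWrite ? 0x800 : 0) + offMsr;
            return (pb[iBit >> 3] >> (iBit & 7)) & 1;
        }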
     
    943942 * @param   cbAccess        The size of the I/O access in bytes (1, 2 or 4 bytes).
    944943 */
    945 VMM_INT_DECL(bool) HMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
    946                                                 uint8_t cbAccess)
     944VMM_INT_DECL(bool) HMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort, uint8_t cbAccess)
    947945{
    948946    Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
     
    10171015    LogRel(("uFirstPauseLoopTick        = %RX64\n",     pCtx->hwvirt.vmx.uFirstPauseLoopTick));
    10181016    LogRel(("uPrevPauseTick             = %RX64\n",     pCtx->hwvirt.vmx.uPrevPauseTick));
    1019     LogRel(("uVmentryTick               = %RX64\n",     pCtx->hwvirt.vmx.uVmentryTick));
     1017    LogRel(("uEntryTick                 = %RX64\n",     pCtx->hwvirt.vmx.uEntryTick));
    10201018    LogRel(("offVirtApicWrite           = %#RX16\n",    pCtx->hwvirt.vmx.offVirtApicWrite));
     1019    LogRel(("fVirtNmiBlocking           = %RTbool\n",   pCtx->hwvirt.vmx.fVirtNmiBlocking));
    10211020    LogRel(("VMCS cache:\n"));
    10221021
     
    12431242}
    12441243
     1244
     1245/**
     1246 * Gets the active (in use) VMCS info. object for the specified VCPU.
     1247 *
     1248 * This is either the guest or nested-guest VMCS and need not necessarily pertain to
     1249 * the "current" VMCS (in the VMX definition of the term). For instance, if the
     1250 * VM-entry failed due to an invalid-guest state, we may have "cleared" the VMCS
     1251 * while returning to ring-3. The VMCS info. object for that VMCS would still be
     1252 * active and returned so that we could dump the VMCS fields to ring-3 for
     1253 * diagnostics. This function is thus only used to distinguish between the
     1254 * nested-guest or guest VMCS.
     1255 *
     1256 * @returns The active VMCS information.
     1257 * @param   pVCpu   The cross context virtual CPU structure.
     1258 *
     1259 * @thread  EMT.
     1260 * @remarks This function may be called with preemption or interrupts disabled!
     1261 */
     1262VMM_INT_DECL(PVMXVMCSINFO) hmGetVmxActiveVmcsInfo(PVMCPU pVCpu)
     1263{
     1264    if (!pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs)
     1265        return &pVCpu->hm.s.vmx.VmcsInfo;
     1266    return &pVCpu->hm.s.vmx.VmcsInfoNstGst;
     1267}
     1268
     1269
     1270/**
     1271 * Converts a VMX event type into an appropriate TRPM event type.
     1272 *
     1273 * @returns TRPM event.
     1274 * @param   uIntInfo    The VMX event.
     1275 */
     1276VMM_INT_DECL(TRPMEVENT) HMVmxEventToTrpmEventType(uint32_t uIntInfo)
     1277{
     1278    TRPMEVENT enmTrapType;
     1279    uint8_t const uType   = VMX_ENTRY_INT_INFO_TYPE(uIntInfo);
     1280    uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(uIntInfo);
     1281
     1282    switch (uType)
     1283    {
     1284        case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
     1285           enmTrapType = TRPM_HARDWARE_INT;
     1286           break;
     1287
     1288        case VMX_ENTRY_INT_INFO_TYPE_NMI:
     1289        case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
     1290            enmTrapType = TRPM_TRAP;
     1291            break;
     1292
     1293        case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:  /* INT1 (ICEBP). */
     1294            Assert(uVector == X86_XCPT_DB); NOREF(uVector);
     1295            enmTrapType = TRPM_SOFTWARE_INT;
     1296            break;
     1297
     1298        case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:       /* INT3 (#BP) and INTO (#OF) */
     1299            Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF); NOREF(uVector);
     1300            enmTrapType = TRPM_SOFTWARE_INT;
     1301            break;
     1302
     1303        case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
     1304            enmTrapType = TRPM_SOFTWARE_INT;
     1305            break;
     1306
     1307        case VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT:   /* Shouldn't really happen. */
     1308        default:
     1309            AssertMsgFailed(("Invalid trap type %#x\n", uType));
     1310            enmTrapType = TRPM_32BIT_HACK;
     1311            break;
     1312    }
     1313
     1314    return enmTrapType;
     1315}
     1316
     1317
     1318#ifndef IN_RC
     1319# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1320/**
     1321 * Notification callback for when a VM-exit happens outside VMX R0 code (e.g. in
     1322 * IEM).
     1323 *
     1324 * @param   pVCpu   The cross context virtual CPU structure.
     1325 * @param   pCtx    Pointer to the guest-CPU context.
     1326 */
     1327VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
     1328{
     1329    NOREF(pCtx);
     1330    pVCpu->hm.s.vmx.fMergedNstGstCtls = false;
     1331}
     1332# endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
     1333#endif /* IN_RC */
     1334
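
    Note: summarized, the HMVmxEventToTrpmEventType switch above collapses the VMX
    entry-interruption types into TRPM's three buckets:

        /*  VMX_ENTRY_INT_INFO_TYPE_EXT_INT                       -> TRPM_HARDWARE_INT
         *  VMX_ENTRY_INT_INFO_TYPE_NMI / _HW_XCPT                -> TRPM_TRAP
         *  VMX_ENTRY_INT_INFO_TYPE_SW_INT / _SW_XCPT (#BP, #OF)
         *      / _PRIV_SW_XCPT (INT1/ICEBP, #DB)                 -> TRPM_SOFTWARE_INT
         *  VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT                   -> asserts, TRPM_32BIT_HACK */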
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r77899 r78220  
    162162        return VERR_VMX_VMEXIT_FAILED; \
    163163    } while (0)
    164 
    165 /** Enables/disables IEM-only EM execution policy in and from ring-3.   */
    166 # if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    167 #  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcStrictRet) \
    168     do { \
    169         Log(("%s: Enabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
    170         int rcSched = EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true); \
    171         if (rcSched != VINF_SUCCESS) \
    172             iemSetPassUpStatus(pVCpu, rcSched); \
    173         return (a_rcStrictRet); \
    174     } while (0)
    175 
    176 #  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcStrictRet) \
    177     do { \
    178         Log(("%s: Disabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
    179         int rcSched = EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false); \
    180         if (rcSched != VINF_SUCCESS) \
    181             iemSetPassUpStatus(pVCpu, rcSched); \
    182         return (a_rcStrictRet); \
    183     } while (0)
    184 # else
    185 #  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcStrictRet)   do { return (a_rcRet); } while (0)
    186 #  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcStrictRet)  do { return (a_rcRet); } while (0)
    187 # endif
    188164
    189165
     
    16461622     * PreemptTimerShift = 5
    16471623     * VmcsPreemptTimer  = 2 (i.e. need to decrement by 1 every 2 * RT_BIT(5) = 20000 TSC ticks)
    1648      * VmentryTick       = 50000 (TSC at time of VM-entry)
     1624     * EntryTick         = 50000 (TSC at time of VM-entry)
    16491625     *
    16501626     * CurTick   Delta    PreemptTimerVal
     
    16701646    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
    16711647    uint64_t const uCurTick        = TMCpuTickGetNoCheck(pVCpu);
    1672     uint64_t const uVmentryTick    = pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick;
    1673     uint64_t const uDelta          = uCurTick - uVmentryTick;
     1648    uint64_t const uEntryTick      = pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick;
     1649    uint64_t const uDelta          = uCurTick - uEntryTick;
    16741650    uint32_t const uVmcsPreemptVal = pVmcs->u32PreemptTimer;
    16751651    uint32_t const uPreemptTimer   = uVmcsPreemptVal
     
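    Note: architecturally (Intel SDM 25.5.1) the VMX-preemption timer counts down at the
    TSC rate divided by 2^shift, where the shift comes from IA32_VMX_MISC[4:0]. A hedged
    sketch of deriving the remaining timer value from the TSC delta since VM-entry,
    saturating at zero (this mirrors the idea; the exact VBox expression is truncated in
    the hunk above):

        #include <stdint.h>

        static uint32_t CalcRemainingPreemptTimer(uint32_t uVmcsPreemptVal, uint8_t cShift,
                                                  uint64_t uEntryTick, uint64_t uCurTick)
        {
            uint64_t const uDelta  = uCurTick - uEntryTick;   /* TSC ticks since VM-entry */
            uint64_t const cTicked = uDelta >> cShift;        /* timer decrements so far */
            return cTicked < uVmcsPreemptVal ? (uint32_t)(uVmcsPreemptVal - cTicked) : 0;
        }
        /* E.g. cShift=5: the timer drops by 1 every 32 TSC ticks; a result of 0 means a
           preemption-timer VM-exit is due. */
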
    19131889
    19141890/**
    1915  * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
     1891 * Saves the guest MSRs into the VM-exit MSR-store area as part of VM-exit.
    19161892 *
    19171893 * @returns VBox status code.
     
    19481924        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
    19491925
    1950     PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
     1926    PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea);
    19511927    Assert(pMsr);
    19521928    for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
     
    19801956    }
    19811957
    1982     RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
    1983     int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
    1984                                       pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), cMsrs * sizeof(VMXAUTOMSR));
     1958    RTGCPHYS const GCPhysVmExitMsrStoreArea = pVmcs->u64AddrExitMsrStore.u;
     1959    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmExitMsrStoreArea,
     1960                                      pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea), cMsrs * sizeof(VMXAUTOMSR));
    19851961    if (RT_SUCCESS(rc))
    19861962    { /* likely */ }
    19871963    else
    19881964    {
    1989         AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
     1965        AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
    19901966        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
    19911967    }
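
    Note: the renamed pExitMsrStoreArea/pExitMsrLoadArea buffers hold arrays of 16-byte
    auto-MSR entries (Intel SDM 24.7.2): a 32-bit MSR index, 32 reserved bits that must be
    zero, and the 64-bit value; VBox's VMXAUTOMSR mirrors this. A minimal model of walking
    such an area once it has been read into host memory (types and helper names are
    illustrative):

        #include <stdint.h>

        typedef struct AUTOMSRMODEL
        {
            uint32_t u32Msr;        /* MSR index */
            uint32_t u32Reserved;   /* must be zero */
            uint64_t u64Value;      /* MSR value stored/loaded */
        } AUTOMSRMODEL;

        static int ProcessMsrArea(AUTOMSRMODEL const *paMsrs, uint32_t cMsrs,
                                  int (*pfnOne)(uint32_t idMsr, uint64_t uValue))
        {
            for (uint32_t i = 0; i < cMsrs; i++)
                if (   paMsrs[i].u32Reserved != 0
                    || pfnOne(paMsrs[i].u32Msr, paMsrs[i].u64Value) != 0)
                    return -1;  /* malformed entry or per-MSR handler failed */
            return 0;
        }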
     
    22562232
    22572233/**
    2258  * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
     2234 * Loads the host MSRs from the VM-exit MSR-load area as part of VM-exit.
    22592235 *
    22602236 * @returns VBox status code.
     
    22912267        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
    22922268
    2293     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea));
    2294     RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
    2295     int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
    2296                                      GCPhysAutoMsrArea, cMsrs * sizeof(VMXAUTOMSR));
     2269    RTGCPHYS const GCPhysVmExitMsrLoadArea = pVmcs->u64AddrExitMsrLoad.u;
     2270    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrLoadArea),
     2271                                     GCPhysVmExitMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
    22972272    if (RT_SUCCESS(rc))
    22982273    {
    2299         PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
     2274        PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrLoadArea);
    23002275        Assert(pMsr);
    23012276        for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
     
    23312306    else
    23322307    {
    2333         AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
     2308        AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrLoadArea, rc));
    23342309        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
    23352310    }
     
    28962871    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
    28972872
    2898     /* Revert any IEM-only nested-guest execution policy if it was set earlier, otherwise return rcStrict. */
    2899     IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(pVCpu, "VM-exit", rcStrict);
     2873#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
     2874    /* Revert any IEM-only nested-guest execution policy, otherwise return rcStrict. */
     2875    Log(("vmexit: Disabling IEM-only EM execution policy!\n"));
     2876    int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
     2877    if (rcSched != VINF_SUCCESS)
     2878        iemSetPassUpStatus(pVCpu, rcSched);
     2879#  endif
     2880    return VINF_SUCCESS;
    29002881# endif
    29012882}
     
    43124293 * @param   offReg      The offset of the register being read.
    43134294 */
    4314 DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
    4315 {
    4316     Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
    4317     uint8_t  const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
    4318     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
    4319     uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
     4295IEM_STATIC uint32_t iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
     4296{
     4297    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4298    Assert(pVmcs);
     4299
     4300    uint32_t uReg;
     4301    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
     4302    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     4303    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
     4304    if (RT_FAILURE(rc))
     4305    {
     4306        AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
     4307                         GCPhysVirtApic));
     4308        uReg = 0;
     4309    }
    43204310    return uReg;
    43214311}
     
    43294319 * @param   offReg      The offset of the register being read.
    43304320 */
    4331 DECLINLINE(uint64_t) iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
    4332 {
    4333     Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
    4334     uint8_t  const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
    4335     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
    4336     uint64_t const uReg = *(const uint64_t *)(pbVirtApic + offReg);
     4321IEM_STATIC uint64_t iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
     4322{
     4323    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4324    Assert(pVmcs);
     4325
     4326    uint64_t uReg;
     4327    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
     4328    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     4329    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
     4330    if (RT_FAILURE(rc))
     4331    {
     4332        AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
     4333                         GCPhysVirtApic));
     4334        uReg = 0;
     4335    }
    43374336    return uReg;
    43384337}
     
    43464345 * @param   uReg        The register value to write.
    43474346 */
    4348 DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
    4349 {
    4350     Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
    4351     uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
    4352     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
    4353     *(uint32_t *)(pbVirtApic + offReg) = uReg;
     4347IEM_STATIC void iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
     4348{
     4349    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4350    Assert(pVmcs);
     4351    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
     4352    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     4353    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
     4354    if (RT_FAILURE(rc))
     4355    {
     4356        AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
     4357                         GCPhysVirtApic));
     4358    }
    43544359}
    43554360
     
    43624367 * @param   uReg        The register value to write.
    43634368 */
    4364 DECLINLINE(void) iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
    4365 {
    4366     Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
    4367     uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
    4368     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
    4369     *(uint64_t *)(pbVirtApic + offReg) = uReg;
     4369IEM_STATIC void iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
     4370{
     4371    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4372    Assert(pVmcs);
     4373    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
     4374    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     4375    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
     4376    if (RT_FAILURE(rc))
     4377    {
     4378        AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
     4379                         GCPhysVirtApic));
     4380    }
    43704381}
    43714382
     
    43804391 * @remarks This is based on our APIC device code.
    43814392 */
    4382 DECLINLINE(void) iemVmxVirtApicSetVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
    4383 {
    4384     Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
    4385     uint8_t       *pbBitmap     = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
    4386     uint16_t const offVector    = (uVector & UINT32_C(0xe0)) >> 1;
    4387     uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
    4388     ASMAtomicBitSet(pbBitmap + offVector, idxVectorBit);
     4393IEM_STATIC void iemVmxVirtApicSetVectorInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
     4394{
     4395    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4396    Assert(pVmcs);
     4397    uint32_t uReg;
     4398    uint16_t const offVector      = (uVector & UINT32_C(0xe0)) >> 1;
     4399    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     4400    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
     4401    if (RT_SUCCESS(rc))
     4402    {
     4403        uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
     4404        uReg |= RT_BIT(idxVectorBit);
     4405        rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
     4406        if (RT_FAILURE(rc))
     4407        {
     4408            AssertMsgFailed(("Failed to set vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
     4409                             uVector, offReg, GCPhysVirtApic));
     4410        }
     4411    }
     4412    else
     4413    {
     4414        AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
     4415                         uVector, offReg, GCPhysVirtApic));
     4416    }
    43894417}
    43904418
     
    43994427 * @remarks This is based on our APIC device code.
    44004428 */
    4401 DECLINLINE(void) iemVmxVirtApicClearVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
    4402 {
    4403     Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
    4404     uint8_t       *pbBitmap     = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
    4405     uint16_t const offVector    = (uVector & UINT32_C(0xe0)) >> 1;
    4406     uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
    4407     ASMAtomicBitClear(pbBitmap + offVector, idxVectorBit);
     4429IEM_STATIC void iemVmxVirtApicClearVectorInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
     4430{
     4431    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4432    Assert(pVmcs);
     4433    uint32_t uReg;
     4434    uint16_t const offVector      = (uVector & UINT32_C(0xe0)) >> 1;
     4435    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     4436    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
     4437    if (RT_SUCCESS(rc))
     4438    {
     4439        uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
     4440        uReg &= ~RT_BIT(idxVectorBit);
     4441        rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
     4442        if (RT_FAILURE(rc))
     4443        {
     4444            AssertMsgFailed(("Failed to clear vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
     4445                             uVector, offReg, GCPhysVirtApic));
     4446        }
     4447    }
     4448    else
     4449    {
     4450        AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
     4451                         uVector, offReg, GCPhysVirtApic));
     4452    }
    44084453}
    44094454
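
    Note: the IRR/ISR/TMR "registers" are 256-bit bitmaps spread across eight 32-bit APIC
    registers spaced 16 bytes apart, which is what the (uVector & 0xe0) >> 1 and
    uVector & 0x1f expressions decompose; the rewrite above performs the set/clear as a
    read-modify-write of that dword in the guest-physical virtual-APIC page instead of
    touching a host-side shadow page. A hand-checkable example of the offset math:

        #include <stdint.h>
        #include <assert.h>

        int main(void)
        {
            uint8_t  const uVector      = 0x41;                            /* vector 65 */
            uint16_t const offVector    = (uVector & UINT32_C(0xe0)) >> 1; /* == 16 * (uVector / 32) */
            uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);        /* bit within that register */
            assert(offVector == 0x20);  /* third 32-bit register, covering vectors 64..95 */
            assert(idxVectorBit == 1);  /* bit 1 of that register */
            return 0;
        }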
     
    48334878    Assert(offReg < XAPIC_OFF_END + 4);
    48344879    Assert(pidxHighestBit);
     4880    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
    48354881
    48364882    /*
     
    50075053
    50085054    uint8_t uVector = uSvi;
    5009     iemVmxVirtApicClearVector(pVCpu, XAPIC_OFF_ISR0, uVector);
     5055    iemVmxVirtApicClearVectorInReg(pVCpu, XAPIC_OFF_ISR0, uVector);
    50105056
    50115057    uVector = 0;
     
    50485094    uint8_t const uVector = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
    50495095    Log2(("self_ipi_virt: uVector=%#x\n", uVector));
    5050     iemVmxVirtApicSetVector(pVCpu, XAPIC_OFF_IRR0, uVector);
     5096    iemVmxVirtApicSetVectorInReg(pVCpu, XAPIC_OFF_IRR0, uVector);
    50515097    uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
    50525098    uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
     
    66526698            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
    66536699
    6654         /* Read the Virtual-APIC page. */
    6655         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
    6656         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
    6657                                          GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
    6658         if (RT_SUCCESS(rc))
    6659         { /* likely */ }
    6660         else
    6661             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
    6662 
    66636700        /* TPR threshold without virtual-interrupt delivery. */
    66646701        if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
     
    66676704
    66686705        /* TPR threshold and VTPR. */
    6669         uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
    6670         uint8_t const  u8VTpr     = *(pbVirtApic + XAPIC_OFF_TPR);
    66716706        if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    6672             && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
    6673             && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
    6674             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
     6707            && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
     6708        {
     6709            /* Read the VTPR from the virtual-APIC page. */
     6710            uint8_t u8VTpr;
     6711            int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &u8VTpr, GCPhysVirtApic + XAPIC_OFF_TPR, sizeof(u8VTpr));
     6712            if (RT_SUCCESS(rc))
     6713            { /* likely */ }
     6714            else
     6715                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
     6716
     6717            /* Bits 3:0 of the TPR-threshold must not be greater than bits 7:4 of VTPR. */
     6718            if ((uint8_t)RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) <= (u8VTpr & 0xf0))
     6719            { /* likely */ }
     6720            else
     6721                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
     6722        }
    66756723    }
    66766724    else
     
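    Note: the consistency rule being enforced here (Intel SDM 26.2.1.1) is that bits 3:0
    of the TPR threshold must not exceed bits 7:4 of the VTPR fetched from the
    virtual-APIC page. A small illustration of the architectural condition (this states
    the SDM rule; see the hunk above for the exact expression the changeset uses):

        #include <stdint.h>

        /* Returns nonzero if VM-entry must fail the TPR-threshold/VTPR consistency check. */
        static int TprThresholdVsVTprFails(uint32_t u32TprThreshold, uint8_t u8VTpr)
        {
            uint8_t const uThreshold = u32TprThreshold & 0xf;   /* bits 3:0 of the control */
            uint8_t const uVTpr74    = (u8VTpr >> 4) & 0xf;     /* bits 7:4 of VTPR */
            return uThreshold > uVTpr74;
        }
        /* E.g. threshold 5 with VTPR 0x30 (bits 7:4 = 3) fails; with VTPR 0x50 it passes. */
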
    70097057
    70107058/**
    7011  * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
     7059 * Loads the guest MSRs from the VM-entry MSR-load area as part of VM-entry.
    70127060 *
    70137061 * @returns VBox status code.
     
    70477095    }
    70487096
    7049     RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
    7050     int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
    7051                                      GCPhysAutoMsrArea, cMsrs * sizeof(VMXAUTOMSR));
     7097    RTGCPHYS const GCPhysVmEntryMsrLoadArea = pVmcs->u64AddrEntryMsrLoad.u;
     7098    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea),
     7099                                     GCPhysVmEntryMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
    70527100    if (RT_SUCCESS(rc))
    70537101    {
    7054         PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
     7102        PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea);
    70557103        Assert(pMsr);
    70567104        for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
     
    70897137    else
    70907138    {
    7091         AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
     7139        AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysVmEntryMsrLoadArea, rc));
    70927140        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
    70937141    }
     
    72987346    if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
    72997347    {
    7300         uint64_t const uVmentryTick = TMCpuTickGetNoCheck(pVCpu);
    7301         pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick = uVmentryTick;
     7348        uint64_t const uEntryTick = TMCpuTickGetNoCheck(pVCpu);
     7349        pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick = uEntryTick;
    73027350        VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
    73037351
    7304         Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uVmentryTick));
     7352        Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uEntryTick));
    73057353    }
    73067354    else
     
    73407388           break;
    73417389
     7390        case VMX_ENTRY_INT_INFO_TYPE_NMI:
     7391        case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
     7392            enmTrapType = TRPM_TRAP;
     7393            break;
     7394
    73427395        case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
    73437396            enmTrapType = TRPM_SOFTWARE_INT;
    73447397            break;
    73457398
    7346         case VMX_ENTRY_INT_INFO_TYPE_NMI:
    7347         case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:  /* ICEBP. */
    73487399        case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:       /* #BP and #OF */
    7349         case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
    7350             enmTrapType = TRPM_TRAP;
     7400            Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
     7401            enmTrapType = TRPM_SOFTWARE_INT;
     7402            break;
     7403
     7404        case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:  /* #DB (INT1/ICEBP). */
     7405            Assert(uVector == X86_XCPT_DB);
     7406            enmTrapType = TRPM_SOFTWARE_INT;
    73517407            break;
    73527408
     
    73637419        TRPMSetErrorCode(pVCpu, uErrCode);
    73647420
    7365     if (   uType   == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
    7366         && uVector == X86_XCPT_PF)
     7421    if (   enmTrapType == TRPM_TRAP
     7422        && uVector     == X86_XCPT_PF)
    73677423        TRPMSetFaultAddress(pVCpu, GCPtrFaultAddress);
    7368     else if (   uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
    7369              || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
    7370              || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
    7371     {
    7372         AssertMsg(   uType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    7373                   || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
    7374                   ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uType));
     7424    else if (enmTrapType == TRPM_SOFTWARE_INT)
    73757425        TRPMSetInstrLength(pVCpu, cbInstr);
    7376     }
    73777426
    73787427    return VINF_SUCCESS;
     
    77097758                                {
    77107759                                    /* Reschedule to IEM-only execution of the nested-guest or return VINF_SUCCESS. */
    7711                                     IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(pVCpu, pszInstr, VINF_SUCCESS);
     7760# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
     7761                                    Log(("%s: Enabling IEM-only EM execution policy!\n", pszInstr));
     7762                                    int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
     7763                                    if (rcSched != VINF_SUCCESS)
     7764                                        iemSetPassUpStatus(pVCpu, rcSched);
     7765# endif
     7766                                    return VINF_SUCCESS;
    77127767                                }
    77137768
     
    77577812    {
    77587813        Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
     7814        uint32_t fMsrpm = HMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr);
    77597815        if (uExitReason == VMX_EXIT_RDMSR)
    7760         {
    7761             VMXMSREXITREAD enmRead;
    7762             int rc = HMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
    7763                                              NULL /* penmWrite */);
    7764             AssertRC(rc);
    7765             if (enmRead == VMXMSREXIT_INTERCEPT_READ)
    7766                 return true;
    7767         }
    7768         else
    7769         {
    7770             VMXMSREXITWRITE enmWrite;
    7771             int rc = HMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
    7772                                              &enmWrite);
    7773             AssertRC(rc);
    7774             if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
    7775                 return true;
    7776         }
    7777         return false;
     7816            return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_RD);
     7817        return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_WR);
    77787818    }
    77797819
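
    Note: with HMGetVmxMsrPermission now returning a flag mask instead of filling two
    out-parameters, the RDMSR/WRMSR intercept test reduces to a bit test. A hedged usage
    fragment (flag and function names as in the diff):

        uint32_t const fMsrpm = HMGetVmxMsrPermission(pvMsrBitmap, idMsr);
        bool const fInterceptRd = RT_BOOL(fMsrpm & VMXMSRPM_EXIT_RD);   /* RDMSR causes VM-exit */
        bool const fInterceptWr = RT_BOOL(fMsrpm & VMXMSRPM_EXIT_WR);   /* WRMSR causes VM-exit */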
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r76993 r78220  
    110110                uint64_t                    u64HostCr4;
    111111                /** Host EFER value (set by ring-0 VMX init) */
    112                 uint64_t                    u64HostEfer;
     112                uint64_t                    u64HostMsrEfer;
    113113                /** Host SMM monitor control (used for logging/diagnostics) */
    114114                uint64_t                    u64HostSmmMonitorCtl;
     
    366366    {
    367367        /* Read CR4 and EFER for logging/diagnostic purposes. */
    368         g_HmR0.hwvirt.u.vmx.u64HostCr4  = ASMGetCR4();
    369         g_HmR0.hwvirt.u.vmx.u64HostEfer = ASMRdMsr(MSR_K6_EFER);
     368        g_HmR0.hwvirt.u.vmx.u64HostCr4     = ASMGetCR4();
     369        g_HmR0.hwvirt.u.vmx.u64HostMsrEfer = ASMRdMsr(MSR_K6_EFER);
    370370
    371371        /* Get VMX MSRs for determining VMX features we can ultimately use. */
     
    11741174        pVM->hm.s.vmx.cPreemptTimerShift    = g_HmR0.hwvirt.u.vmx.cPreemptTimerShift;
    11751175        pVM->hm.s.vmx.u64HostCr4            = g_HmR0.hwvirt.u.vmx.u64HostCr4;
    1176         pVM->hm.s.vmx.u64HostEfer           = g_HmR0.hwvirt.u.vmx.u64HostEfer;
     1176        pVM->hm.s.vmx.u64HostMsrEfer        = g_HmR0.hwvirt.u.vmx.u64HostMsrEfer;
    11771177        pVM->hm.s.vmx.u64HostSmmMonitorCtl  = g_HmR0.hwvirt.u.vmx.u64HostSmmMonitorCtl;
    11781178        HMGetVmxMsrsFromHwvirtMsrs(&g_HmR0.hwvirt.Msrs, &pVM->hm.s.vmx.Msrs);
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

    r77481 r78220  
    678678; * @param   HCPhysVmcs     Physical address of VMCS structure.
    679679; */
    680 ;DECLASM(int) VMXActivateVmcs(RTHCPHYS HCPhysVmcs);
     680;DECLASM(int) VMXLoadVmcs(RTHCPHYS HCPhysVmcs);
    681681ALIGNCODE(16)
    682 BEGINPROC VMXActivateVmcs
     682BEGINPROC VMXLoadVmcs
    683683%ifdef RT_ARCH_AMD64
    684684    xor     rax, rax
     
    700700%endif
    701701    ret
    702 ENDPROC VMXActivateVmcs
     702ENDPROC VMXLoadVmcs
    703703
    704704
     
    709709; * @param    [esp + 04h]  gcc:rdi  msc:rcx   Param 1 - First parameter - Address that will receive the current pointer.
    710710; */
    711 ;DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);
    712 BEGINPROC VMXGetActivatedVmcs
     711;DECLASM(int) VMXGetCurrentVmcs(RTHCPHYS *pVMCS);
     712BEGINPROC VMXGetCurrentVmcs
    713713%ifdef RT_OS_OS2
    714714    mov     eax, VERR_NOT_SUPPORTED
     
    728728    ret
    729729%endif
    730 ENDPROC VMXGetActivatedVmcs
     730ENDPROC VMXGetCurrentVmcs
    731731
    732732;/**
     
    867867; load the guest ones when necessary.
    868868;
    869 ; @cproto       DECLASM(int) HMR0VMXStartVMhmR0DumpDescriptorM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pCache,
     869; @cproto       DECLASM(int) HMR0VMXStartVMhmR0DumpDescriptorM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pCache,
    870870;                                                              PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
    871871;
     
    13021302    ; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
    13031303    ; trouble only just less efficient.
    1304     mov     ecx, [ss:xDX + VMXVMCSBATCHCACHE.Read.cValidEntries]
     1304    mov     ecx, [ss:xDX + VMXVMCSCACHE.Read.cValidEntries]
    13051305    cmp     ecx, 0      ; Can't happen
    13061306    je      %%no_cached_read32
     
    13101310%%cached_read32:
    13111311    dec     xCX
    1312     mov     eax, [ss:xDX + VMXVMCSBATCHCACHE.Read.aField + xCX * 4]
     1312    mov     eax, [ss:xDX + VMXVMCSCACHE.Read.aField + xCX * 4]
    13131313    ; Note! This leaves the high 32 bits of the cache entry unmodified!!
    1314     vmread  [ss:xDX + VMXVMCSBATCHCACHE.Read.aFieldVal + xCX * 8], xAX
     1314    vmread  [ss:xDX + VMXVMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    13151315    cmp     xCX, 0
    13161316    jnz     %%cached_read32
     
    14281428
    14291429%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    1430     mov     ecx, [xBX + VMXVMCSBATCHCACHE.Write.cValidEntries]
     1430    mov     ecx, [xBX + VMXVMCSCACHE.Write.cValidEntries]
    14311431    cmp     ecx, 0
    14321432    je      .no_cached_writes
     
    14371437ALIGN(16)
    14381438.cached_write:
    1439     mov     eax, [xBX + VMXVMCSBATCHCACHE.Write.aField + xCX * 4]
    1440     vmwrite xAX, [xBX + VMXVMCSBATCHCACHE.Write.aFieldVal + xCX * 8]
     1439    mov     eax, [xBX + VMXVMCSCACHE.Write.aField + xCX * 4]
     1440    vmwrite xAX, [xBX + VMXVMCSCACHE.Write.aFieldVal + xCX * 8]
    14411441    inc     xCX
    14421442    cmp     xCX, xDX
    14431443    jl     .cached_write
    14441444
    1445     mov     dword [xBX + VMXVMCSBATCHCACHE.Write.cValidEntries], 0
     1445    mov     dword [xBX + VMXVMCSCACHE.Write.cValidEntries], 0
    14461446.no_cached_writes:
    14471447
     
    16291629    ; vmread's will fail (only eflags.cf=1 will be set) but that shouldn't cause any
    16301630    ; trouble only just less efficient.
    1631     mov     ecx, [xDX + VMXVMCSBATCHCACHE.Read.cValidEntries]
     1631    mov     ecx, [xDX + VMXVMCSCACHE.Read.cValidEntries]
    16321632    cmp     ecx, 0      ; Can't happen
    16331633    je      %%no_cached_read64
     
    16371637%%cached_read64:
    16381638    dec     xCX
    1639     mov     eax, [xDX + VMXVMCSBATCHCACHE.Read.aField + xCX * 4]
    1640     vmread  [xDX + VMXVMCSBATCHCACHE.Read.aFieldVal + xCX * 8], xAX
     1639    mov     eax, [xDX + VMXVMCSCACHE.Read.aField + xCX * 4]
     1640    vmread  [xDX + VMXVMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    16411641    cmp     xCX, 0
    16421642    jnz     %%cached_read64
     
    17371737
    17381738%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    1739     mov     ecx, [xBX + VMXVMCSBATCHCACHE.Write.cValidEntries]
     1739    mov     ecx, [xBX + VMXVMCSCACHE.Write.cValidEntries]
    17401740    cmp     ecx, 0
    17411741    je      .no_cached_writes
     
    17461746ALIGN(16)
    17471747.cached_write:
    1748     mov     eax, [xBX + VMXVMCSBATCHCACHE.Write.aField + xCX * 4]
    1749     vmwrite xAX, [xBX + VMXVMCSBATCHCACHE.Write.aFieldVal + xCX * 8]
     1748    mov     eax, [xBX + VMXVMCSCACHE.Write.aField + xCX * 4]
     1749    vmwrite xAX, [xBX + VMXVMCSCACHE.Write.aFieldVal + xCX * 8]
    17501750    inc     xCX
    17511751    cmp     xCX, xDX
    17521752    jl     .cached_write
    17531753
    1754     mov     dword [xBX + VMXVMCSBATCHCACHE.Write.cValidEntries], 0
     1754    mov     dword [xBX + VMXVMCSCACHE.Write.cValidEntries], 0
    17551755.no_cached_writes:
    17561756
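
    Note: the renames align the assembly wrappers with the instruction semantics:
    VMXLoadVmcs executes VMPTRLD (making the VMCS current and active) and
    VMXGetCurrentVmcs executes VMPTRST. A hedged ring-0 usage sketch built on the
    prototypes quoted above (helper name is illustrative):

        static int LoadAndVerifyVmcs(RTHCPHYS HCPhysVmcs)
        {
            int rc = VMXLoadVmcs(HCPhysVmcs);       /* VMPTRLD: make this the current VMCS */
            if (RT_SUCCESS(rc))
            {
                RTHCPHYS HCPhysCur;
                rc = VMXGetCurrentVmcs(&HCPhysCur); /* VMPTRST: read back the current-VMCS pointer */
                if (RT_SUCCESS(rc))
                    Assert(HCPhysCur == HCPhysVmcs);
            }
            return rc;
        }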
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r77906 r78220  
    46754675
    46764676/**
    4677  * Undoes the TSC offset applied for an SVM nested-guest and returns the TSC
    4678  * value for the guest.
    4679  *
    4680  * @returns The TSC offset after undoing any nested-guest TSC offset.
    4681  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    4682  * @param   uTicks      The nested-guest TSC.
    4683  *
    4684  * @note    If you make any changes to this function, please check if
    4685  *          hmR0SvmNstGstUndoTscOffset() needs adjusting.
    4686  *
    4687  * @sa      HMApplySvmNstGstTscOffset().
    4688  */
    4689 DECLINLINE(uint64_t) hmR0SvmNstGstUndoTscOffset(PVMCPU pVCpu, uint64_t uTicks)
    4690 {
    4691     PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    4692     Assert(pVmcbNstGstCache->fCacheValid);
    4693     return uTicks - pVmcbNstGstCache->u64TSCOffset;
    4694 }
    4695 
    4696 
    4697 /**
    46984677 * Performs some essential restoration of state after running guest (or
    46994678 * nested-guest) code in AMD-V.
     
    47274706        {
    47284707            /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMNotifySvmNstGstVmexit(). */
    4729             uint64_t const uGstTsc = hmR0SvmNstGstUndoTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
     4708            uint64_t const uGstTsc = HMRemoveSvmNstGstTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
    47304709            TMCpuTickSetLastSeen(pVCpu, uGstTsc);
    47314710        }
     
    63796358    }
    63806359    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    6381     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    63826360    if (rcStrict != VINF_SUCCESS)
    63836361        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
     
    64196397    }
    64206398    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    6421     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
    64226399    return VBOXSTRICTRC_TODO(rcStrict);
    64236400}
     
    64556432    }
    64566433    HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
    6457     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
    64586434    return VBOXSTRICTRC_TODO(rcStrict);
    64596435}
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r77590 r78220  
    4444#include "dtrace/VBoxVMM.h"
    4545
     46# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
    4647#ifdef DEBUG_ramshankar
    4748# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
    4849# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
    49 # define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
    5050# define HMVMX_ALWAYS_CHECK_GUEST_STATE
    5151# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
     
    6363
    6464/** Determine which tagged-TLB flush handler to use. */
    65 #define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID           0
    66 #define HMVMX_FLUSH_TAGGED_TLB_EPT                1
    67 #define HMVMX_FLUSH_TAGGED_TLB_VPID               2
    68 #define HMVMX_FLUSH_TAGGED_TLB_NONE               3
     65#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID             0
     66#define HMVMX_FLUSH_TAGGED_TLB_EPT                  1
     67#define HMVMX_FLUSH_TAGGED_TLB_VPID                 2
     68#define HMVMX_FLUSH_TAGGED_TLB_NONE                 3
    6969
    7070/** @name HMVMX_READ_XXX
     
    7272 * the guest-CPU or VCPU state but are needed while handling VM-exits.
    7373 */
    74 #define HMVMX_READ_IDT_VECTORING_INFO            RT_BIT_32(0)
    75 #define HMVMX_READ_IDT_VECTORING_ERROR_CODE      RT_BIT_32(1)
    76 #define HMVMX_READ_EXIT_QUALIFICATION            RT_BIT_32(2)
    77 #define HMVMX_READ_EXIT_INSTR_LEN                RT_BIT_32(3)
    78 #define HMVMX_READ_EXIT_INTERRUPTION_INFO        RT_BIT_32(4)
    79 #define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE  RT_BIT_32(5)
    80 #define HMVMX_READ_EXIT_INSTR_INFO               RT_BIT_32(6)
    81 #define HMVMX_READ_GUEST_LINEAR_ADDR             RT_BIT_32(7)
     74#define HMVMX_READ_IDT_VECTORING_INFO               RT_BIT_32(0)
     75#define HMVMX_READ_IDT_VECTORING_ERROR_CODE         RT_BIT_32(1)
     76#define HMVMX_READ_EXIT_QUALIFICATION               RT_BIT_32(2)
     77#define HMVMX_READ_EXIT_INSTR_LEN                   RT_BIT_32(3)
     78#define HMVMX_READ_EXIT_INTERRUPTION_INFO           RT_BIT_32(4)
     79#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE     RT_BIT_32(5)
     80#define HMVMX_READ_EXIT_INSTR_INFO                  RT_BIT_32(6)
     81#define HMVMX_READ_GUEST_LINEAR_ADDR                RT_BIT_32(7)
    8282/** @} */
    83 
    84 /**
    85  * States of the VMCS.
    86  *
    87  * This does not reflect all possible VMCS states but currently only those
    88  * needed for maintaining the VMCS consistently even when thread-context hooks
    89  * are used. Maybe later this can be extended (i.e. Nested Virtualization).
    90  */
    91 #define HMVMX_VMCS_STATE_CLEAR       RT_BIT(0)
    92 #define HMVMX_VMCS_STATE_ACTIVE      RT_BIT(1)
    93 #define HMVMX_VMCS_STATE_LAUNCHED    RT_BIT(2)
    9483
    9584/**
     
    123112 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
    124113 *   due to bugs in Intel CPUs.
    125  * - \#PF need not be intercepted even in real-mode if we have Nested Paging
     114 * - \#PF need not be intercepted even in real-mode if we have nested paging
    126115 * support.
    127116 */
     
    139128/** Profiling macro. */
    140129#ifdef HM_PROFILE_EXIT_DISPATCH
    141 # define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
    142 # define HMVMX_STOP_EXIT_DISPATCH_PROF()  STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
     130# define HMVMX_START_EXIT_DISPATCH_PROF()           STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
     131# define HMVMX_STOP_EXIT_DISPATCH_PROF()            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
    143132#else
    144 # define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
    145 # define HMVMX_STOP_EXIT_DISPATCH_PROF()  do { } while (0)
     133# define HMVMX_START_EXIT_DISPATCH_PROF()           do { } while (0)
     134# define HMVMX_STOP_EXIT_DISPATCH_PROF()            do { } while (0)
    146135#endif
    147136
     
    163152                                                              (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
    164153
    165 /** Macro for importing guest state from the VMCS back into CPUMCTX (intended to be
    166  *  used only from VM-exit handlers). */
    167 #define HMVMX_CPUMCTX_IMPORT_STATE(a_pVCpu, a_fWhat)        (hmR0VmxImportGuestState((a_pVCpu), (a_fWhat)))
    168 
    169154/** Helper macro for VM-exit handlers called unexpectedly. */
    170155#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_pVmxTransient) \
     
    173158        return VERR_VMX_UNEXPECTED_EXIT; \
    174159    } while (0)
    175 
    176 /** Macro for importing segment registers to the VMCS from the guest-CPU context. */
    177 #ifdef VMX_USE_CACHED_VMCS_ACCESSES
    178 # define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
    179     hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
    180                                  VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
    181 #else
    182 # define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
    183     hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
    184                                  VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
    185 #endif
    186 
    187 /** Macro for exporting segment registers to the VMCS from the guest-CPU context. */
    188 #define HMVMX_EXPORT_SREG(Sel, a_pCtxSelReg) \
    189     hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
    190                                  VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
    191160
    192161#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     
    277246    /** Whether the VM-entry failed or not. */
    278247    bool                fVMEntryFailed;
     248    /** Whether we are currently executing a nested-guest. */
     249    bool                fIsNestedGuest;
    279250    /** Alignment. */
    280     uint8_t             abAlignment1[3];
     251    uint8_t             abAlignment1[2];
    281252
    282253    /** The VM-entry interruption-information field. */
     
    299270    /** Whether the hyper debug state was active at the time of VM-exit. */
    300271    bool                fWasHyperDebugStateActive;
    301     /** Whether TSC-offsetting should be setup before VM-entry. */
    302     bool                fUpdateTscOffsettingAndPreemptTimer;
      272    /** Whether TSC-offsetting and the VMX-preemption timer were updated before VM-entry. */
     273    bool                fUpdatedTscOffsettingAndPreemptTimer;
    303274    /** Whether the VM-exit was caused by a page-fault during delivery of a
    304275     *  contributory exception or a page-fault. */
     
    307278     *  external interrupt or NMI. */
    308279    bool                fVectoringPF;
     280    bool                afAlignment0[3];
     281
     282    /** The VMCS info. object. */
     283    PVMXVMCSINFO        pVmcsInfo;
    309284} VMXTRANSIENT;
    310285AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason,               sizeof(uint64_t));
     
    312287AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo,             sizeof(uint64_t));
    313288AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t));
     289AssertCompileMemberAlignment(VMXTRANSIENT, pVmcsInfo,                 sizeof(uint64_t));
    314290AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
    315291/** Pointer to VMX transient state. */
     
    330306 * @returns Strict VBox status code (i.e. informational status codes too).
    331307 * @param   pVCpu           The cross context virtual CPU structure.
    332  * @param   pVmxTransient   Pointer to the VMX-transient structure.
     308 * @param   pVmxTransient   The VMX-transient structure.
    333309 */
    334310#ifndef HMVMX_USE_FUNCTION_TABLE
    335 typedef VBOXSTRICTRC                FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     311typedef VBOXSTRICTRC               FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    336312#else
    337 typedef DECLCALLBACK(VBOXSTRICTRC)  FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     313typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    338314/** Pointer to VM-exit handler. */
    339 typedef FNVMXEXITHANDLER           *PFNVMXEXITHANDLER;
     315typedef FNVMXEXITHANDLER          *PFNVMXEXITHANDLER;
    340316#endif
    341317
     
    347323 * @returns VBox status code, no informational status code returned.
    348324 * @param   pVCpu           The cross context virtual CPU structure.
    349  * @param   pVmxTransient   Pointer to the VMX-transient structure.
     325 * @param   pVmxTransient   The VMX-transient structure.
    350326 *
    351327 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
     
    354330 */
    355331#ifndef HMVMX_USE_FUNCTION_TABLE
    356 typedef int                         FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     332typedef int                        FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    357333#else
    358 typedef FNVMXEXITHANDLER            FNVMXEXITHANDLERNSRC;
     334typedef FNVMXEXITHANDLER           FNVMXEXITHANDLERNSRC;
    359335#endif
    360336
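/*
 * Editorial sketch (not part of this changeset): a minimal VM-exit handler
 * conforming to FNVMXEXITHANDLER.  The handler name is hypothetical; it merely
 * skips the exiting instruction using the VM-exit instruction length.
 */
#if 0 /* illustration only */
static VBOXSTRICTRC hmR0VmxExitExampleSketch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);    /* fills pVmxTransient->cbInstr */
    AssertRCReturn(rc, rc);
    /* ... emulate the exiting instruction here ... */
    pVCpu->cpum.GstCtx.rip += pVmxTransient->cbInstr;       /* advance RIP past the instruction */
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
    return VINF_SUCCESS;
}
#endif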
     
    363339*   Internal Functions                                                                                                           *
    364340*********************************************************************************************************************************/
    365 static void         hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush);
    366 static void         hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr);
    367 static void         hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
    368 static int          hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat);
    369 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
    370                                            RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState);
    371 #if HC_ARCH_BITS == 32
    372 static int          hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
     341#ifndef HMVMX_USE_FUNCTION_TABLE
     342DECLINLINE(VBOXSTRICTRC)           hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     343# define HMVMX_EXIT_DECL           DECLINLINE(VBOXSTRICTRC)
     344# define HMVMX_EXIT_NSRC_DECL      DECLINLINE(int)
     345#else
     346# define HMVMX_EXIT_DECL           static DECLCALLBACK(VBOXSTRICTRC)
     347# define HMVMX_EXIT_NSRC_DECL      HMVMX_EXIT_DECL
    373348#endif
    374 #ifndef HMVMX_USE_FUNCTION_TABLE
    375 DECLINLINE(VBOXSTRICTRC)      hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
    376 # define HMVMX_EXIT_DECL      DECLINLINE(VBOXSTRICTRC)
    377 # define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
    378 #else
    379 # define HMVMX_EXIT_DECL      static DECLCALLBACK(VBOXSTRICTRC)
    380 # define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
     349#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     350DECLINLINE(VBOXSTRICTRC)           hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     351#endif
     352
     353static int  hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
     354#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
     355static int  hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
    381356#endif
    382357
     
    384359 * @{
    385360 */
    386 static FNVMXEXITHANDLER     hmR0VmxExitXcptOrNmi;
    387 static FNVMXEXITHANDLER     hmR0VmxExitExtInt;
    388 static FNVMXEXITHANDLER     hmR0VmxExitTripleFault;
    389 static FNVMXEXITHANDLERNSRC hmR0VmxExitInitSignal;
    390 static FNVMXEXITHANDLERNSRC hmR0VmxExitSipi;
    391 static FNVMXEXITHANDLERNSRC hmR0VmxExitIoSmi;
    392 static FNVMXEXITHANDLERNSRC hmR0VmxExitSmi;
    393 static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow;
    394 static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow;
    395 static FNVMXEXITHANDLER     hmR0VmxExitTaskSwitch;
    396 static FNVMXEXITHANDLER     hmR0VmxExitCpuid;
    397 static FNVMXEXITHANDLER     hmR0VmxExitGetsec;
    398 static FNVMXEXITHANDLER     hmR0VmxExitHlt;
    399 static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;
    400 static FNVMXEXITHANDLER     hmR0VmxExitInvlpg;
    401 static FNVMXEXITHANDLER     hmR0VmxExitRdpmc;
    402 static FNVMXEXITHANDLER     hmR0VmxExitVmcall;
     361static FNVMXEXITHANDLER            hmR0VmxExitXcptOrNmi;
     362static FNVMXEXITHANDLER            hmR0VmxExitExtInt;
     363static FNVMXEXITHANDLER            hmR0VmxExitTripleFault;
     364static FNVMXEXITHANDLERNSRC        hmR0VmxExitInitSignal;
     365static FNVMXEXITHANDLERNSRC        hmR0VmxExitSipi;
     366static FNVMXEXITHANDLERNSRC        hmR0VmxExitIoSmi;
     367static FNVMXEXITHANDLERNSRC        hmR0VmxExitSmi;
     368static FNVMXEXITHANDLERNSRC        hmR0VmxExitIntWindow;
     369static FNVMXEXITHANDLERNSRC        hmR0VmxExitNmiWindow;
     370static FNVMXEXITHANDLER            hmR0VmxExitTaskSwitch;
     371static FNVMXEXITHANDLER            hmR0VmxExitCpuid;
     372static FNVMXEXITHANDLER            hmR0VmxExitGetsec;
     373static FNVMXEXITHANDLER            hmR0VmxExitHlt;
     374static FNVMXEXITHANDLERNSRC        hmR0VmxExitInvd;
     375static FNVMXEXITHANDLER            hmR0VmxExitInvlpg;
     376static FNVMXEXITHANDLER            hmR0VmxExitRdpmc;
     377static FNVMXEXITHANDLER            hmR0VmxExitVmcall;
    403378#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    404 static FNVMXEXITHANDLER     hmR0VmxExitVmclear;
    405 static FNVMXEXITHANDLER     hmR0VmxExitVmlaunch;
    406 static FNVMXEXITHANDLER     hmR0VmxExitVmptrld;
    407 static FNVMXEXITHANDLER     hmR0VmxExitVmptrst;
    408 static FNVMXEXITHANDLER     hmR0VmxExitVmread;
    409 static FNVMXEXITHANDLER     hmR0VmxExitVmresume;
    410 static FNVMXEXITHANDLER     hmR0VmxExitVmwrite;
    411 static FNVMXEXITHANDLER     hmR0VmxExitVmxoff;
    412 static FNVMXEXITHANDLER     hmR0VmxExitVmxon;
     379static FNVMXEXITHANDLER            hmR0VmxExitVmclear;
     380static FNVMXEXITHANDLER            hmR0VmxExitVmlaunch;
     381static FNVMXEXITHANDLER            hmR0VmxExitVmptrld;
     382static FNVMXEXITHANDLER            hmR0VmxExitVmptrst;
     383static FNVMXEXITHANDLER            hmR0VmxExitVmread;
     384static FNVMXEXITHANDLER            hmR0VmxExitVmresume;
     385static FNVMXEXITHANDLER            hmR0VmxExitVmwrite;
     386static FNVMXEXITHANDLER            hmR0VmxExitVmxoff;
     387static FNVMXEXITHANDLER            hmR0VmxExitVmxon;
    413388#endif
    414 static FNVMXEXITHANDLER     hmR0VmxExitRdtsc;
    415 static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
    416 static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;
    417 static FNVMXEXITHANDLER     hmR0VmxExitMovCRx;
    418 static FNVMXEXITHANDLER     hmR0VmxExitMovDRx;
    419 static FNVMXEXITHANDLER     hmR0VmxExitIoInstr;
    420 static FNVMXEXITHANDLER     hmR0VmxExitRdmsr;
    421 static FNVMXEXITHANDLER     hmR0VmxExitWrmsr;
    422 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;
    423 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMsrLoad;
    424 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUndefined;
    425 static FNVMXEXITHANDLER     hmR0VmxExitMwait;
    426 static FNVMXEXITHANDLER     hmR0VmxExitMtf;
    427 static FNVMXEXITHANDLER     hmR0VmxExitMonitor;
    428 static FNVMXEXITHANDLER     hmR0VmxExitPause;
    429 static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMachineCheck;
    430 static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold;
    431 static FNVMXEXITHANDLER     hmR0VmxExitApicAccess;
    432 static FNVMXEXITHANDLER     hmR0VmxExitXdtrAccess;
    433 static FNVMXEXITHANDLER     hmR0VmxExitEptViolation;
    434 static FNVMXEXITHANDLER     hmR0VmxExitEptMisconfig;
    435 static FNVMXEXITHANDLER     hmR0VmxExitRdtscp;
    436 static FNVMXEXITHANDLER     hmR0VmxExitPreemptTimer;
    437 static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd;
    438 static FNVMXEXITHANDLER     hmR0VmxExitXsetbv;
    439 static FNVMXEXITHANDLER     hmR0VmxExitRdrand;
    440 static FNVMXEXITHANDLER     hmR0VmxExitInvpcid;
     389static FNVMXEXITHANDLER            hmR0VmxExitRdtsc;
     390static FNVMXEXITHANDLERNSRC        hmR0VmxExitRsm;
     391static FNVMXEXITHANDLERNSRC        hmR0VmxExitSetPendingXcptUD;
     392static FNVMXEXITHANDLER            hmR0VmxExitMovCRx;
     393static FNVMXEXITHANDLER            hmR0VmxExitMovDRx;
     394static FNVMXEXITHANDLER            hmR0VmxExitIoInstr;
     395static FNVMXEXITHANDLER            hmR0VmxExitRdmsr;
     396static FNVMXEXITHANDLER            hmR0VmxExitWrmsr;
     397static FNVMXEXITHANDLERNSRC        hmR0VmxExitErrInvalidGuestState;
     398static FNVMXEXITHANDLERNSRC        hmR0VmxExitErrMsrLoad;
     399static FNVMXEXITHANDLERNSRC        hmR0VmxExitErrUndefined;
     400static FNVMXEXITHANDLER            hmR0VmxExitMwait;
     401static FNVMXEXITHANDLER            hmR0VmxExitMtf;
     402static FNVMXEXITHANDLER            hmR0VmxExitMonitor;
     403static FNVMXEXITHANDLER            hmR0VmxExitPause;
     404static FNVMXEXITHANDLERNSRC        hmR0VmxExitErrMachineCheck;
     405static FNVMXEXITHANDLERNSRC        hmR0VmxExitTprBelowThreshold;
     406static FNVMXEXITHANDLER            hmR0VmxExitApicAccess;
     407static FNVMXEXITHANDLER            hmR0VmxExitXdtrAccess;
     408static FNVMXEXITHANDLER            hmR0VmxExitEptViolation;
     409static FNVMXEXITHANDLER            hmR0VmxExitEptMisconfig;
     410static FNVMXEXITHANDLER            hmR0VmxExitRdtscp;
     411static FNVMXEXITHANDLER            hmR0VmxExitPreemptTimer;
     412static FNVMXEXITHANDLERNSRC        hmR0VmxExitWbinvd;
     413static FNVMXEXITHANDLER            hmR0VmxExitXsetbv;
     414static FNVMXEXITHANDLER            hmR0VmxExitRdrand;
     415static FNVMXEXITHANDLER            hmR0VmxExitInvpcid;
    441416/** @} */
    442417
    443 static int          hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    444 static int          hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    445 static int          hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    446 static int          hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    447 static int          hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    448 static int          hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    449 static int          hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    450 static uint32_t     hmR0VmxCheckGuestState(PVMCPU pVCpu);
      418/** @name Helpers for hardware exception VM-exit handlers.
     419 * @{
     420 */
     421static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     422static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     423static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     424static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     425static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     426static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     427static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
     428/** @} */
    451429
    452430
     
    454432*   Global Variables                                                                                                             *
    455433*********************************************************************************************************************************/
     434#ifdef VMX_USE_CACHED_VMCS_ACCESSES
     435static const uint32_t g_aVmcsCacheSegBase[] =
     436{
     437    VMX_VMCS_GUEST_ES_BASE_CACHE_IDX,
     438    VMX_VMCS_GUEST_CS_BASE_CACHE_IDX,
     439    VMX_VMCS_GUEST_SS_BASE_CACHE_IDX,
     440    VMX_VMCS_GUEST_DS_BASE_CACHE_IDX,
     441    VMX_VMCS_GUEST_FS_BASE_CACHE_IDX,
     442    VMX_VMCS_GUEST_GS_BASE_CACHE_IDX
     443};
     444AssertCompile(RT_ELEMENTS(g_aVmcsCacheSegBase)  == X86_SREG_COUNT);
     445#endif
     446static const uint32_t g_aVmcsSegBase[] =
     447{
     448    VMX_VMCS_GUEST_ES_BASE,
     449    VMX_VMCS_GUEST_CS_BASE,
     450    VMX_VMCS_GUEST_SS_BASE,
     451    VMX_VMCS_GUEST_DS_BASE,
     452    VMX_VMCS_GUEST_FS_BASE,
     453    VMX_VMCS_GUEST_GS_BASE
     454};
     455static const uint32_t g_aVmcsSegSel[] =
     456{
     457    VMX_VMCS16_GUEST_ES_SEL,
     458    VMX_VMCS16_GUEST_CS_SEL,
     459    VMX_VMCS16_GUEST_SS_SEL,
     460    VMX_VMCS16_GUEST_DS_SEL,
     461    VMX_VMCS16_GUEST_FS_SEL,
     462    VMX_VMCS16_GUEST_GS_SEL
     463};
     464static const uint32_t g_aVmcsSegLimit[] =
     465{
     466    VMX_VMCS32_GUEST_ES_LIMIT,
     467    VMX_VMCS32_GUEST_CS_LIMIT,
     468    VMX_VMCS32_GUEST_SS_LIMIT,
     469    VMX_VMCS32_GUEST_DS_LIMIT,
     470    VMX_VMCS32_GUEST_FS_LIMIT,
     471    VMX_VMCS32_GUEST_GS_LIMIT
     472};
     473static const uint32_t g_aVmcsSegAttr[] =
     474{
     475    VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
     476    VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
     477    VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
     478    VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
     479    VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
     480    VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
     481};
     482AssertCompile(RT_ELEMENTS(g_aVmcsSegSel)   == X86_SREG_COUNT);
     483AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
     484AssertCompile(RT_ELEMENTS(g_aVmcsSegBase)  == X86_SREG_COUNT);
     485AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr)  == X86_SREG_COUNT);
     486
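/*
 * Editorial sketch (not part of this changeset): the parallel tables above let
 * segment VMCS fields be looked up by X86_SREG_XXX index, replacing the removed
 * HMVMX_IMPORT_SREG/HMVMX_EXPORT_SREG per-register macros.  Reading CS, roughly:
 */
#if 0 /* illustration only */
    uint32_t u32Sel, u32Limit, u32Attr;
    uint64_t u64Base;
    int rc = VMXReadVmcs32(g_aVmcsSegSel[X86_SREG_CS],   &u32Sel);      /* VMX_VMCS16_GUEST_CS_SEL */
    rc    |= VMXReadVmcs32(g_aVmcsSegLimit[X86_SREG_CS], &u32Limit);    /* VMX_VMCS32_GUEST_CS_LIMIT */
    rc    |= VMXReadVmcs32(g_aVmcsSegAttr[X86_SREG_CS],  &u32Attr);     /* VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS */
    rc    |= VMXReadVmcsGstN(g_aVmcsSegBase[X86_SREG_CS], &u64Base);    /* VMX_VMCS_GUEST_CS_BASE */
    AssertRCReturn(rc, rc);
#endif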
    456487#ifdef HMVMX_USE_FUNCTION_TABLE
    457 
    458488/**
    459489 * VMX_EXIT dispatch table.
     
    578608
    579609/**
      610 * Gets the CR0 guest/host mask that does not change through the lifetime of a VM.
     611 *
     612 * Any bit set in this mask is owned by the host/hypervisor and would cause a
     613 * VM-exit when modified by the guest.
     614 *
     615 * @returns The static CR0 guest/host mask.
     616 * @param   pVCpu   The cross context virtual CPU structure.
     617 */
     618DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr0Mask(PVMCPU pVCpu)
     619{
     620    /*
      621     * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD,
      622     * ET, NW) and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
     623     */
     624    /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
     625     *        enmGuestMode to be in-sync with the current mode. See @bugref{6398}
     626     *        and @bugref{6944}. */
     627    PVM pVM = pVCpu->CTX_SUFF(pVM);
     628    return (  X86_CR0_PE
     629            | X86_CR0_NE
     630            | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
     631            | X86_CR0_PG
     632            | X86_CR0_ET     /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
     633            | X86_CR0_CD     /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
     634            | X86_CR0_NW);   /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
     635}
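/*
 * Editorial sketch (not part of this changeset): the fixed mask is typically
 * committed to the VMCS as the CR0 guest/host mask, roughly:
 */
#if 0 /* illustration only */
    uint64_t const u64Cr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
    int rc = VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
    AssertRCReturn(rc, rc);
#endif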
     636
     637
     638/**
     639 * Gets the CR4 guest/host mask that does not change through the lifetime of a VM.
     640 *
     641 * Any bit set in this mask is owned by the host/hypervisor and would cause a
     642 * VM-exit when modified by the guest.
     643 *
     644 * @returns The static CR4 guest/host mask.
     645 * @param   pVCpu   The cross context virtual CPU structure.
     646 */
     647DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr4Mask(PVMCPU pVCpu)
     648{
     649    /*
      650     * We need to look at the host features here (e.g. OSXSAVE, PCID) because
     651     * these bits are reserved on hardware that does not support them. Since the
     652     * CPU cannot refer to our virtual CPUID, we need to intercept CR4 changes to
     653     * these  bits and handle it depending on whether we expose them to the guest.
     654     */
     655    PVM pVM = pVCpu->CTX_SUFF(pVM);
     656    bool const fXSaveRstor = pVM->cpum.ro.HostFeatures.fXSaveRstor;
     657    bool const fPcid       = pVM->cpum.ro.HostFeatures.fPcid;
     658    return (  X86_CR4_VMXE
     659            | X86_CR4_VME
     660            | X86_CR4_PAE
     661            | X86_CR4_PGE
     662            | X86_CR4_PSE
     663            | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
     664            | (fPcid       ? X86_CR4_PCIDE   : 0));
     665}
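/*
 * Editorial sketch (not part of this changeset): committed analogously to the
 * CR0 mask above:
 */
#if 0 /* illustration only */
    int rc = VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, hmR0VmxGetFixedCr4Mask(pVCpu));
    AssertRCReturn(rc, rc);
#endif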
     666
     667
     668/**
      669 * Returns whether the VM-exit MSR-store area differs from the VM-exit MSR-load
     670 * area.
     671 *
     672 * @returns @c true if it's different, @c false otherwise.
     673 * @param   pVmcsInfo       The VMCS info. object.
     674 */
     675DECL_FORCE_INLINE(bool) hmR0VmxIsSeparateExitMsrStoreAreaVmcs(PCVMXVMCSINFO pVmcsInfo)
     676{
     677    return RT_BOOL(   pVmcsInfo->pvGuestMsrStore != pVmcsInfo->pvGuestMsrLoad
     678                   && pVmcsInfo->pvGuestMsrStore);
     679}
     680
     681
     682/**
     683 * Adds one or more exceptions to the exception bitmap and commits it to the current
     684 * VMCS.
     685 *
     686 * @returns VBox status code.
     687 * @param   pVmxTransient   The VMX-transient structure.
     688 * @param   uXcptMask       The exception(s) to add.
     689 */
     690static int hmR0VmxAddXcptInterceptMask(PVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
     691{
     692    PVMXVMCSINFO pVmcsInfo   = pVmxTransient->pVmcsInfo;
     693    uint32_t     uXcptBitmap = pVmcsInfo->u32XcptBitmap;
     694    if ((uXcptBitmap & uXcptMask) != uXcptMask)
     695    {
     696        uXcptBitmap |= uXcptMask;
     697        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
     698        AssertRCReturn(rc, rc);
     699        pVmcsInfo->u32XcptBitmap = uXcptBitmap;
     700    }
     701    return VINF_SUCCESS;
     702}
     703
     704
     705/**
     706 * Adds an exception to the exception bitmap and commits it to the current VMCS.
     707 *
     708 * @returns VBox status code.
     709 * @param   pVmxTransient   The VMX-transient structure.
     710 * @param   uXcpt           The exception to add.
     711 */
     712static int hmR0VmxAddXcptIntercept(PVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
     713{
     714    Assert(uXcpt <= X86_XCPT_LAST);
     715    return hmR0VmxAddXcptInterceptMask(pVmxTransient, RT_BIT_32(uXcpt));
     716}
     717
     718
     719/**
      720 * Removes one or more exceptions from the exception bitmap and commits it to the
     721 * current VMCS.
     722 *
     723 * This takes care of not removing the exception intercept if a nested-guest
     724 * requires the exception to be intercepted.
     725 *
     726 * @returns VBox status code.
     727 * @param   pVCpu           The cross context virtual CPU structure.
     728 * @param   pVmxTransient   The VMX-transient structure.
     729 * @param   uXcptMask       The exception(s) to remove.
     730 */
     731static int hmR0VmxRemoveXcptInterceptMask(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
     732{
     733    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     734    uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
     735    if (u32XcptBitmap & uXcptMask)
     736    {
     737#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     738        if (!pVmxTransient->fIsNestedGuest)
     739        { /* likely */ }
     740        else
     741        {
     742            PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     743            uXcptMask &= ~pVmcsNstGst->u32XcptBitmap;
     744        }
     745#endif
     746#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     747        uXcptMask &= ~(  RT_BIT(X86_XCPT_BP)
     748                       | RT_BIT(X86_XCPT_DE)
     749                       | RT_BIT(X86_XCPT_NM)
     750                       | RT_BIT(X86_XCPT_TS)
     751                       | RT_BIT(X86_XCPT_UD)
     752                       | RT_BIT(X86_XCPT_NP)
     753                       | RT_BIT(X86_XCPT_SS)
     754                       | RT_BIT(X86_XCPT_GP)
     755                       | RT_BIT(X86_XCPT_PF)
     756                       | RT_BIT(X86_XCPT_MF));
     757#elif defined(HMVMX_ALWAYS_TRAP_PF)
     758        uXcptMask &= ~RT_BIT(X86_XCPT_PF);
     759#endif
     760        if (uXcptMask)
     761        {
     762            /* Validate we are not removing any essential exception intercepts. */
     763            Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF))); RT_NOREF(pVCpu);
     764            Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
     765            Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
     766
     767            /* Remove it from the exception bitmap. */
     768            u32XcptBitmap &= ~uXcptMask;
     769
     770            /* Commit and update the cache if necessary. */
     771            if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
     772            {
     773                int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
     774                AssertRCReturn(rc, rc);
     775                pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
     776            }
     777        }
     778    }
     779    return VINF_SUCCESS;
     780}
     781
     782
     783/**
      784 * Removes an exception from the exception bitmap and commits it to the current
     785 * VMCS.
     786 *
     787 * @returns VBox status code.
     788 * @param   pVCpu           The cross context virtual CPU structure.
     789 * @param   pVmxTransient   The VMX-transient structure.
     790 * @param   uXcpt           The exception to remove.
     791 */
     792static int hmR0VmxRemoveXcptIntercept(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
     793{
     794    return hmR0VmxRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
     795}
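/*
 * Editorial sketch (not part of this changeset): typical pairing from a VM-exit
 * handler; the nested-guest check above ensures removal never drops an intercept
 * the nested-guest still requires.
 */
#if 0 /* illustration only */
    int rc = hmR0VmxAddXcptIntercept(pVmxTransient, X86_XCPT_GP);           /* start intercepting #GP */
    /* ... */
    rc = hmR0VmxRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);     /* stop, unless still needed */
#endif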
     796
     797
     798/**
     799 * Loads the VMCS specified by the VMCS info. object.
     800 *
     801 * @returns VBox status code.
     802 * @param   pVmcsInfo       The VMCS info. object.
     803 */
     804static int hmR0VmxLoadVmcs(PVMXVMCSINFO pVmcsInfo)
     805{
     806    Assert(pVmcsInfo);
     807    Assert(pVmcsInfo->HCPhysVmcs);
     808    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     809
     810    if (pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_CLEAR)
     811    {
     812        int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);
     813        if (RT_SUCCESS(rc))
     814        {
     815            pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
     816            return VINF_SUCCESS;
     817        }
     818        return rc;
     819    }
     820    return VERR_VMX_INVALID_VMCS_LAUNCH_STATE;
     821}
     822
     823
     824/**
     825 * Clears the VMCS specified by the VMCS info. object.
     826 *
     827 * @returns VBox status code.
     828 * @param   pVmcsInfo       The VMCS info. object.
     829 */
     830static int hmR0VmxClearVmcs(PVMXVMCSINFO pVmcsInfo)
     831{
     832    Assert(pVmcsInfo);
     833    Assert(pVmcsInfo->HCPhysVmcs);
     834    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     835
     836    int rc = VMXClearVmcs(pVmcsInfo->HCPhysVmcs);
     837    if (RT_SUCCESS(rc))
     838        pVmcsInfo->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
     839    return rc;
     840}
     841
     842
     843#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     844/**
     845 * Switches the current VMCS to the one specified.
     846 *
     847 * @returns VBox status code.
     848 * @param   pVmcsInfoFrom   The VMCS info. object we are switching from.
     849 * @param   pVmcsInfoTo     The VMCS info. object we are switching to.
     850 *
     851 * @remarks Called with interrupts disabled.
     852 */
     853static int hmR0VmxSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
     854{
     855    Assert(pVmcsInfoFrom);
     856    Assert(pVmcsInfoTo);
     857
     858    /*
     859     * Clear the VMCS we are switching out if it has not already been cleared.
     860     * This will sync any CPU internal data back to the VMCS.
     861     */
     862    if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
     863    {
     864        int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
     865        if (RT_SUCCESS(rc))
     866        { /* likely */ }
     867        else
     868            return rc;
     869    }
     870
     871    /*
     872     * Clear the VMCS we are switching to if it has not already been cleared.
      873     * This will initialize the VMCS launch state to "clear", which is required for loading it.
     874     *
     875     * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
     876     */
     877    if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
     878    {
     879        int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
     880        if (RT_SUCCESS(rc))
     881        { /* likely */ }
     882        else
     883            return rc;
     884    }
     885
     886    /*
     887     * Finally, load the VMCS we are switching to.
     888     */
     889    return hmR0VmxLoadVmcs(pVmcsInfoTo);
     890}
     891#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
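/*
 * Editorial sketch (not part of this changeset): on nested-guest VM-entry, the
 * caller would switch from the guest VMCS to the nested-guest VMCS, roughly:
 */
#if 0 /* illustration only */
    int rc = hmR0VmxSwitchVmcs(&pVCpu->hm.s.vmx.VmcsInfo, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
    AssertRCReturn(rc, rc);
#endif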
     892
     893
     894/**
    580895 * Updates the VM's last error record.
    581896 *
     
    605920 *
    606921 * @returns VBox status code.
    607  * @param   pVmxTransient   Pointer to the VMX transient structure.
     922 * @param   pVmxTransient   The VMX-transient structure.
    608923 *
    609924 * @remarks No-long-jump zone!!!
     
    616931}
    617932
     933
    618934#ifdef VBOX_STRICT
    619935/**
     
    622938 *
    623939 * @returns VBox status code.
    624  * @param   pVmxTransient   Pointer to the VMX transient structure.
     940 * @param   pVmxTransient   The VMX-transient structure.
    625941 *
    626942 * @remarks No-long-jump zone!!!
     
    639955 *
    640956 * @returns VBox status code.
    641  * @param   pVmxTransient   Pointer to the VMX transient structure.
     957 * @param   pVmxTransient   The VMX-transient structure.
    642958 *
    643959 * @remarks No-long-jump zone!!!
     
    657973 *
    658974 * @returns VBox status code.
    659  * @param   pVmxTransient   Pointer to the VMX transient structure.
     975 * @param   pVmxTransient   The VMX-transient structure.
    660976 */
    661977DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
     
    676992 *
    677993 * @returns VBox status code.
    678  * @param   pVmxTransient   Pointer to the VMX transient structure.
     994 * @param   pVmxTransient   The VMX-transient structure.
    679995 */
    680996DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
     
    6951011 *
    6961012 * @returns VBox status code.
    697  * @param   pVmxTransient   Pointer to the VMX transient structure.
     1013 * @param   pVmxTransient   The VMX-transient structure.
    6981014 */
    6991015DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
     
    7141030 *
    7151031 * @returns VBox status code.
    716  * @param   pVmxTransient   Pointer to the VMX transient structure.
     1032 * @param   pVmxTransient   The VMX-transient structure.
    7171033 */
    7181034DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
     
    7341050 * @param   pVCpu           The cross context virtual CPU structure of the
    7351051 *                          calling EMT. (Required for the VMCS cache case.)
    736  * @param   pVmxTransient   Pointer to the VMX transient structure.
     1052 * @param   pVmxTransient   The VMX-transient structure.
    7371053 */
    7381054DECLINLINE(int) hmR0VmxReadExitQualVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     
    7541070 * @param   pVCpu           The cross context virtual CPU structure of the
    7551071 *                          calling EMT. (Required for the VMCS cache case.)
    756  * @param   pVmxTransient   Pointer to the VMX transient structure.
     1072 * @param   pVmxTransient   The VMX-transient structure.
    7571073 */
    7581074DECLINLINE(int) hmR0VmxReadGuestLinearAddrVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     
    7731089 *
    7741090 * @returns VBox status code.
    775  * @param   pVmxTransient   Pointer to the VMX transient structure.
     1091 * @param   pVmxTransient   The VMX-transient structure.
    7761092 *
    7771093 * @remarks No-long-jump zone!!!
     
    7941110 *
    7951111 * @returns VBox status code.
    796  * @param   pVmxTransient   Pointer to the VMX transient structure.
     1112 * @param   pVmxTransient   The VMX-transient structure.
    7971113 */
    7981114DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
     
    8261142    if (pVM)
    8271143    {
    828         /* Write the VMCS revision dword to the VMXON region. */
     1144        /* Write the VMCS revision identifier to the VMXON region. */
    8291145        *(uint32_t *)pvCpuPage = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
    8301146    }
     
    8861202
    8871203/**
    888  * Allocates and maps one physically contiguous page. The allocated page is
    889  * zero'd out. (Used by various VT-x structures).
     1204 * Allocates and maps a physically contiguous page. The allocated page is
     1205 * zero'd out (used by various VT-x structures).
    8901206 *
    8911207 * @returns IPRT status code.
     
    8981214static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
    8991215{
    900     AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
    901     AssertPtrReturn(ppVirt,  VERR_INVALID_PARAMETER);
    902     AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
    903 
    904     int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
     1216    AssertPtr(pMemObj);
     1217    AssertPtr(ppVirt);
     1218    AssertPtr(pHCPhys);
     1219    int rc = RTR0MemObjAllocCont(pMemObj, X86_PAGE_4K_SIZE, false /* fExecutable */);
    9051220    if (RT_FAILURE(rc))
    9061221        return rc;
    9071222    *ppVirt  = RTR0MemObjAddress(*pMemObj);
    9081223    *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
    909     ASMMemZero32(*ppVirt, PAGE_SIZE);
     1224    ASMMemZero32(*ppVirt, X86_PAGE_4K_SIZE);
    9101225    return VINF_SUCCESS;
    9111226}
     
    9131228
    9141229/**
    915  * Frees and unmaps an allocated physical page.
      1230 * Frees and unmaps an allocated physical page.
    9161231 *
    9171232 * @param   pMemObj         Pointer to the ring-0 memory object.
     
    9261241    AssertPtr(ppVirt);
    9271242    AssertPtr(pHCPhys);
    928     if (*pMemObj != NIL_RTR0MEMOBJ)
    929     {
    930         int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
    931         AssertRC(rc);
    932         *pMemObj = NIL_RTR0MEMOBJ;
    933         *ppVirt  = 0;
    934         *pHCPhys = 0;
    935     }
    936 }
    937 
    938 
    939 /**
    940  * Worker function to free VT-x related structures.
     1243    /* NULL is valid, accepted and ignored by the free function below. */
     1244    RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
     1245    *pMemObj = NIL_RTR0MEMOBJ;
     1246    *ppVirt  = NULL;
     1247    *pHCPhys = NIL_RTHCPHYS;
     1248}
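/*
 * Editorial sketch (not part of this changeset): allocation/cleanup pairing;
 * freeing is safe even when the object is already NIL_RTR0MEMOBJ.
 */
#if 0 /* illustration only */
    RTR0MEMOBJ hMemObj    = NIL_RTR0MEMOBJ;
    RTR0PTR    pvPage     = NULL;
    RTHCPHYS   HCPhysPage = NIL_RTHCPHYS;
    int rc = hmR0VmxPageAllocZ(&hMemObj, &pvPage, &HCPhysPage);
    if (RT_SUCCESS(rc))
    {
        /* ... use the zero'd page ... */
        hmR0VmxPageFree(&hMemObj, &pvPage, &HCPhysPage);
    }
#endif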
     1249
     1250
     1251/**
     1252 * Initializes a VMCS info. object.
     1253 *
     1254 * @param   pVmcsInfo   The VMCS info. object.
     1255 */
     1256static void hmR0VmxInitVmcsInfo(PVMXVMCSINFO pVmcsInfo)
     1257{
     1258    RT_ZERO(*pVmcsInfo);
     1259
     1260    Assert(pVmcsInfo->hMemObjVmcs          == NIL_RTR0MEMOBJ);
     1261    Assert(pVmcsInfo->hMemObjMsrBitmap     == NIL_RTR0MEMOBJ);
     1262    Assert(pVmcsInfo->hMemObjGuestMsrLoad  == NIL_RTR0MEMOBJ);
     1263    Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ);
     1264    Assert(pVmcsInfo->hMemObjHostMsrLoad   == NIL_RTR0MEMOBJ);
     1265    pVmcsInfo->HCPhysVmcs          = NIL_RTHCPHYS;
     1266    pVmcsInfo->HCPhysMsrBitmap     = NIL_RTHCPHYS;
     1267    pVmcsInfo->HCPhysGuestMsrLoad  = NIL_RTHCPHYS;
     1268    pVmcsInfo->HCPhysGuestMsrStore = NIL_RTHCPHYS;
     1269    pVmcsInfo->HCPhysHostMsrLoad   = NIL_RTHCPHYS;
     1270    pVmcsInfo->HCPhysVirtApic      = NIL_RTHCPHYS;
     1271    pVmcsInfo->HCPhysEPTP          = NIL_RTHCPHYS;
     1272    pVmcsInfo->u64VmcsLinkPtr      = NIL_RTHCPHYS;
     1273}
     1274
     1275
     1276/**
     1277 * Frees the VT-x structures for a VMCS info. object.
     1278 *
     1279 * @param   pVM         The cross context VM structure.
     1280 * @param   pVmcsInfo   The VMCS info. object.
     1281 */
     1282static void hmR0VmxFreeVmcsInfo(PVM pVM, PVMXVMCSINFO pVmcsInfo)
     1283{
     1284    hmR0VmxPageFree(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs);
     1285
     1286    if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     1287        hmR0VmxPageFree(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap);
     1288
     1289    hmR0VmxPageFree(&pVmcsInfo->hMemObjHostMsrLoad,   &pVmcsInfo->pvHostMsrLoad,   &pVmcsInfo->HCPhysHostMsrLoad);
     1290    hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrLoad,  &pVmcsInfo->pvGuestMsrLoad,  &pVmcsInfo->HCPhysGuestMsrLoad);
     1291    hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrStore, &pVmcsInfo->pvGuestMsrStore, &pVmcsInfo->HCPhysGuestMsrStore);
     1292
     1293    hmR0VmxInitVmcsInfo(pVmcsInfo);
     1294}
     1295
     1296
     1297/**
     1298 * Allocates the VT-x structures for a VMCS info. object.
     1299 *
     1300 * @returns VBox status code.
     1301 * @param   pVCpu           The cross context virtual CPU structure.
     1302 * @param   pVmcsInfo       The VMCS info. object.
     1303 * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
     1304 */
     1305static int hmR0VmxAllocVmcsInfo(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
     1306{
     1307    PVM pVM = pVCpu->CTX_SUFF(pVM);
     1308
     1309    /* Allocate the guest VM control structure (VMCS). */
     1310    int rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs);
     1311    if (RT_SUCCESS(rc))
     1312    {
     1313        if (!fIsNstGstVmcs)
     1314        {
     1315            /* Get the allocated virtual-APIC page from the virtual APIC device. */
     1316            if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
     1317                && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
     1318            {
     1319                rc = APICGetApicPageForCpu(pVCpu, &pVmcsInfo->HCPhysVirtApic, (PRTR0PTR)&pVmcsInfo->pbVirtApic,
     1320                                           NULL /* pR3Ptr */, NULL /* pRCPtr */);
     1321            }
     1322        }
     1323        else
     1324        {
     1325            Assert(pVmcsInfo->HCPhysVirtApic == NIL_RTHCPHYS);
     1326            Assert(!pVmcsInfo->pbVirtApic);
     1327        }
     1328
     1329        if (RT_SUCCESS(rc))
     1330        {
     1331            /*
     1332             * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
     1333             * transparent accesses of specific MSRs.
     1334             *
     1335             * If the condition for enabling MSR bitmaps changes here, don't forget to
     1336             * update HMIsMsrBitmapActive().
     1337             *
      1338             * We don't share MSR bitmaps between the guest and nested-guest so that we
      1339             * don't need to worry about carefully restoring the guest MSR bitmap.
     1340             * The guest visible nested-guest MSR bitmap needs to remain unchanged.
     1341             * Hence, allocate a separate MSR bitmap for the guest and nested-guest.
     1342             */
     1343            if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     1344            {
     1345                rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap);
     1346                if (RT_SUCCESS(rc))
     1347                    ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff));
     1348            }
     1349
     1350            if (RT_SUCCESS(rc))
     1351            {
     1352                /*
     1353                 * Allocate the VM-entry MSR-load area for the guest MSRs.
     1354                 *
      1355                 * Similar to MSR-bitmaps, we do not share the auto MSR-load/store area between
     1356                 * the guest and nested-guest.
     1357                 */
     1358                rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad,
     1359                                       &pVmcsInfo->HCPhysGuestMsrLoad);
     1360                if (RT_SUCCESS(rc))
     1361                {
     1362                    /*
     1363                     * We use the same page for VM-entry MSR-load and VM-exit MSR store areas.
     1364                     * These contain the guest MSRs to load on VM-entry and store on VM-exit.
     1365                     */
     1366                    Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ);
     1367                    pVmcsInfo->pvGuestMsrStore     = pVmcsInfo->pvGuestMsrLoad;
     1368                    pVmcsInfo->HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrLoad;
     1369
     1370                    /* Allocate the VM-exit MSR-load page for the host MSRs. */
     1371                    rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjHostMsrLoad, &pVmcsInfo->pvHostMsrLoad,
     1372                                           &pVmcsInfo->HCPhysHostMsrLoad);
     1373                }
     1374            }
     1375        }
     1376    }
     1377
     1378    return rc;
     1379}
     1380
     1381
     1382/**
     1383 * Free all VT-x structures for the VM.
    9411384 *
    9421385 * @returns IPRT status code.
     
    9451388static void hmR0VmxStructsFree(PVM pVM)
    9461389{
    947     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    948     {
    949         PVMCPU pVCpu = &pVM->aCpus[i];
    950         AssertPtr(pVCpu);
    951 
    952         hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
    953         hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
    954 
    955         if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    956             hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
    957 
    958         hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
    959     }
    960 
    961     hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
    9621390#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    9631391    hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
    9641392#endif
    965 }
    966 
    967 
    968 /**
    969  * Worker function to allocate VT-x related VM structures.
     1393    hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
     1394
     1395    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     1396    {
     1397        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     1398        PVMXVMCSINFO pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfo;
     1399        hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo);
     1400#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1401        if (pVM->cpum.ro.GuestFeatures.fVmx)
     1402        {
     1403            pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
     1404            hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo);
     1405        }
     1406#endif
     1407    }
     1408}
     1409
     1410
     1411/**
     1412 * Allocate all VT-x structures for the VM.
    9701413 *
    9711414 * @returns IPRT status code.
     
    9751418{
    9761419    /*
    977      * Initialize members up-front so we can cleanup properly on allocation failure.
    978      */
    979 #define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix)       \
    980     pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ;         \
    981     pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0;                 \
    982     pVM->hm.s.vmx.HCPhys##a_Name = 0;
    983 
    984 #define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix)    \
    985     pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ;       \
    986     pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0;               \
    987     pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
    988 
     1420     * Sanity check the VMCS size reported by the CPU as we assume 4KB allocations.
     1421     * The VMCS size cannot be more than 4096 bytes.
     1422     *
     1423     * See Intel spec. Appendix A.1 "Basic VMX Information".
     1424     */
     1425    uint32_t const cbVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_SIZE);
     1426    if (cbVmcs <= X86_PAGE_4K_SIZE)
     1427    { /* likely */ }
     1428    else
     1429    {
      1430        pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE;
      1431        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     1432    }
     1433
     1434    /*
     1435     * Initialize/check members up-front so we can cleanup en masse on allocation failures.
     1436     */
    9891437#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    990     VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
     1438    Assert(pVM->hm.s.vmx.hMemObjScratch == NIL_RTR0MEMOBJ);
     1439    Assert(pVM->hm.s.vmx.pbScratch == NULL);
     1440    pVM->hm.s.vmx.HCPhysScratch = NIL_RTHCPHYS;
    9911441#endif
    992     VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
    993 
    994     AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
    995     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    996     {
    997         PVMCPU pVCpu = &pVM->aCpus[i];
    998         VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
    999         VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
    1000         VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
    1001         VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
    1002     }
    1003 #undef VMXLOCAL_INIT_VMCPU_MEMOBJ
    1004 #undef VMXLOCAL_INIT_VM_MEMOBJ
    1005 
    1006     /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
    1007     AssertReturnStmt(RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_SIZE) <= PAGE_SIZE,
    1008                      (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
    1009                      VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
    1010 
    1011     /*
    1012      * Allocate all the VT-x structures.
     1442
     1443    Assert(pVM->hm.s.vmx.hMemObjApicAccess == NIL_RTR0MEMOBJ);
     1444    Assert(pVM->hm.s.vmx.pbApicAccess == NULL);
     1445    pVM->hm.s.vmx.HCPhysApicAccess = NIL_RTHCPHYS;
     1446
     1447    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     1448    {
     1449        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     1450        hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfo);
     1451        hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfoNstGst);
     1452    }
     1453
     1454    /*
     1455     * Allocate per-VM VT-x structures.
    10131456     */
    10141457    int rc = VINF_SUCCESS;
    10151458#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     1459    /* Allocate crash-dump magic scratch page. */
    10161460    rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
    10171461    if (RT_FAILURE(rc))
    1018         goto cleanup;
     1462    {
     1463        hmR0VmxStructsFree(pVM);
     1464        return rc;
     1465    }
    10191466    strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
    10201467    *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
     
    10271474                               &pVM->hm.s.vmx.HCPhysApicAccess);
    10281475        if (RT_FAILURE(rc))
    1029             goto cleanup;
     1476        {
     1477            hmR0VmxStructsFree(pVM);
     1478            return rc;
     1479        }
    10301480    }
    10311481
     
    10331483     * Initialize per-VCPU VT-x structures.
    10341484     */
    1035     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    1036     {
    1037         PVMCPU pVCpu = &pVM->aCpus[i];
    1038         AssertPtr(pVCpu);
    1039 
    1040         /* Allocate the VM control structure (VMCS). */
    1041         rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
    1042         if (RT_FAILURE(rc))
    1043             goto cleanup;
    1044 
    1045         /* Get the allocated virtual-APIC page from the APIC device for transparent TPR accesses. */
    1046         if (   PDMHasApic(pVM)
    1047             && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
    1048         {
    1049             rc = APICGetApicPageForCpu(pVCpu, &pVCpu->hm.s.vmx.HCPhysVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
    1050                                        NULL /* pR3Ptr */, NULL /* pRCPtr */);
    1051             if (RT_FAILURE(rc))
    1052                 goto cleanup;
    1053         }
    1054 
    1055         /*
    1056          * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
    1057          * transparent accesses of specific MSRs.
    1058          *
    1059          * If the condition for enabling MSR bitmaps changes here, don't forget to
    1060          * update HMAreMsrBitmapsAvailable().
    1061          */
    1062         if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    1063         {
    1064             rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
    1065                                    &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
    1066             if (RT_FAILURE(rc))
    1067                 goto cleanup;
    1068             ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
    1069         }
    1070 
    1071         /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
    1072         rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
    1073         if (RT_FAILURE(rc))
    1074             goto cleanup;
    1075 
    1076         /* Allocate the VM-exit MSR-load page for the host MSRs. */
    1077         rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
    1078         if (RT_FAILURE(rc))
    1079             goto cleanup;
     1485    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     1486    {
     1487        /* Allocate the guest VMCS structures. */
     1488        PVMCPU pVCpu = &pVM->aCpus[idCpu];
     1489        rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
     1490        if (RT_SUCCESS(rc))
     1491        {
     1492#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1493            /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */
     1494            if (pVM->cpum.ro.GuestFeatures.fVmx)
     1495            {
     1496                rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
     1497                if (RT_SUCCESS(rc))
     1498                { /* likely */ }
     1499                else
     1500                    break;
     1501            }
     1502#endif
     1503        }
     1504        else
     1505            break;
     1506    }
     1507
     1508    if (RT_FAILURE(rc))
     1509    {
     1510        hmR0VmxStructsFree(pVM);
     1511        return rc;
    10801512    }
    10811513
    10821514    return VINF_SUCCESS;
    1083 
    1084 cleanup:
    1085     hmR0VmxStructsFree(pVM);
    1086     return rc;
    1087 }
    1088 
    1089 
    1090 /**
    1091  * Does global VT-x initialization (called during module initialization).
    1092  *
    1093  * @returns VBox status code.
    1094  */
    1095 VMMR0DECL(int) VMXR0GlobalInit(void)
    1096 {
    1097 #ifdef HMVMX_USE_FUNCTION_TABLE
    1098     AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
    1099 # ifdef VBOX_STRICT
    1100     for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
    1101         Assert(g_apfnVMExitHandlers[i]);
    1102 # endif
     1515}
     1516
     1517
     1518#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1519/**
     1520 * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not.
     1521 *
     1522 * @returns @c true if the MSR is intercepted, @c false otherwise.
     1523 * @param   pvMsrBitmap     The MSR bitmap.
     1524 * @param   offMsr          The MSR byte offset.
     1525 * @param   iBit            The bit offset from the byte offset.
     1526 */
     1527DECLINLINE(bool) hmR0VmxIsMsrBitSet(const void *pvMsrBitmap, uint16_t offMsr, int32_t iBit)
     1528{
     1529    uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
     1530    Assert(pbMsrBitmap);
     1531    Assert(offMsr + (iBit >> 3) <= X86_PAGE_4K_SIZE);
     1532    return ASMBitTest(pbMsrBitmap + offMsr, iBit);
     1533}
    11031534#endif
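/*
 * Editorial worked example (not part of this changeset): per the MSR-bitmap
 * layout documented in hmR0VmxSetMsrPermission below, the read-intercept bit of
 * MSR_K8_LSTAR (0xc0000082) lies at iBit = 0xc0000082 - 0xc0000000 = 0x82 in
 * the high-MSR read range at byte offset 0x400, i.e. it is queried with
 * hmR0VmxIsMsrBitSet(pvMsrBitmap, 0x400, 0x82).
 */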
    1104     return VINF_SUCCESS;
    1105 }
    1106 
    1107 
    1108 /**
    1109  * Does global VT-x termination (called during module termination).
    1110  */
    1111 VMMR0DECL(void) VMXR0GlobalTerm()
    1112 {
    1113     /* Nothing to do currently. */
    1114 }
    1115 
    1116 
    1117 /**
    1118  * Sets up and activates VT-x on the current CPU.
    1119  *
    1120  * @returns VBox status code.
    1121  * @param   pHostCpu        The HM physical-CPU structure.
    1122  * @param   pVM             The cross context VM structure.  Can be
    1123  *                          NULL after a host resume operation.
    1124  * @param   pvCpuPage       Pointer to the VMXON region (can be NULL if @a
    1125  *                          fEnabledByHost is @c true).
    1126  * @param   HCPhysCpuPage   Physical address of the VMXON region (can be 0 if
    1127  *                          @a fEnabledByHost is @c true).
    1128  * @param   fEnabledByHost  Set if SUPR0EnableVTx() or similar was used to
    1129  *                          enable VT-x on the host.
    1130  * @param   pHwvirtMsrs     Pointer to the hardware-virtualization MSRs.
    1131  */
    1132 VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
    1133                               PCSUPHWVIRTMSRS pHwvirtMsrs)
    1134 {
    1135     Assert(pHostCpu);
    1136     Assert(pHwvirtMsrs);
    1137     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1138 
    1139     /* Enable VT-x if it's not already enabled by the host. */
    1140     if (!fEnabledByHost)
    1141     {
    1142         int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
    1143         if (RT_FAILURE(rc))
    1144             return rc;
    1145     }
    1146 
    1147     /*
    1148      * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been
    1149      * using EPTPs) so we don't retain any stale guest-physical mappings which won't get
    1150      * invalidated when flushing by VPID.
    1151      */
    1152     if (pHwvirtMsrs->u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
    1153     {
    1154         hmR0VmxFlushEpt(NULL /* pVCpu */, VMXTLBFLUSHEPT_ALL_CONTEXTS);
    1155         pHostCpu->fFlushAsidBeforeUse = false;
    1156     }
    1157     else
    1158         pHostCpu->fFlushAsidBeforeUse = true;
    1159 
    1160     /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
    1161     ++pHostCpu->cTlbFlushes;
    1162 
    1163     return VINF_SUCCESS;
    1164 }
    1165 
    1166 
    1167 /**
    1168  * Deactivates VT-x on the current CPU.
    1169  *
    1170  * @returns VBox status code.
    1171  * @param   pvCpuPage       Pointer to the VMXON region.
    1172  * @param   HCPhysCpuPage   Physical address of the VMXON region.
    1173  *
    1174  * @remarks This function should never be called when SUPR0EnableVTx() or
    1175  *          similar was used to enable VT-x on the host.
    1176  */
    1177 VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
    1178 {
    1179     RT_NOREF2(pvCpuPage, HCPhysCpuPage);
    1180 
    1181     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1182     return hmR0VmxLeaveRootMode();
    1183 }
    1184 
    1185 
    1186 /**
    1187  * Sets the permission bits for the specified MSR in the MSR bitmap.
    1188  *
    1189  * @param   pVCpu       The cross context virtual CPU structure.
    1190  * @param   uMsr        The MSR value.
    1191  * @param   enmRead     Whether reading this MSR causes a VM-exit.
    1192  * @param   enmWrite    Whether writing this MSR causes a VM-exit.
    1193  */
    1194 static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
    1195 {
    1196     int32_t iBit;
    1197     uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
    1198 
    1199     /*
    1200      * MSR Layout:
     1535
     1536
     1537/**
     1538 * Sets the permission bits for the specified MSR in the given MSR bitmap.
     1539 *
     1540 * If the passed VMCS is a nested-guest VMCS, this function ensures that the
     1541 * read/write intercept is cleared from the MSR bitmap used for hardware-assisted
      1542 * VMX execution of the nested-guest only if the nested-guest is also not
      1543 * intercepting read/write access to this MSR.
     1544 *
     1545 * @param   pVmcsInfo       The VMCS info. object.
     1546 * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
     1547 * @param   idMsr           The MSR value.
     1548 * @param   fMsrpm          The MSR permissions (see VMXMSRPM_XXX). This must
     1549 *                          include both a read -and- a write permission!
     1550 *
     1551 * @sa      HMGetVmxMsrPermission.
     1552 */
     1553static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
     1554{
     1555    uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
     1556    Assert(pbMsrBitmap);
     1557    Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm));
     1558
     1559    /*
     1560     * MSR-bitmap Layout:
    12011561     *   Byte index            MSR range            Interpreted as
    12021562     * 0x000 - 0x3ff    0x00000000 - 0x00001fff    Low MSR read bits.
     
    12061566     *
    12071567     * A bit corresponding to an MSR within the above range causes a VM-exit
    1208      * if the bit is 1 on executions of RDMSR/WRMSR.
    1209      *
     1210      * If an MSR falls out of the MSR range, it always causes a VM-exit.
     1568     * if the bit is 1 on executions of RDMSR/WRMSR.  If an MSR falls out of
      1569     * the MSR range, it always causes a VM-exit.
    12111570     *
    12121571     * See Intel spec. 24.6.9 "MSR-Bitmap Address".
    12131572     */
    1214     if (uMsr <= 0x00001fff)
    1215         iBit = uMsr;
    1216     else if (uMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
    1217     {
    1218         iBit = uMsr - UINT32_C(0xc0000000);
    1219         pbMsrBitmap += 0x400;
     1573    uint16_t const offBitmapRead  = 0;
     1574    uint16_t const offBitmapWrite = 0x800;
     1575    uint16_t       offMsr;
     1576    int32_t        iBit;
     1577    if (idMsr <= UINT32_C(0x00001fff))
     1578    {
     1579        offMsr = 0;
     1580        iBit   = idMsr;
     1581    }
     1582    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
     1583    {
     1584        offMsr = 0x400;
     1585        iBit   = idMsr - UINT32_C(0xc0000000);
    12201586    }
    12211587    else
    1222         AssertMsgFailedReturnVoid(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
    1223 
    1224     Assert(iBit <= 0x1fff);
    1225     if (enmRead == VMXMSREXIT_INTERCEPT_READ)
    1226         ASMBitSet(pbMsrBitmap, iBit);
     1588        AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr));
     1589
     1590    /*
     1591     * Set the MSR read permission.
     1592     */
     1593    uint16_t const offMsrRead = offBitmapRead + offMsr;
     1594    Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
     1595    if (fMsrpm & VMXMSRPM_ALLOW_RD)
     1596    {
     1597#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1598        bool const fClear = !fIsNstGstVmcs ? true
     1599                          : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), offMsrRead, iBit);
     1600#else
     1601        RT_NOREF2(pVCpu, fIsNstGstVmcs);
     1602        bool const fClear = true;
     1603#endif
     1604        if (fClear)
     1605            ASMBitClear(pbMsrBitmap + offMsrRead, iBit);
     1606    }
    12271607    else
    1228         ASMBitClear(pbMsrBitmap, iBit);
    1229 
    1230     if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
    1231         ASMBitSet(pbMsrBitmap + 0x800, iBit);
     1608        ASMBitSet(pbMsrBitmap + offMsrRead, iBit);
     1609
     1610    /*
     1611     * Set the MSR write permission.
     1612     */
     1613    uint16_t const offMsrWrite = offBitmapWrite + offMsr;
     1614    Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
     1615    if (fMsrpm & VMXMSRPM_ALLOW_WR)
     1616    {
     1617#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1618        bool const fClear = !fIsNstGstVmcs ? true
     1619                          : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), offMsrWrite, iBit);
     1620#else
     1621        RT_NOREF2(pVCpu, fIsNstGstVmcs);
     1622        bool const fClear = true;
     1623#endif
     1624        if (fClear)
     1625            ASMBitClear(pbMsrBitmap + offMsrWrite, iBit);
     1626    }
    12321627    else
    1233         ASMBitClear(pbMsrBitmap + 0x800, iBit);
     1628        ASMBitSet(pbMsrBitmap + offMsrWrite, iBit);
    12341629}
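
To make the bitmap arithmetic above concrete, here is a small standalone sketch (plain C with hypothetical names, not the VBox function itself) that locates the read and write bits for a low-range and a high-range MSR the same way hmR0VmxSetMsrPermission does:

    #include <stdint.h>
    #include <stdio.h>

    /* Computes the byte offset and bit index into a 4K VMX MSR bitmap.
       Returns 0 on success, -1 if the MSR lies outside both ranges
       (such MSRs always cause a VM-exit). */
    static int msrBitmapLocate(uint32_t idMsr, int fWrite, uint32_t *poffByte, uint32_t *piBit)
    {
        uint32_t offMsr;
        uint32_t iBit;
        if (idMsr <= UINT32_C(0x00001fff))                              /* Low MSR range. */
        {
            offMsr = 0;
            iBit   = idMsr;
        }
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))  /* High MSR range. */
        {
            offMsr = 0x400;
            iBit   = idMsr - UINT32_C(0xc0000000);
        }
        else
            return -1;
        *poffByte = (fWrite ? 0x800 : 0) + offMsr + (iBit >> 3);        /* Write half starts at 0x800. */
        *piBit    = iBit & 7;
        return 0;
    }

    int main(void)
    {
        uint32_t offByte, iBit;
        msrBitmapLocate(UINT32_C(0x00000174) /* IA32_SYSENTER_CS */, 0, &offByte, &iBit);
        printf("SYSENTER_CS read  bit: byte %#x, bit %u\n", offByte, iBit);
        msrBitmapLocate(UINT32_C(0xc0000082) /* MSR_K8_LSTAR */, 1, &offByte, &iBit);
        printf("LSTAR       write bit: byte %#x, bit %u\n", offByte, iBit);  /* byte 0xc10, bit 2 */
        return 0;
    }
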
    12351630
     
    12411636 * @returns VBox status code.
    12421637 * @param   pVCpu       The cross context virtual CPU structure.
     1638 * @param   pVmcsInfo   The VMCS info. object.
    12431639 * @param   cMsrs       The number of MSRs.
    12441640 */
    1245 static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
     1641static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs)
    12461642{
    12471643    /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
    1248     uint64_t const uVmxMiscMsr       = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc;
    1249     uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(uVmxMiscMsr);
    1250     if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
    1251     {
    1252         LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
     1644    uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
     1645    if (RT_UNLIKELY(cMsrs >= cMaxSupportedMsrs))
     1646    {
     1647        LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
    12531648        pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
    12541649        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    12551650    }
    12561651
    1257     /* Update number of guest MSRs to load/store across the world-switch. */
    1258     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
    1259     rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
    1260 
    1261     /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
    1262     rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);
     1652    /* Commit the MSR counts to the VMCS and update the cache. */
     1653    int rc = VINF_SUCCESS;
     1654    if (pVmcsInfo->cEntryMsrLoad != cMsrs)
     1655        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
     1656    if (pVmcsInfo->cExitMsrStore != cMsrs)
     1657        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
     1658    if (pVmcsInfo->cExitMsrLoad != cMsrs)
     1659        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);
    12631660    AssertRCReturn(rc, rc);
    12641661
    1265     /* Update the VCPU's copy of the MSR count. */
    1266     pVCpu->hm.s.vmx.cMsrs = cMsrs;
     1662    pVmcsInfo->cEntryMsrLoad = cMsrs;
     1663    pVmcsInfo->cExitMsrStore = cMsrs;
     1664    pVmcsInfo->cExitMsrLoad  = cMsrs;
    12671665
    12681666    return VINF_SUCCESS;
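
The cMaxSupportedMsrs bound is decoded from the IA32_VMX_MISC MSR. A standalone sketch of what the VMX_MISC_MAX_MSRS macro is expected to compute (per the Intel SDM, bits 27:25 hold N and the recommended maximum list size is 512 * (N + 1); treat this as an assumption about the macro, not its definition):

    #include <stdint.h>

    /* Recommended maximum number of entries in each VMX MSR list,
       decoded from IA32_VMX_MISC bits 27:25 (Intel SDM). */
    static uint32_t vmxMiscMaxMsrs(uint64_t u64Misc)
    {
        return 512 * (uint32_t)(((u64Misc >> 25) & UINT64_C(7)) + 1);
    }
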
     
    12771675 * @returns VBox status code.
    12781676 * @param   pVCpu               The cross context virtual CPU structure.
    1279  * @param   uMsr                The MSR.
     1677 * @param   pVmxTransient       The VMX-transient structure.
     1678 * @param   idMsr               The MSR.
    12801679 * @param   uGuestMsrValue      Value of the guest MSR.
     1680 * @param   fSetReadWrite       Whether to set the guest read/write access of this
     1681 *                              MSR (thus not causing a VM-exit).
    12811682 * @param   fUpdateHostMsr      Whether to update the value of the host MSR if
    12821683 *                              necessary.
    1283  * @param   pfAddedAndUpdated   Where to store whether the MSR was added -and-
    1284  *                              its value was updated. Optional, can be NULL.
    1285  */
    1286 static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr,
    1287                                        bool *pfAddedAndUpdated)
    1288 {
    1289     PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1290     uint32_t    cMsrs     = pVCpu->hm.s.vmx.cMsrs;
    1291     uint32_t    i;
     1684 */
     1685static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
     1686                                      bool fSetReadWrite, bool fUpdateHostMsr)
     1687{
     1688    PVMXVMCSINFO pVmcsInfo     = pVmxTransient->pVmcsInfo;
     1689    bool const   fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
     1690    PVMXAUTOMSR  pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     1691    uint32_t     cMsrs         = pVmcsInfo->cEntryMsrLoad;
     1692    uint32_t     i;
     1693
     1694    /* Paranoia. */
     1695    Assert(pGuestMsrLoad);
     1696
     1697    /* Check if the MSR already exists in the VM-entry MSR-load area. */
    12921698    for (i = 0; i < cMsrs; i++)
    12931699    {
    1294         if (pGuestMsr->u32Msr == uMsr)
     1700        if (pGuestMsrLoad->u32Msr == idMsr)
    12951701            break;
    1296         pGuestMsr++;
     1702        pGuestMsrLoad++;
    12971703    }
    12981704
     
    13001706    if (i == cMsrs)
    13011707    {
      1708        /* The MSR does not exist; bump the MSR count to make room for the new MSR. */
    13021709        ++cMsrs;
    1303         int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
    1304         AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc);
    1305 
    1306         /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
    1307         if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    1308             hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
     1710        int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
      1711        AssertMsgRCReturn(rc, ("Insufficient space to add MSR %u to the VM-entry MSR-load/store area\n", idMsr), rc);
     1712
     1713        /* Set the guest to read/write this MSR without causing VM-exits. */
     1714        if (   fSetReadWrite
     1715            && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
     1716            hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
    13091717
    13101718        fAdded = true;
    13111719    }
    13121720
    1313     /* Update the MSR values in the auto-load/store MSR area. */
    1314     pGuestMsr->u32Msr    = uMsr;
    1315     pGuestMsr->u64Value  = uGuestMsrValue;
    1316 
    1317     /* Create/update the MSR slot in the host MSR area. */
    1318     PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
     1721    /* Update the MSR value for the newly added or already existing MSR. */
     1722    pGuestMsrLoad->u32Msr   = idMsr;
     1723    pGuestMsrLoad->u64Value = uGuestMsrValue;
     1724
     1725    /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
     1726    if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
     1727    {
     1728        PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
     1729        pGuestMsrStore += i;
     1730        pGuestMsrStore->u32Msr   = idMsr;
     1731        pGuestMsrStore->u64Value = 0;
     1732    }
     1733
     1734    /* Update the corresponding slot in the host MSR area. */
     1735    PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     1736    Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad && pHostMsr != pVmcsInfo->pvGuestMsrStore);
    13191737    pHostMsr += i;
    1320     pHostMsr->u32Msr     = uMsr;
    1321 
    1322     /*
    1323      * Update the host MSR only when requested by the caller AND when we're
    1324      * adding it to the auto-load/store area. Otherwise, it would have been
    1325      * updated by hmR0VmxExportHostMsrs(). We do this for performance reasons.
    1326      */
    1327     bool fUpdatedMsrValue = false;
     1738    pHostMsr->u32Msr = idMsr;
     1739
     1740    /*
     1741     * Only if the caller requests to update the host MSR value AND we've newly added the
     1742     * MSR to the host MSR area do we actually update the value. Otherwise, it will be
     1743     * updated by hmR0VmxUpdateAutoLoadHostMsrs().
     1744     *
     1745     * We do this for performance reasons since reading MSRs may be quite expensive.
     1746     */
    13281747    if (   fAdded
    13291748        && fUpdateHostMsr)
     
    13321751        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    13331752        pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
    1334         fUpdatedMsrValue = true;
    1335     }
    1336 
    1337     if (pfAddedAndUpdated)
    1338         *pfAddedAndUpdated = fUpdatedMsrValue;
     1753    }
    13391754    return VINF_SUCCESS;
    13401755}
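
At its core the function above is an add-or-update over parallel guest-load/host-load arrays. A compact standalone sketch of that pattern (plain C with hypothetical types; the real code additionally touches the VM-exit MSR-store area and the MSR bitmap):

    #include <stdint.h>

    typedef struct { uint32_t idMsr; uint64_t uValue; } AUTOMSR;

    /* Adds idMsr with uGuestValue to the guest-load array, or updates it
       in place if already present; keeps the host-load slot paired at the
       same index. Returns the new count, or 0 if the area is full. */
    static uint32_t autoMsrAddOrUpdate(AUTOMSR *paGuest, AUTOMSR *paHost, uint32_t cMsrs,
                                       uint32_t cMax, uint32_t idMsr, uint64_t uGuestValue,
                                       uint64_t uHostValue)
    {
        uint32_t i;
        for (i = 0; i < cMsrs; i++)             /* Linear scan: the lists are short. */
            if (paGuest[i].idMsr == idMsr)
                break;
        if (i == cMsrs)
        {
            if (cMsrs >= cMax)
                return 0;                       /* No room; the caller must fail the operation. */
            cMsrs++;
            paHost[i].idMsr  = idMsr;           /* New slot: pair up the host entry. */
            paHost[i].uValue = uHostValue;
        }
        paGuest[i].idMsr  = idMsr;
        paGuest[i].uValue = uGuestValue;
        return cMsrs;
    }
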
     
    13461761 *
    13471762 * @returns VBox status code.
    1348  * @param   pVCpu       The cross context virtual CPU structure.
    1349  * @param   uMsr        The MSR.
    1350  */
    1351 static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
    1352 {
    1353     PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1354     uint32_t    cMsrs     = pVCpu->hm.s.vmx.cMsrs;
     1763 * @param   pVCpu           The cross context virtual CPU structure.
     1764 * @param   pVmxTransient   The VMX-transient structure.
     1765 * @param   idMsr           The MSR.
     1766 */
     1767static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t idMsr)
     1768{
     1769    PVMXVMCSINFO pVmcsInfo     = pVmxTransient->pVmcsInfo;
     1770    bool const   fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
     1771    PVMXAUTOMSR  pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     1772    uint32_t     cMsrs         = pVmcsInfo->cEntryMsrLoad;
     1773
     1774    bool const fSeparateExitMsrStorePage = hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo);
    13551775    for (uint32_t i = 0; i < cMsrs; i++)
    13561776    {
    13571777        /* Find the MSR. */
    1358         if (pGuestMsr->u32Msr == uMsr)
     1778        if (pGuestMsrLoad->u32Msr == idMsr)
    13591779        {
    13601780            /* If it's the last MSR, simply reduce the count. */
     
    13651785            }
    13661786
    1367             /* Remove it by swapping the last MSR in place of it, and reducing the count. */
    1368             PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1369             pLastGuestMsr            += cMsrs - 1;
    1370             pGuestMsr->u32Msr         = pLastGuestMsr->u32Msr;
    1371             pGuestMsr->u64Value       = pLastGuestMsr->u64Value;
    1372 
    1373             PVMXAUTOMSR pHostMsr     = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    1374             PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    1375             pLastHostMsr            += cMsrs - 1;
    1376             pHostMsr->u32Msr         = pLastHostMsr->u32Msr;
    1377             pHostMsr->u64Value       = pLastHostMsr->u64Value;
     1787            /* Remove it by copying the last MSR in place of it, and reducing the count. */
     1788            PVMXAUTOMSR pLastGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     1789            pLastGuestMsrLoad            += cMsrs - 1;
     1790            pGuestMsrLoad->u32Msr         = pLastGuestMsrLoad->u32Msr;
     1791            pGuestMsrLoad->u64Value       = pLastGuestMsrLoad->u64Value;
     1792
     1793            /* Remove it from the VM-exit MSR-store area if we are using a different page. */
     1794            if (fSeparateExitMsrStorePage)
     1795            {
     1796                PVMXAUTOMSR pGuestMsrStore     = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
     1797                PVMXAUTOMSR pLastGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
     1798                pGuestMsrStore                += i;
     1799                pLastGuestMsrStore            += cMsrs - 1;
     1800                Assert(pGuestMsrStore->u32Msr == idMsr);
     1801                pGuestMsrStore->u32Msr         = pLastGuestMsrStore->u32Msr;
     1802                pGuestMsrStore->u64Value       = pLastGuestMsrStore->u64Value;
     1803            }
     1804
     1805            /* Remove it from the VM-exit MSR-load area. */
     1806            PVMXAUTOMSR pHostMsr      = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     1807            PVMXAUTOMSR pLastHostMsr  = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     1808            pHostMsr                 += i;
     1809            pLastHostMsr             += cMsrs - 1;
     1810            Assert(pHostMsr->u32Msr == idMsr);
     1811            pHostMsr->u32Msr          = pLastHostMsr->u32Msr;
     1812            pHostMsr->u64Value        = pLastHostMsr->u64Value;
    13781813            --cMsrs;
    13791814            break;
    13801815        }
    1381         pGuestMsr++;
     1816        pGuestMsrLoad++;
    13821817    }
    13831818
    13841819    /* Update the VMCS if the count changed (meaning the MSR was found). */
    1385     if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
    1386     {
    1387         int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
     1820    if (cMsrs != pVmcsInfo->cEntryMsrLoad)
     1821    {
     1822        int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
    13881823        AssertRCReturn(rc, rc);
    13891824
    13901825        /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
    1391         if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    1392             hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
    1393 
    1394         Log4Func(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
     1826        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     1827            hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
     1828
     1829        Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
    13951830        return VINF_SUCCESS;
    13961831    }
     
    14011836
    14021837/**
    1403  * Checks if the specified guest MSR is part of the auto-load/store area in
    1404  * the VMCS.
    1405  *
    1406  * @returns true if found, false otherwise.
     1838 * Checks if the specified guest MSR is part of the VM-entry MSR-load area.
     1839 *
     1840 * @returns @c true if found, @c false otherwise.
     1841 * @param   pVmcsInfo   The VMCS info. object.
     1842 * @param   idMsr       The MSR to find.
     1843 */
     1844static bool hmR0VmxIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
     1845{
     1846    PCVMXAUTOMSR   pGuestMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     1847    uint32_t const cMsrs         = pVmcsInfo->cEntryMsrLoad;
     1848    for (uint32_t i = 0; i < cMsrs; i++)
     1849    {
     1850        if (pGuestMsrLoad->u32Msr == idMsr)
     1851            return true;
     1852        pGuestMsrLoad++;
     1853    }
     1854    return false;
     1855}
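
For reference, the removal in hmR0VmxRemoveAutoLoadStoreMsr above is the classic swap-with-last deletion, applied to each of the paired areas. A standalone sketch (plain C, hypothetical types; entry order within the MSR areas is not significant, which is what makes this safe):

    #include <stdint.h>

    typedef struct { uint32_t idMsr; uint64_t uValue; } AUTOMSR;

    /* Removes idMsr from the paired guest/host arrays by copying the last
       entry over it and shrinking the count. Returns the new count
       (unchanged if the MSR was not found). */
    static uint32_t autoMsrRemove(AUTOMSR *paGuest, AUTOMSR *paHost, uint32_t cMsrs, uint32_t idMsr)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
            if (paGuest[i].idMsr == idMsr)
            {
                paGuest[i] = paGuest[cMsrs - 1];   /* Copy the last entry over the hole, */
                paHost[i]  = paHost[cMsrs - 1];    /* keeping guest/host slots paired.   */
                return cMsrs - 1;
            }
        return cMsrs;
    }
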
     1856
     1857
     1858/**
     1859 * Updates the value of all host MSRs in the VM-exit MSR-load area.
     1860 *
    14071861 * @param   pVCpu       The cross context virtual CPU structure.
    1408  * @param   uMsr        The MSR to find.
    1409  */
    1410 static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
    1411 {
    1412     PVMXAUTOMSR    pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1413     uint32_t const cMsrs     = pVCpu->hm.s.vmx.cMsrs;
    1414 
    1415     for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
    1416     {
    1417         if (pGuestMsr->u32Msr == uMsr)
    1418             return true;
    1419     }
    1420     return false;
    1421 }
    1422 
    1423 
    1424 /**
    1425  * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
    1426  *
    1427  * @param   pVCpu       The cross context virtual CPU structure.
     1862 * @param   pVmcsInfo   The VMCS info. object.
    14281863 *
    14291864 * @remarks No-long-jump zone!!!
    14301865 */
    1431 static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
    1432 {
     1866static void hmR0VmxUpdateAutoLoadHostMsrs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
     1867{
     1868    PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     1869    uint32_t const cMsrs     = pVmcsInfo->cExitMsrLoad;
     1870
    14331871    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1434     PVMXAUTOMSR pHostMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    1435     PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1436     uint32_t const cMsrs  = pVCpu->hm.s.vmx.cMsrs;
    1437 
    1438     for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
    1439     {
    1440         AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
    1441 
     1872    Assert(pHostMsrLoad);
     1873
     1874    for (uint32_t i = 0; i < cMsrs; i++, pHostMsrLoad++)
     1875    {
    14421876        /*
    14431877         * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
    14441878         * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
    14451879         */
    1446         if (pHostMsr->u32Msr == MSR_K6_EFER)
    1447             pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
     1880        if (pHostMsrLoad->u32Msr == MSR_K6_EFER)
     1881            pHostMsrLoad->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
    14481882        else
    1449             pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
    1450     }
    1451 
    1452     pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
     1883            pHostMsrLoad->u64Value = ASMRdMsr(pHostMsrLoad->u32Msr);
     1884    }
    14531885}
    14541886
     
    14671899
    14681900    /*
    1469      * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
     1901     * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in hmR0VmxSetupVmcsProcCtls().
    14701902     */
    14711903    if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
     
    14751907        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
    14761908        {
    1477             pVCpu->hm.s.vmx.u64HostLStarMsr        = ASMRdMsr(MSR_K8_LSTAR);
    1478             pVCpu->hm.s.vmx.u64HostStarMsr         = ASMRdMsr(MSR_K6_STAR);
    1479             pVCpu->hm.s.vmx.u64HostSFMaskMsr       = ASMRdMsr(MSR_K8_SF_MASK);
    1480             pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
     1909            pVCpu->hm.s.vmx.u64HostMsrLStar        = ASMRdMsr(MSR_K8_LSTAR);
     1910            pVCpu->hm.s.vmx.u64HostMsrStar         = ASMRdMsr(MSR_K6_STAR);
     1911            pVCpu->hm.s.vmx.u64HostMsrSfMask       = ASMRdMsr(MSR_K8_SF_MASK);
     1912            pVCpu->hm.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
    14811913        }
    14821914#endif
     
    14921924 * @returns true if it does, false otherwise.
    14931925 * @param   pVCpu       The cross context virtual CPU structure.
    1494  * @param   uMsr        The MSR to check.
    1495  */
    1496 static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
     1926 * @param   idMsr       The MSR to check.
     1927 */
     1928static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t idMsr)
    14971929{
    14981930    NOREF(pVCpu);
     
    15001932    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
    15011933    {
    1502         switch (uMsr)
     1934        switch (idMsr)
    15031935        {
    15041936            case MSR_K8_LSTAR:
     
    15101942    }
    15111943#else
    1512     RT_NOREF(pVCpu, uMsr);
     1944    RT_NOREF(pVCpu, idMsr);
    15131945#endif
    15141946    return false;
     
    15491981        PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    15501982        if (   !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
    1551             && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr
    1552             && pCtx->msrLSTAR        == pVCpu->hm.s.vmx.u64HostLStarMsr
    1553             && pCtx->msrSTAR         == pVCpu->hm.s.vmx.u64HostStarMsr
    1554             && pCtx->msrSFMASK       == pVCpu->hm.s.vmx.u64HostSFMaskMsr)
     1983            && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostMsrKernelGsBase
     1984            && pCtx->msrLSTAR        == pVCpu->hm.s.vmx.u64HostMsrLStar
     1985            && pCtx->msrSTAR         == pVCpu->hm.s.vmx.u64HostMsrStar
     1986            && pCtx->msrSFMASK       == pVCpu->hm.s.vmx.u64HostMsrSfMask)
    15551987        {
    15561988#ifdef VBOX_STRICT
     
    15952027        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
    15962028        {
    1597             ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hm.s.vmx.u64HostLStarMsr);
    1598             ASMWrMsr(MSR_K6_STAR,           pVCpu->hm.s.vmx.u64HostStarMsr);
    1599             ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hm.s.vmx.u64HostSFMaskMsr);
    1600             ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
     2029            ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hm.s.vmx.u64HostMsrLStar);
     2030            ASMWrMsr(MSR_K6_STAR,           pVCpu->hm.s.vmx.u64HostMsrStar);
     2031            ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hm.s.vmx.u64HostMsrSfMask);
     2032            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostMsrKernelGsBase);
    16012033        }
    16022034#endif
     
    16152047 *                                            VMCS content. HMCPU error-field is
    16162048 *                                            updated, see VMX_VCI_XXX.
    1617  * @param   pVCpu   The cross context virtual CPU structure.
    1618  */
    1619 static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
     2049 * @param   pVCpu       The cross context virtual CPU structure.
     2050 * @param   pVmcsInfo   The VMCS info. object.
     2051 */
     2052static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
    16202053{
    16212054    uint32_t u32Val;
    16222055    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
    16232056    AssertRCReturn(rc, rc);
    1624     AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32EntryCtls == u32Val,
    1625                         ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32EntryCtls, u32Val),
     2057    AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
     2058                        ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32EntryCtls, u32Val),
    16262059                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
    16272060                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
     
    16292062    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
    16302063    AssertRCReturn(rc, rc);
    1631     AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32ExitCtls == u32Val,
    1632                         ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ExitCtls, u32Val),
     2064    AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
     2065                        ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ExitCtls, u32Val),
    16332066                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
    16342067                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
     
    16362069    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
    16372070    AssertRCReturn(rc, rc);
    1638     AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32PinCtls == u32Val,
    1639                         ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32PinCtls, u32Val),
     2071    AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
     2072                        ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32PinCtls, u32Val),
    16402073                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
    16412074                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
     
    16432076    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
    16442077    AssertRCReturn(rc, rc);
    1645     AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32ProcCtls == u32Val,
    1646                         ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls, u32Val),
     2078    AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
     2079                        ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ProcCtls, u32Val),
    16472080                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
    16482081                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    16492082
    1650     if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     2083    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    16512084    {
    16522085        rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
    16532086        AssertRCReturn(rc, rc);
    1654         AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 == u32Val,
    1655                             ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls2, u32Val),
     2087        AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
     2088                            ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ProcCtls2, u32Val),
    16562089                            pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
    16572090                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
     
    16602093    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
    16612094    AssertRCReturn(rc, rc);
    1662     AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32XcptBitmap == u32Val,
    1663                         ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32XcptBitmap, u32Val),
     2095    AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
     2096                        ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32XcptBitmap, u32Val),
    16642097                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
    16652098                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
     
    16682101    rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
    16692102    AssertRCReturn(rc, rc);
    1670     AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u64TscOffset == u64Val,
    1671                         ("Cache=%#RX64 VMCS=%#RX64\n", pVCpu->hm.s.vmx.Ctls.u64TscOffset, u64Val),
     2103    AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
     2104                        ("Cache=%#RX64 VMCS=%#RX64\n", pVmcsInfo->u64TscOffset, u64Val),
    16722105                        pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
    16732106                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
     
    16792112#ifdef VBOX_STRICT
    16802113/**
    1681  * Verifies that our cached host EFER value has not changed
    1682  * since we cached it.
     2114 * Verifies that our cached host EFER MSR value has not changed since we cached it.
    16832115 *
    16842116 * @param   pVCpu       The cross context virtual CPU structure.
    1685  */
    1686 static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
     2117 * @param   pVmcsInfo   The VMCS info. object.
     2118 */
     2119static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
    16872120{
    16882121    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    16892122
    1690     if (pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
    1691     {
    1692         uint64_t u64Val;
    1693         int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &u64Val);
     2123    if (pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
     2124    {
     2125        uint64_t const uHostEferMsr      = ASMRdMsr(MSR_K6_EFER);
     2126        uint64_t const uHostEferMsrCache = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
     2127        uint64_t       uVmcsEferMsrVmcs;
     2128        int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &uVmcsEferMsrVmcs);
    16942129        AssertRC(rc);
    16952130
    1696         uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
    1697         AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
     2131        AssertMsgReturnVoid(uHostEferMsr == uVmcsEferMsrVmcs,
     2132                            ("EFER Host/VMCS mismatch! host=%#RX64 vmcs=%#RX64\n", uHostEferMsr, uVmcsEferMsrVmcs));
     2133        AssertMsgReturnVoid(uHostEferMsr == uHostEferMsrCache,
     2134                            ("EFER Host/Cache mismatch! host=%#RX64 cache=%#RX64\n", uHostEferMsr, uHostEferMsrCache));
    16982135    }
    16992136}
     
    17062143 * @param   pVCpu       The cross context virtual CPU structure.
    17072144 */
    1708 static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
     2145static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
    17092146{
    17102147    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
     17122149    /* Verify MSR counts in the VMCS are what we think they should be.  */
    17132150    uint32_t cMsrs;
    1714     int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs);  AssertRC(rc);
    1715     Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
    1716 
    1717     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs);      AssertRC(rc);
    1718     Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
    1719 
    1720     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs);       AssertRC(rc);
    1721     Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
    1722 
    1723     PCVMXAUTOMSR pHostMsr  = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    1724     PCVMXAUTOMSR pGuestMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1725     for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
     2151    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs);
     2152    AssertRC(rc);
     2153    Assert(cMsrs == pVmcsInfo->cEntryMsrLoad);
     2154
     2155    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs);
     2156    AssertRC(rc);
     2157    Assert(cMsrs == pVmcsInfo->cExitMsrStore);
     2158
     2159    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs);
     2160    AssertRC(rc);
     2161    Assert(cMsrs == pVmcsInfo->cExitMsrLoad);
     2162
     2163    /* Verify the MSR counts do not exceed the maximum count supported by the hardware. */
     2164    Assert(cMsrs < VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc));
     2165
     2166    PCVMXAUTOMSR pGuestMsrLoad  = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     2167    PCVMXAUTOMSR pGuestMsrStore = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
     2168    PCVMXAUTOMSR pHostMsrLoad   = (PCVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     2169    bool const   fSeparateExitMsrStorePage = hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo);
     2170    for (uint32_t i = 0; i < cMsrs; i++)
    17262171    {
    17272172        /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
    1728         AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
    1729                                                                     pGuestMsr->u32Msr, cMsrs));
    1730 
    1731         uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
    1732         AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
    1733                                                            pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
    1734 
    1735         /* Verify that the permissions are as expected in the MSR bitmap. */
    1736         if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    1737         {
    1738             VMXMSREXITREAD  enmRead;
    1739             VMXMSREXITWRITE enmWrite;
    1740             rc = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, pGuestMsr->u32Msr, &enmRead, &enmWrite);
    1741             AssertMsgReturnVoid(rc == VINF_SUCCESS, ("HMGetVmxMsrPermission! failed. rc=%Rrc\n", rc));
    1742             if (pGuestMsr->u32Msr == MSR_K6_EFER)
     2173        if (fSeparateExitMsrStorePage)
     2174        {
     2175            AssertMsgReturnVoid(pGuestMsrLoad->u32Msr == pGuestMsrStore->u32Msr,
     2176                                ("GuestMsrLoad=%#RX32 GuestMsrStore=%#RX32 cMsrs=%u\n",
     2177                                 pGuestMsrLoad->u32Msr, pGuestMsrStore->u32Msr, cMsrs));
     2178        }
     2179
     2180        AssertMsgReturnVoid(pHostMsrLoad->u32Msr == pGuestMsrLoad->u32Msr,
     2181                            ("HostMsrLoad=%#RX32 GuestMsrLoad=%#RX32 cMsrs=%u\n",
     2182                             pHostMsrLoad->u32Msr, pGuestMsrLoad->u32Msr, cMsrs));
     2183
     2184        uint64_t const u64Msr = ASMRdMsr(pHostMsrLoad->u32Msr);
     2185        AssertMsgReturnVoid(pHostMsrLoad->u64Value == u64Msr,
     2186                            ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
     2187                             pHostMsrLoad->u32Msr, pHostMsrLoad->u64Value, u64Msr, cMsrs));
     2188
     2189        /* Verify that the accesses are as expected in the MSR bitmap for auto-load/store MSRs. */
     2190        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     2191        {
     2192            uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr);
     2193            if (pGuestMsrLoad->u32Msr == MSR_K6_EFER)
    17432194            {
    1744                 AssertMsgReturnVoid(enmRead  == VMXMSREXIT_INTERCEPT_READ,  ("Passthru read for EFER!?\n"));
    1745                 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
     2195                AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_RD), ("Passthru read for EFER MSR!?\n"));
     2196                AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_WR), ("Passthru write for EFER MSR!?\n"));
    17462197            }
    17472198            else
    17482199            {
    1749                 AssertMsgReturnVoid(enmRead  == VMXMSREXIT_PASSTHRU_READ,  ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
    1750                                                                             pGuestMsr->u32Msr, cMsrs));
    1751                 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
    1752                                                                             pGuestMsr->u32Msr, cMsrs));
     2200                AssertMsgReturnVoid((fMsrpm &  (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR))
     2201                                            == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR),
     2202                                    ("u32Msr=%#RX32 cMsrs=%u No passthru read/write!\n", pGuestMsrLoad->u32Msr, cMsrs));
    17532203            }
    17542204        }
     2205
     2206        /* Move to the next MSR. */
     2207        pHostMsrLoad++;
     2208        pGuestMsrLoad++;
     2209        pGuestMsrStore++;
    17552210    }
    17562211}
     
    17642219 * @param   pVCpu           The cross context virtual CPU structure of the calling
    17652220 *                          EMT.  Can be NULL depending on @a enmTlbFlush.
     2221 * @param   pVmcsInfo       The VMCS info. object. Can be NULL depending on @a
     2222 *                          enmTlbFlush.
    17662223 * @param   enmTlbFlush     Type of flush.
    17672224 *
     
    17712228 * @remarks Can be called with interrupts disabled.
    17722229 */
    1773 static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush)
     2230static void hmR0VmxFlushEpt(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, VMXTLBFLUSHEPT enmTlbFlush)
    17742231{
    17752232    uint64_t au64Descriptor[2];
     
    17792236    {
    17802237        Assert(pVCpu);
    1781         au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
     2238        Assert(pVmcsInfo);
     2239        au64Descriptor[0] = pVmcsInfo->HCPhysEPTP;
    17822240    }
    17832241    au64Descriptor[1] = 0;                       /* MBZ. Intel spec. 33.3 "VMX Instructions" */
    17842242
    17852243    int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]);
    1786     AssertMsg(rc == VINF_SUCCESS,
    1787               ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0, rc));
     2244    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %#RHp failed. rc=%Rrc\n", enmTlbFlush, au64Descriptor[0], rc));
    17882245
    17892246    if (   RT_SUCCESS(rc)
     
    18362293
    18372294/**
    1838  * Invalidates a guest page by guest virtual address. Only relevant for
    1839  * EPT/VPID, otherwise there is nothing really to invalidate.
     2295 * Invalidates a guest page by guest virtual address. Only relevant for EPT/VPID,
     2296 * otherwise there is nothing really to invalidate.
    18402297 *
    18412298 * @returns VBox status code.
     
    18482305    LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));
    18492306
    1850     bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    1851     if (!fFlushPending)
     2307    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
    18522308    {
    18532309        /*
     
    19062362
    19072363    Assert(pHostCpu->idCpu != NIL_RTCPUID);
    1908     pVCpu->hm.s.idLastCpu           = pHostCpu->idCpu;
    1909     pVCpu->hm.s.cTlbFlushes         = pHostCpu->cTlbFlushes;
    1910     pVCpu->hm.s.fForceTLBFlush      = false;
     2364    pVCpu->hm.s.idLastCpu      = pHostCpu->idCpu;
     2365    pVCpu->hm.s.cTlbFlushes    = pHostCpu->cTlbFlushes;
     2366    pVCpu->hm.s.fForceTLBFlush = false;
    19112367    return;
    19122368}
     
    19182374 * @param   pHostCpu    The HM physical-CPU structure.
    19192375 * @param   pVCpu       The cross context virtual CPU structure.
     2376 * @param   pVmcsInfo   The VMCS info. object.
    19202377 *
    19212378 * @remarks  All references to "ASID" in this function pertains to "VPID" in Intel's
     
    19252382 * @remarks  Called with interrupts disabled.
    19262383 */
    1927 static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
     2384static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
    19282385{
    19292386#ifdef VBOX_WITH_STATISTICS
     
    19732430         * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
    19742431         */
    1975         hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmTlbFlushEpt);
     2432        hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hm.s.vmx.enmTlbFlushEpt);
    19762433        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
    19772434        HMVMX_SET_TAGGED_TLB_FLUSHED();
     
    19892446         * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information".
    19902447         */
    1991         hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmTlbFlushEpt);
     2448        hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hm.s.vmx.enmTlbFlushEpt);
    19922449        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
    19932450        HMVMX_SET_TAGGED_TLB_FLUSHED();
     
    20202477 * @param   pHostCpu    The HM physical-CPU structure.
    20212478 * @param   pVCpu       The cross context virtual CPU structure.
     2479 * @param   pVmcsInfo   The VMCS info. object.
    20222480 *
    20232481 * @remarks Called with interrupts disabled.
    20242482 */
    2025 static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
     2483static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
    20262484{
    20272485    AssertPtr(pVCpu);
     
    20542512    if (pVCpu->hm.s.fForceTLBFlush)
    20552513    {
    2056         hmR0VmxFlushEpt(pVCpu, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);
     2514        hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);
    20572515        pVCpu->hm.s.fForceTLBFlush = false;
    20582516    }
     
    21522610 * @param   pHostCpu    The HM physical-CPU structure.
    21532611 * @param   pVCpu       The cross context virtual CPU structure.
     2612 * @param   pVmcsInfo   The VMCS info. object.
    21542613 *
    21552614 * @remarks Called with interrupts disabled.
    21562615 */
    2157 DECLINLINE(void) hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
     2616DECLINLINE(void) hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
    21582617{
    21592618#ifdef HMVMX_ALWAYS_FLUSH_TLB
     
    21632622    switch (pVM->hm.s.vmx.enmTlbFlushType)
    21642623    {
    2165         case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu); break;
    2166         case VMXTLBFLUSHTYPE_EPT:      hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu);  break;
    2167         case VMXTLBFLUSHTYPE_VPID:     hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break;
    2168         case VMXTLBFLUSHTYPE_NONE:     hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break;
     2624        case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu, pVmcsInfo); break;
     2625        case VMXTLBFLUSHTYPE_EPT:      hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu, pVmcsInfo);  break;
     2626        case VMXTLBFLUSHTYPE_VPID:     hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu);            break;
     2627        case VMXTLBFLUSHTYPE_NONE:     hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu);            break;
    21692628        default:
    21702629            AssertMsgFailed(("Invalid flush-tag function identifier\n"));
     
    21852644{
    21862645    /*
    2187      * Determine optimal flush type for Nested Paging.
     2188      * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
     2189      * guest execution (see hmR3InitFinalizeR0()).
     2646     * Determine optimal flush type for nested paging.
      2647     * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up
      2648     * unrestricted guest execution (see hmR3InitFinalizeR0()).
    21902649     */
    21912650    if (pVM->hm.s.fNestedPaging)
     
    22772736
    22782737/**
    2279  * Sets up pin-based VM-execution controls in the VMCS.
     2738 * Sets up the virtual-APIC page address for the VMCS.
     2739 *
     2740 * @returns VBox status code.
     2741 * @param   pVmcsInfo   The VMCS info. object.
     2742 */
     2743DECLINLINE(int) hmR0VmxSetupVmcsVirtApicAddr(PCVMXVMCSINFO pVmcsInfo)
     2744{
     2745    RTHCPHYS const HCPhysVirtApic = pVmcsInfo->HCPhysVirtApic;
     2746    Assert(HCPhysVirtApic != NIL_RTHCPHYS);
     2747    Assert(!(HCPhysVirtApic & 0xfff));                       /* Bits 11:0 MBZ. */
     2748    return VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
     2749}
     2750
     2751
     2752/**
     2753 * Sets up the MSR-bitmap address for the VMCS.
     2754 *
     2755 * @returns VBox status code.
     2756 * @param   pVmcsInfo   The VMCS info. object.
     2757 */
     2758DECLINLINE(int) hmR0VmxSetupVmcsMsrBitmapAddr(PCVMXVMCSINFO pVmcsInfo)
     2759{
     2760    RTHCPHYS const HCPhysMsrBitmap = pVmcsInfo->HCPhysMsrBitmap;
     2761    Assert(HCPhysMsrBitmap != NIL_RTHCPHYS);
     2762    Assert(!(HCPhysMsrBitmap & 0xfff));                      /* Bits 11:0 MBZ. */
     2763    return VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, HCPhysMsrBitmap);
     2764}
     2765
     2766
     2767/**
     2768 * Sets up the APIC-access page address for the VMCS.
    22802769 *
    22812770 * @returns VBox status code.
    22822771 * @param   pVCpu       The cross context virtual CPU structure.
    2283  *
    2284  * @remarks We don't really care about optimizing vmwrites here as it's done only
    2285  *          once per VM and hence we don't care about VMCS-field cache comparisons.
    2286  */
    2287 static int hmR0VmxSetupPinCtls(PVMCPU pVCpu)
     2772 */
     2773DECLINLINE(int) hmR0VmxSetupVmcsApicAccessAddr(PVMCPU pVCpu)
     2774{
     2775    RTHCPHYS const HCPhysApicAccess = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.HCPhysApicAccess;
     2776    Assert(HCPhysApicAccess != NIL_RTHCPHYS);
     2777    Assert(!(HCPhysApicAccess & 0xfff));                     /* Bits 11:0 MBZ. */
     2778    return VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
     2779}
     2780
     2781
     2782/**
     2783 * Sets up the VMCS link pointer for the VMCS.
     2784 *
     2785 * @returns VBox status code.
     2786 * @param   pVmcsInfo       The VMCS info. object.
     2787 */
     2788DECLINLINE(int) hmR0VmxSetupVmcsLinkPtr(PVMXVMCSINFO pVmcsInfo)
     2789{
     2790    uint64_t const u64VmcsLinkPtr = pVmcsInfo->u64VmcsLinkPtr;
     2791    Assert(u64VmcsLinkPtr == UINT64_C(0xffffffffffffffff));  /* Bits 63:0 MB1. */
     2792    return VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, u64VmcsLinkPtr);
     2793}
     2794
     2795
     2796/**
     2797 * Sets up the VM-entry MSR load, VM-exit MSR-store and VM-exit MSR-load addresses
     2798 * in the VMCS.
     2799 *
     2800 * @returns VBox status code.
     2801 * @param   pVmcsInfo       The VMCS info. object.
     2802 */
     2803DECLINLINE(int) hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(PVMXVMCSINFO pVmcsInfo)
     2804{
     2805    RTHCPHYS const HCPhysGuestMsrLoad = pVmcsInfo->HCPhysGuestMsrLoad;
     2806    Assert(HCPhysGuestMsrLoad != NIL_RTHCPHYS);
     2807    Assert(!(HCPhysGuestMsrLoad & 0xf));                     /* Bits 3:0 MBZ. */
     2808
     2809    RTHCPHYS const HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrStore;
     2810    Assert(HCPhysGuestMsrStore != NIL_RTHCPHYS);
     2811    Assert(!(HCPhysGuestMsrStore & 0xf));                    /* Bits 3:0 MBZ. */
     2812
     2813    RTHCPHYS const HCPhysHostMsrLoad = pVmcsInfo->HCPhysHostMsrLoad;
     2814    Assert(HCPhysHostMsrLoad != NIL_RTHCPHYS);
     2815    Assert(!(HCPhysHostMsrLoad & 0xf));                      /* Bits 3:0 MBZ. */
     2816
     2817    int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, HCPhysGuestMsrLoad);
     2818    rc    |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, HCPhysGuestMsrStore);
     2819    rc    |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, HCPhysHostMsrLoad);
     2820    AssertRCReturn(rc, rc);
     2821    return VINF_SUCCESS;
     2822}
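
The assertions in these helpers encode the architectural alignment rules: page-sized structures (virtual-APIC page, MSR bitmap, APIC-access page) must be 4K-aligned, while the three MSR areas only need 16-byte alignment. A trivial standalone sketch of those checks (plain C; HCPHYS here is an illustrative stand-in for the host-physical address type):

    #include <stdint.h>

    typedef uint64_t HCPHYS;   /* Illustrative stand-in for a host-physical address. */

    /* Bits 11:0 MBZ: required for page-sized VMCS structures. */
    static int isPageAligned(HCPHYS HCPhys)    { return (HCPhys & 0xfff) == 0; }

    /* Bits 3:0 MBZ: required for the MSR-load/store areas. */
    static int isMsrAreaAligned(HCPHYS HCPhys) { return (HCPhys & 0xf) == 0; }
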
     2823
     2824
     2825/**
     2826 * Sets up MSR permissions in the MSR bitmap of a VMCS info. object.
     2827 *
     2828 * @param   pVCpu           The cross context virtual CPU structure.
     2829 * @param   pVmcsInfo       The VMCS info. object.
     2830 * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
     2831 */
     2832static void hmR0VmxSetupVmcsMsrPermissions(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
     2833{
     2834    Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
     2835
     2836    /*
     2837     * The guest can access the following MSRs (read, write) without causing
     2838     * VM-exits; they are loaded/stored automatically using fields in the VMCS.
     2839     */
     2840    PVM pVM = pVCpu->CTX_SUFF(pVM);
     2841    hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_CS,  VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2842    hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_ESP, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2843    hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_EIP, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2844    hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_GS_BASE,        VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2845    hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_FS_BASE,        VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2846
     2847#ifdef VBOX_STRICT
     2848    /** @todo NSTVMX: Remove this later. */
     2849    uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_IA32_SYSENTER_CS);
     2850    Assert((fMsrpm & (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)) == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR));
     2851
     2852    fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K8_GS_BASE);
     2853    Assert((fMsrpm & (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)) == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR));
     2854#endif
     2855
     2856    /*
      2857     * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
      2858     * associated with them. We never need to intercept access (writes need to be
      2859     * executed without causing a VM-exit, reads will #GP fault anyway).
      2860     *
      2861     * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
      2862     * read/write it. We swap the guest/host MSR value using the
     2863     * auto-load/store MSR area.
     2864     */
     2865    if (pVM->cpum.ro.GuestFeatures.fIbpb)
     2866        hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_PRED_CMD,  VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2867    if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
     2868        hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2869    if (pVM->cpum.ro.GuestFeatures.fIbrs)
     2870        hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2871
     2872    /*
     2873     * IA32_EFER MSR is always intercepted, see @bugref{9180#c37}.
     2874     */
     2875
     2876#if HC_ARCH_BITS == 64
     2877    /*
      2878     * Allow full read/write access for the following MSRs (mandatory for VT-x)
      2879     * as they are required for 64-bit guests.
     2880     */
     2881    if (pVM->hm.s.fAllow64BitGuests)
     2882    {
     2883        hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_LSTAR,          VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2884        hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K6_STAR,           VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2885        hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_SF_MASK,        VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2886        hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_KERNEL_GS_BASE, VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR);
     2887
     2888# ifdef VBOX_STRICT
     2889        fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K8_GS_BASE);
     2890        Assert((fMsrpm & (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR)) == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR));
     2891# endif
     2892    }
     2893#endif
     2894}
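
Reading a permission back out of the bitmap (as the HMGetVmxMsrPermission calls in the strict block above do) is the inverse of hmR0VmxSetMsrPermission. A standalone sketch for the low MSR range (plain C; the flag values are illustrative only, the real VMXMSRPM_XXX encoding lives in the VBox headers):

    #include <stdint.h>

    #define MSRPM_EXIT_RD  1u   /* Illustrative flag values only. */
    #define MSRPM_EXIT_WR  2u

    /* Returns the exit flags for a low-range MSR (0x00000000..0x00001fff)
       from a 4K VMX MSR bitmap: a set bit means RDMSR/WRMSR VM-exits. */
    static uint32_t msrBitmapQueryLow(uint8_t const *pbBitmap, uint32_t idMsr)
    {
        uint32_t fFlags = 0;
        uint32_t iBit   = idMsr;                 /* Low range: bit index == MSR id. */
        if (pbBitmap[0x000 + (iBit >> 3)] & (1u << (iBit & 7)))
            fFlags |= MSRPM_EXIT_RD;
        if (pbBitmap[0x800 + (iBit >> 3)] & (1u << (iBit & 7)))
            fFlags |= MSRPM_EXIT_WR;
        return fFlags;
    }
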
     2895
     2896
     2897/**
     2898 * Sets up pin-based VM-execution controls in the VMCS.
     2899 *
     2900 * @returns VBox status code.
     2901 * @param   pVCpu           The cross context virtual CPU structure.
     2902 * @param   pVmcsInfo       The VMCS info. object.
     2904 */
     2905static int hmR0VmxSetupVmcsPinCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
    22882906{
    22892907    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    22972915        fVal |= VMX_PIN_CTLS_VIRT_NMI;                       /* Use virtual NMIs and virtual-NMI blocking features. */
    22982916
    2299     /* Enable the VMX preemption timer. */
     2917    /* Enable the VMX-preemption timer. */
    23002918    if (pVM->hm.s.vmx.fUsePreemptTimer)
    23012919    {
     
    23252943    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
    23262944    AssertRCReturn(rc, rc);
    2327     pVCpu->hm.s.vmx.Ctls.u32PinCtls = fVal;
     2945    pVmcsInfo->u32PinCtls = fVal;
    23282946
    23292947    return VINF_SUCCESS;
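The fVal/fZap pattern used here and in the other control-setup routines follows the VMX capability MSR convention: the allowed0 bits must be 1 in the final control value and the allowed1 bits are the only ones that may be 1. A minimal sketch of the consistency check, with an invented helper name:

    /* Illustrative sketch: validate a candidate control value against the
       allowed0/allowed1 pair reported by a VMX capability MSR. This is the
       check behind the "(fVal & fZap) != fVal" guards in these routines. */
    static bool sketchIsVmxCtlValid(uint32_t fVal, uint32_t fAllowed0, uint32_t fAllowed1)
    {
        if ((fVal & fAllowed0) != fAllowed0)    /* All must-be-one bits set? */
            return false;
        return (fVal & fAllowed1) == fVal;      /* No may-be-one bit exceeded? */
    }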
     
    23352953 *
    23362954 * @returns VBox status code.
    2337  * @param   pVCpu       The cross context virtual CPU structure.
    2338  *
    2339  * @remarks We don't really care about optimizing vmwrites here as it's done only
    2340  *          once per VM and hence we don't care about VMCS-field cache comparisons.
    2341  */
    2342 static int hmR0VmxSetupProcCtls2(PVMCPU pVCpu)
     2955 * @param   pVCpu           The cross context virtual CPU structure.
     2956 * @param   pVmcsInfo       The VMCS info. object.
     2958 */
     2959static int hmR0VmxSetupVmcsProcCtls2(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
    23432960{
    23442961    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    23542971        fVal |= VMX_PROC_CTLS2_EPT;
    23552972
    2356     /*
    2357      * Enable the INVPCID instruction if supported by the hardware and we expose
    2358      * it to the guest. Without this, guest executing INVPCID would cause a #UD.
    2359      */
     2973    /* Enable the INVPCID instruction if supported by the hardware and we expose
      2974       it to the guest. Without this, a guest executing INVPCID would cause a #UD. */
    23602975    if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID)
    23612976        && pVM->cpum.ro.GuestFeatures.fInvpcid)
     
    23662981        fVal |= VMX_PROC_CTLS2_VPID;
    23672982
    2368     /* Enable Unrestricted guest execution. */
     2983    /* Enable unrestricted guest execution. */
    23692984    if (pVM->hm.s.vmx.fUnrestrictedGuest)
    23702985        fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
     
    23883003    if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    23893004    {
    2390         Assert(pVM->hm.s.vmx.HCPhysApicAccess);
    2391         Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff));    /* Bits 11:0 MBZ. */
    2392         fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;              /* Virtualize APIC accesses. */
    2393         int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
     3005        fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
     3006        int rc = hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
    23943007        AssertRCReturn(rc, rc);
    23953008    }
    23963009
    2397     /* Enable RDTSCP. */
    2398     if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP)
     3010    /* Enable the RDTSCP instruction if supported by the hardware and we expose
      3011       it to the guest. Without this, a guest executing RDTSCP would cause a #UD. */
     3012    if (   (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP)
     3013        && pVM->cpum.ro.GuestFeatures.fRdTscP)
    23993014        fVal |= VMX_PROC_CTLS2_RDTSCP;
    24003015
     
    24223037    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
    24233038    AssertRCReturn(rc, rc);
    2424     pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 = fVal;
     3039    pVmcsInfo->u32ProcCtls2 = fVal;
    24253040
    24263041    return VINF_SUCCESS;
     
    24323047 *
    24333048 * @returns VBox status code.
    2434  * @param   pVCpu       The cross context virtual CPU structure.
    2435  *
    2436  * @remarks We don't really care about optimizing vmwrites here as it's done only
    2437  *          once per VM and hence we don't care about VMCS-field cache comparisons.
    2438  */
    2439 static int hmR0VmxSetupProcCtls(PVMCPU pVCpu)
     3049 * @param   pVCpu           The cross context virtual CPU structure.
     3050 * @param   pVmcsInfo       The VMCS info. object.
     3052 */
     3053static int hmR0VmxSetupVmcsProcCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
    24403054{
    24413055    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3056
    24423057    uint32_t       fVal = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0;     /* Bits set here must be set in the VMCS. */
    24433058    uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
     
    24553070        ||  (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
    24563071    {
    2457         LogRelFunc(("Unsupported VMX_PROC_CTLS_MOV_DR_EXIT combo!"));
    24583072        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
    24593073        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    24603074    }
    24613075
    2462     /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
     3076    /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
    24633077    if (!pVM->hm.s.fNestedPaging)
    24643078    {
    2465         Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);                /* Paranoia. */
     3079        Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
    24663080        fVal |= VMX_PROC_CTLS_INVLPG_EXIT
    24673081             |  VMX_PROC_CTLS_CR3_LOAD_EXIT
     
    24733087        && pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
    24743088    {
    2475         Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
    2476         Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff));        /* Bits 11:0 MBZ. */
    2477         int rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
    2478         rc     |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
    2479         AssertRCReturn(rc, rc);
    2480 
    2481         fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW;                     /* CR8 reads from the Virtual-APIC page. */
    2482                                                                   /* CR8 writes cause a VM-exit based on TPR threshold. */
     3089        fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW;                /* CR8 reads from the Virtual-APIC page. */
     3090                                                             /* CR8 writes cause a VM-exit based on TPR threshold. */
    24833091        Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
    24843092        Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
     3093        int rc = hmR0VmxSetupVmcsVirtApicAddr(pVmcsInfo);
     3094        AssertRCReturn(rc, rc);
    24853095    }
    24863096    else
    24873097    {
    2488         /*
    2489          * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
    2490          * Set this control only for 64-bit guests.
    2491          */
     3098        /* Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is
     3099           invalid on 32-bit Intel CPUs. Set this control only for 64-bit guests. */
    24923100        if (pVM->hm.s.fAllow64BitGuests)
    24933101        {
    2494             fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT                  /* CR8 reads cause a VM-exit. */
    2495                  |  VMX_PROC_CTLS_CR8_LOAD_EXIT;                  /* CR8 writes cause a VM-exit. */
     3102            fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT             /* CR8 reads cause a VM-exit. */
     3103                 |  VMX_PROC_CTLS_CR8_LOAD_EXIT;             /* CR8 writes cause a VM-exit. */
    24963104        }
    24973105    }
     
    25013109    {
    25023110        fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;
    2503 
    2504         Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
    2505         Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff));       /* Bits 11:0 MBZ. */
    2506         int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
     3111        int rc = hmR0VmxSetupVmcsMsrBitmapAddr(pVmcsInfo);
    25073112        AssertRCReturn(rc, rc);
    2508 
    2509         /*
    2510          * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
    2511          * automatically using dedicated fields in the VMCS.
    2512          */
    2513         hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,  VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2514         hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2515         hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2516         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2517         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2518 #if HC_ARCH_BITS == 64
    2519         /*
    2520          * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
    2521          */
    2522         if (pVM->hm.s.fAllow64BitGuests)
    2523         {
    2524             hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR,          VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2525             hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR,           VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2526             hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2527             hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2528         }
    2529 #endif
    2530         /*
    2531          * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and has no state
    2532          * associated with then. We never need to intercept access (writes need to
    2533          * be executed without exiting, reads will #GP-fault anyway).
    2534          */
    2535         if (pVM->cpum.ro.GuestFeatures.fIbpb)
    2536             hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_PRED_CMD,     VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2537         if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
    2538             hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_FLUSH_CMD,    VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2539 
    2540         /* Though MSR_IA32_PERF_GLOBAL_CTRL is saved/restored lazily, we want intercept reads/write to it for now. */
    25413113    }
    25423114
     
    25563128    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
    25573129    AssertRCReturn(rc, rc);
    2558     pVCpu->hm.s.vmx.Ctls.u32ProcCtls = fVal;
     3130    pVmcsInfo->u32ProcCtls = fVal;
     3131
     3132    /* Set up MSR permissions that don't change through the lifetime of the VM. */
     3133    hmR0VmxSetupVmcsMsrPermissions(pVCpu, pVmcsInfo, false /* fIsNstGstVmcs */);
    25593134
    25603135    /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
    2561     if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    2562         return hmR0VmxSetupProcCtls2(pVCpu);
     3136    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     3137        return hmR0VmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
    25633138
    25643139    /* Sanity check, should not really happen. */
    2565     if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
    2566     {
    2567         LogRelFunc(("Unrestricted Guest enabled when secondary processor-based VM-execution controls not available\n"));
     3140    if (RT_LIKELY(!pVM->hm.s.vmx.fUnrestrictedGuest))
     3141    { /* likely */ }
     3142    else
     3143    {
    25683144        pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
    25693145        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     
    25763152
    25773153/**
    2578  * Sets up miscellaneous (everything other than Pin & Processor-based
    2579  * VM-execution) control fields in the VMCS.
     3154 * Sets up miscellaneous (everything other than Pin, Processor and secondary
     3155 * Processor-based VM-execution) control fields in the VMCS.
    25803156 *
    25813157 * @returns VBox status code.
    2582  * @param   pVCpu       The cross context virtual CPU structure.
    2583  */
    2584 static int hmR0VmxSetupMiscCtls(PVMCPU pVCpu)
    2585 {
    2586     AssertPtr(pVCpu);
    2587 
    2588     int rc = VERR_GENERAL_FAILURE;
    2589 
    2590     /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
    2591 #if 0
    2592     /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxExportGuestCR3AndCR4())*/
    2593     rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
    2594     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
    2595 
    2596     /*
    2597      * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
    2598      * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
    2599      * We thus use the exception bitmap to control it rather than use both.
    2600      */
    2601     rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
    2602     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
    2603 
    2604     /* All IO & IOIO instructions cause VM-exits. */
    2605     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
    2606     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
    2607 
    2608     /* Initialize the MSR-bitmap area. */
    2609     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
    2610     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
    2611     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  0);
    2612     AssertRCReturn(rc, rc);
    2613 #endif
    2614 
    2615     /* Setup MSR auto-load/store area. */
    2616     Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
    2617     Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));    /* Lower 4 bits MBZ. */
    2618     rc  = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
    2619     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
    2620     AssertRCReturn(rc, rc);
    2621 
    2622     Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
    2623     Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf));     /* Lower 4 bits MBZ. */
    2624     rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,  pVCpu->hm.s.vmx.HCPhysHostMsr);
    2625     AssertRCReturn(rc, rc);
    2626 
    2627     /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
    2628     rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
    2629     AssertRCReturn(rc, rc);
    2630 
    2631     /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
    2632 #if 0
    2633     /* Setup debug controls */
    2634     rc  = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);
    2635     rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0);
    2636     AssertRCReturn(rc, rc);
    2637 #endif
    2638 
     3158 * @param   pVCpu           The cross context virtual CPU structure.
     3159 * @param   pVmcsInfo       The VMCS info. object.
     3160 */
     3161static int hmR0VmxSetupVmcsMiscCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
     3162{
     3163    /* Set the auto-load/store MSR area addresses in the VMCS. */
     3164    int rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
     3165    if (RT_SUCCESS(rc))
     3166    {
     3167        /* Set the VMCS link pointer in the VMCS. */
     3168        rc = hmR0VmxSetupVmcsLinkPtr(pVmcsInfo);
     3169        if (RT_SUCCESS(rc))
     3170        {
     3171            /* Set the CR0/CR4 guest/host mask. */
     3172            uint64_t const u64Cr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
     3173            uint64_t const u64Cr4Mask = hmR0VmxGetFixedCr4Mask(pVCpu);
     3174            rc  = VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
     3175            rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
     3176            if (RT_SUCCESS(rc))
     3177            {
     3178                pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
     3179                pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
     3180                return VINF_SUCCESS;
     3181            }
     3182            LogRelFunc(("Failed to initialize VMCS CR0/CR4 guest/host mask. rc=%Rrc\n", rc));
     3183        }
     3184        else
     3185            LogRelFunc(("Failed to initialize VMCS link pointer. rc=%Rrc\n", rc));
     3186    }
     3187    else
     3188        LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
    26393189    return rc;
    26403190}
     
    26493199 *
    26503200 * @returns VBox status code.
    2651  * @param   pVCpu       The cross context virtual CPU structure.
    2652  */
    2653 static int hmR0VmxInitXcptBitmap(PVMCPU pVCpu)
    2654 {
    2655     AssertPtr(pVCpu);
    2656 
    2657     uint32_t uXcptBitmap;
    2658 
    2659     /* Must always intercept #AC to prevent the guest from hanging the CPU. */
    2660     uXcptBitmap = RT_BIT_32(X86_XCPT_AC);
    2661 
    2662     /* Because we need to maintain the DR6 state even when intercepting DRx reads
    2663        and writes, and because recursive #DBs can cause the CPU hang, we must always
    2664        intercept #DB. */
    2665     uXcptBitmap |= RT_BIT_32(X86_XCPT_DB);
    2666 
    2667     /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
    2668     if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
    2669         uXcptBitmap |= RT_BIT(X86_XCPT_PF);
     3201 * @param   pVCpu           The cross context virtual CPU structure.
     3202 * @param   pVmcsInfo       The VMCS info. object.
     3203 */
     3204static int hmR0VmxSetupVmcsXcptBitmap(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
     3205{
     3206    /*
     3207     * The following exceptions are always intercepted:
     3208     *
     3209     * #AC - To prevent the guest from hanging the CPU.
     3210     * #DB - To maintain the DR6 state even when intercepting DRx reads/writes and
     3211     *       recursive #DBs can cause a CPU hang.
     3212     * #PF - To sync our shadow page tables when nested-paging is not used.
     3213     */
     3214    bool const fNestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
     3215    uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
     3216                               | RT_BIT(X86_XCPT_DB)
     3217                               | (fNestedPaging ? 0 : RT_BIT(X86_XCPT_PF));
    26703218
    26713219    /* Commit it to the VMCS. */
     
    26743222
    26753223    /* Update our cache of the exception bitmap. */
    2676     pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;
     3224    pVmcsInfo->u32XcptBitmap = uXcptBitmap;
    26773225    return VINF_SUCCESS;
     3226}
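Intercepts for further exceptions are enabled at runtime by updating this same bitmap and rewriting the VMCS field. A hedged sketch of adding one more intercept, following the commit-then-cache pattern above (the VMCS field name is assumed from VBox's vmx.h):

    /* Illustrative sketch: additionally intercept #GP at runtime. */
    uint32_t const uXcptBitmap = pVmcsInfo->u32XcptBitmap | RT_BIT(X86_XCPT_GP);
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    AssertRCReturn(rc, rc);
    pVmcsInfo->u32XcptBitmap = uXcptBitmap;     /* Keep the cache in sync. */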
     3227
     3228
     3229#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     3230/**
     3231 * Sets up the VMCS for executing a nested-guest using hardware-assisted VMX.
     3232 *
     3233 * @returns VBox status code.
     3234 * @param   pVCpu           The cross context virtual CPU structure.
     3235 * @param   pVmcsInfo       The VMCS info. object.
     3236 */
     3237static int hmR0VmxSetupVmcsCtlsNested(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
     3238{
     3239    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3240    int rc = hmR0VmxSetupVmcsLinkPtr(pVmcsInfo);
     3241    if (RT_SUCCESS(rc))
     3242    {
     3243        rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
     3244        if (RT_SUCCESS(rc))
     3245        {
     3246            if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     3247                rc = hmR0VmxSetupVmcsMsrBitmapAddr(pVmcsInfo);
     3248            if (RT_SUCCESS(rc))
     3249            {
     3250                if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
     3251                    rc = hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
     3252                if (RT_SUCCESS(rc))
     3253                    return VINF_SUCCESS;
     3254
     3255                LogRelFunc(("Failed to set up the APIC-access address in the nested-guest VMCS. rc=%Rrc\n", rc));
     3256            }
     3257            else
     3258                LogRelFunc(("Failed to set up the MSR-bitmap address in the nested-guest VMCS. rc=%Rrc\n", rc));
     3259        }
     3260        else
      3261            LogRelFunc(("Failed to set up the auto-load/store MSR addresses in the nested-guest VMCS. rc=%Rrc\n", rc));
     3262    }
     3263    else
      3264        LogRelFunc(("Failed to set up the VMCS link pointer in the nested-guest VMCS. rc=%Rrc\n", rc));
     3265
     3266    return rc;
     3267}
     3268#endif
     3269
     3270
     3271/**
     3272 * Sets up the VMCS for executing a guest (or nested-guest) using hardware-assisted
     3273 * VMX.
     3274 *
     3275 * @returns VBox status code.
     3276 * @param   pVCpu           The cross context virtual CPU structure.
     3277 * @param   pVmcsInfo       The VMCS info. object.
     3278 * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
     3279 */
     3280static int hmR0VmxSetupVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
     3281{
     3282    Assert(pVmcsInfo);
     3283    Assert(pVmcsInfo->pvVmcs);
     3284    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     3285
     3286    /* Set the CPU specified revision identifier at the beginning of the VMCS structure. */
     3287    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3288    *(uint32_t *)pVmcsInfo->pvVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
     3289    const char * const pszVmcs     = fIsNstGstVmcs ? "nested-guest VMCS" : "guest VMCS";
     3290
     3291    LogFlowFunc(("\n"));
     3292
     3293    /*
     3294     * Initialize the VMCS using VMCLEAR before loading the VMCS.
     3295     * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
     3296     */
     3297    int rc = hmR0VmxClearVmcs(pVmcsInfo);
     3298    if (RT_SUCCESS(rc))
     3299    {
     3300        rc = hmR0VmxLoadVmcs(pVmcsInfo);
     3301        if (RT_SUCCESS(rc))
     3302        {
     3303            if (!fIsNstGstVmcs)
     3304            {
     3305                rc = hmR0VmxSetupVmcsPinCtls(pVCpu, pVmcsInfo);
     3306                if (RT_SUCCESS(rc))
     3307                {
     3308                    rc = hmR0VmxSetupVmcsProcCtls(pVCpu, pVmcsInfo);
     3309                    if (RT_SUCCESS(rc))
     3310                    {
     3311                        rc = hmR0VmxSetupVmcsMiscCtls(pVCpu, pVmcsInfo);
     3312                        if (RT_SUCCESS(rc))
     3313                        {
     3314                            rc = hmR0VmxSetupVmcsXcptBitmap(pVCpu, pVmcsInfo);
     3315                            if (RT_SUCCESS(rc))
     3316                            { /* likely */ }
     3317                            else
     3318                                LogRelFunc(("Failed to initialize exception bitmap. rc=%Rrc\n", rc));
     3319                        }
     3320                        else
      3321                            LogRelFunc(("Failed to set up miscellaneous controls. rc=%Rrc\n", rc));
     3322                    }
     3323                    else
      3324                        LogRelFunc(("Failed to set up processor-based VM-execution controls. rc=%Rrc\n", rc));
     3325                }
     3326                else
      3327                    LogRelFunc(("Failed to set up pin-based controls. rc=%Rrc\n", rc));
     3328            }
     3329            else
     3330            {
     3331#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     3332                rc = hmR0VmxSetupVmcsCtlsNested(pVCpu, pVmcsInfo);
     3333                if (RT_SUCCESS(rc))
     3334                { /* likely */ }
     3335                else
     3336                    LogRelFunc(("Failed to initialize nested-guest VMCS. rc=%Rrc\n", rc));
     3337#else
     3338                AssertFailed();
     3339#endif
     3340            }
     3341        }
     3342        else
      3343            LogRelFunc(("Failed to load the %s. rc=%Rrc\n", pszVmcs, rc));
     3344    }
     3345    else
      3346        LogRelFunc(("Failed to clear the %s. rc=%Rrc\n", pszVmcs, rc));
     3347
     3348    /* Sync any CPU internal VMCS data back into our VMCS in memory. */
     3349    if (RT_SUCCESS(rc))
     3350    {
     3351        rc = hmR0VmxClearVmcs(pVmcsInfo);
     3352        if (RT_SUCCESS(rc))
     3353        { /* likely */ }
     3354        else
      3355            LogRelFunc(("Failed to clear the %s post setup. rc=%Rrc\n", pszVmcs, rc));
     3356    }
     3357
     3358    /*
     3359     * Update the last-error record both for failures and success, so we
     3360     * can propagate the status code back to ring-3 for diagnostics.
     3361     */
     3362    hmR0VmxUpdateErrorRecord(pVCpu, rc);
     3363    NOREF(pszVmcs);
     3364    return rc;
     3365}
     3366
     3367
     3368/**
     3369 * Does global VT-x initialization (called during module initialization).
     3370 *
     3371 * @returns VBox status code.
     3372 */
     3373VMMR0DECL(int) VMXR0GlobalInit(void)
     3374{
     3375#ifdef HMVMX_USE_FUNCTION_TABLE
     3376    AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
     3377# ifdef VBOX_STRICT
     3378    for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
     3379        Assert(g_apfnVMExitHandlers[i]);
     3380# endif
     3381#endif
     3382    return VINF_SUCCESS;
     3383}
     3384
     3385
     3386/**
     3387 * Does global VT-x termination (called during module termination).
     3388 */
     3389VMMR0DECL(void) VMXR0GlobalTerm()
     3390{
     3391    /* Nothing to do currently. */
     3392}
     3393
     3394
     3395/**
     3396 * Sets up and activates VT-x on the current CPU.
     3397 *
     3398 * @returns VBox status code.
     3399 * @param   pHostCpu        The HM physical-CPU structure.
     3400 * @param   pVM             The cross context VM structure.  Can be
     3401 *                          NULL after a host resume operation.
     3402 * @param   pvCpuPage       Pointer to the VMXON region (can be NULL if @a
     3403 *                          fEnabledByHost is @c true).
     3404 * @param   HCPhysCpuPage   Physical address of the VMXON region (can be 0 if
     3405 *                          @a fEnabledByHost is @c true).
     3406 * @param   fEnabledByHost  Set if SUPR0EnableVTx() or similar was used to
     3407 *                          enable VT-x on the host.
     3408 * @param   pHwvirtMsrs     Pointer to the hardware-virtualization MSRs.
     3409 */
     3410VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
     3411                              PCSUPHWVIRTMSRS pHwvirtMsrs)
     3412{
     3413    Assert(pHostCpu);
     3414    Assert(pHwvirtMsrs);
     3415    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     3416
     3417    /* Enable VT-x if it's not already enabled by the host. */
     3418    if (!fEnabledByHost)
     3419    {
     3420        int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
     3421        if (RT_FAILURE(rc))
     3422            return rc;
     3423    }
     3424
     3425    /*
      3426     * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been
     3427     * using EPTPs) so we don't retain any stale guest-physical mappings which won't get
     3428     * invalidated when flushing by VPID.
     3429     */
     3430    if (pHwvirtMsrs->u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
     3431    {
     3432        hmR0VmxFlushEpt(NULL /* pVCpu */, NULL /* pVmcsInfo */, VMXTLBFLUSHEPT_ALL_CONTEXTS);
     3433        pHostCpu->fFlushAsidBeforeUse = false;
     3434    }
     3435    else
     3436        pHostCpu->fFlushAsidBeforeUse = true;
     3437
     3438    /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
     3439    ++pHostCpu->cTlbFlushes;
     3440
     3441    return VINF_SUCCESS;
     3442}
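The all-contexts INVEPT issued above takes a 16-byte descriptor even though the EPTP field is ignored for that flush type (Intel SDM 30.3, INVEPT); reserved bits must still be zero. A sketch of the descriptor layout, with an invented struct name:

    /* Illustrative sketch of the INVEPT descriptor; the struct name is
       invented. Single-context flushes use uEptp, all-contexts ignores it. */
    typedef struct SKETCHINVEPTDESC
    {
        uint64_t uEptp;        /* EPT pointer (single-context type only). */
        uint64_t uReserved;    /* Must be zero. */
    } SKETCHINVEPTDESC;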
     3443
     3444
     3445/**
     3446 * Deactivates VT-x on the current CPU.
     3447 *
     3448 * @returns VBox status code.
     3449 * @param   pvCpuPage       Pointer to the VMXON region.
     3450 * @param   HCPhysCpuPage   Physical address of the VMXON region.
     3451 *
     3452 * @remarks This function should never be called when SUPR0EnableVTx() or
     3453 *          similar was used to enable VT-x on the host.
     3454 */
     3455VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     3456{
     3457    RT_NOREF2(pvCpuPage, HCPhysCpuPage);
     3458
     3459    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     3460    return hmR0VmxLeaveRootMode();
    26783461}
    26793462
     
    26923475    if (RT_FAILURE(rc))
    26933476    {
    2694         LogRelFunc(("hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
      3477        LogRelFunc(("Failed to allocate VMX structures. rc=%Rrc\n", rc));
    26953478        return rc;
    26963479    }
     
    27123495#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    27133496    if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
    2714         ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
     3497    {
     3498        Assert(pVM->hm.s.vmx.pvScratch);
     3499        ASMMemZero32(pVM->hm.s.vmx.pvScratch, X86_PAGE_4K_SIZE);
     3500    }
    27153501#endif
    27163502    hmR0VmxStructsFree(pVM);
     
    27203506
    27213507/**
    2722  * Sets up the VM for execution under VT-x.
     3508 * Sets up the VM for execution using hardware-assisted VMX.
    27233509 * This function is only called once per-VM during initialization.
    27243510 *
     
    27343520
    27353521    /*
    2736      * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be
    2737      * allocated. We no longer support the highly unlikely case of UnrestrictedGuest without
    2738      * pRealModeTSS, see hmR3InitFinalizeR0Intel().
      3522     * At least verify that VMX is enabled, since we can't check whether we're in
      3523     * VMX root mode without causing a #GP.
     3524     */
     3525    RTCCUINTREG const uHostCR4 = ASMGetCR4();
     3526    if (RT_LIKELY(uHostCR4 & X86_CR4_VMXE))
     3527    { /* likely */ }
     3528    else
     3529        return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
     3530
     3531    /*
     3532     * Without unrestricted guest execution, pRealModeTSS and pNonPagingModeEPTPageTable *must*
     3533     * always be allocated. We no longer support the highly unlikely case of unrestricted guest
     3534     * without pRealModeTSS, see hmR3InitFinalizeR0Intel().
    27393535     */
    27403536    if (   !pVM->hm.s.vmx.fUnrestrictedGuest
     
    27643560        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_LOAD_EFER_MSR)
    27653561        && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1  & VMX_EXIT_CTLS_SAVE_EFER_MSR))
    2766     {
    27673562        pVM->hm.s.vmx.fSupportsVmcsEfer = true;
    2768     }
    27693563#endif
    27703564
    2771     /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
    2772     RTCCUINTREG const uHostCR4 = ASMGetCR4();
    2773     if (RT_UNLIKELY(!(uHostCR4 & X86_CR4_VMXE)))
    2774         return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
    2775 
    2776     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    2777     {
    2778         PVMCPU pVCpu = &pVM->aCpus[i];
    2779         AssertPtr(pVCpu);
    2780         AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
    2781 
    2782         /* Log the VCPU pointers, useful for debugging SMP VMs. */
     3565    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     3566    {
     3567        PVMCPU pVCpu = &pVM->aCpus[idCpu];
    27833568        Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
    27843569
    2785         /* Set revision dword at the beginning of the VMCS structure. */
    2786         *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
    2787 
    2788         /* Set the VMCS launch state to "clear", see Intel spec. 31.6 "Preparation and launch a virtual machine". */
    2789         rc  = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    2790         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc\n", rc),
    2791                                     hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    2792 
    2793         /* Load this VMCS as the current VMCS. */
    2794         rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    2795         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc\n", rc),
    2796                                     hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    2797 
    2798         rc = hmR0VmxSetupPinCtls(pVCpu);
    2799         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc\n", rc),
    2800                                     hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    2801 
    2802         rc = hmR0VmxSetupProcCtls(pVCpu);
    2803         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc\n", rc),
    2804                                     hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    2805 
    2806         rc = hmR0VmxSetupMiscCtls(pVCpu);
    2807         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc\n", rc),
    2808                                     hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    2809 
    2810         rc = hmR0VmxInitXcptBitmap(pVCpu);
    2811         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc\n", rc),
    2812                                     hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    2813 
     3570        rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hm.s.vmx.VmcsInfo,  false /* fIsNstGstVmcs */);
     3571        if (RT_SUCCESS(rc))
     3572        {
    28143573#if HC_ARCH_BITS == 32
    2815         rc = hmR0VmxInitVmcsReadCache(pVCpu);
    2816         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc\n", rc),
    2817                                     hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
     3574            hmR0VmxInitVmcsReadCache(pVCpu);
    28183575#endif
    2819 
    2820         /* Sync any CPU internal VMCS data back into our VMCS in memory. */
    2821         rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    2822         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc\n", rc),
    2823                                     hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
    2824 
    2825         pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
    2826 
    2827         hmR0VmxUpdateErrorRecord(pVCpu, rc);
     3576#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     3577            if (pVM->cpum.ro.GuestFeatures.fVmx)
     3578            {
     3579                rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
     3580                if (RT_SUCCESS(rc))
     3581                { /* likely */ }
     3582                else
     3583                {
     3584                    LogRelFunc(("Nested-guest VMCS setup failed. rc=%Rrc\n", rc));
     3585                    return rc;
     3586                }
     3587            }
     3588#endif
     3589        }
     3590        else
     3591        {
     3592            LogRelFunc(("VMCS setup failed. rc=%Rrc\n", rc));
     3593            return rc;
     3594        }
    28283595    }
    28293596
    28303597    return VINF_SUCCESS;
    28313598}
     3599
     3600
     3601#if HC_ARCH_BITS == 32
     3602# ifdef VBOX_ENABLE_64_BITS_GUESTS
     3603/**
     3604 * Check if guest state allows safe use of 32-bit switcher again.
     3605 *
     3606 * Segment bases and protected mode structures must be 32-bit addressable
      3607 * because the 32-bit switcher will ignore the high dword when writing these VMCS
     3608 * fields.  See @bugref{8432} for details.
     3609 *
     3610 * @returns true if safe, false if must continue to use the 64-bit switcher.
     3611 * @param   pCtx   Pointer to the guest-CPU context.
     3612 *
     3613 * @remarks No-long-jump zone!!!
     3614 */
     3615static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pCtx)
     3616{
     3617    if (pCtx->gdtr.pGdt    & UINT64_C(0xffffffff00000000))     return false;
     3618    if (pCtx->idtr.pIdt    & UINT64_C(0xffffffff00000000))     return false;
     3619    if (pCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000))     return false;
     3620    if (pCtx->tr.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     3621    if (pCtx->es.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     3622    if (pCtx->cs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     3623    if (pCtx->ss.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     3624    if (pCtx->ds.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     3625    if (pCtx->fs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     3626    if (pCtx->gs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     3627
     3628    /* All good, bases are 32-bit. */
     3629    return true;
     3630}
     3631# endif
     3632
     3633/**
     3634 * Executes the specified handler in 64-bit mode.
     3635 *
     3636 * @returns VBox status code (no informational status codes).
     3637 * @param   pVCpu       The cross context virtual CPU structure.
     3638 * @param   enmOp       The operation to perform.
     3639 * @param   cParams     Number of parameters.
     3640 * @param   paParam     Array of 32-bit parameters.
     3641 */
     3642VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
     3643{
     3644    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3645    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
     3646    Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
     3647    Assert(pVCpu->hm.s.vmx.VmcsCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsCache.Write.aField));
     3648    Assert(pVCpu->hm.s.vmx.VmcsCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsCache.Read.aField));
     3649
     3650#ifdef VBOX_STRICT
     3651    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsCache.Write.cValidEntries; i++)
     3652        Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VmcsCache.Write.aField[i]));
     3653
      3654    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsCache.Read.cValidEntries; i++)
     3655        Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VmcsCache.Read.aField[i]));
     3656#endif
     3657
     3658    /* Disable interrupts. */
     3659    RTCCUINTREG fOldEFlags = ASMIntDisableFlags();
     3660
     3661#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
     3662    RTCPUID idHostCpu = RTMpCpuId();
     3663    CPUMR0SetLApic(pVCpu, idHostCpu);
     3664#endif
     3665
     3666    /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
     3667
     3668    PCHMPHYSCPU    pHostCpu      = hmR0GetCurrentCpu();
     3669    RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;
     3670
      3671    /* Clear the VMCS, marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
     3672    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     3673    hmR0VmxClearVmcs(pVmcsInfo);
     3674
     3675    /* Leave VMX root mode and disable VMX. */
     3676    VMXDisable();
     3677    SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
     3678
     3679    CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
     3680    CPUMSetHyperEIP(pVCpu, enmOp);
     3681    for (int i = (int)cParams - 1; i >= 0; i--)
     3682        CPUMPushHyper(pVCpu, paParam[i]);
     3683
     3684    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
     3685
     3686    /* Call the switcher. */
     3687    int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_UOFFSETOF_DYN(VM, aCpus[pVCpu->idCpu].cpum) - RT_UOFFSETOF(VM, cpum));
     3688    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
     3689
     3690    /* Re-enable VMX to make sure the VMX instructions don't cause #UD faults. */
     3691    SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
     3692
     3693    /* Re-enter VMX root mode. */
     3694    int rc2 = VMXEnable(HCPhysCpuPage);
     3695    if (RT_FAILURE(rc2))
     3696    {
     3697        SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
     3698        ASMSetFlags(fOldEFlags);
     3699        pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
     3700        return rc2;
     3701    }
     3702
     3703    /* Restore the VMCS as the current VMCS. */
     3704    rc2 = hmR0VmxLoadVmcs(pVmcsInfo);
     3705    AssertRC(rc2);
     3706    Assert(!(ASMGetFlags() & X86_EFL_IF));
     3707    ASMSetFlags(fOldEFlags);
     3708    return rc;
     3709}
     3710
     3711
     3712/**
     3713 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
     3714 * supporting 64-bit guests.
     3715 *
     3716 * @returns VBox status code.
     3717 * @param   fResume     Whether to VMLAUNCH or VMRESUME.
     3718 * @param   pCtx        Pointer to the guest-CPU context.
     3719 * @param   pCache      Pointer to the VMCS batch cache.
     3720 * @param   pVM         The cross context VM structure.
     3721 * @param   pVCpu       The cross context virtual CPU structure.
     3722 */
     3723DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
     3724{
     3725    NOREF(fResume);
     3726
     3727    PCHMPHYSCPU    pHostCpu      = hmR0GetCurrentCpu();
     3728    RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;
     3729
     3730#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     3731    pCache->uPos = 1;
     3732    pCache->interPD = PGMGetInterPaeCR3(pVM);
     3733    pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
     3734#endif
     3735
     3736#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
     3737    pCache->TestIn.HCPhysCpuPage = 0;
     3738    pCache->TestIn.HCPhysVmcs    = 0;
     3739    pCache->TestIn.pCache        = 0;
     3740    pCache->TestOut.HCPhysVmcs   = 0;
     3741    pCache->TestOut.pCache       = 0;
     3742    pCache->TestOut.pCtx         = 0;
     3743    pCache->TestOut.eflags       = 0;
     3744#else
     3745    NOREF(pCache);
     3746#endif
     3747
     3748    uint32_t aParam[10];
     3749    aParam[0] = RT_LO_U32(HCPhysCpuPage);                               /* Param 1: VMXON physical address - Lo. */
     3750    aParam[1] = RT_HI_U32(HCPhysCpuPage);                               /* Param 1: VMXON physical address - Hi. */
     3751    aParam[2] = RT_LO_U32(pVCpu->hm.s.vmx.HCPhysVmcs);                  /* Param 2: VMCS physical address - Lo. */
     3752    aParam[3] = RT_HI_U32(pVCpu->hm.s.vmx.HCPhysVmcs);                  /* Param 2: VMCS physical address - Hi. */
     3753    aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache);
     3754    aParam[5] = 0;
     3755    aParam[6] = VM_RC_ADDR(pVM, pVM);
     3756    aParam[7] = 0;
     3757    aParam[8] = VM_RC_ADDR(pVM, pVCpu);
     3758    aParam[9] = 0;
     3759
     3760#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     3761    pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
     3762    *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
     3763#endif
     3764    int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
     3765
     3766#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     3767    Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
     3768    Assert(pCtx->dr[4] == 10);
     3769    *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
     3770#endif
     3771
     3772#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
     3773    AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
     3774    AssertMsg(pCache->TestIn.HCPhysVmcs    == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
     3775                                                                           pVCpu->hm.s.vmx.HCPhysVmcs));
     3776    AssertMsg(pCache->TestIn.HCPhysVmcs    == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
     3777                                                                           pCache->TestOut.HCPhysVmcs));
     3778    AssertMsg(pCache->TestIn.pCache        == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
     3779                                                                       pCache->TestOut.pCache));
     3780    AssertMsg(pCache->TestIn.pCache        == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache),
     3781              ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache)));
     3782    AssertMsg(pCache->TestIn.pCtx          == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
     3783                                                                     pCache->TestOut.pCtx));
     3784    Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
     3785#endif
     3786    NOREF(pCtx);
     3787    return rc;
     3788}
     3789#endif
    28323790
    28333791
     
    28873845
    28883846    /*
    2889      * If we've executed guest code using VT-x, the host-state bits will be messed up. We
    2890      * should -not- save the messed up state without restoring the original host-state,
    2891      * see @bugref{7240}.
     3847     * If we've executed guest code using hardware-assisted VMX, the host-state bits
     3848     * will be messed up. We should -not- save the messed up state without restoring
     3849     * the original host-state, see @bugref{7240}.
    28923850     *
    28933851     * This apparently can happen (most likely the FPU changes), deal with it rather than
     
    29563914    Assert(uSelCS);
    29573915    Assert(uSelTR);
    2958 
    2959     /* Assertion is right but we would not have updated u32ExitCtls yet. */
    2960 #if 0
    2961     if (!(pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE))
    2962         Assert(uSelSS != 0);
    2963 #endif
    29643916
    29653917    /* Write these host selector fields into the host-state area in the VMCS. */
     
    30323984    PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
    30333985#if HC_ARCH_BITS == 64
    3034     uintptr_t uTRBase = X86DESC64_BASE(pDesc);
     3986    uintptr_t const uTRBase = X86DESC64_BASE(pDesc);
    30353987
    30363988    /*
     
    30734025    }
    30744026#else
    3075     uintptr_t uTRBase = X86DESC_BASE(pDesc);
     4027    uintptr_t const uTRBase = X86DESC_BASE(pDesc);
    30764028#endif
    30774029    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
     
    30824034     */
    30834035#if HC_ARCH_BITS == 64
    3084     uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
    3085     uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
     4036    uint64_t const u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
     4037    uint64_t const u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
    30864038    rc  = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
    30874039    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
     
    31024054 * host-state area of the VMCS.
    31034055 *
    3104  * Theses MSRs will be automatically restored on the host after every successful
     4056 * These MSRs will be automatically restored on the host after every successful
    31054057 * VM-exit.
    31064058 *
     
    31134065{
    31144066    AssertPtr(pVCpu);
    3115     AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
    31164067
    31174068    /*
     
    31264077    int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
    31274078#if HC_ARCH_BITS == 32
    3128     rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
    3129     rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     4079    rc    |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     4080    rc    |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
    31304081#else
    3131     rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
    3132     rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
     4082    rc    |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
     4083    rc    |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
    31334084#endif
    31344085    AssertRCReturn(rc, rc);
     
    31434094    if (pVM->hm.s.vmx.fSupportsVmcsEfer)
    31444095    {
    3145         rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
     4096        rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64HostMsrEfer);
    31464097        AssertRCReturn(rc, rc);
    31474098    }
    31484099
    3149     /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see hmR0VmxExportGuestExitCtls(). */
     4100    /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
     4101     *        hmR0VmxExportGuestEntryExitCtls(). */
    31504102
    31514103    return VINF_SUCCESS;
     
    31574109 *
    31584110 * We check all relevant bits. For now, that's everything besides LMA/LME, as
    3159  * these two bits are handled by VM-entry, see hmR0VmxExportGuestExitCtls() and
    3160  * hmR0VMxExportGuestEntryCtls().
     4111 * these two bits are handled by VM-entry, see hmR0VMxExportGuestEntryExitCtls().
    31614112 *
    31624113 * @returns true if we need to load guest EFER, false otherwise.
     
    31724123    return true;
    31734124#else
    3174 
    31754125    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    31764126#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    3177     /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
     4127    /* For 32-bit hosts running 64-bit guests, we always swap EFER MSR in the world-switcher. Nothing to do here. */
    31784128    if (CPUMIsGuestInLongModeEx(pCtx))
    31794129        return false;
     
    31814131
    31824132    PVM pVM = pVCpu->CTX_SUFF(pVM);
    3183     uint64_t const u64HostEfer  = pVM->hm.s.vmx.u64HostEfer;
     4133    uint64_t const u64HostEfer  = pVM->hm.s.vmx.u64HostMsrEfer;
    31844134    uint64_t const u64GuestEfer = pCtx->msrEFER;
    31854135
    31864136    /*
    3187      * For 64-bit guests, if EFER.SCE bit differs, we need to swap EFER to ensure that the
    3188      * guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
     4137     * For 64-bit guests, if EFER.SCE bit differs, we need to swap the EFER MSR
     4138     * to ensure that the guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
    31894139     */
    31904140    if (   CPUMIsGuestInLongModeEx(pCtx)
    31914141        && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
    3192     {
    31934142        return true;
    3194     }
    3195 
    3196     /*
    3197      * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
    3198      * affects guest paging. 64-bit paging implies CR4.PAE as well.
    3199      * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
    3200      */
     4143
     4144    /*
     4145     * If the guest uses PAE and EFER.NXE bit differs, we need to swap the EFER MSR
     4146     * as it affects guest paging. 64-bit paging implies CR4.PAE as well.
     4147     *
     4148     * See Intel spec. 4.5 "IA-32e Paging".
     4149     * See Intel spec. 4.1.1 "Three Paging Modes".
     4150     *
     4151     * Verify that we always intercept CR4.PAE and CR0.PG bits, so we don't need to
     4152     * import CR4 and CR0 from the VMCS here as those bits are always up to date.
     4153     */
     4154    Assert(hmR0VmxGetFixedCr4Mask(pVCpu) & X86_CR4_PAE);
     4155    Assert(hmR0VmxGetFixedCr0Mask(pVCpu) & X86_CR0_PG);
    32014156    if (   (pCtx->cr4 & X86_CR4_PAE)
    32024157        && (pCtx->cr0 & X86_CR0_PG)
     
    32124167}
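A compact restatement of the two swap conditions above, as a hedged sketch; the helper name is invented, while the EFER bit names are IPRT's:

    /* Illustrative sketch (name invented): when must the guest EFER be
       loaded, per the SCE and NXE reasoning above? */
    static bool sketchShouldSwapEfer(bool fLongMode, bool fPae, bool fPg,
                                     uint64_t uHostEfer, uint64_t uGuestEfer)
    {
        /* 64-bit guest with a differing SYSCALL-enable (SCE) bit. */
        if (fLongMode && ((uHostEfer ^ uGuestEfer) & MSR_K6_EFER_SCE))
            return true;
        /* PAE paging with a differing no-execute (NXE) bit. */
        if (fPae && fPg && ((uHostEfer ^ uGuestEfer) & MSR_K6_EFER_NXE))
            return true;
        return false;
    }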
    32134168
    3214 
    3215 /**
    3216  * Exports the guest state with appropriate VM-entry controls in the VMCS.
    3217  *
    3218  * These controls can affect things done on VM-exit; e.g. "load debug controls",
    3219  * see Intel spec. 24.8.1 "VM-entry controls".
     4169/**
     4170 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
     4171 * VMCS.
     4172 *
     4173 * This is typically required when the guest changes paging mode.
    32204174 *
    32214175 * @returns VBox status code.
    3222  * @param   pVCpu       The cross context virtual CPU structure.
     4176 * @param   pVCpu           The cross context virtual CPU structure.
     4177 * @param   pVmxTransient   The VMX-transient structure.
    32234178 *
    32244179 * @remarks Requires EFER.
    32254180 * @remarks No-long-jump zone!!!
    32264181 */
    3227 static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu)
    3228 {
    3229     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS)
    3230     {
    3231         PVM pVM = pVCpu->CTX_SUFF(pVM);
    3232         uint32_t       fVal = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0;    /* Bits set here must be set in the VMCS. */
    3233         uint32_t const fZap = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
    3234 
    3235         /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */
    3236         fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
    3237 
    3238         /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
    3239         if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
    3240         {
    3241             fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
    3242             Log4Func(("VMX_ENTRY_CTLS_IA32E_MODE_GUEST\n"));
    3243         }
    3244         else
    3245             Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
    3246 
    3247         /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
    3248         if (   pVM->hm.s.vmx.fSupportsVmcsEfer
    3249             && hmR0VmxShouldSwapEferMsr(pVCpu))
    3250         {
    3251             fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
    3252             Log4Func(("VMX_ENTRY_CTLS_LOAD_EFER_MSR\n"));
    3253         }
     4182static int hmR0VmxExportGuestEntryExitCtls(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     4183{
     4184    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
     4185    {
     4186        PVM          pVM = pVCpu->CTX_SUFF(pVM);
     4187        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    32544188
    32554189        /*
    3256          * The following should -not- be set (since we're not in SMM mode):
    3257          * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
    3258          * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
     4190         * VM-entry controls.
    32594191         */
    3260 
    3261         /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
    3262          *        VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
    3263 
    3264         if ((fVal & fZap) != fVal)
    3265         {
    3266             Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    3267                       pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0, fVal, fZap));
    3268             pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
    3269             return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    3270         }
    3271 
    3272         /* Commit it to the VMCS and update our cache. */
    3273         if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls != fVal)
    3274         {
    3275             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
    3276             AssertRCReturn(rc, rc);
    3277             pVCpu->hm.s.vmx.Ctls.u32EntryCtls = fVal;
    3278         }
    3279 
    3280         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS);
    3281     }
    3282     return VINF_SUCCESS;
    3283 }
    3284 
    3285 
    3286 /**
    3287  * Exports the guest state with appropriate VM-exit controls in the VMCS.
    3288  *
    3289  * @returns VBox status code.
    3290  * @param   pVCpu       The cross context virtual CPU structure.
    3291  *
    3292  * @remarks Requires EFER.
    3293  */
    3294 static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu)
    3295 {
    3296     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS)
    3297     {
    3298         PVM pVM = pVCpu->CTX_SUFF(pVM);
    3299         uint32_t       fVal = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0;     /* Bits set here must be set in the VMCS. */
    3300         uint32_t const fZap = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
    3301 
    3302         /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
    3303         fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
     4192        {
     4193            uint32_t       fVal = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0;    /* Bits set here must be set in the VMCS. */
     4194            uint32_t const fZap = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
     4195
     4196            /*
     4197             * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
     4198             * The first VT-x capable CPUs only supported the 1-setting of this bit.
     4199             *
     4200             * For nested-guests, this is a mandatory VM-entry control. It's also
     4201             * required because we do not want to leak host bits to the nested-guest.
     4202             */
     4203            fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
     4204
     4205            /*
     4206             * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
     4207             *
      4208             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
      4209             * required to get the nested-guest working with hardware-assisted VMX execution.
      4210             * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested-hypervisor
      4211             * can skip intercepting changes to the EFER MSR. This is why it needs to be done
      4212             * here rather than while merging the guest VMCS controls.
     4213             */
     4214            if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
     4215                fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
     4216            else
     4217                Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
     4218
     4219            /*
     4220             * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
     4221             *
      4222             * For nested-guests, we use the "load IA32_EFER" control if the hardware supports it,
      4223             * regardless of whether the nested-guest VMCS specifies it, because we are free to
      4224             * load whatever MSRs we require and we do not need to modify the guest-visible copy
      4225             * of the VM-entry MSR load area.
     4226             */
     4227            if (   pVM->hm.s.vmx.fSupportsVmcsEfer
     4228                && hmR0VmxShouldSwapEferMsr(pVCpu))
     4229                fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
     4230            else
     4231                Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
     4232
     4233            /*
     4234             * The following should -not- be set (since we're not in SMM mode):
     4235             * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
     4236             * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
     4237             */
     4238
     4239            /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
     4240             *        VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
     4241
     4242            if ((fVal & fZap) == fVal)
     4243            { /* likely */ }
     4244            else
     4245            {
     4246                Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
     4247                          pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0, fVal, fZap));
     4248                pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
     4249                return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     4250            }
     4251
     4252            /* Commit it to the VMCS. */
     4253            if (pVmcsInfo->u32EntryCtls != fVal)
     4254            {
     4255                int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
     4256                AssertRCReturn(rc, rc);
     4257                pVmcsInfo->u32EntryCtls = fVal;
     4258            }
     4259        }
    33044260
    33054261        /*
    3306          * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
    3307          * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in
    3308          * hmR0VmxExportHostMsrs().
     4262         * VM-exit controls.
    33094263         */
     4264        {
     4265            uint32_t       fVal = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0;     /* Bits set here must be set in the VMCS. */
     4266            uint32_t const fZap = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
     4267
     4268            /*
     4269             * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
     4270             * supported the 1-setting of this bit.
     4271             *
      4272             * For nested-guests, we set "save debug controls" since the converse
      4273             * "load debug controls" is mandatory for nested-guests anyway.
     4274             */
     4275            fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
     4276
     4277            /*
     4278             * Set the host long mode active (EFER.LMA) bit (which Intel calls
     4279             * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
     4280             * host EFER.LMA and EFER.LME bit to this value. See assertion in
     4281             * hmR0VmxExportHostMsrs().
     4282             *
     4283             * For nested-guests, we always set this bit as we do not support 32-bit
     4284             * hosts.
     4285             */
    33104286#if HC_ARCH_BITS == 64
    3311         fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
    3312         Log4Func(("VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE\n"));
     4287            fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
    33134288#else
    3314         Assert(   pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64
    3315                || pVCpu->hm.s.vmx.pfnStartVM == VMXR0StartVM32);
    3316         /* Set the host address-space size based on the switcher, not guest state. See @bugref{8432}. */
    3317         if (pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64)
    3318         {
    3319             /* The switcher returns to long mode, EFER is managed by the switcher. */
    3320             fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
    3321             Log4Func(("VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE\n"));
    3322         }
    3323         else
    3324             Assert(!(fVal & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE));
     4289            Assert(!pVmxTransient->fIsNestedGuest);
     4290            Assert(   pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64
     4291                   || pVmcsInfo->pfnStartVM == VMXR0StartVM32);
     4292            /* Set the host address-space size based on the switcher, not guest state. See @bugref{8432}. */
     4293            if (pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64)
     4294            {
     4295                /* The switcher returns to long mode, the EFER MSR is managed by the switcher. */
     4296                fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
     4297            }
     4298            else
     4299                Assert(!(fVal & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE));
    33254300#endif
    33264301
    3327         /* If the newer VMCS fields for managing EFER exists, use it. */
    3328         if (   pVM->hm.s.vmx.fSupportsVmcsEfer
    3329             && hmR0VmxShouldSwapEferMsr(pVCpu))
    3330         {
    3331             fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
    3332                   | VMX_EXIT_CTLS_LOAD_EFER_MSR;
    3333             Log4Func(("VMX_EXIT_CTLS_SAVE_EFER_MSR and VMX_EXIT_CTLS_LOAD_EFER_MSR\n"));
    3334         }
    3335 
    3336         /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
    3337         Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
    3338 
    3339         /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
    3340          *        VMX_EXIT_CTLS_SAVE_PAT_MSR,
    3341          *        VMX_EXIT_CTLS_LOAD_PAT_MSR. */
    3342 
    3343         /* Enable saving of the VMX preemption timer value on VM-exit. */
    3344         if (    pVM->hm.s.vmx.fUsePreemptTimer
    3345             && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
    3346             fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
    3347 
    3348         if ((fVal & fZap) != fVal)
    3349         {
    3350             LogRelFunc(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%R#X32\n",
    3351                         pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0, fVal, fZap));
    3352             pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
    3353             return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    3354         }
    3355 
    3356         /* Commit it to the VMCS and update our cache. */
    3357         if (pVCpu->hm.s.vmx.Ctls.u32ExitCtls != fVal)
    3358         {
    3359             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
    3360             AssertRCReturn(rc, rc);
    3361             pVCpu->hm.s.vmx.Ctls.u32ExitCtls = fVal;
    3362         }
    3363 
    3364         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS);
     4302            /*
      4303             * If the VMCS EFER MSR fields are supported by the hardware, we use them.
     4304             *
     4305             * For nested-guests, we should use the "save IA32_EFER" control if we also
     4306             * used the "load IA32_EFER" control while exporting VM-entry controls.
     4307             */
     4308            if (   pVM->hm.s.vmx.fSupportsVmcsEfer
     4309                && hmR0VmxShouldSwapEferMsr(pVCpu))
     4310            {
     4311                fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
     4312                     |  VMX_EXIT_CTLS_LOAD_EFER_MSR;
     4313            }
     4314
     4315            /*
     4316             * Enable saving of the VMX-preemption timer value on VM-exit.
     4317             * For nested-guests, currently not exposed/used.
     4318             */
     4319            if (    pVM->hm.s.vmx.fUsePreemptTimer
     4320                && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
     4321                fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
     4322
     4323            /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
     4324            Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
     4325
     4326            /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
     4327             *        VMX_EXIT_CTLS_SAVE_PAT_MSR,
     4328             *        VMX_EXIT_CTLS_LOAD_PAT_MSR. */
     4329
     4330            if ((fVal & fZap) == fVal)
     4331            { /* likely */ }
     4332            else
     4333            {
      4334                Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
     4335                          pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0, fVal, fZap));
     4336                pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
     4337                return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     4338            }
     4339
     4340            /* Commit it to the VMCS. */
     4341            if (pVmcsInfo->u32ExitCtls != fVal)
     4342            {
     4343                int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
     4344                AssertRCReturn(rc, rc);
     4345                pVmcsInfo->u32ExitCtls = fVal;
     4346            }
     4347        }
     4348
     4349        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
    33654350    }
    33664351    return VINF_SUCCESS;
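
The fVal/fZap pattern used for both control fields above can be exercised on its own. A minimal sketch of the consistency check, assuming allowed0 holds the must-be-one bits and allowed1 the may-be-one bits (names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical sketch of the VMX control sanity check: start from the
       mandatory bits (allowed0), OR in the desired optional bits, then verify
       nothing outside the permitted mask (allowed1) was requested. */
    static bool buildVmxCtl(uint32_t fAllowed0, uint32_t fAllowed1,
                            uint32_t fDesired, uint32_t *pfCtl)
    {
        uint32_t const fVal = fAllowed0 | fDesired;  /* must-be-one bits + requested bits */
        uint32_t const fZap = fAllowed1;             /* bits that may be one */
        if ((fVal & fZap) != fVal)                   /* requested a bit the CPU disallows */
            return false;
        *pfCtl = fVal;
        return true;
    }
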
     
    33734358 * @returns VBox status code.
    33744359 * @param   pVCpu               The cross context virtual CPU structure.
     4360 * @param   pVmcsInfo           The VMCS info. object.
    33754361 * @param   u32TprThreshold     The TPR threshold (task-priority class only).
    33764362 */
    3377 DECLINLINE(int) hmR0VmxApicSetTprThreshold(PVMCPU pVCpu, uint32_t u32TprThreshold)
     4363DECLINLINE(int) hmR0VmxApicSetTprThreshold(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
    33784364{
    33794365    Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK));         /* Bits 31:4 MBZ. */
    3380     Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); RT_NOREF_PV(pVCpu);
     4366    Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
     4367    RT_NOREF2(pVCpu, pVmcsInfo);
    33814368    return VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
    33824369}
     
    33874374 *
    33884375 * @returns VBox status code.
    3389  * @param   pVCpu       The cross context virtual CPU structure.
     4376 * @param   pVCpu           The cross context virtual CPU structure.
     4377 * @param   pVmxTransient   The VMX-transient structure.
    33904378 *
    33914379 * @remarks No-long-jump zone!!!
    33924380 */
    3393 static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu)
     4381static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    33944382{
    33954383    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
     
    33974385        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
    33984386
    3399         if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
    3400             && APICIsEnabled(pVCpu))
    3401         {
    3402             /*
    3403              * Setup TPR shadowing.
    3404              */
    3405             if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
     4387        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     4388        if (!pVmxTransient->fIsNestedGuest)
     4389        {
     4390            if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
     4391                && APICIsEnabled(pVCpu))
    34064392            {
    3407                 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
    3408 
    3409                 bool    fPendingIntr  = false;
    3410                 uint8_t u8Tpr         = 0;
    3411                 uint8_t u8PendingIntr = 0;
    3412                 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
    3413                 AssertRCReturn(rc, rc);
    3414 
    34154393                /*
    3416                  * If there are interrupts pending but masked by the TPR, instruct VT-x to
    3417                  * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
    3418                  * priority of the pending interrupt so we can deliver the interrupt. If there
    3419                  * are no interrupts pending, set threshold to 0 to not cause any
    3420                  * TPR-below-threshold VM-exits.
     4394                 * Setup TPR shadowing.
    34214395                 */
    3422                 pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR] = u8Tpr;
    3423                 uint32_t u32TprThreshold = 0;
    3424                 if (fPendingIntr)
     4396                if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    34254397                {
    3426                     /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
    3427                     const uint8_t u8PendingPriority = u8PendingIntr >> 4;
    3428                     const uint8_t u8TprPriority     = u8Tpr >> 4;
    3429                     if (u8PendingPriority <= u8TprPriority)
    3430                         u32TprThreshold = u8PendingPriority;
     4398                    bool    fPendingIntr  = false;
     4399                    uint8_t u8Tpr         = 0;
     4400                    uint8_t u8PendingIntr = 0;
     4401                    int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
     4402                    AssertRCReturn(rc, rc);
     4403
     4404                    /*
     4405                     * If there are interrupts pending but masked by the TPR, instruct VT-x to
     4406                     * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
     4407                     * priority of the pending interrupt so we can deliver the interrupt. If there
     4408                     * are no interrupts pending, set threshold to 0 to not cause any
     4409                     * TPR-below-threshold VM-exits.
     4410                     */
     4411                    Assert(pVmcsInfo->pbVirtApic);
     4412                    pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR] = u8Tpr;
     4413                    uint32_t u32TprThreshold = 0;
     4414                    if (fPendingIntr)
     4415                    {
     4416                        /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
     4417                           (which is the Task-Priority Class). */
     4418                        const uint8_t u8PendingPriority = u8PendingIntr >> 4;
     4419                        const uint8_t u8TprPriority     = u8Tpr >> 4;
     4420                        if (u8PendingPriority <= u8TprPriority)
     4421                            u32TprThreshold = u8PendingPriority;
     4422                    }
     4423
     4424                    rc = hmR0VmxApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
     4425                    AssertRCReturn(rc, rc);
    34314426                }
    3432 
    3433                 rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
    3434                 AssertRCReturn(rc, rc);
    34354427            }
    34364428        }
     4429        /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
    34374430        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
    34384431    }
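
The threshold computation above works purely on task-priority classes (bits 7:4 of the TPR). A standalone sketch of that calculation (illustrative names):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical sketch: pick a TPR threshold so a TPR-below-threshold VM-exit
       fires when the guest lowers its TPR class below that of the pending
       interrupt; 0 means no such VM-exits. Bits 31:4 of the result stay zero. */
    static uint32_t computeTprThreshold(uint8_t u8Tpr, bool fPendingIntr, uint8_t u8PendingIntr)
    {
        uint32_t u32TprThreshold = 0;
        if (fPendingIntr)
        {
            uint8_t const u8PendingPriority = u8PendingIntr >> 4;  /* class of pending interrupt */
            uint8_t const u8TprPriority     = u8Tpr >> 4;          /* current TPR class */
            if (u8PendingPriority <= u8TprPriority)
                u32TprThreshold = u8PendingPriority;
        }
        return u32TprThreshold;
    }
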
     
    34424435
    34434436/**
    3444  * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
     4437 * Gets the guest interruptibility-state.
    34454438 *
    34464439 * @returns Guest's interruptibility-state.
    34474440 * @param   pVCpu       The cross context virtual CPU structure.
     4441 * @param   pVmcsInfo   The VMCS info. object.
    34484442 *
    34494443 * @remarks No-long-jump zone!!!
    34504444 */
    3451 static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu)
     4445static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
    34524446{
    34534447    /*
     
    34574451    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    34584452    {
    3459         /* If inhibition is active, RIP & RFLAGS should've been accessed
     4453        /* If inhibition is active, RIP and RFLAGS should've been updated
    34604454           (i.e. read previously from the VMCS or from ring-3). */
    34614455        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    34624456#ifdef VBOX_STRICT
    34634457        uint64_t const fExtrn = ASMAtomicUoReadU64(&pCtx->fExtrn);
     4458        RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    34644459        AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn));
    34654460#endif
     
    34874482     * setting this would block host-NMIs and IRET will not clear the blocking.
    34884483     *
     4484     * We always set NMI-exiting so when the host receives an NMI we get a VM-exit.
     4485     *
    34894486     * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
    34904487     */
    3491     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
    3492         && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
    3493     {
     4488    if (   (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
     4489        && CPUMIsGuestNmiBlocking(pVCpu))
    34944490        fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
    3495     }
    34964491
    34974492    return fIntrState;
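
For reference, the interruptibility-state derivation boils down to two independent sources of blocking. A hedged sketch using the architectural bit positions (constant names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define INT_STATE_BLOCK_STI    UINT32_C(0x1)  /* blocking by STI */
    #define INT_STATE_BLOCK_MOVSS  UINT32_C(0x2)  /* blocking by MOV SS/POP SS */
    #define INT_STATE_BLOCK_NMI    UINT32_C(0x8)  /* blocking by NMI */

    /* Hypothetical sketch: interrupt inhibition (valid only while RIP hasn't
       moved) maps to STI or MOV SS blocking; NMI blocking is reported only
       when virtual NMIs are enabled in the pin-based controls. */
    static uint32_t getGuestIntrState(bool fInhibit, bool fFromMovSs,
                                      bool fVirtNmi, bool fNmiBlocked)
    {
        uint32_t fIntrState = 0;
        if (fInhibit)
            fIntrState = fFromMovSs ? INT_STATE_BLOCK_MOVSS : INT_STATE_BLOCK_STI;
        if (fVirtNmi && fNmiBlocked)
            fIntrState |= INT_STATE_BLOCK_NMI;
        return fIntrState;
    }
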
     
    35034498 *
    35044499 * @returns VBox status code.
    3505  * @param   pVCpu       The cross context virtual CPU structure.
     4500 * @param   pVCpu           The cross context virtual CPU structure.
     4501 * @param   pVmxTransient   The VMX-transient structure.
    35064502 *
    35074503 * @remarks No-long-jump zone!!!
    35084504 */
    3509 static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu)
     4505static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    35104506{
    35114507    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS)
    35124508    {
    3513         uint32_t uXcptBitmap = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap;
    3514 
    3515         /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxExportGuestCR0(). */
    3516         if (pVCpu->hm.s.fGIMTrapXcptUD)
    3517             uXcptBitmap |= RT_BIT(X86_XCPT_UD);
    3518 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     4509        /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
     4510        if (   !pVmxTransient->fIsNestedGuest
     4511            &&  pVCpu->hm.s.fGIMTrapXcptUD)
     4512            hmR0VmxAddXcptIntercept(pVmxTransient, X86_XCPT_UD);
    35194513        else
    3520             uXcptBitmap &= ~RT_BIT(X86_XCPT_UD);
    3521 #endif
    3522 
    3523         Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_AC));
    3524         Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_DB));
    3525 
    3526         if (uXcptBitmap != pVCpu->hm.s.vmx.Ctls.u32XcptBitmap)
    3527         {
    3528             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    3529             AssertRCReturn(rc, rc);
    3530             pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;
    3531         }
    3532 
     4514            hmR0VmxRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
     4515
     4516        /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
    35334517        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    3534         Log4Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64\n", uXcptBitmap));
    35354518    }
    35364519    return VINF_SUCCESS;
     
    35424525 *
    35434526 * @returns VBox status code.
    3544  * @param   pVCpu       The cross context virtual CPU structure.
     4527 * @param   pVCpu           The cross context virtual CPU structure.
    35454528 *
    35464529 * @remarks No-long-jump zone!!!
     
    35484531static int hmR0VmxExportGuestRip(PVMCPU pVCpu)
    35494532{
    3550     int rc = VINF_SUCCESS;
    35514533    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
    35524534    {
    35534535        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
    35544536
    3555         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
     4537        int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
    35564538        AssertRCReturn(rc, rc);
    35574539
    35584540        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
    3559         Log4Func(("RIP=%#RX64\n", pVCpu->cpum.GstCtx.rip));
    3560     }
    3561     return rc;
     4541        Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
     4542    }
     4543    return VINF_SUCCESS;
    35624544}
    35634545
     
    35674549 *
    35684550 * @returns VBox status code.
    3569  * @param   pVCpu       The cross context virtual CPU structure.
     4551 * @param   pVCpu           The cross context virtual CPU structure.
    35704552 *
    35714553 * @remarks No-long-jump zone!!!
     
    35904572 *
    35914573 * @returns VBox status code.
    3592  * @param   pVCpu       The cross context virtual CPU structure.
     4574 * @param   pVCpu           The cross context virtual CPU structure.
     4575 * @param   pVmxTransient   The VMX-transient structure.
    35934576 *
    35944577 * @remarks No-long-jump zone!!!
    35954578 */
    3596 static int hmR0VmxExportGuestRflags(PVMCPU pVCpu)
     4579static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    35974580{
    35984581    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
     
    36124595         * can run the real-mode guest code under Virtual 8086 mode.
    36134596         */
    3614         if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     4597        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     4598        if (pVmcsInfo->RealMode.fRealOnV86Active)
    36154599        {
    36164600            Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
    36174601            Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
    3618             pVCpu->hm.s.vmx.RealMode.Eflags.u32 = fEFlags.u32;  /* Save the original eflags of the real-mode guest. */
    3619             fEFlags.Bits.u1VM   = 1;                            /* Set the Virtual 8086 mode bit. */
    3620             fEFlags.Bits.u2IOPL = 0;                            /* Change IOPL to 0, otherwise certain instructions won't fault. */
     4602            Assert(!pVmxTransient->fIsNestedGuest);
     4603            pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32;    /* Save the original eflags of the real-mode guest. */
     4604            fEFlags.Bits.u1VM   = 1;                         /* Set the Virtual 8086 mode bit. */
     4605            fEFlags.Bits.u2IOPL = 0;                         /* Change IOPL to 0, otherwise certain instructions won't fault. */
    36214606        }
    36224607
     
    36304615         * through the hypervisor debugger using EFLAGS.TF.
    36314616         */
    3632         if (   !pVCpu->hm.s.fSingleInstruction
     4617        if (   !pVmxTransient->fIsNestedGuest
     4618            && !pVCpu->hm.s.fSingleInstruction
    36334619            &&  fEFlags.Bits.u1TF)
    36344620        {
    3635             /** @todo r=ramshankar: Warning! We ASSUME EFLAGS.TF will not cleared on
      4621            /** @todo r=ramshankar: Warning!! We ASSUME EFLAGS.TF will not be cleared on
    36364622             *        premature trips to ring-3 esp since IEM does not yet handle it. */
    36374623            rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS);
    36384624            AssertRCReturn(rc, rc);
    36394625        }
      4626        /** @todo NSTVMX: Handle copying of VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS from the
      4627         *        nested-guest VMCS. */
    36404628
    36414629        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
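
Arming the BS pending debug exception mirrors what the CPU would itself record for a guest single-step. A minimal sketch of the decision, with the bit position per the Intel spec (names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EFL_TF             UINT32_C(0x100)   /* trap flag */
    #define PENDING_DBG_XCPT_BS    UINT64_C(0x4000)  /* bit 14: single-step */

    /* Hypothetical sketch: if the guest itself single-steps (EFLAGS.TF) and the
       VMM is not already single-stepping it, set the pending debug exception so
       a #DB is raised after the next instruction completes. */
    static uint64_t pendingDbgXcptForTf(uint32_t fEFlags, bool fVmmSingleStepping)
    {
        if (!fVmmSingleStepping && (fEFlags & X86_EFL_TF))
            return PENDING_DBG_XCPT_BS;
        return 0;
    }
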
     
    36534641 *
    36544642 * @returns VBox status code.
    3655  * @param   pVCpu       The cross context virtual CPU structure.
     4643 * @param   pVCpu           The cross context virtual CPU structure.
     4644 * @param   pVmxTransient   The VMX-transient structure.
    36564645 *
    36574646 * @remarks No-long-jump zone!!!
    36584647 */
    3659 static int hmR0VmxExportGuestCR0(PVMCPU pVCpu)
     4648static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    36604649{
    36614650    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
    36624651    {
    3663         PVM pVM = pVCpu->CTX_SUFF(pVM);
    3664         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    3665         Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.cr0));
    3666 
    3667         uint32_t const u32ShadowCr0 = pVCpu->cpum.GstCtx.cr0;
    3668         uint32_t       u32GuestCr0  = pVCpu->cpum.GstCtx.cr0;
     4652        PVM          pVM       = pVCpu->CTX_SUFF(pVM);
     4653        PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    36694654
    36704655        /*
    3671          * Setup VT-x's view of the guest CR0.
    3672          * Minimize VM-exits due to CR3 changes when we have NestedPaging.
     4656         * Figure out fixed CR0 bits in VMX operation.
    36734657         */
    3674         uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
    3675         if (pVM->hm.s.fNestedPaging)
    3676         {
    3677             if (CPUMIsGuestPagingEnabled(pVCpu))
     4658        uint64_t       fSetCr0 = pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
     4659        uint64_t const fZapCr0 = pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
     4660        if (pVM->hm.s.vmx.fUnrestrictedGuest)
     4661            fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
     4662        else
     4663            Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
     4664
     4665        if (!pVmxTransient->fIsNestedGuest)
     4666        {
     4667            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
     4668            uint64_t       u64GuestCr0  = pVCpu->cpum.GstCtx.cr0;
     4669            uint64_t const u64ShadowCr0 = u64GuestCr0;
     4670            Assert(!RT_HI_U32(u64GuestCr0));
     4671
     4672            /*
     4673             * Setup VT-x's view of the guest CR0.
     4674             */
     4675            uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
     4676            if (pVM->hm.s.fNestedPaging)
    36784677            {
    3679                 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
    3680                 uProcCtls &= ~(  VMX_PROC_CTLS_CR3_LOAD_EXIT
    3681                                | VMX_PROC_CTLS_CR3_STORE_EXIT);
     4678                if (CPUMIsGuestPagingEnabled(pVCpu))
     4679                {
     4680                    /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
     4681                    uProcCtls &= ~(  VMX_PROC_CTLS_CR3_LOAD_EXIT
     4682                                   | VMX_PROC_CTLS_CR3_STORE_EXIT);
     4683                }
     4684                else
     4685                {
     4686                    /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
     4687                    uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
     4688                              |  VMX_PROC_CTLS_CR3_STORE_EXIT;
     4689                }
     4690
     4691                /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
     4692                if (pVM->hm.s.vmx.fUnrestrictedGuest)
     4693                    uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
    36824694            }
    36834695            else
    36844696            {
    3685                 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
    3686                 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
    3687                           |  VMX_PROC_CTLS_CR3_STORE_EXIT;
     4697                /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
     4698                u64GuestCr0 |= X86_CR0_WP;
    36884699            }
    36894700
    3690             /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
    3691             if (pVM->hm.s.vmx.fUnrestrictedGuest)
    3692                 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
     4701            /*
     4702             * Guest FPU bits.
     4703             *
      4704             * Since we always pre-load the guest FPU before VM-entry, there is no need to
      4705             * track lazy state using CR0.TS.
      4706             *
      4707             * Intel spec. 23.8 "Restrictions on VMX operation" mentions the CR0.NE bit must always
      4708             * be set on the first VT-x capable CPUs; it says nothing about unrestricted-guest (UX) execution in the VM-entry checks.
     4709             */
     4710            u64GuestCr0 |= X86_CR0_NE;
     4711
     4712            /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
     4713            bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
     4714
     4715            /*
     4716             * Update exception intercepts.
     4717             */
     4718            uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
     4719            if (pVmcsInfo->RealMode.fRealOnV86Active)
     4720            {
     4721                Assert(PDMVmmDevHeapIsEnabled(pVM));
     4722                Assert(pVM->hm.s.vmx.pRealModeTSS);
     4723                uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
     4724            }
     4725            else
     4726            {
     4727                /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
     4728                uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
     4729                if (fInterceptMF)
     4730                    uXcptBitmap |= RT_BIT(X86_XCPT_MF);
     4731            }
     4732
      4733            /* Additional intercepts for debugging; define these yourself explicitly. */
     4734#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     4735            uXcptBitmap |= 0
     4736                        |  RT_BIT(X86_XCPT_BP)
     4737                        |  RT_BIT(X86_XCPT_DE)
     4738                        |  RT_BIT(X86_XCPT_NM)
     4739                        |  RT_BIT(X86_XCPT_TS)
     4740                        |  RT_BIT(X86_XCPT_UD)
     4741                        |  RT_BIT(X86_XCPT_NP)
     4742                        |  RT_BIT(X86_XCPT_SS)
     4743                        |  RT_BIT(X86_XCPT_GP)
     4744                        |  RT_BIT(X86_XCPT_PF)
     4745                        |  RT_BIT(X86_XCPT_MF)
     4746                        ;
     4747#elif defined(HMVMX_ALWAYS_TRAP_PF)
     4748            uXcptBitmap |= RT_BIT(X86_XCPT_PF);
     4749#endif
     4750            if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv)
     4751                uXcptBitmap |= RT_BIT(X86_XCPT_GP);
     4752            Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
     4753
     4754            /* Apply the fixed CR0 bits and enable caching. */
     4755            u64GuestCr0 |= fSetCr0;
     4756            u64GuestCr0 &= fZapCr0;
     4757            u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
     4758
     4759            /* Commit the CR0 and related fields to the guest VMCS. */
     4760            int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR0, u64GuestCr0);
     4761            rc    |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0);
     4762            if (uProcCtls != pVmcsInfo->u32ProcCtls)
     4763                rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
     4764            if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
     4765                rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
     4766            AssertRCReturn(rc, rc);
     4767
     4768            /* Update our caches. */
     4769            pVmcsInfo->u32ProcCtls   = uProcCtls;
     4770            pVmcsInfo->u32XcptBitmap = uXcptBitmap;
     4771
     4772            Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
    36934773        }
    36944774        else
    36954775        {
    3696             /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
    3697             u32GuestCr0 |= X86_CR0_WP;
    3698         }
    3699 
    3700         /*
    3701          * Guest FPU bits.
    3702          *
    3703          * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
    3704          * using CR0.TS.
    3705          *
    3706          * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
    3707          * set on the first CPUs to support VT-x and no mention of with regards to UX in VM-entry checks.
    3708          */
    3709         u32GuestCr0 |= X86_CR0_NE;
    3710 
    3711         /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
    3712         bool const fInterceptMF = !(u32ShadowCr0 & X86_CR0_NE);
    3713 
    3714         /*
    3715          * Update exception intercepts.
    3716          */
    3717         uint32_t uXcptBitmap = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap;
    3718         if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    3719         {
    3720             Assert(PDMVmmDevHeapIsEnabled(pVM));
    3721             Assert(pVM->hm.s.vmx.pRealModeTSS);
    3722             uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
    3723         }
    3724         else
    3725         {
    3726             /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
    3727             uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
    3728             if (fInterceptMF)
    3729                 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
    3730         }
    3731 
    3732         /* Additional intercepts for debugging, define these yourself explicitly. */
    3733 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    3734         uXcptBitmap |= 0
    3735                     |  RT_BIT(X86_XCPT_BP)
    3736                     |  RT_BIT(X86_XCPT_DE)
    3737                     |  RT_BIT(X86_XCPT_NM)
    3738                     |  RT_BIT(X86_XCPT_TS)
    3739                     |  RT_BIT(X86_XCPT_UD)
    3740                     |  RT_BIT(X86_XCPT_NP)
    3741                     |  RT_BIT(X86_XCPT_SS)
    3742                     |  RT_BIT(X86_XCPT_GP)
    3743                     |  RT_BIT(X86_XCPT_PF)
    3744                     |  RT_BIT(X86_XCPT_MF)
    3745                     ;
    3746 #elif defined(HMVMX_ALWAYS_TRAP_PF)
    3747         uXcptBitmap |= RT_BIT(X86_XCPT_PF);
    3748 #endif
    3749         if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv)
    3750             uXcptBitmap |= RT_BIT(X86_XCPT_GP);
    3751         Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
    3752 
    3753         /*
    3754          * Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW).
    3755          */
    3756         uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
    3757         uint32_t fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
    3758         if (pVM->hm.s.vmx.fUnrestrictedGuest)             /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
    3759             fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
    3760         else
    3761             Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
    3762 
    3763         u32GuestCr0 |= fSetCr0;
    3764         u32GuestCr0 &= fZapCr0;
    3765         u32GuestCr0 &= ~(X86_CR0_CD | X86_CR0_NW);        /* Always enable caching. */
    3766 
    3767         /*
    3768          * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
    3769          * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
    3770          * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
    3771          */
    3772         uint32_t u32Cr0Mask = X86_CR0_PE
    3773                             | X86_CR0_NE
    3774                             | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
    3775                             | X86_CR0_PG
    3776                             | X86_CR0_ET   /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
    3777                             | X86_CR0_CD   /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
    3778                             | X86_CR0_NW;  /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
    3779 
    3780         /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
    3781          *        enmGuestMode to be in-sync with the current mode. See @bugref{6398}
    3782          *        and @bugref{6944}. */
    3783 #if 0
    3784         if (pVM->hm.s.vmx.fUnrestrictedGuest)
    3785             u32Cr0Mask &= ~X86_CR0_PE;
    3786 #endif
    3787         /*
    3788          * Finally, update VMCS fields with the CR0 values and the exception bitmap.
    3789          */
    3790         int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCr0);
    3791         rc    |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32ShadowCr0);
    3792         if (u32Cr0Mask != pVCpu->hm.s.vmx.Ctls.u32Cr0Mask)
    3793             rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32Cr0Mask);
    3794         if (uProcCtls != pVCpu->hm.s.vmx.Ctls.u32ProcCtls)
    3795             rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
    3796         if (uXcptBitmap != pVCpu->hm.s.vmx.Ctls.u32XcptBitmap)
    3797             rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    3798         AssertRCReturn(rc, rc);
    3799 
    3800         /* Update our caches. */
    3801         pVCpu->hm.s.vmx.Ctls.u32Cr0Mask    = u32Cr0Mask;
    3802         pVCpu->hm.s.vmx.Ctls.u32ProcCtls   = uProcCtls;
    3803         pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;
     4776            PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4777            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
     4778            uint64_t       u64GuestCr0  = pVCpu->cpum.GstCtx.cr0;
     4779            uint64_t const u64ShadowCr0 = pVmcsNstGst->u64Cr0ReadShadow.u;
     4780            Assert(!RT_HI_U32(u64GuestCr0));
     4781            Assert(u64GuestCr0 & X86_CR0_NE);
     4782
     4783            /* Apply the fixed CR0 bits and enable caching. */
     4784            u64GuestCr0 |= fSetCr0;
     4785            u64GuestCr0 &= fZapCr0;
     4786            u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
     4787
     4788            /* Commit the CR0 and CR0 read shadow to the nested-guest VMCS. */
     4789            int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR0,            u64GuestCr0);
     4790            rc    |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0);
     4791            AssertRCReturn(rc, rc);
     4792
     4793            Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
     4794        }
    38044795
    38054796        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
    3806 
    3807         Log4Func(("u32Cr0Mask=%#RX32 u32ShadowCr0=%#RX32 u32GuestCr0=%#RX32 (fSetCr0=%#RX32 fZapCr0=%#RX32\n", u32Cr0Mask,
    3808                   u32ShadowCr0, u32GuestCr0, fSetCr0, fZapCr0));
    38094797    }
    38104798
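The fixed-bit massaging applied to CR0 in both branches above follows directly from the IA32_VMX_CR0_FIXED0/FIXED1 MSRs. A standalone sketch of that transform (illustrative names):

    #include <stdint.h>

    /* Hypothetical sketch: bits set in both FIXED0 and FIXED1 must be 1 in CR0
       while in VMX operation; bits clear in both must be 0. Everything else is
       up to the guest. */
    static uint64_t applyFixedCr0(uint64_t uCr0, uint64_t uFixed0, uint64_t uFixed1)
    {
        uint64_t const fSet = uFixed0 & uFixed1;  /* must-be-one bits */
        uint64_t const fZap = uFixed0 | uFixed1;  /* may-be-one bits */
        return (uCr0 | fSet) & fZap;
    }

On typical hardware the must-be-one set includes PE, NE and PG, which is why the code above carves PE and PG out of fSetCr0 when unrestricted guest execution is available.
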
     
    38224810 *          mapped (e.g. EFI32).
    38234811 *
    3824  * @param   pVCpu       The cross context virtual CPU structure.
     4812 * @param   pVCpu           The cross context virtual CPU structure.
     4813 * @param   pVmxTransient   The VMX-transient structure.
    38254814 *
    38264815 * @remarks No-long-jump zone!!!
    38274816 */
    3828 static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu)
     4817static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    38294818{
    38304819    int rc  = VINF_SUCCESS;
     
    38464835        if (pVM->hm.s.fNestedPaging)
    38474836        {
    3848             pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
     4837            PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     4838            pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
    38494839
    38504840            /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
    3851             Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
    3852             Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
    3853             Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
     4841            Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
     4842            Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
     4843            Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
    38544844
    38554845            /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
    3856             pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
    3857                                        |  (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
     4846            pVmcsInfo->HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
     4847                                  |  (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
    38584848
    38594849            /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
    3860             AssertMsg(   ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3      /* Bits 3:5 (EPT page walk length - 1) must be 3. */
    3861                       && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 7) & 0x1f) == 0,     /* Bits 7:11 MBZ. */
    3862                          ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
    3863             AssertMsg(  !((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x01)           /* Bit 6 (EPT accessed & dirty bit). */
     4850            AssertMsg(   ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3      /* Bits 3:5 (EPT page walk length - 1) must be 3. */
     4851                      && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0,     /* Bits 7:11 MBZ. */
     4852                         ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
     4853            AssertMsg(  !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01)           /* Bit 6 (EPT accessed & dirty bit). */
    38644854                      || (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY),
    3865                          ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
    3866 
    3867             rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
     4855                         ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
     4856
     4857            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
    38684858            AssertRCReturn(rc, rc);
    38694859
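The EPTP assembled above packs the memory type into bits 2:0 and the page-walk length minus one into bits 5:3, matching the assertions that follow. A standalone sketch (illustrative names):

    #include <stdint.h>

    #define EPT_MEMTYPE_WB              UINT64_C(6)  /* write-back, bits 2:0 */
    #define EPT_PAGE_WALK_LEN_MINUS_1   UINT64_C(3)  /* 4-level walk, bits 5:3 */

    /* Hypothetical sketch: build an EPT pointer from a 4K-aligned PML4 physical
       address, write-back memory type and a 4-level page walk. */
    static uint64_t makeEptp(uint64_t HCPhysPml4)
    {
        /* Caller guarantees HCPhysPml4 is 4K aligned and within the CPU's
           supported physical-address width. */
        return HCPhysPml4 | EPT_MEMTYPE_WB | (EPT_PAGE_WALK_LEN_MINUS_1 << 3);
    }
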
     
    38854875
    38864876                /*
    3887                  * The guest's view of its CR3 is unblemished with Nested Paging when the
     4877                 * The guest's view of its CR3 is unblemished with nested paging when the
    38884878                 * guest is using paging or we have unrestricted guest execution to handle
    38894879                 * the guest when it's not using paging.
    38904880                 */
     4881                HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
    38914882                GCPhysGuestCR3 = pCtx->cr3;
    38924883            }
     
    39244915        {
    39254916            /* Non-nested paging case, just use the hypervisor's CR3. */
    3926             RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
     4917            RTHCPHYS const HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
    39274918
    39284919            Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCR3));
     
    39404931    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
    39414932    {
    3942         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     4933        PCPUMCTX     pCtx        = &pVCpu->cpum.GstCtx;
     4934        PVMXVMCSINFO pVmcsInfo   = pVmxTransient->pVmcsInfo;
     4935        PCVMXVVMCS   pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4936
     4937        /*
     4938         * Figure out fixed CR4 bits in VMX operation.
     4939         */
     4940        uint64_t const fSetCr4 = pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
     4941        uint64_t const fZapCr4 = pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
     4942
    39434943        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
    3944         Assert(!RT_HI_U32(pCtx->cr4));
    3945 
    3946         uint32_t       u32GuestCr4  = pCtx->cr4;
    3947         uint32_t const u32ShadowCr4 = pCtx->cr4;
     4944        uint64_t       u64GuestCr4  = pCtx->cr4;
     4945        uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest ? pCtx->cr4 : pVmcsNstGst->u64Cr4ReadShadow.u;
     4946        Assert(!RT_HI_U32(u64GuestCr4));
    39484947
    39494948        /*
     
    39564955         * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
    39574956         */
    3958         if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     4957        if (pVmcsInfo->RealMode.fRealOnV86Active)
    39594958        {
    39604959            Assert(pVM->hm.s.vmx.pRealModeTSS);
    39614960            Assert(PDMVmmDevHeapIsEnabled(pVM));
    3962             u32GuestCr4 &= ~X86_CR4_VME;
     4961            u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
    39634962        }
    39644963
     
    39694968            {
    39704969                /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
    3971                 u32GuestCr4 |= X86_CR4_PSE;
     4970                u64GuestCr4 |= X86_CR4_PSE;
    39724971                /* Our identity mapping is a 32-bit page directory. */
    3973                 u32GuestCr4 &= ~X86_CR4_PAE;
     4972                u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
    39744973            }
    39754974            /* else use guest CR4.*/
     
    39774976        else
    39784977        {
     4978            Assert(!pVmxTransient->fIsNestedGuest);
     4979
    39794980            /*
    39804981             * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
     
    39874988                case PGMMODE_32_BIT:            /* 32-bit paging. */
    39884989                {
    3989                     u32GuestCr4 &= ~X86_CR4_PAE;
     4990                    u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
    39904991                    break;
    39914992                }
     
    39944995                case PGMMODE_PAE_NX:            /* PAE paging with NX. */
    39954996                {
    3996                     u32GuestCr4 |= X86_CR4_PAE;
     4997                    u64GuestCr4 |= X86_CR4_PAE;
    39974998                    break;
    39984999                }
     
    40095010        }
    40105011
    4011         /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
    4012         uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
    4013         uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
    4014         u32GuestCr4 |= fSetCr4;
    4015         u32GuestCr4 &= fZapCr4;
    4016 
    4017         /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them,
    4018            that would cause a VM-exit. */
    4019         uint32_t u32Cr4Mask = X86_CR4_VME
    4020                             | X86_CR4_PAE
    4021                             | X86_CR4_PGE
    4022                             | X86_CR4_PSE
    4023                             | X86_CR4_VMXE;
    4024         if (pVM->cpum.ro.HostFeatures.fXSaveRstor)
    4025             u32Cr4Mask |= X86_CR4_OSXSAVE;
    4026         if (pVM->cpum.ro.GuestFeatures.fPcid)
    4027             u32Cr4Mask |= X86_CR4_PCIDE;
    4028 
    4029         /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow
    4030            into the VMCS and update our cache. */
    4031         rc  = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCr4);
    4032         rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32ShadowCr4);
    4033         if (pVCpu->hm.s.vmx.Ctls.u32Cr4Mask != u32Cr4Mask)
    4034             rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32Cr4Mask);
     5012        /* Apply the fixed CR4 bits (mainly CR4.VMXE). */
     5013        u64GuestCr4 |= fSetCr4;
     5014        u64GuestCr4 &= fZapCr4;
     5015
     5016        /* Commit the CR4 and CR4 read shadow to the guest VMCS. */
     5017        rc  = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR4, u64GuestCr4);
     5018        rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4);
    40355019        AssertRCReturn(rc, rc);
    4036         pVCpu->hm.s.vmx.Ctls.u32Cr4Mask = u32Cr4Mask;
    40375020
    40385021        /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
     
    40415024        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
    40425025
    4043         Log4Func(("u32GuestCr4=%#RX32 u32ShadowCr4=%#RX32 (fSetCr4=%#RX32 fZapCr4=%#RX32)\n", u32GuestCr4, u32ShadowCr4, fSetCr4,
    4044                   fZapCr4));
      5026        Log4Func(("cr4=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
    40455027    }
    40465028    return rc;
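
The CR4 handling above pairs the fixed-bit transform with a read shadow so the guest never sees bits like CR4.VMXE that VMX operation forces on. A hedged sketch of that split (illustrative names):

    #include <stdint.h>

    /* Hypothetical sketch: the VMCS guest CR4 carries the fixed bits the CPU
       requires (e.g. CR4.VMXE), while the read shadow preserves the value the
       guest wrote so CR4 reads are unblemished. */
    static void splitGuestCr4(uint64_t uGuestCr4, uint64_t uFixed0, uint64_t uFixed1,
                              uint64_t *puVmcsCr4, uint64_t *puReadShadow)
    {
        uint64_t const fSet = uFixed0 & uFixed1;   /* must-be-one bits */
        uint64_t const fZap = uFixed0 | uFixed1;   /* may-be-one bits */
        *puReadShadow = uGuestCr4;                 /* what the guest reads back */
        *puVmcsCr4    = (uGuestCr4 | fSet) & fZap; /* what the CPU runs with */
    }
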
     
    40555037 *
    40565038 * @returns VBox status code.
    4057  * @param   pVCpu       The cross context virtual CPU structure.
     5039 * @param   pVCpu           The cross context virtual CPU structure.
     5040 * @param   pVmxTransient   The VMX-transient structure.
    40585041 *
    40595042 * @remarks No-long-jump zone!!!
    40605043 */
    4061 static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu)
     5044static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    40625045{
    40635046    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     5047
     5048    /** @todo NSTVMX: Figure out what we want to do with nested-guest instruction
     5049     *        stepping. */
     5050    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     5051    if (pVmxTransient->fIsNestedGuest)
     5052    {
     5053        int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, CPUMGetGuestDR7(pVCpu));
     5054        AssertRCReturn(rc, rc);
     5055        return VINF_SUCCESS;
     5056    }
    40645057
    40655058#ifdef VBOX_STRICT
    40665059    /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
    4067     if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
     5060    if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
    40685061    {
    40695062        /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
     
    40755068    bool     fSteppingDB      = false;
    40765069    bool     fInterceptMovDRx = false;
    4077     uint32_t uProcCtls        = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
     5070    uint32_t uProcCtls        = pVmcsInfo->u32ProcCtls;
    40785071    if (pVCpu->hm.s.fSingleInstruction)
    40795072    {
     
    41005093        /*
    41015094         * Use the combined guest and host DRx values found in the hypervisor register set
    4102          * because the debugger has breakpoints active or someone is single stepping on the
    4103          * host side without a monitor trap flag.
     5095         * because the hypervisor debugger has breakpoints active or someone is single stepping
     5096         * on the host side without a monitor trap flag.
    41045097         *
    41055098         * Note! DBGF expects a clean DR6 state before executing guest code.
     
    41595152         * must intercept #DB in order to maintain a correct DR6 guest value, and
    41605153         * because we need to intercept it to prevent nested #DBs from hanging the
    4161          * CPU, we end up always having to intercept it.  See hmR0VmxInitXcptBitmap.
     5154         * CPU, we end up always having to intercept it. See hmR0VmxSetupVmcsXcptBitmap().
    41625155         */
    41635156#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     
    41855178     * monitor-trap flag and update our cache.
    41865179     */
    4187     if (uProcCtls != pVCpu->hm.s.vmx.Ctls.u32ProcCtls)
     5180    if (uProcCtls != pVmcsInfo->u32ProcCtls)
    41885181    {
    41895182        int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
    41905183        AssertRCReturn(rc2, rc2);
    4191         pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
     5184        pVmcsInfo->u32ProcCtls = uProcCtls;
    41925185    }
    41935186
     
    42305223 *
    42315224 * @param   pVCpu       The cross context virtual CPU structure.
     5225 * @param   pVmcsInfo   The VMCS info. object.
    42325226 *
    42335227 * @remarks Will import guest CR0 on strict builds during validation of
    42345228 *          segments.
    42355229 */
    4236 static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu)
     5230static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
    42375231{
    42385232    /*
     
    42405234     *
    42415235     * The reason we check for attribute value 0 in this function and not just the unusable bit is
    4242      * because hmR0VmxExportGuestSegmentReg() only updates the VMCS' copy of the value with the unusable bit
    4243      * and doesn't change the guest-context value.
     5236     * because hmR0VmxExportGuestSegReg() only updates the VMCS' copy of the value with the
     5237     * unusable bit and doesn't change the guest-context value.
    42445238     */
    42455239    PVM       pVM  = pVCpu->CTX_SUFF(pVM);
    42465240    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    4247     hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
     5241    hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
    42485242    if (   !pVM->hm.s.vmx.fUnrestrictedGuest
    42495243        && (   !CPUMIsGuestInRealModeEx(pCtx)
     
    42875281                   || (pCtx->ss.Attr.n.u1Granularity));
    42885282        }
    4289         /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmentReg(). */
     5283        /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegReg(). */
    42905284        if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
    42915285        {
     
    43575351    {
    43585352        /* Real and v86 mode checks. */
    4359         /* hmR0VmxExportGuestSegmentReg() writes the modified in VMCS. We want what we're feeding to VT-x. */
      5353        /* hmR0VmxExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
    43605354        uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
    4361         if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    4362         {
    4363             u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
     5355        if (pVmcsInfo->RealMode.fRealOnV86Active)
     5356        {
     5357            u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
     5358            u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
    43645359        }
    43655360        else
     
    44095404 *
    44105405 * @returns VBox status code.
    4411  * @param   pVCpu       The cross context virtual CPU structure.
    4412  * @param   idxSel      Index of the selector in the VMCS.
    4413  * @param   idxLimit    Index of the segment limit in the VMCS.
    4414  * @param   idxBase     Index of the segment base in the VMCS.
    4415  * @param   idxAccess   Index of the access rights of the segment in the VMCS.
    4416  * @param   pSelReg     Pointer to the segment selector.
     5406 * @param   pVCpu           The cross context virtual CPU structure.
     5407 * @param   pVmcsInfo       The VMCS info. object.
     5408 * @param   iSegReg         The segment register number (X86_SREG_XXX).
     5409 * @param   pSelReg         Pointer to the segment selector.
    44175410 *
    44185411 * @remarks No-long-jump zone!!!
    44195412 */
    4420 static int hmR0VmxExportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
    4421                                         PCCPUMSELREG pSelReg)
    4422 {
    4423     int rc = VMXWriteVmcs32(idxSel,    pSelReg->Sel);       /* 16-bit guest selector field. */
    4424     rc    |= VMXWriteVmcs32(idxLimit,  pSelReg->u32Limit);  /* 32-bit guest segment limit field. */
    4425     rc    |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);   /* Natural width guest segment base field.*/
    4426     AssertRCReturn(rc, rc);
     5413static int hmR0VmxExportGuestSegReg(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t iSegReg, PCCPUMSELREG pSelReg)
     5414{
     5415    Assert(iSegReg < X86_SREG_COUNT);
     5416    uint32_t const idxSel   = g_aVmcsSegSel[iSegReg];
     5417    uint32_t const idxLimit = g_aVmcsSegLimit[iSegReg];
     5418    uint32_t const idxBase  = g_aVmcsSegBase[iSegReg];
     5419    uint32_t const idxAttr  = g_aVmcsSegAttr[iSegReg];
    44275420
    44285421    uint32_t u32Access = pSelReg->Attr.u;
    4429     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     5422    if (pVmcsInfo->RealMode.fRealOnV86Active)
    44305423    {
    44315424        /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
     
    44335426        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
    44345427        Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
     5428        RT_NOREF_PV(pVCpu);
    44355429    }
    44365430    else
     
    44515445              ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
    44525446
    4453     rc = VMXWriteVmcs32(idxAccess, u32Access);              /* 32-bit guest segment access-rights field. */
     5447    /*
     5448     * Commit it to the VMCS.
     5449     */
     5450    int rc = VMXWriteVmcs32(idxSel,    pSelReg->Sel);
     5451    rc    |= VMXWriteVmcs32(idxLimit,  pSelReg->u32Limit);
     5452    rc    |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);
     5453    rc    |= VMXWriteVmcs32(idxAttr,   u32Access);
    44545454    AssertRCReturn(rc, rc);
    44555455    return rc;
     
    44585458
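The g_aVmcsSegSel/g_aVmcsSegLimit/g_aVmcsSegBase/g_aVmcsSegAttr tables indexed above map X86_SREG_XXX (ES=0, CS, SS, DS, FS, GS) to the corresponding VMCS field encodings. A sketch of the assumed shape, with the selector table shown and the other three analogous (the actual definitions live elsewhere in HMVMXR0.cpp):

    /* Assumed layout: one VMCS field encoding per X86_SREG_XXX index. */
    static const uint32_t g_aVmcsSegSel[] =
    {
        VMX_VMCS16_GUEST_ES_SEL, VMX_VMCS16_GUEST_CS_SEL, VMX_VMCS16_GUEST_SS_SEL,
        VMX_VMCS16_GUEST_DS_SEL, VMX_VMCS16_GUEST_FS_SEL, VMX_VMCS16_GUEST_GS_SEL
    };
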
    44595459/**
    4460  * Exports the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
    4461  * into the guest-state area in the VMCS.
      5460 * Exports the guest segment registers, GDTR, IDTR, LDTR, and TR into the
      5461 * guest-state area in the VMCS.
    44625462 *
    44635463 * @returns VBox status code.
    4464  * @param   pVCpu       The cross context virtual CPU structure.
     5464 * @param   pVCpu           The cross context virtual CPU structure.
     5465 * @param   pVmxTransient   The VMX-transient structure.
    44655466 *
    44665467 * @remarks Will import guest CR0 on strict builds during validation of
     
    44685469 * @remarks No-long-jump zone!!!
    44695470 */
    4470 static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu)
    4471 {
    4472     int       rc   = VERR_INTERNAL_ERROR_5;
    4473     PVM       pVM  = pVCpu->CTX_SUFF(pVM);
    4474     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     5471static int hmR0VmxExportGuestSegRegsXdtr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     5472{
     5473    int          rc        = VERR_INTERNAL_ERROR_5;
     5474    PVM          pVM       = pVCpu->CTX_SUFF(pVM);
     5475    PCCPUMCTX    pCtx      = &pVCpu->cpum.GstCtx;
     5476    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    44755477
    44765478    /*
     
    44825484        if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    44835485        {
     5486            Assert(!pVmxTransient->fIsNestedGuest);
    44845487            Assert(pVM->hm.s.vmx.pRealModeTSS);
    44855488            AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
    4486             if (   pVCpu->hm.s.vmx.fWasInRealMode
     5489            if (   pVmcsInfo->fWasInRealMode
    44875490                && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
    44885491            {
     
    44915494                REMFlushTBs(pVM);
    44925495                Log4Func(("Switch to protected mode detected!\n"));
    4493                 pVCpu->hm.s.vmx.fWasInRealMode = false;
     5496                pVmcsInfo->fWasInRealMode = false;
    44945497            }
    44955498        }
     
    44985501        {
    44995502            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
    4500             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    4501                 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pCtx->cs.Attr.u;
    4502             rc = HMVMX_EXPORT_SREG(CS, &pCtx->cs);
     5503            if (pVmcsInfo->RealMode.fRealOnV86Active)
     5504                pVmcsInfo->RealMode.AttrCS.u = pCtx->cs.Attr.u;
     5505            rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
    45035506            AssertRCReturn(rc, rc);
    45045507            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
     
    45085511        {
    45095512            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
    4510             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    4511                 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pCtx->ss.Attr.u;
    4512             rc = HMVMX_EXPORT_SREG(SS, &pCtx->ss);
     5513            if (pVmcsInfo->RealMode.fRealOnV86Active)
     5514                pVmcsInfo->RealMode.AttrSS.u = pCtx->ss.Attr.u;
     5515            rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
    45135516            AssertRCReturn(rc, rc);
    45145517            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
     
    45185521        {
    45195522            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
    4520             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    4521                 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pCtx->ds.Attr.u;
    4522             rc = HMVMX_EXPORT_SREG(DS, &pCtx->ds);
     5523            if (pVmcsInfo->RealMode.fRealOnV86Active)
     5524                pVmcsInfo->RealMode.AttrDS.u = pCtx->ds.Attr.u;
     5525            rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
    45235526            AssertRCReturn(rc, rc);
    45245527            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
     
    45285531        {
    45295532            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
    4530             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    4531                 pVCpu->hm.s.vmx.RealMode.AttrES.u = pCtx->es.Attr.u;
    4532             rc = HMVMX_EXPORT_SREG(ES, &pCtx->es);
     5533            if (pVmcsInfo->RealMode.fRealOnV86Active)
     5534                pVmcsInfo->RealMode.AttrES.u = pCtx->es.Attr.u;
     5535            rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
    45335536            AssertRCReturn(rc, rc);
    45345537            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
     
    45385541        {
    45395542            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
    4540             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    4541                 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pCtx->fs.Attr.u;
    4542             rc = HMVMX_EXPORT_SREG(FS, &pCtx->fs);
     5543            if (pVmcsInfo->RealMode.fRealOnV86Active)
     5544                pVmcsInfo->RealMode.AttrFS.u = pCtx->fs.Attr.u;
     5545            rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
    45435546            AssertRCReturn(rc, rc);
    45445547            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
     
    45485551        {
    45495552            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
    4550             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    4551                 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pCtx->gs.Attr.u;
    4552             rc = HMVMX_EXPORT_SREG(GS, &pCtx->gs);
     5553            if (pVmcsInfo->RealMode.fRealOnV86Active)
     5554                pVmcsInfo->RealMode.AttrGS.u = pCtx->gs.Attr.u;
     5555            rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
    45535556            AssertRCReturn(rc, rc);
    45545557            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
     
    45565559
    45575560#ifdef VBOX_STRICT
    4558         hmR0VmxValidateSegmentRegs(pVCpu);
     5561        hmR0VmxValidateSegmentRegs(pVCpu, pVmcsInfo);
    45595562#endif
    4560 
    4561         Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pCtx->cs.Sel, pCtx->cs.u64Base,
    4562                   pCtx->cs.u32Limit, pCtx->cs.Attr.u));
     5563        Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
     5564                  pCtx->cs.Attr.u));
    45635565    }
    45645566
     
    45755577         * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
    45765578         */
    4577         uint16_t u16Sel          = 0;
    4578         uint32_t u32Limit        = 0;
    4579         uint64_t u64Base         = 0;
    4580         uint32_t u32AccessRights = 0;
    4581 
    4582         if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     5579        uint16_t u16Sel;
     5580        uint32_t u32Limit;
     5581        uint64_t u64Base;
     5582        uint32_t u32AccessRights;
     5583        if (!pVmcsInfo->RealMode.fRealOnV86Active)
    45835584        {
    45845585            u16Sel          = pCtx->tr.Sel;
     
    45895590        else
    45905591        {
     5592            Assert(!pVmxTransient->fIsNestedGuest);
    45915593            Assert(pVM->hm.s.vmx.pRealModeTSS);
    45925594            Assert(PDMVmmDevHeapIsEnabled(pVM));    /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
     
    46045606            u16Sel          = 0;
    46055607            u32Limit        = HM_VTX_TSS_SIZE;
    4606             u64Base         = GCPhys;   /* in real-mode phys = virt. */
     5608            u64Base         = GCPhys;
    46075609            u32AccessRights = DescAttr.u;
    46085610        }
     
    46295631
    46305632        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
    4631         Log4Func(("TR base=%#RX64\n", pCtx->tr.u64Base));
     5633        Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
    46325634    }
    46335635
     
    46475649
    46485650        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
    4649         Log4Func(("GDTR base=%#RX64\n", pCtx->gdtr.pGdt));
     5651        Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
    46505652    }
    46515653
     
    46585660
    46595661        /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
    4660         uint32_t u32Access = 0;
    4661         if (!pCtx->ldtr.Attr.u)
     5662        uint32_t u32Access;
     5663        if (   !pVmxTransient->fIsNestedGuest
     5664            && !pCtx->ldtr.Attr.u)
    46625665            u32Access = X86DESCATTR_UNUSABLE;
    46635666        else
     
    46865689
    46875690        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
    4688         Log4Func(("LDTR base=%#RX64\n", pCtx->ldtr.u64Base));
     5691        Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
    46895692    }
    46905693
     
    47045707
    47055708        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
    4706         Log4Func(("IDTR base=%#RX64\n", pCtx->idtr.pIdt));
     5709        Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
    47075710    }
    47085711
     
    47165719 *
    47175720 * These MSRs will automatically be loaded to the host CPU on every successful
    4718  * VM-entry and stored from the host CPU on every successful VM-exit. This also
    4719  * creates/updates MSR slots for the host MSRs. The actual host MSR values are
    4720  * -not- updated here for performance reasons. See hmR0VmxExportHostMsrs().
    4721  *
    4722  * Also exports the guest sysenter MSRs into the guest-state area in the VMCS.
     5721 * VM-entry and stored from the host CPU on every successful VM-exit.
     5722 *
      5723 * We create/update MSR slots for the host MSRs in the VM-exit MSR-load area. The
      5724 * actual host MSR values are -not- updated here for performance reasons. See
      5725 * hmR0VmxExportHostMsrs().
      5726 *
      5727 * We also export the guest sysenter MSRs into the guest-state area in the VMCS.
    47235728 *
    47245729 * @returns VBox status code.
    4725  * @param   pVCpu       The cross context virtual CPU structure.
     5730 * @param   pVCpu           The cross context virtual CPU structure.
     5731 * @param   pVmxTransient   The VMX-transient structure.
    47265732 *
    47275733 * @remarks No-long-jump zone!!!
    47285734 */
    4729 static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu)
     5735static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    47305736{
    47315737    AssertPtr(pVCpu);
    4732     AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
     5738    AssertPtr(pVmxTransient);
     5739
     5740    PVM       pVM  = pVCpu->CTX_SUFF(pVM);
     5741    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    47335742
    47345743    /*
    47355744     * MSRs that we use the auto-load/store MSR area in the VMCS.
    47365745     * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs().
    4737      */
    4738     PVM       pVM  = pVCpu->CTX_SUFF(pVM);
    4739     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     5746     * The host MSR values are updated when it's safe in hmR0VmxLazySaveHostMsrs().
     5747     *
      5748     * For nested-guests, the guest's MSRs from the VM-entry MSR-load area are already
      5749     * loaded (into the guest-CPU context) by the VMLAUNCH/VMRESUME instruction
      5750     * emulation, so there is nothing to do here.
     5751     */
    47405752    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
    47415753    {
    4742         if (pVM->hm.s.fAllow64BitGuests)
     5754        if (   !pVmxTransient->fIsNestedGuest
     5755            &&  pVM->hm.s.fAllow64BitGuests)
    47435756        {
    47445757#if HC_ARCH_BITS == 32
    47455758            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE);
    4746 
    4747             int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pCtx->msrLSTAR,        false, NULL);
    4748             rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pCtx->msrSTAR,         false, NULL);
    4749             rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pCtx->msrSFMASK,       false, NULL);
    4750             rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE, false, NULL);
     5759            Assert(!pVmxTransient->fIsNestedGuest);
     5760
     5761            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_LSTAR,          pCtx->msrLSTAR,        true, false);
     5762            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_STAR,           pCtx->msrSTAR,         true, false);
     5763            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_SF_MASK,        pCtx->msrSFMASK,       true, false);
     5764            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE, true, false);
    47515765            AssertRCReturn(rc, rc);
    4752 # ifdef LOG_ENABLED
    4753             PCVMXAUTOMSR pMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    4754             for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
    4755                 Log4Func(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
    4756 # endif
    47575766#endif
    47585767        }
     
    47625771    /*
    47635772     * Guest Sysenter MSRs.
    4764      * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
    4765      * VM-exits on WRMSRs for these MSRs.
    47665773     */
    47675774    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
     
    47915798    }
    47925799
     5800    /*
     5801     * Guest/host EFER MSR.
     5802     */
    47935803    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
    47945804    {
     5805        /* Whether we are using the VMCS to swap the EFER MSR must have been
     5806           determined earlier while exporting VM-entry/VM-exit controls. */
     5807        Assert(!(ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));
    47955808        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
    47965809
     
    48045817            {
    48055818                int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER);
    4806                 AssertRCReturn(rc,rc);
    4807                 Log4Func(("EFER=%#RX64\n", pCtx->msrEFER));
     5819                AssertRCReturn(rc, rc);
    48085820            }
    48095821            else
    48105822            {
    4811                 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pCtx->msrEFER, false /* fUpdateHostMsr */,
    4812                                                     NULL /* pfAddedAndUpdated */);
     5823                /*
     5824                 * We shall use the auto-load/store MSR area only for loading the EFER MSR but we must
     5825                 * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}.
     5826                 */
     5827                int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, pCtx->msrEFER,
     5828                                                    false /* fSetReadWrite */, false /* fUpdateHostMsr */);
    48135829                AssertRCReturn(rc, rc);
    4814 
    4815                 /* We need to intercept reads too, see @bugref{7386#c16}. */
    4816                 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    4817                     hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
    4818                 Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pCtx->msrEFER,
    4819                           pVCpu->hm.s.vmx.cMsrs));
    48205830            }
    48215831        }
    48225832        else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
    4823             hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
     5833            hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER);
     5834
    48245835        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
    48255836    }
    48265837
     5838    /*
     5839     * Other MSRs.
     5840     * Speculation Control (R/W).
     5841     */
     5842    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)
     5843    {
      5844        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS);
     5845        if (pVM->cpum.ro.GuestFeatures.fIbrs)
     5846        {
     5847            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu),
     5848                                                false /* fSetReadWrite */, false /* fUpdateHostMsr */);
     5849            AssertRCReturn(rc, rc);
     5850        }
     5851        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
     5852    }
     5853
    48275854    return VINF_SUCCESS;
    48285855}
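
The auto-load/store MSR area that hmR0VmxAddAutoLoadStoreMsr() and hmR0VmxRemoveAutoLoadStoreMsr() manage is a hardware-defined array of 16-byte entries (Intel SDM 24.7.2 and 24.8.2). Its layout, shown here for reference with the VMXAUTOMSR name following VBox's VMX headers:

    /* One VM-entry/VM-exit MSR area entry. */
    typedef struct VMXAUTOMSR
    {
        uint32_t    u32Msr;         /* The MSR index. */
        uint32_t    u32Reserved;    /* Reserved, must be zero. */
        uint64_t    u64Value;       /* Value loaded on VM-entry / stored on VM-exit. */
    } VMXAUTOMSR;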
    4829 
    4830 
    4831 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    4832 /**
    4833  * Check if guest state allows safe use of 32-bit switcher again.
    4834  *
    4835  * Segment bases and protected mode structures must be 32-bit addressable
    4836  * because the  32-bit switcher will ignore high dword when writing these VMCS
    4837  * fields.  See @bugref{8432} for details.
    4838  *
    4839  * @returns true if safe, false if must continue to use the 64-bit switcher.
    4840  * @param   pCtx   Pointer to the guest-CPU context.
    4841  *
    4842  * @remarks No-long-jump zone!!!
    4843  */
    4844 static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pCtx)
    4845 {
    4846     if (pCtx->gdtr.pGdt    & UINT64_C(0xffffffff00000000))     return false;
    4847     if (pCtx->idtr.pIdt    & UINT64_C(0xffffffff00000000))     return false;
    4848     if (pCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000))     return false;
    4849     if (pCtx->tr.u64Base   & UINT64_C(0xffffffff00000000))     return false;
    4850     if (pCtx->es.u64Base   & UINT64_C(0xffffffff00000000))     return false;
    4851     if (pCtx->cs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
    4852     if (pCtx->ss.u64Base   & UINT64_C(0xffffffff00000000))     return false;
    4853     if (pCtx->ds.u64Base   & UINT64_C(0xffffffff00000000))     return false;
    4854     if (pCtx->fs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
    4855     if (pCtx->gs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
    4856 
    4857     /* All good, bases are 32-bit. */
    4858     return true;
    4859 }
    4860 #endif
    48615856
    48625857
     
    48665861 * @returns VBox status code.
    48675862 * @param   pVCpu       The cross context virtual CPU structure.
      5863 * @param   pVmxTransient   The VMX-transient structure.
    48685864 *
    48695865 * @remarks No-long-jump zone!!!
    48705866 */
    4871 static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu)
    4872 {
    4873     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     5867static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     5868{
     5869    PCCPUMCTX    pCtx      = &pVCpu->cpum.GstCtx;
     5870    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     5871
    48745872    if (CPUMIsGuestInLongModeEx(pCtx))
    48755873    {
     
    48805878#if HC_ARCH_BITS == 32
    48815879        /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
    4882         if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
     5880        if (pVmcsInfo->pfnStartVM != VMXR0SwitcherStartVM64)
    48835881        {
    48845882#ifdef VBOX_STRICT
    4885             if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
     5883            if (pVmcsInfo->pfnStartVM != NULL) /* Very first VM-entry would have saved host-state already, ignore it. */
    48865884            {
    48875885                /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */
    48885886                uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
    48895887                RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    4890                 AssertMsg(fCtxChanged & (  HM_CHANGED_VMX_EXIT_CTLS
    4891                                          | HM_CHANGED_VMX_ENTRY_CTLS
    4892                                          | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
     5888                AssertMsg(fCtxChanged & (HM_CHANGED_VMX_ENTRY_EXIT_CTLS | HM_CHANGED_GUEST_EFER_MSR),
     5889                          ("fCtxChanged=%#RX64\n", fCtxChanged));
    48935890            }
    48945891#endif
    4895             pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
     5892            pVmcsInfo->pfnStartVM = VMXR0SwitcherStartVM64;
    48965893
    48975894            /* Mark that we've switched to 64-bit handler, we can't safely switch back to 32-bit for
    48985895               the rest of the VM run (until VM reset). See @bugref{8432#c7}. */
    4899             pVCpu->hm.s.vmx.fSwitchedTo64on32 = true;
     5896            pVmcsInfo->fSwitchedTo64on32 = true;
    49005897            Log4Func(("Selected 64-bit switcher\n"));
    49015898        }
    49025899#else
    49035900        /* 64-bit host. */
    4904         pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
     5901        pVmcsInfo->pfnStartVM = VMXR0StartVM64;
    49055902#endif
    49065903    }
     
    49095906        /* Guest is not in long mode, use the 32-bit handler. */
    49105907#if HC_ARCH_BITS == 32
    4911         if (    pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32
    4912             && !pVCpu->hm.s.vmx.fSwitchedTo64on32   /* If set, guest mode change does not imply switcher change. */
    4913             &&  pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
     5908        if (    pVmcsInfo->pfnStartVM != VMXR0StartVM32
     5909            && !pVmcsInfo->fSwitchedTo64on32      /* If set, guest mode change does not imply switcher change. */
     5910            &&  pVmcsInfo->pfnStartVM != NULL)    /* Very first VM-entry would have saved host-state already, ignore it. */
    49145911        {
    49155912# ifdef VBOX_STRICT
     
    49175914            uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
    49185915            RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    4919             AssertMsg(fCtxChanged & (  HM_CHANGED_VMX_EXIT_CTLS
    4920                                      | HM_CHANGED_VMX_ENTRY_CTLS
    4921                                      | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
     5916            AssertMsg(fCtxChanged & (HM_CHANGED_VMX_ENTRY_EXIT_CTLS | HM_CHANGED_GUEST_EFER_MSR),
     5917                      ("fCtxChanged=%#RX64\n", fCtxChanged));
    49225918# endif
    49235919        }
     
    49265922         * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel
    49275923         * design, see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit
    4928          * switcher flag because now we know the guest is in a sane state where it's safe
    4929          * to use the 32-bit switcher. Otherwise check the guest state if it's safe to use
     5924         * switcher flag now because we know the guest is in a sane state where it's safe
     5925         * to use the 32-bit switcher. Otherwise, check the guest state if it's safe to use
    49305926         * the much faster 32-bit switcher again.
    49315927         */
    4932         if (!pVCpu->hm.s.vmx.fSwitchedTo64on32)
    4933         {
    4934             if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
     5928        if (!pVmcsInfo->fSwitchedTo64on32)
     5929        {
     5930            if (pVmcsInfo->pfnStartVM != VMXR0StartVM32)
    49355931                Log4Func(("Selected 32-bit switcher\n"));
    49365932            pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
     
    49385934        else
    49395935        {
    4940             Assert(pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64);
    4941             if (   pVCpu->hm.s.vmx.RealMode.fRealOnV86Active
     5936            Assert(pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64);
     5937            if (   pVmcsInfo->RealMode.fRealOnV86Active
    49425938                || hmR0VmxIs32BitSwitcherSafe(pCtx))
    49435939            {
    4944                 pVCpu->hm.s.vmx.fSwitchedTo64on32 = false;
    4945                 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
     5940                pVmcsInfo->fSwitchedTo64on32 = false;
     5941                pVmcsInfo->pfnStartVM = VMXR0StartVM32;
    49465942                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR
    4947                                                          | HM_CHANGED_VMX_ENTRY_CTLS
    4948                                                          | HM_CHANGED_VMX_EXIT_CTLS
     5943                                                         | HM_CHANGED_VMX_ENTRY_EXIT_CTLS
    49495944                                                         | HM_CHANGED_HOST_CONTEXT);
    49505945                Log4Func(("Selected 32-bit switcher (safe)\n"));
     
    49525947        }
    49535948# else
    4954         pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
     5949        pVmcsInfo->pfnStartVM = VMXR0StartVM32;
    49555950# endif
    49565951#else
    4957         pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
     5952        pVmcsInfo->pfnStartVM = VMXR0StartVM32;
    49585953#endif
    49595954    }
    4960     Assert(pVCpu->hm.s.vmx.pfnStartVM);
     5955    Assert(pVmcsInfo->pfnStartVM);
    49615956    return VINF_SUCCESS;
    49625957}
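
On 64-bit hosts the selection above collapses to a simple guest-mode check; a condensed sketch using the per-VMCS pfnStartVM member introduced by this changeset (illustrative only, it ignores the 32-bit-host switcher logic):

    /* Sketch: 64-bit-host equivalent of hmR0VmxSelectVMRunHandler(). */
    pVmcsInfo->pfnStartVM = CPUMIsGuestInLongModeEx(pCtx) ? VMXR0StartVM64 : VMXR0StartVM32;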
     
    49675962 *
    49685963 * @returns VBox status code, no informational status codes.
    4969  * @param   pVCpu       The cross context virtual CPU structure.
     5964 * @param   pVCpu           The cross context virtual CPU structure.
     5965 * @param   pVmxTransient   The VMX-transient structure.
    49705966 *
    49715967 * @remarks No-long-jump zone!!!
    49725968 */
    4973 DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu)
     5969DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    49745970{
    49755971    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
    49765972    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    49775973    pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
     5974
     5975    /** @todo Add stats for VMRESUME vs VMLAUNCH. */
    49785976
    49795977    /*
     
    49845982     * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
    49855983     */
    4986     bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
    4987     /** @todo Add stats for resume vs launch. */
     5984    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     5985    bool const fResumeVM = RT_BOOL(pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_LAUNCHED);
    49885986    PVM pVM = pVCpu->CTX_SUFF(pVM);
    49895987#ifdef VBOX_WITH_KERNEL_USING_XMM
    4990     int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsBatchCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
     5988    int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsCache, pVM, pVCpu, pVmcsInfo->pfnStartVM);
    49915989#else
    4992     int rc = pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsBatchCache, pVM, pVCpu);
     5990    int rc = pVmcsInfo->pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsCache, pVM, pVCpu);
    49935991#endif
    49945992    AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));
     
    50026000 * @param   pVCpu           The cross context virtual CPU structure.
    50036001 * @param   rcVMRun         The return code from VMLAUNCH/VMRESUME.
    5004  * @param   pVmxTransient   Pointer to the VMX transient structure (only
     6002 * @param   pVmxTransient   The VMX-transient structure (only
    50056003 *                          exitReason updated).
    50066004 */
     
    50306028
    50316029#ifdef VBOX_STRICT
     6030                PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    50326031                Log4(("uExitReason        %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
    50336032                     pVmxTransient->uExitReason));
     
    50496048                rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);                 AssertRC(rc);
    50506049                Log4(("VMX_VMCS32_CTRL_PROC_EXEC               %#RX32\n", u32Val));
    5051                 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     6050                if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    50526051                {
    50536052                    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);            AssertRC(rc);
     
    52546253
    52556254/**
    5256  * Executes the specified handler in 64-bit mode.
    5257  *
    5258  * @returns VBox status code (no informational status codes).
    5259  * @param   pVCpu       The cross context virtual CPU structure.
    5260  * @param   enmOp       The operation to perform.
    5261  * @param   cParams     Number of parameters.
    5262  * @param   paParam     Array of 32-bit parameters.
    5263  */
    5264 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
    5265 {
    5266     PVM pVM = pVCpu->CTX_SUFF(pVM);
    5267     AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
    5268     Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
    5269     Assert(pVCpu->hm.s.vmx.VmcsBatchCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsBatchCache.Write.aField));
    5270     Assert(pVCpu->hm.s.vmx.VmcsBatchCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsBatchCache.Read.aField));
    5271 
    5272 #ifdef VBOX_STRICT
    5273     for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsBatchCache.Write.cValidEntries; i++)
    5274         Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VmcsBatchCache.Write.aField[i]));
    5275 
    5276     for (uint32_t i = 0; i <pVCpu->hm.s.vmx.VmcsBatchCache.Read.cValidEntries; i++)
    5277         Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VmcsBatchCache.Read.aField[i]));
    5278 #endif
    5279 
    5280     /* Disable interrupts. */
    5281     RTCCUINTREG fOldEFlags = ASMIntDisableFlags();
    5282 
    5283 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    5284     RTCPUID idHostCpu = RTMpCpuId();
    5285     CPUMR0SetLApic(pVCpu, idHostCpu);
    5286 #endif
    5287 
    5288     PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
    5289     RTHCPHYS HCPhysCpuPage = pHostCpu->HCPhysMemObj;
    5290 
    5291     /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
    5292     VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    5293     pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
    5294 
    5295     /* Leave VMX Root Mode. */
    5296     VMXDisable();
    5297 
    5298     SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
    5299 
    5300     CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
    5301     CPUMSetHyperEIP(pVCpu, enmOp);
    5302     for (int i = (int)cParams - 1; i >= 0; i--)
    5303         CPUMPushHyper(pVCpu, paParam[i]);
    5304 
    5305     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    5306 
    5307     /* Call the switcher. */
    5308     int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_UOFFSETOF_DYN(VM, aCpus[pVCpu->idCpu].cpum) - RT_UOFFSETOF(VM, cpum));
    5309     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
    5310 
    5311     /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
    5312     /* Make sure the VMX instructions don't cause #UD faults. */
    5313     SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
    5314 
    5315     /* Re-enter VMX Root Mode */
    5316     int rc2 = VMXEnable(HCPhysCpuPage);
    5317     if (RT_FAILURE(rc2))
    5318     {
    5319         SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
    5320         ASMSetFlags(fOldEFlags);
    5321         pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
    5322         return rc2;
    5323     }
    5324 
    5325     rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    5326     AssertRC(rc2);
    5327     pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
    5328     Assert(!(ASMGetFlags() & X86_EFL_IF));
    5329     ASMSetFlags(fOldEFlags);
    5330     return rc;
    5331 }
    5332 
    5333 
    5334 /**
    5335  * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
    5336  * supporting 64-bit guests.
    5337  *
    5338  * @returns VBox status code.
    5339  * @param   fResume     Whether to VMLAUNCH or VMRESUME.
    5340  * @param   pCtx        Pointer to the guest-CPU context.
    5341  * @param   pCache      Pointer to the VMCS cache.
    5342  * @param   pVM         The cross context VM structure.
    5343  * @param   pVCpu       The cross context virtual CPU structure.
    5344  */
    5345 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pCache, PVM pVM, PVMCPU pVCpu)
    5346 {
    5347     NOREF(fResume);
    5348 
    5349     PCHMPHYSCPU    pHostCpu      = hmR0GetCurrentCpu();
    5350     RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;
    5351 
    5352 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
    5353     pCache->uPos = 1;
    5354     pCache->interPD = PGMGetInterPaeCR3(pVM);
    5355     pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
    5356 #endif
    5357 
    5358 #if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
    5359     pCache->TestIn.HCPhysCpuPage = 0;
    5360     pCache->TestIn.HCPhysVmcs    = 0;
    5361     pCache->TestIn.pCache        = 0;
    5362     pCache->TestOut.HCPhysVmcs   = 0;
    5363     pCache->TestOut.pCache       = 0;
    5364     pCache->TestOut.pCtx         = 0;
    5365     pCache->TestOut.eflags       = 0;
    5366 #else
    5367     NOREF(pCache);
    5368 #endif
    5369 
    5370     uint32_t aParam[10];
    5371     aParam[0] = RT_LO_U32(HCPhysCpuPage);                               /* Param 1: VMXON physical address - Lo. */
    5372     aParam[1] = RT_HI_U32(HCPhysCpuPage);                               /* Param 1: VMXON physical address - Hi. */
    5373     aParam[2] = RT_LO_U32(pVCpu->hm.s.vmx.HCPhysVmcs);                  /* Param 2: VMCS physical address - Lo. */
    5374     aParam[3] = RT_HI_U32(pVCpu->hm.s.vmx.HCPhysVmcs);                  /* Param 2: VMCS physical address - Hi. */
    5375     aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsBatchCache);
    5376     aParam[5] = 0;
    5377     aParam[6] = VM_RC_ADDR(pVM, pVM);
    5378     aParam[7] = 0;
    5379     aParam[8] = VM_RC_ADDR(pVM, pVCpu);
    5380     aParam[9] = 0;
    5381 
    5382 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
    5383     pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
    5384     *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
    5385 #endif
    5386     int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
    5387 
    5388 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
    5389     Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
    5390     Assert(pCtx->dr[4] == 10);
    5391     *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
    5392 #endif
    5393 
    5394 #if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
    5395     AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
    5396     AssertMsg(pCache->TestIn.HCPhysVmcs    == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
    5397                                                                            pVCpu->hm.s.vmx.HCPhysVmcs));
    5398     AssertMsg(pCache->TestIn.HCPhysVmcs    == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
    5399                                                                            pCache->TestOut.HCPhysVmcs));
    5400     AssertMsg(pCache->TestIn.pCache        == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
    5401                                                                        pCache->TestOut.pCache));
    5402     AssertMsg(pCache->TestIn.pCache        == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsBatchCache),
    5403               ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsBatchCache)));
    5404     AssertMsg(pCache->TestIn.pCtx          == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
    5405                                                                      pCache->TestOut.pCtx));
    5406     Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
    5407 #endif
    5408     NOREF(pCtx);
    5409     return rc;
    5410 }
    5411 
    5412 
    5413 /**
    54146255 * Initialize the VMCS-Read cache.
    54156256 *
     
    54196260 * (those that have a 32-bit FULL & HIGH part).
    54206261 *
    5421  * @returns VBox status code.
    54226262 * @param   pVCpu       The cross context virtual CPU structure.
    54236263 */
    5424 static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu)
     6264static void hmR0VmxInitVmcsReadCache(PVMCPU pVCpu)
    54256265{
    54266266#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField)           \
     
    54326272    } while (0)
    54336273
    5434     PVMXVMCSBATCHCACHE pCache = &pVCpu->hm.s.vmx.VmcsBatchCache;
     6274    PVMXVMCSCACHE pCache = &pVCpu->hm.s.vmx.VmcsCache;
    54356275    uint32_t cReadFields = 0;
    54366276
     
    54616301    /* Unused natural width guest-state fields. */
    54626302    VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS);
    5463     VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
     6303    VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in nested paging case */
    54646304#endif
    54656305    VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
     
    54986338
    54996339#undef VMXLOCAL_INIT_READ_CACHE_FIELD
    5500     return VINF_SUCCESS;
    55016340}
    55026341
     
    56126451{
    56136452    AssertPtr(pVCpu);
    5614     PVMXVMCSBATCHCACHE pCache = &pVCpu->hm.s.vmx.VmcsBatchCache;
    5615 
    5616     AssertMsgReturn(pCache->Write.cValidEntries < VMX_VMCS_BATCH_CACHE_MAX_ENTRY - 1,
     6453    PVMXVMCSCACHE pCache = &pVCpu->hm.s.vmx.VmcsCache;
     6454
     6455    AssertMsgReturn(pCache->Write.cValidEntries < VMX_VMCS_CACHE_MAX_ENTRY - 1,
    56176456                    ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
    56186457
     
    56396478 *
    56406479 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
    5641  * VMX preemption timer.
     6480 * VMX-preemption timer.
    56426481 *
    56436482 * @returns VBox status code.
    56446483 * @param   pVCpu           The cross context virtual CPU structure.
     6484 * @param   pVmxTransient   The VMX-transient structure.
    56456485 *
    56466486 * @remarks No-long-jump zone!!!
    56476487 */
    5648 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
    5649 {
    5650     bool     fOffsettedTsc;
    5651     bool     fParavirtTsc;
    5652     PVM      pVM = pVCpu->CTX_SUFF(pVM);
    5653     uint64_t uTscOffset;
     6488static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     6489{
     6490    bool         fOffsettedTsc;
     6491    bool         fParavirtTsc;
     6492    uint64_t     uTscOffset;
     6493    PVM          pVM = pVCpu->CTX_SUFF(pVM);
      6494    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     6495
    56546496    if (pVM->hm.s.vmx.fUsePreemptTimer)
    56556497    {
     
    56626504        cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
    56636505
     6506        /** @todo r=ramshankar: We need to find a way to integrate nested-guest
      6507         *        preemption timers here. We probably need to clamp the preemption timer
      6508         *        after converting the timer value to the host.
    56646509        uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
    56656510        int rc = VMXWriteVmcs32(VMX_VMCS32_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
     
    56806525    }
    56816526
    5682     uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
     6527    uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
    56836528    if (   fOffsettedTsc
    56846529        && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
    56856530    {
    5686         if (pVCpu->hm.s.vmx.Ctls.u64TscOffset != uTscOffset)
     6531        if (pVmxTransient->fIsNestedGuest)
     6532            uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
     6533        if (pVmcsInfo->u64TscOffset != uTscOffset)
    56876534        {
    56886535            int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
    56896536            AssertRC(rc);
    5690             pVCpu->hm.s.vmx.Ctls.u64TscOffset = uTscOffset;
     6537            pVmcsInfo->u64TscOffset = uTscOffset;
    56916538        }
    56926539
     
    56966543            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
    56976544            AssertRC(rc);
    5698             pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
     6545            pVmcsInfo->u32ProcCtls = uProcCtls;
    56996546        }
    57006547        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
     
    57086555            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
    57096556            AssertRC(rc);
    5710             pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
     6557            pVmcsInfo->u32ProcCtls = uProcCtls;
    57116558        }
    57126559        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
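
With a nested-guest active, the offset committed to VMX_VMCS64_CTRL_TSC_OFFSET_FULL above is the guest (L1) offset with the nested-guest (L2) offset applied via CPUMApplyNestedGuestTscOffset(). Assuming both levels use TSC offsetting, the TSC an L2 guest reads works out as follows (illustrative helper, names hypothetical):

    /* RDTSC in a nested-guest: host TSC plus both offsets. */
    static uint64_t vmxNestedGuestTscSketch(uint64_t uHostTsc, uint64_t uL1TscOffset, uint64_t uL2TscOffset)
    {
        return uHostTsc + uL1TscOffset + uL2TscOffset;
    }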
     
    57216568 * @returns The IEM exception flags.
    57226569 * @param   uVector         The event vector.
    5723  * @param   uVmxVectorType  The VMX event type.
     6570 * @param   uVmxEventType   The VMX event type.
    57246571 *
    57256572 * @remarks This function currently only constructs flags required for
     
    57276574 *          and CR2 aspects of an exception are not included).
    57286575 */
    5729 static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxVectorType)
     6576static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
    57306577{
    57316578    uint32_t fIemXcptFlags;
    5732     switch (uVmxVectorType)
     6579    switch (uVmxEventType)
    57336580    {
    57346581        case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
     
    57556602            {
    57566603                fIemXcptFlags = 0;
    5757                 AssertMsgFailed(("Unexpected vector for software int. uVector=%#x", uVector));
     6604                AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
    57586605            }
    57596606            break;
     
    57666613        default:
    57676614            fIemXcptFlags = 0;
    5768             AssertMsgFailed(("Unexpected vector type! uVmxVectorType=%#x uVector=%#x", uVmxVectorType, uVector));
     6615            AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
    57696616            break;
    57706617    }
     
    57846631 * @param   GCPtrFaultAddress   The fault-address (CR2) in case it's a
    57856632 *                              page-fault.
    5786  *
    5787  * @remarks Statistics counter assumes this is a guest event being injected or
    5788  *          re-injected into the guest, i.e. 'StatInjectPendingReflect' is
    5789  *          always incremented.
    57906633 */
    57916634DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
     
    61106953 * @param   uExitReason     The VM-exit reason.
    61116954 *
    6112  * @todo    NstVmx: Document other error codes when VM-exit is implemented.
     6955 * @todo    NSTVMX: Document other error codes when VM-exit is implemented.
    61136956 * @remarks No-long-jump zone!!!
    61146957 */
     
    61807023
    61817024
    6182 /**
    6183  * Handle a condition that occurred while delivering an event through the guest
    6184  * IDT.
    6185  *
    6186  * @returns Strict VBox status code (i.e. informational status codes too).
    6187  * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
    6188  * @retval  VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
    6189  *          to continue execution of the guest which will delivery the \#DF.
    6190  * @retval  VINF_EM_RESET if we detected a triple-fault condition.
    6191  * @retval  VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
    6192  *
    6193  * @param   pVCpu           The cross context virtual CPU structure.
    6194  * @param   pVmxTransient   Pointer to the VMX transient structure.
    6195  *
    6196  * @remarks No-long-jump zone!!!
    6197  */
    6198 static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    6199 {
    6200     uint32_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    6201 
    6202     int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    6203     rc2    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    6204     AssertRCReturn(rc2, rc2);
    6205 
    6206     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    6207     if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
    6208     {
    6209         uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
    6210         uint32_t const uIdtVector     = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
    6211 
    6212         /*
    6213          * If the event was a software interrupt (generated with INT n) or a software exception
    6214          * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
    6215          * can handle the VM-exit and continue guest execution which will re-execute the
    6216          * instruction rather than re-injecting the exception, as that can cause premature
    6217          * trips to ring-3 before injection and involve TRPM which currently has no way of
    6218          * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
    6219          * the problem).
    6220          */
    6221         IEMXCPTRAISE     enmRaise;
    6222         IEMXCPTRAISEINFO fRaiseInfo;
    6223         if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    6224             || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
    6225             || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
    6226         {
    6227             enmRaise   = IEMXCPTRAISE_REEXEC_INSTR;
    6228             fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    6229         }
    6230         else if (VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
    6231         {
    6232             uint32_t const uExitVectorType  = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uExitIntInfo);
    6233             uint32_t const fIdtVectorFlags  = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
    6234             uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
    6235             /** @todo Make AssertMsgReturn as just AssertMsg later. */
    6236             AssertMsgReturn(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT,
    6237                             ("hmR0VmxCheckExitDueToEventDelivery: Unexpected VM-exit interruption info. %#x!\n",
    6238                              uExitVectorType), VERR_VMX_IPE_5);
    6239 
    6240             enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
    6241 
    6242             /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */
    6243             if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
    6244             {
    6245                 pVmxTransient->fVectoringPF = true;
    6246                 enmRaise = IEMXCPTRAISE_PREV_EVENT;
    6247             }
    6248         }
    6249         else
    6250         {
    6251             /*
    6252              * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
    6253              * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
    6254              * It is sufficient to reflect the original event to the guest after handling the VM-exit.
    6255              */
    6256             Assert(   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
    6257                    || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
    6258                    || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
    6259             enmRaise   = IEMXCPTRAISE_PREV_EVENT;
    6260             fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    6261         }
    6262 
    6263         /*
    6264          * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
    6265          * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
    6266          * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
    6267          * subsequent VM-entry would fail.
    6268          *
    6269          * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
    6270          */
    6271         if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
    6272             && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
    6273             && (   enmRaise   == IEMXCPTRAISE_PREV_EVENT
    6274                 || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF))
    6275             && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
    6276         {
    6277             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6278         }
    6279 
    6280         switch (enmRaise)
    6281         {
    6282             case IEMXCPTRAISE_CURRENT_XCPT:
    6283             {
    6284                 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n",
    6285                           pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));
    6286                 Assert(rcStrict == VINF_SUCCESS);
    6287                 break;
    6288             }
    6289 
    6290             case IEMXCPTRAISE_PREV_EVENT:
    6291             {
    6292                 uint32_t u32ErrCode;
    6293                 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
    6294                 {
    6295                     rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    6296                     AssertRCReturn(rc2, rc2);
    6297                     u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
    6298                 }
    6299                 else
    6300                     u32ErrCode = 0;
    6301 
    6302                 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */
    6303                 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
    6304                 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
    6305                                        0 /* cbInstr */, u32ErrCode, pVCpu->cpum.GstCtx.cr2);
    6306 
    6307                 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
    6308                           pVCpu->hm.s.Event.u32ErrCode));
    6309                 Assert(rcStrict == VINF_SUCCESS);
    6310                 break;
    6311             }
    6312 
    6313             case IEMXCPTRAISE_REEXEC_INSTR:
    6314                 Assert(rcStrict == VINF_SUCCESS);
    6315                 break;
    6316 
    6317             case IEMXCPTRAISE_DOUBLE_FAULT:
    6318             {
    6319                 /*
    6320                  * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
    6321                  * second #PF as a guest #PF (and not a shadow #PF), and it needs to be converted into a #DF.
    6322                  */
    6323                 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
    6324                 {
    6325                     pVmxTransient->fVectoringDoublePF = true;
    6326                     Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
    6327                           pVCpu->cpum.GstCtx.cr2));
    6328                     rcStrict = VINF_SUCCESS;
    6329                 }
    6330                 else
    6331                 {
    6332                     STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
    6333                     hmR0VmxSetPendingXcptDF(pVCpu);
    6334                     Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
    6335                               uIdtVector, uExitVector));
    6336                     rcStrict = VINF_HM_DOUBLE_FAULT;
    6337                 }
    6338                 break;
    6339             }
    6340 
    6341             case IEMXCPTRAISE_TRIPLE_FAULT:
    6342             {
    6343                 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
    6344                 rcStrict = VINF_EM_RESET;
    6345                 break;
    6346             }
    6347 
    6348             case IEMXCPTRAISE_CPU_HANG:
    6349             {
    6350                 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
    6351                 rcStrict = VERR_EM_GUEST_CPU_HANG;
    6352                 break;
    6353             }
    6354 
    6355             default:
    6356             {
    6357                 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
    6358                 rcStrict = VERR_VMX_IPE_2;
    6359                 break;
    6360             }
    6361         }
    6362     }
    6363     else if (   VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
    6364              && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
    6365              && uExitVector != X86_XCPT_DF
    6366              && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
    6367     {
    6368         /*
    6369          * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
    6370          * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
    6371          * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
    6372          */
    6373         if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    6374         {
    6375             Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n",
    6376                       VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
    6377             VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6378         }
    6379     }
    6380 
    6381     Assert(   rcStrict == VINF_SUCCESS  || rcStrict == VINF_HM_DOUBLE_FAULT
    6382            || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
    6383     return rcStrict;
    6384 }
    6385 
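For orientation while reading the reflection logic above: the IDT-vectoring information and the VM-exit interruption information fields share one 32-bit layout (Intel SDM Vol. 3C). A minimal decoding sketch, using illustrative names rather than the VMX_IDT_VECTORING_INFO_* / VMX_EXIT_INT_INFO_* macros the code relies on, and assuming IPRT's RT_BOOL/RT_BIT_32:

    #include <iprt/cdefs.h>
    #include <iprt/types.h>

    /* Illustrative decoder; bit positions per the Intel SDM event information
       format. Type values: 0=ext int, 2=NMI, 3=HW xcpt, 4=SW int,
       5=priv SW xcpt (INT1), 6=SW xcpt (INT3/INTO). */
    typedef struct EVENTINFOSKETCH
    {
        uint8_t uVector;          /* Bits  7:0  - vector number. */
        uint8_t uType;            /* Bits 10:8  - interruption type. */
        bool    fErrCodeValid;    /* Bit  11    - error code valid. */
        bool    fNmiUnblockIret;  /* Bit  12    - NMI unblocking due to IRET. */
        bool    fValid;           /* Bit  31    - whole field valid. */
    } EVENTINFOSKETCH;

    static EVENTINFOSKETCH DecodeEventInfo(uint32_t u32Info)
    {
        EVENTINFOSKETCH Info;
        Info.uVector         = (uint8_t)( u32Info       & 0xff);
        Info.uType           = (uint8_t)((u32Info >> 8) & 0x7);
        Info.fErrCodeValid   = RT_BOOL(u32Info & RT_BIT_32(11));
        Info.fNmiUnblockIret = RT_BOOL(u32Info & RT_BIT_32(12));
        Info.fValid          = RT_BOOL(u32Info & RT_BIT_32(31));
        return Info;
    }

The fNmiUnblockIret bit is exactly what the IRET/NMI re-blocking branch above keys off before setting VMCPU_FF_BLOCK_NMIS.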
    6386 
    6387 /**
    6388  * Imports a guest segment register from the current VMCS into
    6389  * the guest-CPU context.
    6390  *
    6391  * @returns VBox status code.
    6392  * @param   pVCpu       The cross context virtual CPU structure.
    6393  * @param   idxSel      Index of the selector in the VMCS.
    6394  * @param   idxLimit    Index of the segment limit in the VMCS.
    6395  * @param   idxBase     Index of the segment base in the VMCS.
    6396  * @param   idxAccess   Index of the access rights of the segment in the VMCS.
    6397  * @param   pSelReg     Pointer to the segment selector.
    6398  *
    6399  * @remarks Called with interrupts and/or preemption disabled, try not to assert and
    6400  *          do not log!
    6401  *
    6402  * @remarks Never call this function directly!!! Use the
    6403  *          HMVMX_IMPORT_SREG() macro as that takes care
    6404  *          of whether to read from the VMCS cache or not.
    6405  */
    6406 static int hmR0VmxImportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
    6407                                         PCPUMSELREG pSelReg)
    6408 {
    6409     NOREF(pVCpu);
    6410 
    6411     uint32_t u32Sel;
    6412     uint32_t u32Limit;
    6413     uint32_t u32Attr;
    6414     uint64_t u64Base;
    6415     int rc = VMXReadVmcs32(idxSel, &u32Sel);
    6416     rc    |= VMXReadVmcs32(idxLimit, &u32Limit);
    6417     rc    |= VMXReadVmcs32(idxAccess, &u32Attr);
    6418     rc    |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base);
    6419     AssertRCReturn(rc, rc);
    6420 
    6421     pSelReg->Sel      = (uint16_t)u32Sel;
    6422     pSelReg->ValidSel = (uint16_t)u32Sel;
    6423     pSelReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    6424     pSelReg->u32Limit = u32Limit;
    6425     pSelReg->u64Base  = u64Base;
    6426     pSelReg->Attr.u   = u32Attr;
     7025static void hmR0VmxFixUnusableSegRegAttr(PVMCPU pVCpu, PCPUMSELREG pSelReg, uint32_t idxSel)
     7026{
     7027    Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
    64277028
    64287029    /*
     
    64447045     * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
    64457046     */
    6446     if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
    6447     {
    6448         Assert(idxSel != VMX_VMCS16_GUEST_TR_SEL);          /* TR is the only selector that can never be unusable. */
    6449 
    6450         /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
    6451         pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L    | X86DESCATTR_D  | X86DESCATTR_G
    6452                          | X86DESCATTR_DPL      | X86DESCATTR_TYPE | X86DESCATTR_DT;
    64537047#ifdef VBOX_STRICT
    6454         VMMRZCallRing3Disable(pVCpu);
    6455         Log4Func(("Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Sel, pSelReg->Attr.u));
     7048    uint32_t const uAttr = pSelReg->Attr.u;
     7049#endif
     7050
     7051    /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
     7052    pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L    | X86DESCATTR_D  | X86DESCATTR_G
     7053                     | X86DESCATTR_DPL      | X86DESCATTR_TYPE | X86DESCATTR_DT;
     7054
     7055#ifdef VBOX_STRICT
     7056    VMMRZCallRing3Disable(pVCpu);
     7057    Log4Func(("Unusable %#x: sel=%#x attr=%#x -> %#x\n", idxSel, pSelReg->Sel, uAttr, pSelReg->Attr.u));
    64567058# ifdef DEBUG_bird
    6457         AssertMsg((u32Attr & ~X86DESCATTR_P) == pSelReg->Attr.u,
    6458                   ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
    6459                    idxSel, u32Sel, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
     7059    AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
     7060              ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
     7061               idxSel, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
    64607062# endif
    6461         VMMRZCallRing3Enable(pVCpu);
     7063    VMMRZCallRing3Enable(pVCpu);
     7064#else
     7065    RT_NOREF2(pVCpu, idxSel);
    64627066#endif
    6463     }
     7067}
     7068
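A quick worked example of the masking in hmR0VmxFixUnusableSegRegAttr() above, with a hypothetical raw access-rights value (the X86DESCATTR_* masks are VBox's encoding of the VMX access-rights format):

    /* Hypothetical unusable-segment attributes as saved by VT-x:
       unusable (bit 16) + present + limit-high nibble + S + type=3. */
    uint32_t uAttr = 0x10f93;
    uAttr &= X86DESCATTR_UNUSABLE | X86DESCATTR_L    | X86DESCATTR_D  | X86DESCATTR_G
           | X86DESCATTR_DPL      | X86DESCATTR_TYPE | X86DESCATTR_DT;
    /* uAttr == 0x10013: P (0x80) and the limit-high nibble (0xf00) are
       cleared; the unusable, type and S bits remain for the switcher
       code to key off. */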
     7069
     7070/**
     7071 * Imports a guest segment register from the current VMCS into the guest-CPU
     7072 * context.
     7073 *
     7074 * @returns VBox status code.
     7075 * @param   pVCpu       The cross context virtual CPU structure.
     7076 * @param   iSegReg     The segment register number (X86_SREG_XXX).
     7077 *
     7078 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
     7079 *          do not log!
     7080 */
     7081static int hmR0VmxImportGuestSegReg(PVMCPU pVCpu, uint8_t iSegReg)
     7082{
     7083    Assert(iSegReg < X86_SREG_COUNT);
     7084
     7085    uint32_t const idxSel   = g_aVmcsSegSel[iSegReg];
     7086    uint32_t const idxLimit = g_aVmcsSegLimit[iSegReg];
     7087    uint32_t const idxAttr  = g_aVmcsSegAttr[iSegReg];
     7088#ifdef VMX_USE_CACHED_VMCS_ACCESSES
     7089    uint32_t const idxBase  = g_aVmcsCacheSegBase[iSegReg];
     7090#else
     7091    uint32_t const idxBase  = g_aVmcsSegBase[iSegReg];
     7092#endif
     7093    uint64_t u64Base;
     7094    uint32_t u32Sel, u32Limit, u32Attr;
     7095    int rc = VMXReadVmcs32(idxSel,   &u32Sel);
     7096    rc    |= VMXReadVmcs32(idxLimit, &u32Limit);
     7097    rc    |= VMXReadVmcs32(idxAttr,  &u32Attr);
     7098    rc    |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base);
     7099    if (RT_SUCCESS(rc))
     7100    {
     7101        PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
     7102        pSelReg->Sel      = u32Sel;
     7103        pSelReg->ValidSel = u32Sel;
     7104        pSelReg->fFlags   = CPUMSELREG_FLAGS_VALID;
     7105        pSelReg->u32Limit = u32Limit;
     7106        pSelReg->u64Base  = u64Base;
     7107        pSelReg->Attr.u   = u32Attr;
     7108        if (u32Attr & X86DESCATTR_UNUSABLE)
     7109            hmR0VmxFixUnusableSegRegAttr(pVCpu, pSelReg, idxSel);
     7110    }
     7111    return rc;
     7112}
     7113
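The g_aVmcsSeg* tables indexed above are defined earlier in the file; a plausible shape for two of them, shown here only for orientation (indexed by X86_SREG_ES..X86_SREG_GS, field names per the VMX headers):

    /* Illustrative only - the real tables live near the top of HMVMXR0.cpp. */
    static const uint32_t g_aVmcsSegSel[X86_SREG_COUNT] =
    {
        VMX_VMCS16_GUEST_ES_SEL, VMX_VMCS16_GUEST_CS_SEL, VMX_VMCS16_GUEST_SS_SEL,
        VMX_VMCS16_GUEST_DS_SEL, VMX_VMCS16_GUEST_FS_SEL, VMX_VMCS16_GUEST_GS_SEL
    };
    static const uint32_t g_aVmcsSegBase[X86_SREG_COUNT] =
    {
        VMX_VMCS_GUEST_ES_BASE, VMX_VMCS_GUEST_CS_BASE, VMX_VMCS_GUEST_SS_BASE,
        VMX_VMCS_GUEST_DS_BASE, VMX_VMCS_GUEST_FS_BASE, VMX_VMCS_GUEST_GS_BASE
    };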
     7114
     7115/**
     7116 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
     7117 *
     7118 * @returns VBox status code.
     7119 * @param   pVCpu       The cross context virtual CPU structure.
     7121 *
     7122 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
     7123 *          do not log!
     7124 */
     7125static int hmR0VmxImportGuestLdtr(PVMCPU pVCpu)
     7126{
     7127    uint64_t u64Base;
     7128    uint32_t u32Sel, u32Limit, u32Attr;
     7129    int rc = VMXReadVmcs32(VMX_VMCS16_GUEST_LDTR_SEL,           &u32Sel);
     7130    rc    |= VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT,         &u32Limit);
     7131    rc    |= VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr);
     7132    rc    |= VMXReadVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE,          &u64Base);
     7133
     7134    if (RT_SUCCESS(rc))
     7135    {
     7136        pVCpu->cpum.GstCtx.ldtr.Sel      = u32Sel;
     7137        pVCpu->cpum.GstCtx.ldtr.ValidSel = u32Sel;
     7138        pVCpu->cpum.GstCtx.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
     7139        pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
     7140        pVCpu->cpum.GstCtx.ldtr.u64Base  = u64Base;
     7141        pVCpu->cpum.GstCtx.ldtr.Attr.u   = u32Attr;
     7142        if (u32Attr & X86DESCATTR_UNUSABLE)
     7143            hmR0VmxFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, VMX_VMCS16_GUEST_LDTR_SEL);
     7144    }
     7145    return rc;
     7146}
     7147
     7148
     7149/**
     7150 * Imports the guest TR from the current VMCS into the guest-CPU context.
     7151 *
     7152 * @returns VBox status code.
     7153 * @param   pVCpu       The cross context virtual CPU structure.
     7155 *
     7156 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
     7157 *          do not log!
     7158 */
     7159static int hmR0VmxImportGuestTr(PVMCPU pVCpu)
     7160{
     7161    uint32_t u32Sel, u32Limit, u32Attr;
     7162    uint64_t u64Base;
     7163    int rc = VMXReadVmcs32(VMX_VMCS16_GUEST_TR_SEL,           &u32Sel);
     7164    rc    |= VMXReadVmcs32(VMX_VMCS32_GUEST_TR_LIMIT,         &u32Limit);
     7165    rc    |= VMXReadVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr);
     7166    rc    |= VMXReadVmcsGstN(VMX_VMCS_GUEST_TR_BASE,          &u64Base);
     7167    AssertRCReturn(rc, rc);
     7168
     7169    pVCpu->cpum.GstCtx.tr.Sel      = u32Sel;
     7170    pVCpu->cpum.GstCtx.tr.ValidSel = u32Sel;
     7171    pVCpu->cpum.GstCtx.tr.fFlags   = CPUMSELREG_FLAGS_VALID;
     7172    pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
     7173    pVCpu->cpum.GstCtx.tr.u64Base  = u64Base;
     7174    pVCpu->cpum.GstCtx.tr.Attr.u   = u32Attr;
     7175    /* TR is the only selector that can never be unusable. */
     7176    Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
    64647177    return VINF_SUCCESS;
    64657178}
     
    64767189 *          instead!!!
    64777190 */
    6478 DECLINLINE(int) hmR0VmxImportGuestRip(PVMCPU pVCpu)
     7191static int hmR0VmxImportGuestRip(PVMCPU pVCpu)
    64797192{
    64807193    uint64_t u64Val;
     
    64997212 *
    65007213 * @returns VBox status code.
    6501  * @param   pVCpu   The cross context virtual CPU structure.
     7214 * @param   pVCpu       The cross context virtual CPU structure.
     7215 * @param   pVmcsInfo   The VMCS info. object.
    65027216 *
    65037217 * @remarks Called with interrupts and/or preemption disabled, should not assert!
     
    65057219 *          instead!!!
    65067220 */
    6507 DECLINLINE(int) hmR0VmxImportGuestRFlags(PVMCPU pVCpu)
     7221static int hmR0VmxImportGuestRFlags(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
    65087222{
    65097223    uint32_t u32Val;
     
    65177231
    65187232            /* Restore eflags for real-on-v86-mode hack. */
    6519             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     7233            if (pVmcsInfo->RealMode.fRealOnV86Active)
    65207234            {
    65217235                pCtx->eflags.Bits.u1VM   = 0;
    6522                 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
     7236                pCtx->eflags.Bits.u2IOPL = pVmcsInfo->RealMode.Eflags.Bits.u2IOPL;
    65237237            }
    65247238        }
     
    65357249 *
    65367250 * @returns VBox status code.
    6537  * @param   pVCpu   The cross context virtual CPU structure.
     7251 * @param   pVCpu       The cross context virtual CPU structure.
     7252 * @param   pVmcsInfo   The VMCS info. object.
    65387253 *
    65397254 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
     
    65427257 *          instead!!!
    65437258 */
    6544 DECLINLINE(int) hmR0VmxImportGuestIntrState(PVMCPU pVCpu)
     7259static int hmR0VmxImportGuestIntrState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
    65457260{
    65467261    uint32_t u32Val;
    6547     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    65487262    int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32Val);
    6549     AssertRCReturn(rc, rc);
    6550 
    6551     /*
    6552      * We additionally have a requirement to import RIP and RFLAGS, depending on whether
    6553      * we might need them while evaluating pending events before VM-entry.
    6554      */
    6555     if (!u32Val)
    6556     {
    6557         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    6558         {
     7263    if (RT_SUCCESS(rc))
     7264    {
     7265        if (!u32Val)
     7266        {
     7267            if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     7268            {
     7269                rc  = hmR0VmxImportGuestRip(pVCpu);
     7270                rc |= hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
     7271                AssertRCReturn(rc, rc);
     7272                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     7273            }
     7274
     7275            if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     7276                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     7277        }
     7278        else
     7279        {
     7280            /*
     7281             * We must import RIP here to set our EM interrupt-inhibited state.
     7282             * We also import RFLAGS as our code that evaluates pending interrupts
     7283             * before VM-entry requires it.
     7284             */
    65597285            rc  = hmR0VmxImportGuestRip(pVCpu);
    6560             rc |= hmR0VmxImportGuestRFlags(pVCpu);
    6561             AssertRCReturn(rc, rc);
    6562             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    6563         }
    6564 
    6565         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    6566             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6567     }
    6568     else
    6569     {
    6570         rc  = hmR0VmxImportGuestRip(pVCpu);
    6571         rc |= hmR0VmxImportGuestRFlags(pVCpu);
    6572         AssertRCReturn(rc, rc);
    6573 
    6574         if (u32Val & (  VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
    6575                       | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
    6576         {
    6577             EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
    6578         }
    6579         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    6580             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    6581 
    6582         if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
    6583         {
    6584             if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    6585                 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6586         }
    6587         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    6588             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6589     }
    6590 
    6591     return VINF_SUCCESS;
     7286            rc |= hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
     7287            if (RT_SUCCESS(rc))
     7288            {
     7289                if (u32Val & (  VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
     7290                              | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
     7291                {
     7292                    EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
     7293                }
     7294                else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     7295                    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     7296
     7297                if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
     7298                {
     7299                    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     7300                        VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     7301                }
     7302                else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     7303                    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     7304            }
     7305        }
     7306    }
     7307    return rc;
    65927308}
    65937309
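A condensed sketch of the interruptibility-state mapping performed above, assuming RIP/RFLAGS have already been imported (the VMX_VMCS_GUEST_INT_STATE_* bits are the architectural positions: STI=bit 0, MOV SS=bit 1, SMI=bit 2, NMI=bit 3 per Intel SDM Vol. 3C, 24.4.2; the original code additionally guards each clear with an IS_SET check):

    /* Illustrative; condenses the logic of hmR0VmxImportGuestIntrState(). */
    static void sketchUpdateInhibitFlags(PVMCPU pVCpu, uint32_t fIntrState)
    {
        if (fIntrState & (  VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
                          | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
            EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip); /* inhibit until RIP moves */
        else
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

        if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
        else
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    }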
     
    65977313 *
    65987314 * @returns VBox status code.
    6599  * @param   pVCpu   The cross context virtual CPU structure.
    6600  * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
    6601  */
    6602 static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat)
     7315 * @param   pVCpu       The cross context virtual CPU structure.
     7316 * @param   pVmcsInfo   The VMCS info. object.
     7317 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
     7318 */
     7319static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
    66037320{
    66047321#define VMXLOCAL_BREAK_RC(a_rc) \
    6605     if (RT_FAILURE(a_rc))       \
     7322    if (RT_SUCCESS(a_rc))       \
     7323    { }                         \
     7324    else                        \
    66067325        break
    66077326
     
    66127331    uint32_t u32Val;
    66137332
    6614     Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));
    6615     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
     7333    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
    66167334
    66177335    /*
     
    66347352            if (fWhat & CPUMCTX_EXTRN_RFLAGS)
    66357353            {
    6636                 rc = hmR0VmxImportGuestRFlags(pVCpu);
     7354                rc = hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
    66377355                VMXLOCAL_BREAK_RC(rc);
    66387356            }
     
    66407358            if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)
    66417359            {
    6642                 rc = hmR0VmxImportGuestIntrState(pVCpu);
     7360                rc = hmR0VmxImportGuestIntrState(pVCpu, pVmcsInfo);
    66437361                VMXLOCAL_BREAK_RC(rc);
    66447362            }
     
    66537371            if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    66547372            {
     7373                bool const fRealOnV86Active = pVmcsInfo->RealMode.fRealOnV86Active;
    66557374                if (fWhat & CPUMCTX_EXTRN_CS)
    66567375                {
    6657                     rc  = HMVMX_IMPORT_SREG(CS, &pCtx->cs);
     7376                    rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_CS);
    66587377                    rc |= hmR0VmxImportGuestRip(pVCpu);
    6659                     VMXLOCAL_BREAK_RC(rc);
    6660                     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    6661                         pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
    6662                     EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true);
     7378                    if (fRealOnV86Active)
     7379                        pCtx->cs.Attr.u = pVmcsInfo->RealMode.AttrCS.u;
     7380                    EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
    66637381                }
    66647382                if (fWhat & CPUMCTX_EXTRN_SS)
    66657383                {
    6666                     rc = HMVMX_IMPORT_SREG(SS, &pCtx->ss);
    6667                     VMXLOCAL_BREAK_RC(rc);
    6668                     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    6669                         pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
     7384                    rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_SS);
     7385                    if (fRealOnV86Active)
     7386                        pCtx->ss.Attr.u = pVmcsInfo->RealMode.AttrSS.u;
    66707387                }
    66717388                if (fWhat & CPUMCTX_EXTRN_DS)
    66727389                {
    6673                     rc = HMVMX_IMPORT_SREG(DS, &pCtx->ds);
    6674                     VMXLOCAL_BREAK_RC(rc);
    6675                     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    6676                         pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
     7390                    rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_DS);
     7391                    if (fRealOnV86Active)
     7392                        pCtx->ds.Attr.u = pVmcsInfo->RealMode.AttrDS.u;
    66777393                }
    66787394                if (fWhat & CPUMCTX_EXTRN_ES)
    66797395                {
    6680                     rc = HMVMX_IMPORT_SREG(ES, &pCtx->es);
    6681                     VMXLOCAL_BREAK_RC(rc);
    6682                     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    6683                         pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
     7396                    rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_ES);
     7397                    if (fRealOnV86Active)
     7398                        pCtx->es.Attr.u = pVmcsInfo->RealMode.AttrES.u;
    66847399                }
    6685                if (fWhat & CPUMCTX_EXTRN_FS)
    6686                {
    6687                     rc = HMVMX_IMPORT_SREG(FS, &pCtx->fs);
    6688                     VMXLOCAL_BREAK_RC(rc);
    6689                     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    6690                         pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
    6691                }
     7400                if (fWhat & CPUMCTX_EXTRN_FS)
     7401                {
     7402                    rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_FS);
     7403                    if (fRealOnV86Active)
     7404                        pCtx->fs.Attr.u = pVmcsInfo->RealMode.AttrFS.u;
     7405                }
    66927406                if (fWhat & CPUMCTX_EXTRN_GS)
    66937407                {
    6694                     rc = HMVMX_IMPORT_SREG(GS, &pCtx->gs);
    6695                     VMXLOCAL_BREAK_RC(rc);
    6696                     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    6697                         pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
     7408                    rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_GS);
     7409                    if (fRealOnV86Active)
     7410                        pCtx->gs.Attr.u = pVmcsInfo->RealMode.AttrGS.u;
    66987411                }
     7412                VMXLOCAL_BREAK_RC(rc);
    66997413            }
    67007414
     
    67027416            {
    67037417                if (fWhat & CPUMCTX_EXTRN_LDTR)
    6704                 {
    6705                     rc = HMVMX_IMPORT_SREG(LDTR, &pCtx->ldtr);
    6706                     VMXLOCAL_BREAK_RC(rc);
    6707                 }
     7418                    rc |= hmR0VmxImportGuestLdtr(pVCpu);
    67087419
    67097420                if (fWhat & CPUMCTX_EXTRN_GDTR)
    67107421                {
    6711                     rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  &u64Val);
     7422                    rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  &u64Val);
    67127423                    rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
    6713                     VMXLOCAL_BREAK_RC(rc);
    67147424                    pCtx->gdtr.pGdt  = u64Val;
    67157425                    pCtx->gdtr.cbGdt = u32Val;
     
    67197429                if (fWhat & CPUMCTX_EXTRN_IDTR)
    67207430                {
    6721                     rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  &u64Val);
     7431                    rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  &u64Val);
    67227432                    rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
    6723                     VMXLOCAL_BREAK_RC(rc);
    67247433                    pCtx->idtr.pIdt  = u64Val;
    67257434                    pCtx->idtr.cbIdt = u32Val;
     
    67297438                if (fWhat & CPUMCTX_EXTRN_TR)
    67307439                {
    6731                     /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */
    6732                     if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    6733                     {
    6734                         rc = HMVMX_IMPORT_SREG(TR, &pCtx->tr);
    6735                         VMXLOCAL_BREAK_RC(rc);
    6736                     }
     7440                    /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
     7441                       so we don't need to import that one. */
     7442                    if (!pVmcsInfo->RealMode.fRealOnV86Active)
     7443                        rc |= hmR0VmxImportGuestTr(pVCpu);
     7444                }
     7445                VMXLOCAL_BREAK_RC(rc);
     7446            }
     7447
     7448            if (fWhat & CPUMCTX_EXTRN_DR7)
     7449            {
     7450                if (!pVCpu->hm.s.fUsingHyperDR7)
     7451                {
     7452                    /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
     7453                    rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
     7454                    VMXLOCAL_BREAK_RC(rc);
     7455                    pCtx->dr[7] = u32Val;
    67377456                }
    67387457            }
     
    67737492                )
    67747493            {
    6775                 PCVMXAUTOMSR   pMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    6776                 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;
     7494                PCVMXAUTOMSR   pMsr  = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
     7495                uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
     7496                Assert(cMsrs == 0 || pMsr != NULL);
    67777497                for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
    67787498                {
     
    67807500                    {
    67817501#if HC_ARCH_BITS == 32
    6782                         case MSR_K8_LSTAR:          pCtx->msrLSTAR        = pMsr->u64Value;         break;
    6783                         case MSR_K6_STAR:           pCtx->msrSTAR         = pMsr->u64Value;         break;
    6784                         case MSR_K8_SF_MASK:        pCtx->msrSFMASK       = pMsr->u64Value;         break;
    6785                         case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value;         break;
     7502                        case MSR_K8_LSTAR:          pCtx->msrLSTAR        = pMsr->u64Value;          break;
     7503                        case MSR_K6_STAR:           pCtx->msrSTAR         = pMsr->u64Value;          break;
     7504                        case MSR_K8_SF_MASK:        pCtx->msrSFMASK       = pMsr->u64Value;          break;
     7505                        case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value;          break;
    67867506#endif
    6787                         case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value);    break;
    6788                         case MSR_K8_TSC_AUX:        CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);      break;
    6789                         case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit */     break;
     7507                        case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value);     break;
     7508                        case MSR_K8_TSC_AUX:        CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);       break;
     7509                        case MSR_K6_EFER:           /* Can't be changed without causing a VM-exit */ break;
     7510
    67907511                        default:
    67917512                        {
    67927513                            pVCpu->hm.s.u32HMError = pMsr->u32Msr;
    67937514                            ASMSetFlags(fEFlags);
    6794                             AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr,
     7515                            AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr,
    67957516                                             cMsrs));
    67967517                            return VERR_HM_UNEXPECTED_LD_ST_MSR;
     
    68007521            }
    68017522
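The guest MSR-store area walked above is an array of 16-byte entries in the format VT-x defines (Intel SDM Vol. 3C, 24.7.2); a sketch of what each VMXAUTOMSR entry is expected to carry:

    #include <iprt/assert.h>
    #include <iprt/types.h>

    /* One auto-load/store MSR area entry (sketch; mirrors the VT-x format). */
    typedef struct VMXAUTOMSRSKETCH
    {
        uint32_t u32Msr;        /* MSR index, e.g. MSR_K8_TSC_AUX.    */
        uint32_t u32Reserved;   /* Must be zero.                      */
        uint64_t u64Value;      /* Value saved on VM-exit / loaded on VM-entry. */
    } VMXAUTOMSRSKETCH;
    AssertCompile(sizeof(VMXAUTOMSRSKETCH) == 16);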
    6802             if (fWhat & CPUMCTX_EXTRN_DR7)
    6803             {
    6804                 if (!pVCpu->hm.s.fUsingHyperDR7)
    6805                 {
    6806                     /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
    6807                     rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
    6808                     VMXLOCAL_BREAK_RC(rc);
    6809                     pCtx->dr[7] = u32Val;
    6810                 }
    6811             }
    6812 
    68137523            if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    68147524            {
    6815                 uint32_t u32Shadow;
     7525                uint64_t u64Shadow;
    68167526                if (fWhat & CPUMCTX_EXTRN_CR0)
    68177527                {
    6818                     rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR0,            &u32Val);
    6819                     rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
     7528                    /** @todo r=ramshankar: We only read 32-bits here for legacy/convenience reasons,
     7529                     *        remove when we drop 32-bit host w/ 64-bit guest support, see
     7530                     *        @bugref{9180#c39}. */
     7531                    rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR0,              &u32Val);
     7532                    rc |= VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow);
    68207533                    VMXLOCAL_BREAK_RC(rc);
    6821                     u32Val = (u32Val & ~pVCpu->hm.s.vmx.Ctls.u32Cr0Mask)
    6822                            | (u32Shadow & pVCpu->hm.s.vmx.Ctls.u32Cr0Mask);
    6823                     VMMRZCallRing3Disable(pVCpu);   /* Calls into PGM which has Log statements. */
    6824                     CPUMSetGuestCR0(pVCpu, u32Val);
     7534                    u64Val = u32Val;
     7535                    u64Val = (u64Val    & ~pVmcsInfo->u64Cr0Mask)
     7536                           | (u64Shadow &  pVmcsInfo->u64Cr0Mask);
     7537                    VMMRZCallRing3Disable(pVCpu);   /* May call into PGM which has Log statements. */
     7538                    CPUMSetGuestCR0(pVCpu, u64Val);
    68257539                    VMMRZCallRing3Enable(pVCpu);
    68267540                }
     
    68287542                if (fWhat & CPUMCTX_EXTRN_CR4)
    68297543                {
    6830                     rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR4,            &u32Val);
    6831                     rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
     7544                    /** @todo r=ramshankar: We only read 32-bits here for legacy/convenience reasons,
     7545                     *        remove when we drop 32-bit host w/ 64-bit guest support, see
     7546                     *        @bugref{9180#c39}. */
     7547                    rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR4,              &u32Val);
     7548                    rc |= VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow);
    68327549                    VMXLOCAL_BREAK_RC(rc);
    6833                     u32Val = (u32Val & ~pVCpu->hm.s.vmx.Ctls.u32Cr4Mask)
    6834                            | (u32Shadow & pVCpu->hm.s.vmx.Ctls.u32Cr4Mask);
    6835                     CPUMSetGuestCR4(pVCpu, u32Val);
     7550                    u64Val = u32Val;
     7551                    u64Val = (u64Val    & ~pVmcsInfo->u64Cr4Mask)
     7552                           | (u64Shadow &  pVmcsInfo->u64Cr4Mask);
     7553                    pCtx->cr4 = u64Val;
    68367554                }
    68377555
     
    68467564                        if (pCtx->cr3 != u64Val)
    68477565                        {
    6848                             CPUMSetGuestCR3(pVCpu, u64Val);
     7566                            pCtx->cr3 = u64Val;
    68497567                            VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
    68507568                        }
    68517569
    68527570                        /* If the guest is in PAE mode, sync back the PDPE's into the guest state.
    6853                            Note: CR4.PAE, CR0.PG, EFER bit changes are always intercepted, so they're up to date. */
     7571                           Note: CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date. */
    68547572                        if (CPUMIsGuestInPAEModeEx(pCtx))
    68557573                        {
     
    68847602    ASMSetFlags(fEFlags);
    68857603
    6886     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
     7604    STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatImportGuestState, x);
     7605
     7606    if (RT_SUCCESS(rc))
     7607    { /* likely */ }
     7608    else
     7609        return rc;
    68877610
    68887611    /*
     
    69297652VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
    69307653{
    6931     return hmR0VmxImportGuestState(pVCpu, fWhat);
     7654    PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     7655    return hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fWhat);
    69327656}
    69337657
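The CR0/CR4 handling in the import code above merges the real VMCS value with the read shadow under the guest/host mask. A worked example with hypothetical values:

    /* Say the host owns CR0.MP (it is set in the guest/host mask), so the
       guest must see the shadow's idea of that bit, not the raw VMCS value. */
    uint64_t const fCr0Mask = X86_CR0_MP;               /* host-owned bits    */
    uint64_t const uGstCr0  = X86_CR0_PE | X86_CR0_MP;  /* raw VMCS guest CR0 */
    uint64_t const uShadow  = X86_CR0_PE;               /* CR0 read shadow    */
    uint64_t const uCr0     = (uGstCr0 & ~fCr0Mask)
                            | (uShadow &  fCr0Mask);
    /* uCr0 == X86_CR0_PE: guest-owned bits come from the VMCS, host-owned
       bits from the shadow - this is the value handed to CPUMSetGuestCR0(). */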
     
    69507674 *
    69517675 * @param   pVCpu           The cross context virtual CPU structure.
    6952  * @param   fStepping       Running in hmR0VmxRunGuestCodeStep().
     7676 * @param   fStepping       Whether we are single-stepping the guest using the
     7677 *                          hypervisor debugger.
    69537678 */
    69547679static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, bool fStepping)
     
    70487773    if (enmTrpmEvent == TRPM_TRAP)
    70497774    {
     7775        /** @todo r=ramshankar: TRPM currently offers no way to determine a #DB that was
     7776         *        generated using INT1 (ICEBP). */
    70507777        switch (uVector)
    70517778        {
     
    70767803        u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_EXT_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
    70777804    else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
    7078         u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
     7805    {
     7806        switch (uVector)
     7807        {
     7808            case X86_XCPT_BP:
     7809            case X86_XCPT_OF:
     7810                u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
     7811                break;
     7812
     7813            default:
     7814                Assert(uVector == X86_XCPT_DB);
     7815                u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
     7816                break;
     7817        }
     7818    }
    70797819    else
    70807820        AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
     
    70927832 * Converts the pending HM event into a TRPM trap.
    70937833 *
    7094  * @param   pVCpu           The cross context virtual CPU structure.
     7834 * @param   pVCpu   The cross context virtual CPU structure.
    70957835 */
    70967836static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
     
    71067846    Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
    71077847
     7848    /** @todo Use HMVmxEventToTrpmEventType() later. */
    71087849    TRPMEVENT enmTrapType;
    71097850    switch (uVectorType)
     
    71137854           break;
    71147855
     7856        case VMX_IDT_VECTORING_INFO_TYPE_NMI:
     7857        case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
     7858            enmTrapType = TRPM_TRAP;
     7859            break;
     7860
     7861        case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:  /* #DB (INT1/ICEBP). */
     7862            Assert(uVector == X86_XCPT_DB);
     7863            enmTrapType = TRPM_SOFTWARE_INT;
     7864            break;
     7865
     7866        case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:       /* #BP (INT3) and #OF (INTO) */
     7867            Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
     7868            enmTrapType = TRPM_SOFTWARE_INT;
     7869            break;
     7870
    71157871        case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
    71167872            enmTrapType = TRPM_SOFTWARE_INT;
    7117             break;
    7118 
    7119         case VMX_IDT_VECTORING_INFO_TYPE_NMI:
    7120         case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
    7121         case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:      /* #BP and #OF */
    7122         case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
    7123             enmTrapType = TRPM_TRAP;
    71247873            break;
    71257874
     
    71407889    if (   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
    71417890        && uVector == X86_XCPT_PF)
    7142     {
    71437891        TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
    7144     }
    7145     else if (   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    7146              || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
    7147              || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
    7148     {
    7149         AssertMsg(   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    7150                   || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
    7151                   ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
     7892    else if (enmTrapType == TRPM_SOFTWARE_INT)
    71527893        TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
    7153     }
    7154 
    7155     /* Clear the events from the VMCS. */
    7156     VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
    71577894
    71587895    /* We're now done converting the pending event. */
    71597896    pVCpu->hm.s.Event.fPending = false;
     7897}
     7898
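Read together, hmR0VmxTrpmTrapToPendingEvent() and hmR0VmxPendingEventToTrpmTrap() implement the following event-type mapping (numeric type values per the Intel SDM; the EXT_INT row is from the unchanged context above):

    VMX interruption type   Value   TRPM type            Notes
    EXT_INT                 0       TRPM_HARDWARE_INT    external interrupt
    NMI                     2       TRPM_TRAP
    HW_XCPT                 3       TRPM_TRAP            hardware exception
    SW_INT                  4       TRPM_SOFTWARE_INT    INT n; instr. length preserved
    PRIV_SW_XCPT            5       TRPM_SOFTWARE_INT    #DB via INT1 (ICEBP)
    SW_XCPT                 6       TRPM_SOFTWARE_INT    #BP (INT3), #OF (INTO)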
     7899
     7900/**
     7901 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
     7902 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
     7903 *
     7904 * @param   pVCpu       The cross context virtual CPU structure.
     7905 * @param   pVmcsInfo   The VMCS info. object.
     7906 */
     7907static void hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
     7908{
     7909    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
     7910    {
     7911        if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
     7912        {
     7913            pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
     7914            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
     7915            AssertRC(rc);
     7916        }
     7917    } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
     7918}
     7919
     7920
     7921/**
     7922 * Clears the interrupt-window exiting control in the VMCS.
     7923 *
     7924 * @param   pVmcsInfo   The VMCS info. object.
     7925 */
     7926DECLINLINE(int) hmR0VmxClearIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
     7927{
     7928    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
     7929    {
     7930        pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
     7931        return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
     7932    }
     7933    return VINF_SUCCESS;
     7934}
     7935
     7936
     7937/**
     7938 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
     7939 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
     7940 *
     7941 * @param   pVCpu       The cross context virtual CPU structure.
     7942 * @param   pVmcsInfo   The VMCS info. object.
     7943 */
     7944static void hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
     7945{
     7946    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
     7947    {
     7948        if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
     7949        {
     7950            pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
     7951            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
     7952            AssertRC(rc);
     7953            Log4Func(("Setup NMI-window exiting\n"));
     7954        }
     7955    } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
     7956}
     7957
     7958
     7959/**
     7960 * Clears the NMI-window exiting control in the VMCS.
     7961 *
     7962 * @param   pVmcsInfo   The VMCS info. object.
     7963 */
     7964DECLINLINE(int) hmR0VmxClearNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
     7965{
     7966    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
     7967    {
     7968        pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
     7969        return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
     7970    }
     7971    return VINF_SUCCESS;
    71607972}
    71617973
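A hedged usage sketch of the new window-exiting helpers above; the surrounding condition and variable are illustrative (the real callers are the event evaluation/injection paths later in the file):

    /* The guest has a pending interrupt but RFLAGS.IF is clear or an
       interrupt shadow is active: request an interrupt-window VM-exit
       so VT-x exits as soon as the interrupt can be delivered. */
    if (fIntPendingButBlocked)                          /* hypothetical */
        hmR0VmxSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
    else
    {
        int rc = hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
        AssertRC(rc);
    }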
     
    71867998
    71877999    /* Save the guest state if necessary. */
     8000    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    71888001    if (fImportState)
    71898002    {
    7190         int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     8003        int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    71918004        AssertRCReturn(rc, rc);
    71928005    }
     
    71998012#ifdef VBOX_STRICT
    72008013    if (CPUMIsHyperDebugStateActive(pVCpu))
    7201         Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
     8014        Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
    72028015#endif
    72038016    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
     
    72228035        if (!fImportState)
    72238036        {
    7224             int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
     8037            int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
    72258038            AssertRCReturn(rc, rc);
    72268039        }
     
    72328045
    72338046    /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
    7234     pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
     8047    pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
    72358048
    72368049    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
     
    72518064     *  context.
    72528065     */
    7253     if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE)
    7254     {
    7255         int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    7256         AssertRCReturn(rc, rc);
    7257 
    7258         pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
    7259         Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
    7260     }
    7261     Assert(!(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
     8066    int rc = hmR0VmxClearVmcs(pVmcsInfo);
     8067    AssertRCReturn(rc, rc);
     8068
     8069    Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
    72628070    NOREF(idCpu);
    7263 
    72648071    return VINF_SUCCESS;
    72658072}
     
    73428149    HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    73438150
     8151    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    73448152    if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
    73458153    {
    7346         VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VmcsPhys);
    7347         pVCpu->hm.s.vmx.LastError.u32VmcsRev   = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
     8154        VMXGetCurrentVmcs(&pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs);
     8155        pVCpu->hm.s.vmx.LastError.u32VmcsRev   = *(uint32_t *)pVmcsInfo->pvVmcs;
    73488156        pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
    73498157        /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
     
    73548162    Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
    73558163
    7356     /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
     8164    /*
     8165     * Convert any pending HM events back to TRPM due to premature exits to ring-3.
     8166     * We need to do this only on returns to ring-3 and not for longjmps to ring3.
     8167     *
     8168     * This is because execution may continue from ring-3 and we would need to inject
     8169     * the event from there (hence place it back in TRPM).
     8170     */
    73578171    if (pVCpu->hm.s.Event.fPending)
    73588172    {
    73598173        hmR0VmxPendingEventToTrpmTrap(pVCpu);
    73608174        Assert(!pVCpu->hm.s.Event.fPending);
    7361     }
    7362 
    7363     /* Clear interrupt-window and NMI-window controls as we re-evaluate it when we return from ring-3. */
    7364     hmR0VmxClearIntNmiWindowsVmcs(pVCpu);
     8175
     8176        /* Clear the events from the VMCS. */
     8177        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
     8178        AssertRCReturn(rc, rc);
     8179    }
     8180#ifdef VBOX_STRICT
     8181    else
     8182    {
     8183        /*
     8184         * Ensure we don't accidentally clear a pending HM event without clearing the VMCS.
     8185         * This can be pretty hard to debug otherwise, interrupts might get injected twice
     8186         * occasionally, see @bugref{9180#c42}.
     8187         */
     8188        uint32_t uEntryIntInfo;
     8189        int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &uEntryIntInfo);
     8190        AssertRC(rc);
     8191        Assert(!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo));
     8192    }
     8193#endif
     8194
     8195    /*
     8196     * Clear the interrupt-window and NMI-window VMCS controls as we could have got
     8197     * a VM-exit with higher priority than interrupt-window or NMI-window VM-exits
     8198     * (e.g. TPR below threshold).
     8199     */
     8200    int rc = hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
     8201    rc    |= hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
     8202    AssertRCReturn(rc, rc);
    73658203
    73668204    /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
     
    73728210
    73738211    /* Save guest state and restore host state bits. */
    7374     int rc = hmR0VmxLeaveSession(pVCpu);
     8212    rc = hmR0VmxLeaveSession(pVCpu);
    73758213    AssertRCReturn(rc, rc);
    73768214    STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
     8215
    73778216    /* Thread-context hooks are unregistered at this point!!! */
    73788217
     
    73878226    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
    73888227        && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
    7389     {
    73908228        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    7391     }
    73928229
    73938230    Assert(!pVCpu->hm.s.fClearTrapFlag);
     
    73978234
    73988235    /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
    7399     if (rcExit != VINF_EM_RAW_INTERRUPT)
     8236    if (   rcExit != VINF_EM_RAW_INTERRUPT
     8237        || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
     8238    {
     8239        Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL));
    74008240        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     8241    }
    74018242
    74028243    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
     
    74348275        RTThreadPreemptDisable(&PreemptState);
    74358276
    7436         hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     8277        PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     8278        hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    74378279        CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
    74388280        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
     
    74408282#if HC_ARCH_BITS == 64
    74418283        /* Restore host-state bits that VT-x only restores partially. */
    7442         if (   (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
     8284        if (   (pVCpu->hm.s.vmx.fRestoreHostFlags &  VMX_RESTORE_HOST_REQUIRED)
    74438285            && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
    74448286            VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
     
    74518293
    74528294        /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
    7453         pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
     8295        pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
    74548296        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    7455         if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE)
    7456         {
    7457             VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    7458             pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
    7459         }
     8297
     8298        /* Clear the current VMCS data back to memory. */
     8299        hmR0VmxClearVmcs(pVmcsInfo);
    74608300
    74618301        /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here!  */
     
    74818321    VMMRZCallRing3Enable(pVCpu);
    74828322    return VINF_SUCCESS;
    7483 }
    7484 
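The VMCLEAR on the way out to ring-3 (hmR0VmxClearVmcs above, which replaces the open-coded VMXClearVmcs call plus fVmcsState bookkeeping) flushes the processor's cached VMCS data back into the VMCS region and marks it inactive, so the same VMCS can later be made active on a different host CPU. A minimal sketch of what that step amounts to, assuming the VMCS info. object carries the VMCS physical address and the launch-state flag (the HCPhysVmcs and fVmcsState field names are assumptions here, carried over from the old per-VCPU fields):

    /* Illustrative sketch of the VMCLEAR step; HCPhysVmcs and fVmcsState on
       VMXVMCSINFO are assumed for this example, mirroring the old fields. */
    static int vmxSketchClearVmcs(PVMXVMCSINFO pVmcsInfo)
    {
        /* VMCLEAR flushes CPU-cached VMCS data to memory and marks the region
           inactive, allowing another host CPU to VMPTRLD it afterwards. */
        int rc = VMXClearVmcs(pVmcsInfo->HCPhysVmcs);
        if (RT_SUCCESS(rc))
            pVmcsInfo->fVmcsState = HMVMX_VMCS_STATE_CLEAR;
        return rc;
    }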
    7485 
    7486 /**
    7487  * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
    7488  * cause a VM-exit as soon as the guest is in a state to receive interrupts.
    7489  *
    7490  * @param   pVCpu       The cross context virtual CPU structure.
    7491  */
    7492 DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
    7493 {
    7494     if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT))
    7495     {
    7496         if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
    7497         {
    7498             pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
    7499             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
    7500             AssertRC(rc);
    7501             Log4Func(("Setup interrupt-window exiting\n"));
    7502         }
    7503     } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
    7504 }
    7505 
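These window-exiting helpers (reworked elsewhere in this changeset to take the VMCS info. object) all follow one pattern for optional processor-based VM-execution controls: consult the allowed-1 capability mask first, then update the cached controls and the VMCS field together so the two never diverge. A condensed sketch of that pattern against the new VMCS-info shape, where uControl stands in for any VMX_PROC_CTLS_* bit (illustrative only, not an actual VBox helper):

    /* Sketch: enable an optional proc-based control, keeping the cached copy
       and the VMCS in sync. 'uControl' is any VMX_PROC_CTLS_* bit. */
    static void vmxSketchSetProcCtl(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t uControl)
    {
        if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & uControl) /* CPU supports it? */
        {
            if (!(pVmcsInfo->u32ProcCtls & uControl))       /* avoid redundant VMWRITEs */
            {
                pVmcsInfo->u32ProcCtls |= uControl;         /* update the cache... */
                int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); /* ...and the VMCS */
                AssertRC(rc);
            }
        }
        /* else: control unavailable; we deliver on the next natural VM-exit,
           as the comments in the helpers above explain. */
    }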
    7506 
    7507 /**
    7508  * Clears the interrupt-window exiting control in the VMCS.
    7509  *
    7510  * @param   pVCpu           The cross context virtual CPU structure.
    7511  */
    7512 DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
    7513 {
    7514     Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT);
    7515     pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
    7516     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
    7517     AssertRC(rc);
    7518     Log4Func(("Cleared interrupt-window exiting\n"));
    7519 }
    7520 
    7521 
    7522 /**
    7523  * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
    7524  * cause a VM-exit as soon as the guest is in a state to receive NMIs.
    7525  *
    7526  * @param   pVCpu       The cross context virtual CPU structure.
    7527  */
    7528 DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
    7529 {
    7530     if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
    7531     {
    7532         if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
    7533         {
    7534             pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
    7535             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
    7536             AssertRC(rc);
    7537             Log4Func(("Setup NMI-window exiting\n"));
    7538         }
    7539     } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
    7540 }
    7541 
    7542 
    7543 /**
    7544  * Clears the NMI-window exiting control in the VMCS.
    7545  *
    7546  * @param   pVCpu           The cross context virtual CPU structure.
    7547  */
    7548 DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
    7549 {
    7550     Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT);
    7551     pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
    7552     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
    7553     AssertRC(rc);
    7554     Log4Func(("Cleared NMI-window exiting\n"));
    7555 }
    7556 
    7557 
    7558 /**
    7559  * Evaluates the event to be delivered to the guest and sets it as the pending
    7560  * event.
    7561  *
    7562  * @returns The VT-x guest-interruptibility state.
    7563  * @param   pVCpu           The cross context virtual CPU structure.
    7564  */
    7565 static uint32_t hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu)
    7566 {
    7567     /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
    7568     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    7569     uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu);
    7570     bool const fBlockMovSS    = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
    7571     bool const fBlockSti      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
    7572     bool const fBlockNmi      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
    7573 
    7574     Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
    7575     Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet.*/
    7576     Assert(!fBlockSti || pCtx->eflags.Bits.u1IF);                  /* Cannot set block-by-STI when interrupts are disabled. */
    7577     Assert(!TRPMHasTrap(pVCpu));
    7578 
    7579     /*
    7580      * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
    7581      * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
    7582      */
    7583                                                                /** @todo SMI. SMIs take priority over NMIs. */
    7584     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))        /* NMI. NMIs take priority over regular interrupts. */
    7585     {
    7586         /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
    7587         if (   !pVCpu->hm.s.Event.fPending
    7588             && !fBlockNmi
    7589             && !fBlockSti
    7590             && !fBlockMovSS)
    7591         {
    7592             hmR0VmxSetPendingXcptNmi(pVCpu);
    7593             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
    7594             Log4Func(("Pending NMI\n"));
    7595         }
    7596         else
    7597             hmR0VmxSetNmiWindowExitVmcs(pVCpu);
    7598     }
    7599     /*
    7600      * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
    7601      * a valid interrupt we must- deliver the interrupt. We can no longer re-request it from the APIC.
    7602      */
    7603     else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    7604              && !pVCpu->hm.s.fSingleInstruction)
    7605     {
    7606         Assert(!DBGFIsStepping(pVCpu));
    7607         int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    7608         AssertRCReturn(rc, 0);
    7609         bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
    7610         if (   !pVCpu->hm.s.Event.fPending
    7611             && !fBlockInt
    7612             && !fBlockSti
    7613             && !fBlockMovSS)
    7614         {
    7615             uint8_t u8Interrupt;
    7616             rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    7617             if (RT_SUCCESS(rc))
    7618             {
    7619                 hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt);
    7620                 Log4Func(("Pending external interrupt vector %#x\n", u8Interrupt));
    7621             }
    7622             else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
    7623             {
    7624                 if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    7625                     hmR0VmxApicSetTprThreshold(pVCpu, u8Interrupt >> 4);
    7626                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
    7627 
    7628                 /*
    7629                  * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
    7630                  * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
    7631                  * need to re-set this force-flag here.
    7632                  */
    7633             }
    7634             else
    7635                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
    7636         }
    7637         else
    7638             hmR0VmxSetIntWindowExitVmcs(pVCpu);
    7639     }
    7640 
    7641     return fIntrState;
    7642 }
    7643 
    7644 
    7645 /**
    7646  * Injects any pending events into the guest if the guest is in a state to
    7647  * receive them.
    7648  *
    7649  * @returns Strict VBox status code (i.e. informational status codes too).
    7650  * @param   pVCpu           The cross context virtual CPU structure.
    7651  * @param   fIntrState      The VT-x guest-interruptibility state.
    7652  * @param   fStepping       Running in hmR0VmxRunGuestCodeStep() and we should
    7653  *                          return VINF_EM_DBG_STEPPED if the event was
    7654  *                          dispatched directly.
    7655  */
    7656 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, uint32_t fIntrState, bool fStepping)
    7657 {
    7658     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    7659     Assert(VMMRZCallRing3IsEnabled(pVCpu));
    7660 
    7661     bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
    7662     bool const fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
    7663 
    7664     Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
    7665     Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet.*/
    7666     Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF);     /* Cannot set block-by-STI when interrupts are disabled. */
    7667     Assert(!TRPMHasTrap(pVCpu));
    7668 
    7669     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    7670     if (pVCpu->hm.s.Event.fPending)
    7671     {
    7672         /*
    7673          * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
    7674          * pending even while injecting an event and in this case, we want a VM-exit as soon as
    7675          * the guest is ready for the next interrupt, see @bugref{6208#c45}.
    7676          *
    7677          * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
    7678          */
    7679         uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
    7680 #ifdef VBOX_STRICT
    7681         if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
    7682         {
    7683             bool const fBlockInt = !(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
    7684             Assert(!fBlockInt);
    7685             Assert(!fBlockSti);
    7686             Assert(!fBlockMovSS);
    7687         }
    7688         else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
    7689         {
    7690             bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
    7691             Assert(!fBlockSti);
    7692             Assert(!fBlockMovSS);
    7693             Assert(!fBlockNmi);
    7694         }
    7695 #endif
    7696         Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
    7697               uIntType));
    7698 
    7699         /*
    7700          * Inject the event and get any changes to the guest-interruptibility state.
    7701          *
    7702          * The guest-interruptibility state may need to be updated if we inject the event
    7703          * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
    7704          */
    7705         rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
    7706                                           pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping,
    7707                                           &fIntrState);
    7708         AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
    7709 
    7710         if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
    7711             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
    7712         else
    7713             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
    7714     }
    7715 
    7716     /*
    7717      * Update the guest-interruptibility state.
    7718      *
    7719      * This is required for the real-on-v86 software interrupt injection case above, as well as
    7720      * updates to the guest state from ring-3 or IEM/REM.
    7721      */
    7722     int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
    7723     AssertRCReturn(rc, rc);
    7724 
    7725     /*
    7726      * There's no need to clear the VM-entry interruption-information field here if we're not
    7727      * injecting anything. VT-x clears the valid bit on every VM-exit.
    7728      *
    7729      * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
    7730      */
    7731 
    7732     Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
    7733     NOREF(fBlockMovSS); NOREF(fBlockSti);
    7734     return rcStrict;
    7735 }
    7736 
    7737 
    7738 /**
    7739  * Injects a double-fault (\#DF) exception into the VM.
    7740  *
    7741  * @returns Strict VBox status code (i.e. informational status codes too).
    7742  * @param   pVCpu           The cross context virtual CPU structure.
    7743  * @param   fStepping       Whether we're running in hmR0VmxRunGuestCodeStep()
    7744  *                          and should return VINF_EM_DBG_STEPPED if the event
    7745  *                          is injected directly (register modified by us, not
    7746  *                          by hardware on VM-entry).
    7747  * @param   pfIntrState     Pointer to the current guest interruptibility-state.
    7748  *                          This interruptibility-state will be updated if
    7749  *                          necessary. This cannot not be NULL.
    7750  */
    7751 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, bool fStepping, uint32_t *pfIntrState)
    7752 {
    7753     uint32_t const u32IntInfo = X86_XCPT_DF | VMX_EXIT_INT_INFO_VALID
    7754                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)
    7755                               | VMX_EXIT_INT_INFO_ERROR_CODE_VALID;
    7756     return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */, fStepping,
    7757                                   pfIntrState);
    7758 }
    7759 
    7760 
    7761 /**
    7762  * Injects a general-protection (\#GP) fault into the VM.
    7763  *
    7764  * @returns Strict VBox status code (i.e. informational status codes too).
    7765  * @param   pVCpu               The cross context virtual CPU structure.
    7766  * @param   fErrorCodeValid     Whether the error code is valid (depends on the CPU
    7767  *                              mode, i.e. in real-mode it's not valid).
    7768  * @param   u32ErrorCode        The error code associated with the \#GP.
    7769  * @param   fStepping           Whether we're running in
    7770  *                              hmR0VmxRunGuestCodeStep() and should return
    7771  *                              VINF_EM_DBG_STEPPED if the event is injected
    7772  *                              directly (register modified by us, not by
    7773  *                              hardware on VM-entry).
    7774  * @param   pfIntrState         Pointer to the current guest interruptibility-state.
    7775  *                              This interruptibility-state will be updated if
    7776  *                              necessary. This cannot not be NULL.
    7777  */
    7778 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, bool fErrorCodeValid, uint32_t u32ErrorCode, bool fStepping,
    7779                                              uint32_t *pfIntrState)
    7780 {
    7781     uint32_t const u32IntInfo = X86_XCPT_GP | VMX_EXIT_INT_INFO_VALID
    7782                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)
    7783                               | (fErrorCodeValid ? VMX_EXIT_INT_INFO_ERROR_CODE_VALID : 0);
    7784     return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */, fStepping,
    7785                                   pfIntrState);
    77868323}
    77878324
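Both helpers compose the 32-bit VM-entry interruption-information field the same way, by OR-ing together the vector (bits 7:0), the event type, the error-code-valid bit and the valid bit; the refactored code below builds the same field with RT_BF_MAKE. For illustration, a page-fault, which carries an error code and reports the faulting address through CR2, would be encoded as:

    /* Sketch: VM-entry interruption-info for a #PF (hardware exception with
       an error code). The fault address travels separately and lands in CR2;
       see the CR2 update in the injection routine further down. */
    uint32_t const u32IntInfo = X86_XCPT_PF | VMX_EXIT_INT_INFO_VALID
                              | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)
                              | VMX_EXIT_INT_INFO_ERROR_CODE_VALID;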
     
    78228359 *
    78238360 * @param   pVCpu               The cross context virtual CPU structure.
    7824  * @param   u64IntInfo          The VM-entry interruption-information field.
    7825  * @param   cbInstr             The VM-entry instruction length in bytes (for
    7826  *                              software interrupts, exceptions and privileged
    7827  *                              software exceptions).
    7828  * @param   u32ErrCode          The VM-entry exception error code.
    7829  * @param   GCPtrFaultAddress   The page-fault address for \#PF exceptions.
    7830  * @param   pfIntrState         Pointer to the current guest interruptibility-state.
    7831  *                              This interruptibility-state will be updated if
    7832  *                              necessary. This cannot not be NULL.
    7833  * @param   fStepping           Whether we're running in
    7834  *                              hmR0VmxRunGuestCodeStep() and should return
    7835  *                              VINF_EM_DBG_STEPPED if the event is injected
    7836  *                              directly (register modified by us, not by
     8361 * @param   pVmxTransient       The VMX-transient structure.
     8362 * @param   pEvent              The event being injected.
     8363 * @param   pfIntrState         Pointer to the VT-x guest-interruptibility-state.
     8364 *                              This will be updated if necessary. This cannot
     8365 *                              be NULL.
     8366 * @param   fStepping           Whether we're single-stepping guest execution and
     8367 *                              should return VINF_EM_DBG_STEPPED if the event is
     8368 *                              injected directly (registers modified by us, not by
    78378369 *                              hardware on VM-entry).
    78388370 */
    7839 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
    7840                                            RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState)
     8371static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCHMEVENT pEvent, bool fStepping,
     8372                                           uint32_t *pfIntrState)
    78418373{
    78428374    /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
    7843     AssertMsg(!RT_HI_U32(u64IntInfo), ("%#RX64\n", u64IntInfo));
     8375    AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
    78448376    Assert(pfIntrState);
    78458377
    7846     PCPUMCTX       pCtx       = &pVCpu->cpum.GstCtx;
    7847     uint32_t       u32IntInfo = (uint32_t)u64IntInfo;
    7848     uint32_t const uVector    = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
    7849     uint32_t const uIntType   = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
     8378    PCPUMCTX          pCtx       = &pVCpu->cpum.GstCtx;
     8379    uint32_t          u32IntInfo = (uint32_t)pEvent->u64IntInfo;
     8380    uint32_t const    u32ErrCode = pEvent->u32ErrCode;
     8381    uint32_t const    cbInstr    = pEvent->cbInstr;
     8382    RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
     8383    uint32_t const    uVector    = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
     8384    uint32_t const    uIntType   = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
    78508385
    78518386#ifdef VBOX_STRICT
     
    78758410        }
    78768411    }
    7877 #endif
    78788412
    78798413    /* Cannot inject an NMI when block-by-MOV SS is in effect. */
    78808414    Assert(   uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
    78818415           || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
     8416#endif
    78828417
    78838418    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
     
    78968431        {
    78978432            /*
    7898              * For unrestricted execution enabled CPUs running real-mode guests, we must not
    7899              * set the deliver-error-code bit.
     8433             * For CPUs with unrestricted guest execution enabled and with the guest
     8434             * in real-mode, we must not set the deliver-error-code bit.
    79008435             *
    79018436             * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
     
    79088443            Assert(PDMVmmDevHeapIsEnabled(pVM));
    79098444            Assert(pVM->hm.s.vmx.pRealModeTSS);
     8445            Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
    79108446
    79118447            /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
    7912             int rc2 = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_RIP
    7913                                                    | CPUMCTX_EXTRN_RSP       | CPUMCTX_EXTRN_RFLAGS);
     8448            PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     8449            int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
     8450                                                              | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
    79148451            AssertRCReturn(rc2, rc2);
    79158452
     
    79228459                    return VINF_EM_RESET;
    79238460
    7924                 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
     8461                /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
     8462                   No error codes for exceptions in real-mode. */
    79258463                if (uVector == X86_XCPT_GP)
    7926                     return hmR0VmxInjectXcptDF(pVCpu, fStepping, pfIntrState);
     8464                {
     8465                    uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DF)
     8466                                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
     8467                                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
     8468                                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     8469                    HMEVENT EventXcptDf;
     8470                    RT_ZERO(EventXcptDf);
     8471                    EventXcptDf.u64IntInfo = uXcptDfInfo;
     8472                    return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptDf, fStepping, pfIntrState);
     8473                }
    79278474
    79288475                /*
     
    79328479                 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
    79338480                 */
    7934                 return hmR0VmxInjectXcptGP(pVCpu, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping, pfIntrState);
     8481                uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
     8482                                           | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
     8483                                           | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
     8484                                           | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     8485                HMEVENT EventXcptGp;
     8486                RT_ZERO(EventXcptGp);
     8487                EventXcptGp.u64IntInfo = uXcptGpInfo;
     8488                return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptGp, fStepping, pfIntrState);
    79358489            }
    79368490
     
    79488502            /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
    79498503            X86IDTR16 IdtEntry;
    7950             RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
     8504            RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
    79518505            rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
    79528506            AssertRCReturn(rc2, rc2);
     
    79568510            rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
    79578511            if (rcStrict == VINF_SUCCESS)
     8512            {
    79588513                rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
    7959             if (rcStrict == VINF_SUCCESS)
    7960                 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
     8514                if (rcStrict == VINF_SUCCESS)
     8515                    rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
     8516            }
    79618517
    79628518            /* Clear the required eflag bits and jump to the interrupt/exception handler. */
     
    79708526                if (   uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
    79718527                    && uVector  == X86_XCPT_PF)
    7972                     pCtx->cr2 = GCPtrFaultAddress;
    7973 
    7974                 /* If any other guest-state bits are changed here, make sure to update
    7975                    hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
     8528                    pCtx->cr2 = GCPtrFault;
     8529
    79768530                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS  | HM_CHANGED_GUEST_CR2
    79778531                                                         | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
    79788532                                                         | HM_CHANGED_GUEST_RSP);
    79798533
    7980                 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
     8534                /*
     8535                 * If we delivered a hardware exception (other than an NMI) and if there was
     8536                 * block-by-STI in effect, we should clear it.
     8537                 */
    79818538                if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    79828539                {
     
    79868543                    *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
    79878544                }
    7988                 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
     8545
     8546                Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
    79898547                      u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
    79908548
    7991                 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
    7992                    it, if we are returning to ring-3 before executing guest code. */
     8549                /*
     8550                 * The event has been truly dispatched to the guest. Mark it as no longer pending so
     8551                 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
     8552                 */
    79938553                pVCpu->hm.s.Event.fPending = false;
    79948554
    7995                 /* Make hmR0VmxPreRunGuest() return if we're stepping since we've changed cs:rip. */
     8555                /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
    79968556                if (fStepping)
    79978557                    rcStrict = VINF_EM_DBG_STEPPED;
     
    80038563    }
    80048564
    8005     /* Validate. */
     8565    /*
     8566     * Validate.
     8567     */
    80068568    Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo));                     /* Bit 31 (Valid bit) must be set by caller. */
    80078569    Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK));       /* Bits 30:12 MBZ. */
    80088570
    8009     /* Inject. */
     8571    /*
     8572     * Inject the event into the VMCS.
     8573     */
    80108574    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
    80118575    if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
     
    80148578    AssertRCReturn(rc, rc);
    80158579
    8016     /* Update CR2. */
     8580    /*
     8581     * Update guest CR2 if this is a page-fault.
     8582     */
    80178583    if (   VMX_ENTRY_INT_INFO_TYPE(u32IntInfo) == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
    80188584        && uVector == X86_XCPT_PF)
    8019         pCtx->cr2 = GCPtrFaultAddress;
     8585        pCtx->cr2 = GCPtrFault;
    80208586
    80218587    Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
    8022 
    80238588    return VINF_SUCCESS;
    80248589}
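With the refactored signature, callers hand the injection routine a single HMEVENT instead of five scalars, which is also what makes the recursive #DF/#GP re-injection above straightforward. A hedged usage sketch for queueing a #UD (an exception without an error code), following the RT_BF_MAKE composition used above:

    /* Sketch: inject a #UD through the new HMEVENT-based signature. This is
       an illustration, not a call site from the actual sources; fStepping
       and pfIntrState are as provided by the caller (see above). */
    HMEVENT Event;
    RT_ZERO(Event);
    Event.u64IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)   /* #UD has no error code */
                     | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    VBOXSTRICTRC rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &Event, fStepping, pfIntrState);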
     
    80268591
    80278592/**
    8028  * Clears the interrupt-window exiting control in the VMCS and if necessary
    8029  * clears the current event in the VMCS as well.
    8030  *
    8031  * @returns VBox status code.
    8032  * @param   pVCpu         The cross context virtual CPU structure.
    8033  *
    8034  * @remarks Use this function only to clear events that have not yet been
    8035  *          delivered to the guest but are injected in the VMCS!
    8036  * @remarks No-long-jump zone!!!
    8037  */
    8038 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu)
    8039 {
    8040     if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
    8041     {
    8042         hmR0VmxClearIntWindowExitVmcs(pVCpu);
    8043         Log4Func(("Cleared interrupt window\n"));
    8044     }
    8045 
    8046     if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
    8047     {
    8048         hmR0VmxClearNmiWindowExitVmcs(pVCpu);
    8049         Log4Func(("Cleared NMI window\n"));
    8050     }
     8593 * Evaluates the event to be delivered to the guest and sets it as the pending
     8594 * event.
     8595 *
     8596 * @returns Strict VBox status code (i.e. informational status codes too).
     8597 * @param   pVCpu           The cross context virtual CPU structure.
     8599 * @param   pVmxTransient   The VMX-transient structure.
     8600 * @param   pfIntrState     Where to store the VT-x guest-interruptibility state.
     8601 */
     8602static VBOXSTRICTRC hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t *pfIntrState)
     8603{
     8604    PCPUMCTX     pCtx = &pVCpu->cpum.GstCtx;
     8605    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     8606
     8607    /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
     8608    uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pVmcsInfo);
     8609    bool const fBlockMovSS    = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
     8610    bool const fBlockSti      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
     8611    bool const fBlockNmi      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
     8612
     8613    Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
     8614    Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet. */
     8615    Assert(!fBlockSti || pCtx->eflags.Bits.u1IF);                  /* Cannot set block-by-STI when interrupts are disabled. */
     8616    Assert(!TRPMHasTrap(pVCpu));
     8617    Assert(pfIntrState);
     8618
     8619    *pfIntrState = fIntrState;
     8620
     8621    /*
     8622     * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
     8623     * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
     8624     */
     8625                                                               /** @todo SMI. SMIs take priority over NMIs. */
     8626    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))        /* NMI. NMIs take priority over regular interrupts. */
     8627    {
     8628        /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
     8629        if (   !pVCpu->hm.s.Event.fPending
     8630            && !fBlockNmi
     8631            && !fBlockSti
     8632            && !fBlockMovSS)
     8633        {
     8634#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     8635            if (   pVmxTransient->fIsNestedGuest
     8636                && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_NMI_EXIT))
     8637                return IEMExecVmxVmexitNmi(pVCpu);
     8638#endif
     8639            hmR0VmxSetPendingXcptNmi(pVCpu);
     8640            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
     8641            Log4Func(("Pending NMI\n"));
     8642        }
     8643        else
     8644            hmR0VmxSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
     8645    }
     8646    /*
     8647     * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
     8648     * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
     8649     */
     8650    else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
     8651             && !pVCpu->hm.s.fSingleInstruction)
     8652    {
     8653        Assert(!DBGFIsStepping(pVCpu));
     8654        int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
     8655        AssertRCReturn(rc, rc);
     8656        bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
     8657        if (   !pVCpu->hm.s.Event.fPending
     8658            && !fBlockInt
     8659            && !fBlockSti
     8660            && !fBlockMovSS)
     8661        {
     8662#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     8663            if (   pVmxTransient->fIsNestedGuest
     8664                && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
     8665            {
     8666                VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0/* uVector */, true /* fIntPending */);
     8667                if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
     8668                    return rcStrict;
     8669            }
     8670#endif
     8671            uint8_t u8Interrupt;
     8672            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
     8673            if (RT_SUCCESS(rc))
     8674            {
     8675#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     8676                if (   pVmxTransient->fIsNestedGuest
     8677                    && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
     8678                    && CPUMIsGuestVmxExitCtlsSet(pVCpu, pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
     8679                {
     8680                    VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
     8681                    if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
     8682                        return rcStrict;
     8683                }
     8684#endif
     8685                hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt);
     8686                Log4Func(("Pending external interrupt vector %#x\n", u8Interrupt));
     8687            }
     8688            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
     8689            {
     8690                if (   !pVmxTransient->fIsNestedGuest
     8691                    && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
     8692                    hmR0VmxApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
     8693                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
     8694
     8695                /*
     8696                 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
     8697                 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
     8698                 * need to re-set this force-flag here.
     8699                 */
     8700            }
     8701            else
     8702                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
     8703        }
     8704        else
     8705            hmR0VmxSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
     8706    }
     8707
     8708    return VINF_SUCCESS;
     8709}
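Note the two-phase nested-guest handling for external interrupts above: the first IEMExecVmxVmexitExtInt call (fIntPending=true) runs before the vector is popped from the PIC/APIC, letting the nested hypervisor take the VM-exit without consuming it, while the second call (fIntPending=false, after PDMGetInterrupt) covers the acknowledge-interrupt-on-exit case where the consumed vector must be reported in the nested VM-exit. Condensed into a self-contained sketch (illustrative; the real function interleaves this with the window-exiting logic):

    /* Condensed sketch of the nested-guest external-interrupt flow above. */
    static VBOXSTRICTRC vmxSketchNstGstExtInt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
    {
        /* Phase 1: vector still pending; the nested hypervisor may take the
           VM-exit now, before anything is consumed from the PIC/APIC. */
        if (   pVmxTransient->fIsNestedGuest
            && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
        {
            VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
            if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
                return rcStrict;
        }

        /* Phase 2: consume the vector; with acknowledge-interrupt-on-exit the
           vector rides along in the nested VM-exit itself. */
        uint8_t u8Interrupt;
        int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
        if (RT_SUCCESS(rc))
        {
            if (   pVmxTransient->fIsNestedGuest
                && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
                && CPUMIsGuestVmxExitCtlsSet(pVCpu, pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
                return IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
            hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt);  /* deliver on VM-entry */
        }
        return VINF_SUCCESS;
    }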
     8710
     8711
     8712/**
     8713 * Injects any pending events into the guest if the guest is in a state to
     8714 * receive them.
     8715 *
     8716 * @returns Strict VBox status code (i.e. informational status codes too).
     8717 * @param   pVCpu           The cross context virtual CPU structure.
     8718 * @param   pVmxTransient   The VMX-transient structure.
     8719 * @param   fIntrState      The VT-x guest-interruptibility state.
     8720 * @param   fStepping       Whether we are single-stepping the guest using the
     8721 *                          hypervisor debugger and should return
     8722 *                          VINF_EM_DBG_STEPPED if the event was dispatched
     8723 *                          directly.
     8724 */
     8725static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t fIntrState, bool fStepping)
     8726{
     8727    HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
     8728    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     8729
     8730    bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
     8731    bool const fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
     8732
     8733    Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
     8734    Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF);     /* Cannot set block-by-STI when interrupts are disabled. */
     8735    Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet. */
     8736    Assert(!TRPMHasTrap(pVCpu));
     8737
     8738    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
     8739    if (pVCpu->hm.s.Event.fPending)
     8740    {
     8741        /*
     8742         * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
     8743         * pending even while injecting an event and in this case, we want a VM-exit as soon as
     8744         * the guest is ready for the next interrupt, see @bugref{6208#c45}.
     8745         *
     8746         * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
     8747         */
     8748        uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
     8749#ifdef VBOX_STRICT
     8750        if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
     8751        {
     8752            bool const fBlockInt = !(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
     8753            Assert(!fBlockInt);
     8754            Assert(!fBlockSti);
     8755            Assert(!fBlockMovSS);
     8756        }
     8757        else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
     8758        {
     8759            bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
     8760            Assert(!fBlockSti);
     8761            Assert(!fBlockMovSS);
     8762            Assert(!fBlockNmi);
     8763        }
     8764#endif
     8765        Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
     8766              uIntType));
     8767
     8768        /*
     8769         * Inject the event and get any changes to the guest-interruptibility state.
     8770         *
     8771         * The guest-interruptibility state may need to be updated if we inject the event
     8772         * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
     8773         */
     8774        rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &pVCpu->hm.s.Event, fStepping, &fIntrState);
     8775        AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
     8776
     8777        if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
     8778            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
     8779        else
     8780            STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
     8781    }
     8782
     8783    /*
     8784     * Update the guest-interruptibility state.
     8785     *
     8786     * This is required for the real-on-v86 software interrupt injection case above, as well as
     8787     * updates to the guest state from ring-3 or IEM/REM.
     8788     */
     8789    int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
     8790    AssertRCReturn(rc, rc);
     8791
     8792    /*
     8793     * There's no need to clear the VM-entry interruption-information field here if we're not
     8794     * injecting anything. VT-x clears the valid bit on every VM-exit.
     8795     *
     8796     * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
     8797     */
     8798
     8799    Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
     8800    NOREF(fBlockMovSS); NOREF(fBlockSti);
     8801    return rcStrict;
    80518802}
    80528803
     
    80798830
    80808831    /*
    8081      * Load the VCPU's VMCS as the current (and active) one.
    8082      */
    8083     Assert(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR);
    8084     int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     8832     * Load the appropriate VMCS as the current and active one.
     8833     */
     8834    PVMXVMCSINFO pVmcsInfo;
     8835    bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
     8836    if (!fInNestedGuestMode)
     8837        pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfo;
     8838    else
     8839        pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
     8840    int rc = hmR0VmxLoadVmcs(pVmcsInfo);
    80858841    if (RT_SUCCESS(rc))
    80868842    {
    8087         pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
     8843        pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = fInNestedGuestMode;
    80888844        pVCpu->hm.s.fLeaveDone = false;
    8089         Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
     8845        Log4Func(("Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
    80908846
    80918847        /*
     
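On the way into a session the reverse of the ring-3 VMCLEAR happens: hmR0VmxLoadVmcs makes the chosen VMCS current and active via VMPTRLD, and fSwitchedToNstGstVmcs records whether the guest or the nested-guest VMCS info. object won the selection above. A minimal sketch of the load step, under the same assumed field names as the clear sketch earlier:

    /* Illustrative sketch of the VMPTRLD step; HCPhysVmcs and fVmcsState on
       VMXVMCSINFO are assumed, mirroring the clear sketch earlier. */
    static int vmxSketchLoadVmcs(PVMXVMCSINFO pVmcsInfo)
    {
        int rc = VMXActivateVmcs(pVmcsInfo->HCPhysVmcs);        /* VMPTRLD */
        if (RT_SUCCESS(rc))
            pVmcsInfo->fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
        return rc;
    }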
    81238879            Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
    81248880
    8125             /*
    8126              * Restore host-state (FPU, debug etc.)
    8127              */
     8881            /* Restore host-state (FPU, debug etc.) */
    81288882            if (!pVCpu->hm.s.fLeaveDone)
    81298883            {
     
    81388892            /* Leave HM context, takes care of local init (term). */
    81398893            int rc = HMR0LeaveCpu(pVCpu);
    8140             AssertRC(rc); NOREF(rc);
     8894            AssertRC(rc);
    81418895
    81428896            /* Restore longjmp state. */
     
    81608914            int rc = hmR0EnterCpu(pVCpu);
    81618915            AssertRC(rc);
    8162             Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
    8163                                             == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
     8916            Assert(   (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
     8917                   ==                            (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
    81648918
    81658919            /* Load the active VMCS as the current one. */
    8166             if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR)
    8167             {
    8168                 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    8169                 AssertRC(rc); NOREF(rc);
    8170                 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
    8171                 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
    8172             }
     8920            PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     8921            rc = hmR0VmxLoadVmcs(pVmcsInfo);
     8922            AssertRC(rc);
     8923            Log4Func(("Resumed: Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
    81738924            pVCpu->hm.s.fLeaveDone = false;
    81748925
     
    82569007 * @returns VBox strict status code.
    82579008 * @retval  VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
    8258  *          without unrestricted guest access and the VMMDev is not presently
     9009 *          without unrestricted guest execution and the VMMDev is not presently
    82599010 *          mapped (e.g. EFI32).
    82609011 *
    8261  * @param   pVCpu       The cross context virtual CPU structure.
     9012 * @param   pVCpu           The cross context virtual CPU structure.
     9013 * @param   pVmxTransient   The VMX-transient structure.
    82629014 *
    82639015 * @remarks No-long-jump zone!!!
    82649016 */
    8265 static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu)
     9017static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    82669018{
    82679019    AssertPtr(pVCpu);
    82689020    HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    8269 
    82709021    LogFlowFunc(("pVCpu=%p\n", pVCpu));
    82719022
    82729023    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
    82739024
    8274     /* Determine real-on-v86 mode. */
    8275     pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
    8276     if (   !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
    8277         &&  CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
    8278         pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
     9025    /*
     9026     * Determine real-on-v86 mode.
     9027     * Used when the guest is in real-mode and unrestricted guest execution is not used.
     9028     */
     9029    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     9030    if (    pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
     9031        || !CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
     9032        pVmcsInfo->RealMode.fRealOnV86Active = false;
     9033    else
     9034    {
     9035        Assert(!pVmxTransient->fIsNestedGuest);
     9036        pVmcsInfo->RealMode.fRealOnV86Active = true;
     9037    }
    82799038
    82809039    /*
     
    82829041     * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
    82839042     */
    8284     int rc = hmR0VmxSelectVMRunHandler(pVCpu);
     9043    /** @todo r=ramshankar: Move hmR0VmxSelectVMRunHandler inside
     9044     *        hmR0VmxExportGuestEntryExitCtls and do it conditionally. There shouldn't
     9045     *        be a need to evaluate this every time since I'm pretty sure we intercept
     9046     *        all guest paging mode changes. */
     9047    int rc = hmR0VmxSelectVMRunHandler(pVCpu, pVmxTransient);
    82859048    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    82869049
    8287     /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
    8288     rc = hmR0VmxExportGuestEntryCtls(pVCpu);
     9050    rc = hmR0VmxExportGuestEntryExitCtls(pVCpu, pVmxTransient);
    82899051    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    82909052
    8291     /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
    8292     rc = hmR0VmxExportGuestExitCtls(pVCpu);
     9053    rc = hmR0VmxExportGuestCR0(pVCpu, pVmxTransient);
    82939054    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    82949055
    8295     rc = hmR0VmxExportGuestCR0(pVCpu);
    8296     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    8297 
    8298     VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu);
     9056    VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pVmxTransient);
    82999057    if (rcStrict == VINF_SUCCESS)
    83009058    { /* likely */ }
     
    83059063    }
    83069064
    8307     rc = hmR0VmxExportGuestSegmentRegs(pVCpu);
     9065    rc = hmR0VmxExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
    83089066    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    83099067
    8310     /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it
    8311        may alter controls if we determine we don't have to swap EFER after all. */
    8312     rc = hmR0VmxExportGuestMsrs(pVCpu);
     9068    rc = hmR0VmxExportGuestMsrs(pVCpu, pVmxTransient);
    83139069    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    83149070
    8315     rc = hmR0VmxExportGuestApicTpr(pVCpu);
     9071    rc = hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient);
    83169072    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    83179073
    8318     rc = hmR0VmxExportGuestXcptIntercepts(pVCpu);
     9074    rc = hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient);
    83199075    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    83209076
    83219077    rc  = hmR0VmxExportGuestRip(pVCpu);
    83229078    rc |= hmR0VmxExportGuestRsp(pVCpu);
    8323     rc |= hmR0VmxExportGuestRflags(pVCpu);
     9079    rc |= hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
    83249080    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    83259081
     
    83369092                                                  |  HM_CHANGED_GUEST_TSC_AUX
    83379093                                                  |  HM_CHANGED_GUEST_OTHER_MSRS
    8338                                                   |  HM_CHANGED_GUEST_HWVIRT
     9094                                                  |  HM_CHANGED_GUEST_HWVIRT         /* More accurate PLE handling someday? */
    83399095                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
    83409096
     
    83479103 * Exports the state shared between the host and guest into the VMCS.
    83489104 *
    8349  * @param   pVCpu       The cross context virtual CPU structure.
     9105 * @param   pVCpu           The cross context virtual CPU structure.
     9106 * @param   pVmxTransient   The VMX-transient structure.
    83509107 *
    83519108 * @remarks No-long-jump zone!!!
    83529109 */
    8353 static void hmR0VmxExportSharedState(PVMCPU pVCpu)
     9110static void hmR0VmxExportSharedState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    83549111{
    83559112    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    83589115    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
    83599116    {
    8360         int rc = hmR0VmxExportSharedDebugState(pVCpu);
     9117        int rc = hmR0VmxExportSharedDebugState(pVCpu, pVmxTransient);
    83619118        AssertRC(rc);
    83629119        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
     
    83659122        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
    83669123        {
    8367             rc = hmR0VmxExportGuestRflags(pVCpu);
     9124            rc = hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
    83689125            AssertRC(rc);
    83699126        }
     
    83869143 * @returns Strict VBox status code (i.e. informational status codes too).
    83879144 * @retval  VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
    8388  *          without unrestricted guest access and the VMMDev is not presently
     9145 *          without unrestricted guest execution and the VMMDev is not presently
    83899146 *          mapped (e.g. EFI32).
    83909147 *
    83919148 * @param   pVCpu           The cross context virtual CPU structure.
     9149 * @param   pVmxTransient   The VMX-transient structure.
    83929150 *
    83939151 * @remarks No-long-jump zone!!!
    83949152 */
    8395 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu)
     9153static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    83969154{
    83979155    HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
     
    84169174        { /* likely */}
    84179175        else
    8418             AssertMsgFailedReturn(("hmR0VmxExportGuestRip failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
     9176            AssertMsgFailedReturn(("Failed to export guest RIP! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
    84199177        STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
    84209178    }
    84219179    else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
    84229180    {
    8423         rcStrict = hmR0VmxExportGuestState(pVCpu);
     9181        rcStrict = hmR0VmxExportGuestState(pVCpu, pVmxTransient);
    84249182        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    84259183        { /* likely */}
    84269184        else
    84279185        {
    8428             AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("hmR0VmxExportGuestState failed! rc=%Rrc\n",
     9186            AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("Failed to export guest state! rc=%Rrc\n",
    84299187                                                           VBOXSTRICTRC_VAL(rcStrict)));
    84309188            Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     
    84489206
    84499207/**
     9208 * Tries to determine what part of the guest-state VT-x has deemed as invalid
     9209 * and update error record fields accordingly.
     9210 *
     9211 * @returns VMX_IGS_* return codes.
     9212 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
     9213 *         wrong with the guest state.
     9214 *
     9215 * @param   pVCpu       The cross context virtual CPU structure.
     9216 * @param   pVmcsInfo   The VMCS info. object.
     9217 *
     9218 * @remarks This function assumes our cache of the VMCS controls
     9219 *          are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
     9220 */
     9221static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
     9222{
     9223#define HMVMX_ERROR_BREAK(err)              { uError = (err); break; }
     9224#define HMVMX_CHECK_BREAK(expr, err)        if (!(expr)) { \
     9225                                                uError = (err); \
     9226                                                break; \
     9227                                            } else do { } while (0)
     9228
     9229    int        rc;
     9230    PVM        pVM    = pVCpu->CTX_SUFF(pVM);
     9231    PCPUMCTX   pCtx   = &pVCpu->cpum.GstCtx;
     9232    uint32_t   uError = VMX_IGS_ERROR;
     9233    uint32_t   u32Val;
     9234    bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
     9235
     9236    do
     9237    {
     9238        /*
     9239         * CR0.
     9240         */
     9241        uint32_t       fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
     9242        uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
     9243        /* Exceptions for unrestricted guest execution for fixed CR0 bits (PE, PG).
     9244           See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
     9245        if (fUnrestrictedGuest)
     9246            fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
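        /* Editor's note -- worked example with typical (assumed, illustrative) values:
           if IA32_VMX_CR0_FIXED0 = 0x80000021 (PE, NE, PG) and
           IA32_VMX_CR0_FIXED1 = 0xFFFFFFFF, then fSetCr0 = 0x80000021 (bits that
           must be 1) and fZapCr0 = 0xFFFFFFFF (no bit forced to 0); the checks
           below then verify (CR0 & fSetCr0) == fSetCr0 and (CR0 & ~fZapCr0) == 0. */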
     9247
     9248        uint32_t u32GuestCr0;
     9249        rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0);
     9250        AssertRCBreak(rc);
     9251        HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
     9252        HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
     9253        if (   !fUnrestrictedGuest
     9254            &&  (u32GuestCr0 & X86_CR0_PG)
     9255            && !(u32GuestCr0 & X86_CR0_PE))
     9256        {
     9257            HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
     9258        }
     9259
     9260        /*
     9261         * CR4.
     9262         */
     9263        uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
     9264        uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
     9265
     9266        uint32_t u32GuestCr4;
     9267        rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4);
     9268        AssertRCBreak(rc);
     9269        HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
     9270        HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
     9271
     9272        /*
     9273         * IA32_DEBUGCTL MSR.
     9274         */
     9275        uint64_t u64Val;
     9276        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
     9277        AssertRCBreak(rc);
     9278        if (   (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
     9279            && (u64Val & 0xfffffe3c))                           /* Bits 31:9, bits 5:2 MBZ. */
     9280        {
     9281            HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
     9282        }
     9283        uint64_t u64DebugCtlMsr = u64Val;
     9284
     9285#ifdef VBOX_STRICT
     9286        rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
     9287        AssertRCBreak(rc);
     9288        Assert(u32Val == pVmcsInfo->u32EntryCtls);
     9289#endif
     9290        bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     9291
     9292        /*
     9293         * RIP and RFLAGS.
     9294         */
     9295        uint32_t u32Eflags;
     9296#if HC_ARCH_BITS == 64
     9297        rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
     9298        AssertRCBreak(rc);
      9299        /* pCtx->rip can be different from the one in the VMCS (e.g. running guest code and taking VM-exits that don't update it). */
     9300        if (   !fLongModeGuest
     9301            || !pCtx->cs.Attr.n.u1Long)
     9302        {
     9303            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
     9304        }
     9305        /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
     9306         *        must be identical if the "IA-32e mode guest" VM-entry
     9307         *        control is 1 and CS.L is 1. No check applies if the
     9308         *        CPU supports 64 linear-address bits. */
     9309
     9310        /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
     9311        rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
     9312        AssertRCBreak(rc);
     9313        HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)),                     /* Bit 63:22, Bit 15, 5, 3 MBZ. */
     9314                          VMX_IGS_RFLAGS_RESERVED);
     9315        HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1);       /* Bit 1 MB1. */
     9316        u32Eflags = u64Val;
     9317#else
     9318        rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
     9319        AssertRCBreak(rc);
     9320        HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED);          /* Bit 31:22, Bit 15, 5, 3 MBZ. */
     9321        HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1);    /* Bit 1 MB1. */
     9322#endif
     9323
     9324        if (   fLongModeGuest
     9325            || (   fUnrestrictedGuest
     9326                && !(u32GuestCr0 & X86_CR0_PE)))
     9327        {
     9328            HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
     9329        }
     9330
     9331        uint32_t u32EntryInfo;
     9332        rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
     9333        AssertRCBreak(rc);
     9334        if (   VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
     9335            && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
     9336        {
     9337            HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
     9338        }
     9339
     9340        /*
     9341         * 64-bit checks.
     9342         */
     9343#if HC_ARCH_BITS == 64
     9344        if (fLongModeGuest)
     9345        {
     9346            HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
     9347            HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
     9348        }
     9349
     9350        if (   !fLongModeGuest
     9351            && (u32GuestCr4 & X86_CR4_PCIDE))
     9352        {
     9353            HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
     9354        }
     9355
     9356        /** @todo CR3 field must be such that bits 63:52 and bits in the range
     9357         *        51:32 beyond the processor's physical-address width are 0. */
     9358
     9359        if (   (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
     9360            && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
     9361        {
     9362            HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
     9363        }
     9364
     9365        rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
     9366        AssertRCBreak(rc);
     9367        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
     9368
     9369        rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
     9370        AssertRCBreak(rc);
     9371        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
     9372#endif
     9373
     9374        /*
     9375         * PERF_GLOBAL MSR.
     9376         */
     9377        if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
     9378        {
     9379            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
     9380            AssertRCBreak(rc);
     9381            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
     9382                              VMX_IGS_PERF_GLOBAL_MSR_RESERVED);        /* Bits 63:35, bits 31:2 MBZ. */
     9383        }
     9384
     9385        /*
     9386         * PAT MSR.
     9387         */
     9388        if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
     9389        {
     9390            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
     9391            AssertRCBreak(rc);
      9392            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT type MBZ. */
     9393            for (unsigned i = 0; i < 8; i++)
     9394            {
     9395                uint8_t u8Val = (u64Val & 0xff);
     9396                if (   u8Val != 0 /* UC */
     9397                    && u8Val != 1 /* WC */
     9398                    && u8Val != 4 /* WT */
     9399                    && u8Val != 5 /* WP */
     9400                    && u8Val != 6 /* WB */
     9401                    && u8Val != 7 /* UC- */)
     9402                {
     9403                    HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
     9404                }
     9405                u64Val >>= 8;
     9406            }
     9407        }
     9408
     9409        /*
     9410         * EFER MSR.
     9411         */
     9412        if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
     9413        {
     9414            Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
     9415            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
     9416            AssertRCBreak(rc);
     9417            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
     9418                              VMX_IGS_EFER_MSR_RESERVED);               /* Bits 63:12, bit 9, bits 7:1 MBZ. */
     9419            HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(  pVmcsInfo->u32EntryCtls
     9420                                                                           & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
     9421                              VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
     9422            /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
     9423             *        iemVmxVmentryCheckGuestState(). */
     9424            HMVMX_CHECK_BREAK(   fUnrestrictedGuest
     9425                              || !(u32GuestCr0 & X86_CR0_PG)
     9426                              || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
     9427                              VMX_IGS_EFER_LMA_LME_MISMATCH);
     9428        }
     9429
     9430        /*
     9431         * Segment registers.
     9432         */
     9433        HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
     9434                          || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
     9435        if (!(u32Eflags & X86_EFL_VM))
     9436        {
     9437            /* CS */
     9438            HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
     9439            HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
     9440            HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
     9441            HMVMX_CHECK_BREAK(   (pCtx->cs.u32Limit & 0xfff) == 0xfff
     9442                              || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
     9443            HMVMX_CHECK_BREAK(   !(pCtx->cs.u32Limit & 0xfff00000)
     9444                              || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
     9445            /* CS cannot be loaded with NULL in protected mode. */
     9446            HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
     9447            HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
     9448            if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
     9449                HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
     9450            else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
     9451                HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
     9452            else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
     9453                HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
     9454            else
     9455                HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
     9456
     9457            /* SS */
     9458            HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
     9459                              || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
     9460            HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
     9461            if (   !(pCtx->cr0 & X86_CR0_PE)
     9462                || pCtx->cs.Attr.n.u4Type == 3)
     9463            {
     9464                HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
     9465            }
     9466            if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
     9467            {
     9468                HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
     9469                HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
     9470                HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
     9471                HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
     9472                HMVMX_CHECK_BREAK(   (pCtx->ss.u32Limit & 0xfff) == 0xfff
     9473                                  || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
     9474                HMVMX_CHECK_BREAK(   !(pCtx->ss.u32Limit & 0xfff00000)
     9475                                  || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
     9476            }
     9477
     9478            /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSReg(). */
     9479            if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
     9480            {
     9481                HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
     9482                HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
     9483                HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
     9484                                  || pCtx->ds.Attr.n.u4Type > 11
     9485                                  || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
     9486                HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
     9487                HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
     9488                HMVMX_CHECK_BREAK(   (pCtx->ds.u32Limit & 0xfff) == 0xfff
     9489                                  || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
     9490                HMVMX_CHECK_BREAK(   !(pCtx->ds.u32Limit & 0xfff00000)
     9491                                  || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
     9492                HMVMX_CHECK_BREAK(   !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
     9493                                  || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
     9494            }
     9495            if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
     9496            {
     9497                HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
     9498                HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
     9499                HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
     9500                                  || pCtx->es.Attr.n.u4Type > 11
     9501                                  || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
     9502                HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
     9503                HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
     9504                HMVMX_CHECK_BREAK(   (pCtx->es.u32Limit & 0xfff) == 0xfff
     9505                                  || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
     9506                HMVMX_CHECK_BREAK(   !(pCtx->es.u32Limit & 0xfff00000)
     9507                                  || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
     9508                HMVMX_CHECK_BREAK(   !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
     9509                                  || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
     9510            }
     9511            if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
     9512            {
     9513                HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
     9514                HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
     9515                HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
     9516                                  || pCtx->fs.Attr.n.u4Type > 11
     9517                                  || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
     9518                HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
     9519                HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
     9520                HMVMX_CHECK_BREAK(   (pCtx->fs.u32Limit & 0xfff) == 0xfff
     9521                                  || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
     9522                HMVMX_CHECK_BREAK(   !(pCtx->fs.u32Limit & 0xfff00000)
     9523                                  || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
     9524                HMVMX_CHECK_BREAK(   !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
     9525                                  || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
     9526            }
     9527            if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
     9528            {
     9529                HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
     9530                HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
     9531                HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
     9532                                  || pCtx->gs.Attr.n.u4Type > 11
     9533                                  || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
     9534                HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
     9535                HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
     9536                HMVMX_CHECK_BREAK(   (pCtx->gs.u32Limit & 0xfff) == 0xfff
     9537                                  || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
     9538                HMVMX_CHECK_BREAK(   !(pCtx->gs.u32Limit & 0xfff00000)
     9539                                  || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
     9540                HMVMX_CHECK_BREAK(   !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
     9541                                  || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
     9542            }
     9543            /* 64-bit capable CPUs. */
     9544#if HC_ARCH_BITS == 64
     9545            HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
     9546            HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
     9547            HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
     9548                              || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
     9549            HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
     9550            HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
     9551                              VMX_IGS_LONGMODE_SS_BASE_INVALID);
     9552            HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
     9553                              VMX_IGS_LONGMODE_DS_BASE_INVALID);
     9554            HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
     9555                              VMX_IGS_LONGMODE_ES_BASE_INVALID);
     9556#endif
     9557        }
     9558        else
     9559        {
     9560            /* V86 mode checks. */
     9561            uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
     9562            if (pVmcsInfo->RealMode.fRealOnV86Active)
     9563            {
     9564                u32CSAttr = 0xf3;   u32SSAttr = 0xf3;
     9565                u32DSAttr = 0xf3;   u32ESAttr = 0xf3;
     9566                u32FSAttr = 0xf3;   u32GSAttr = 0xf3;
     9567            }
     9568            else
     9569            {
     9570                u32CSAttr = pCtx->cs.Attr.u;   u32SSAttr = pCtx->ss.Attr.u;
     9571                u32DSAttr = pCtx->ds.Attr.u;   u32ESAttr = pCtx->es.Attr.u;
     9572                u32FSAttr = pCtx->fs.Attr.u;   u32GSAttr = pCtx->gs.Attr.u;
     9573            }
     9574
     9575            /* CS */
     9576            HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
     9577            HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
     9578            HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
     9579            /* SS */
     9580            HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
     9581            HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
     9582            HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
     9583            /* DS */
     9584            HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
     9585            HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
     9586            HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
     9587            /* ES */
     9588            HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
     9589            HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
     9590            HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
     9591            /* FS */
     9592            HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
     9593            HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
     9594            HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
     9595            /* GS */
     9596            HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
     9597            HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
     9598            HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
     9599            /* 64-bit capable CPUs. */
     9600#if HC_ARCH_BITS == 64
     9601            HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
     9602            HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
     9603            HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
     9604                              || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
     9605            HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
     9606            HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
     9607                              VMX_IGS_LONGMODE_SS_BASE_INVALID);
     9608            HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
     9609                              VMX_IGS_LONGMODE_DS_BASE_INVALID);
     9610            HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
     9611                              VMX_IGS_LONGMODE_ES_BASE_INVALID);
     9612#endif
     9613        }
     9614
     9615        /*
     9616         * TR.
     9617         */
     9618        HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
     9619        /* 64-bit capable CPUs. */
     9620#if HC_ARCH_BITS == 64
     9621        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
     9622#endif
     9623        if (fLongModeGuest)
     9624        {
     9625            HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11,           /* 64-bit busy TSS. */
     9626                              VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
     9627        }
     9628        else
     9629        {
     9630            HMVMX_CHECK_BREAK(   pCtx->tr.Attr.n.u4Type == 3          /* 16-bit busy TSS. */
     9631                              || pCtx->tr.Attr.n.u4Type == 11,        /* 32-bit busy TSS.*/
     9632                              VMX_IGS_TR_ATTR_TYPE_INVALID);
     9633        }
     9634        HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
     9635        HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
     9636        HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED);   /* Bits 11:8 MBZ. */
     9637        HMVMX_CHECK_BREAK(   (pCtx->tr.u32Limit & 0xfff) == 0xfff
     9638                          || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
     9639        HMVMX_CHECK_BREAK(   !(pCtx->tr.u32Limit & 0xfff00000)
     9640                          || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
     9641        HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
     9642
     9643        /*
     9644         * GDTR and IDTR.
     9645         */
     9646#if HC_ARCH_BITS == 64
     9647        rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
     9648        AssertRCBreak(rc);
     9649        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
     9650
     9651        rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
     9652        AssertRCBreak(rc);
     9653        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
     9654#endif
     9655
     9656        rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
     9657        AssertRCBreak(rc);
     9658        HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID);      /* Bits 31:16 MBZ. */
     9659
     9660        rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
     9661        AssertRCBreak(rc);
     9662        HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID);      /* Bits 31:16 MBZ. */
     9663
     9664        /*
     9665         * Guest Non-Register State.
     9666         */
     9667        /* Activity State. */
     9668        uint32_t u32ActivityState;
     9669        rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
     9670        AssertRCBreak(rc);
     9671        HMVMX_CHECK_BREAK(   !u32ActivityState
     9672                          || (u32ActivityState & RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
     9673                             VMX_IGS_ACTIVITY_STATE_INVALID);
     9674        HMVMX_CHECK_BREAK(   !(pCtx->ss.Attr.n.u2Dpl)
     9675                          || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
     9676        uint32_t u32IntrState;
     9677        rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
     9678        AssertRCBreak(rc);
     9679        if (   u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
     9680            || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
     9681        {
     9682            HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
     9683        }
     9684
      9685        /** @todo Activity state and injecting interrupts. Left as a todo since we
      9686         *        currently don't use any activity state other than ACTIVE. */
     9687
     9688        HMVMX_CHECK_BREAK(   !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
     9689                          || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
     9690
     9691        /* Guest interruptibility-state. */
     9692        HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
     9693        HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
     9694                                       != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
     9695                          VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
     9696        HMVMX_CHECK_BREAK(   (u32Eflags & X86_EFL_IF)
     9697                          || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
     9698                          VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
     9699        if (VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo))
     9700        {
     9701            if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
     9702            {
     9703                HMVMX_CHECK_BREAK(   !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
     9704                                  && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
     9705                                  VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
     9706            }
     9707            else if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
     9708            {
     9709                HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
     9710                                  VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
     9711                HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
     9712                                  VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
     9713            }
     9714        }
     9715        /** @todo Assumes the processor is not in SMM. */
     9716        HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
     9717                          VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
     9718        HMVMX_CHECK_BREAK(   !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
     9719                          || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
     9720                             VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
     9721        if (   (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
     9722            && VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
     9723            && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
     9724        {
     9725            HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI),
     9726                              VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
     9727        }
     9728
     9729        /* Pending debug exceptions. */
     9730#if HC_ARCH_BITS == 64
     9731        rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
     9732        AssertRCBreak(rc);
     9733        /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
     9734        HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
     9735        u32Val = u64Val;    /* For pending debug exceptions checks below. */
     9736#else
     9737        rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u32Val);
     9738        AssertRCBreak(rc);
     9739        /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
     9740        HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
     9741#endif
     9742
     9743        if (   (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
     9744            || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
     9745            || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
     9746        {
     9747            if (   (u32Eflags & X86_EFL_TF)
     9748                && !(u64DebugCtlMsr & RT_BIT_64(1)))    /* Bit 1 is IA32_DEBUGCTL.BTF. */
     9749            {
     9750                /* Bit 14 is PendingDebug.BS. */
     9751                HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
     9752            }
     9753            if (   !(u32Eflags & X86_EFL_TF)
     9754                || (u64DebugCtlMsr & RT_BIT_64(1)))     /* Bit 1 is IA32_DEBUGCTL.BTF. */
     9755            {
     9756                /* Bit 14 is PendingDebug.BS. */
     9757                HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
     9758            }
     9759        }
     9760
     9761        /* VMCS link pointer. */
     9762        rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
     9763        AssertRCBreak(rc);
     9764        if (u64Val != UINT64_C(0xffffffffffffffff))
     9765        {
     9766            HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
     9767            /** @todo Bits beyond the processor's physical-address width MBZ. */
     9768            /** @todo 32-bit located in memory referenced by value of this field (as a
     9769             *        physical address) must contain the processor's VMCS revision ID. */
     9770            /** @todo SMM checks. */
     9771        }
     9772
     9773        /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
     9774         *        not using nested paging? */
     9775        if (   pVM->hm.s.fNestedPaging
     9776            && !fLongModeGuest
     9777            && CPUMIsGuestInPAEModeEx(pCtx))
     9778        {
     9779            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
     9780            AssertRCBreak(rc);
     9781            HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
     9782
     9783            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
     9784            AssertRCBreak(rc);
     9785            HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
     9786
     9787            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
     9788            AssertRCBreak(rc);
     9789            HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
     9790
     9791            rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
     9792            AssertRCBreak(rc);
     9793            HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
     9794        }
     9795
     9796        /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
     9797        if (uError == VMX_IGS_ERROR)
     9798            uError = VMX_IGS_REASON_NOT_FOUND;
     9799    } while (0);
     9800
     9801    pVCpu->hm.s.u32HMError = uError;
     9802    return uError;
     9803
     9804#undef HMVMX_ERROR_BREAK
     9805#undef HMVMX_CHECK_BREAK
     9806}
     9807
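/*
 * Editor's note -- a minimal sketch (not part of this changeset) of how a
 * diagnostic path might consume hmR0VmxCheckGuestState(). The helper name and
 * the LogRel wording are assumptions for illustration only; only the checker's
 * signature and VMX_IGS_REASON_NOT_FOUND above are taken from the source.
 */
static void hmR0VmxSketchDiagInvalidGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
{
    /* Ask the checker which guest-state check the CPU most likely tripped over. */
    uint32_t const uIgs = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
    if (uIgs == VMX_IGS_REASON_NOT_FOUND)
        LogRel(("VM-entry failed but no invalid guest-state reason was found\n"));
    else
        LogRel(("VM-entry failed, invalid guest-state diagnostic %#x (see VMX_IGS_*)\n", uIgs));
}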
     9808
     9809/**
     84509810 * Sets up the APIC-access page for virtualizing APIC access.
    84519811 *
     
    84729832
    84739833    /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
    8474     Assert(pVM->hm.s.vmx.HCPhysApicAccess);
     9834    Assert(pVM->hm.s.vmx.HCPhysApicAccess != NIL_RTHCPHYS);
    84759835    rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
    84769836    AssertRCReturn(rc, rc);
    84779837
    84789838    /* Update the per-VCPU cache of the APIC base MSR. */
    8479     pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
     9839    pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase;
    84809840    return VINF_SUCCESS;
    84819841}
     9842
     9843
     9844#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     9845/**
     9846 * Merges the guest with the nested-guest MSR bitmap in preparation of executing the
     9847 * nested-guest using hardware-assisted VMX.
     9848 *
     9849 * @param   pVCpu               The cross context virtual CPU structure.
     9850 * @param   pVmcsInfoNstGst     The nested-guest VMCS info. object.
     9851 * @param   pVmcsInfoGst        The guest VMCS info. object.
     9852 */
     9853static void hmR0VmxMergeMsrBitmapNested(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst)
     9854{
     9855    uint64_t const *pu64MsrBitmapNstGst = (uint64_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap);
     9856    uint64_t const *pu64MsrBitmapGst    = (uint64_t const *)pVmcsInfoGst->pvMsrBitmap;
     9857    uint64_t       *pu64MsrBitmap       = (uint64_t *)pVmcsInfoNstGst->pvMsrBitmap;
     9858    Assert(pu64MsrBitmapNstGst);
     9859    Assert(pu64MsrBitmapGst);
     9860    Assert(pu64MsrBitmap);
     9861
     9862    /*
     9863     * We merge the guest MSR bitmap with the nested-guest MSR bitmap such that any
     9864     * MSR that is intercepted by the guest is also intercepted while executing the
     9865     * nested-guest using hardware-assisted VMX.
     9866     */
     9867    uint32_t const cbFrag = sizeof(uint64_t);
     9868    uint32_t const cFrags = X86_PAGE_4K_SIZE / cbFrag;
      9869    for (uint32_t i = 0; i < cFrags; i++)   /* '<' not '<=': the bitmap page holds exactly cFrags qwords. */
     9870        pu64MsrBitmap[i] = pu64MsrBitmapNstGst[i] | pu64MsrBitmapGst[i];
     9871}
     9872
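/*
 * Editor's note -- a standalone toy illustration (assumed, not part of this
 * changeset) of the OR-merge semantics above: an MSR access stays intercepted
 * if either the guest or the nested-guest bitmap intercepts it. Uses 2-qword
 * bitmaps instead of the real 4K page for brevity.
 */
#if 0 /* illustration only */
static void sketchMsrBitmapMerge(void)
{
    uint64_t const au64Gst[2]    = { UINT64_C(0x1), 0 };  /* guest intercepts bit 0 */
    uint64_t const au64NstGst[2] = { 0, UINT64_C(0x2) };  /* nested-guest intercepts bit 65 */
    uint64_t       au64Merged[2];
    for (unsigned i = 0; i < RT_ELEMENTS(au64Merged); i++) /* strictly less-than */
        au64Merged[i] = au64Gst[i] | au64NstGst[i];
    Assert(au64Merged[0] & UINT64_C(0x1));   /* still intercepted for the nested-guest */
    Assert(au64Merged[1] & UINT64_C(0x2));
}
#endif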
     9873
     9874/**
     9875 * Merges the guest VMCS in to the nested-guest VMCS controls in preparation of
     9876 * hardware-assisted VMX execution of the nested-guest.
     9877 *
     9878 * For a guest, we don't modify these controls once we set up the VMCS.
     9879 *
      9880 * For nested-guests, the guest hypervisor provides these controls on every
      9881 * nested-guest VM-entry and could potentially change them each time; hence we
      9882 * need to merge them before every nested-guest VM-entry.
     9883 *
     9884 * @returns VBox status code.
     9885 * @param   pVCpu       The cross context virtual CPU structure.
     9886 */
     9887static int hmR0VmxMergeVmcsNested(PVMCPU pVCpu)
     9888{
     9889    PVM pVM = pVCpu->CTX_SUFF(pVM);
     9890    PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hm.s.vmx.VmcsInfo;
     9891    PCVMXVVMCS    pVmcsNstGst  = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     9892    Assert(pVmcsNstGst);
     9893
     9894    /*
     9895     * Merge the controls with the requirements of the guest VMCS.
     9896     *
     9897     * We do not need to validate the nested-guest VMX features specified in the
      9898     * nested-guest VMCS against the features supported by the physical CPU, as
      9899     * that is already done by the VMLAUNCH/VMRESUME instruction emulation.
     9900     *
     9901     * This is because the VMX features exposed by CPUM (through CPUID/MSRs) to the
     9902     * guest are derived from the VMX features supported by the physical CPU.
     9903     */
     9904
     9905    /* Pin-based VM-execution controls. */
     9906    uint32_t const u32PinCtls = pVmcsNstGst->u32PinCtls | pVmcsInfoGst->u32PinCtls;
     9907
     9908    /* Processor-based VM-execution controls. */
     9909    uint32_t       u32ProcCtls = (pVmcsNstGst->u32ProcCtls  & ~VMX_PROC_CTLS_USE_IO_BITMAPS)
     9910                               | (pVmcsInfoGst->u32ProcCtls & ~(  VMX_PROC_CTLS_INT_WINDOW_EXIT
     9911                                                                | VMX_PROC_CTLS_NMI_WINDOW_EXIT
     9912                                                                | VMX_PROC_CTLS_USE_TPR_SHADOW
     9913                                                                | VMX_PROC_CTLS_MONITOR_TRAP_FLAG));
     9914
     9915    /* Secondary processor-based VM-execution controls. */
     9916    uint32_t const u32ProcCtls2 = (pVmcsNstGst->u32ProcCtls2  & ~VMX_PROC_CTLS2_VPID)
     9917                                | (pVmcsInfoGst->u32ProcCtls2 & ~(  VMX_PROC_CTLS2_VIRT_APIC_ACCESS
     9918                                                                  | VMX_PROC_CTLS2_INVPCID
     9919                                                                  | VMX_PROC_CTLS2_RDTSCP
     9920                                                                  | VMX_PROC_CTLS2_XSAVES_XRSTORS
     9921                                                                  | VMX_PROC_CTLS2_APIC_REG_VIRT
     9922                                                                  | VMX_PROC_CTLS2_VIRT_INT_DELIVERY
     9923                                                                  | VMX_PROC_CTLS2_VMFUNC));
     9924
     9925    /*
     9926     * VM-entry controls:
      9927     * These controls contain state that depends on the nested-guest state (primarily
      9928     * the EFER MSR) and are thus not constant through VMLAUNCH/VMRESUME and the
      9929     * nested-guest VM-exit. Although the nested-hypervisor cannot change them, we need
      9930     * to in order to properly continue executing the nested-guest if the EFER MSR
      9931     * changes but does not cause a nested-guest VM-exit.
     9932     *
     9933     * VM-exit controls:
      9934     * These controls specify the host state on return. We cannot use the controls from
      9935     * the nested-hypervisor state as-is, as they would contain the guest state rather
      9936     * than the host state. Since the host state is subject to change (e.g. preemption,
      9937     * trips to ring-3, longjmps and rescheduling to a different host CPU), these
      9938     * controls are not constant through VMLAUNCH/VMRESUME and the nested-guest VM-exit.
     9939     *
     9940     * VM-entry MSR-load:
     9941     * The guest MSRs from the VM-entry MSR-load area are already loaded into the
     9942     * guest-CPU context by the VMLAUNCH/VMRESUME instruction emulation.
     9943     *
     9944     * VM-exit MSR-store:
     9945     * The VM-exit emulation will take care of populating the MSRs from the guest-CPU
     9946     * context back into the VM-exit MSR-store area.
     9947     *
     9948     * VM-exit MSR-load areas:
     9949     * This must contain the real host MSRs with hardware-assisted VMX execution. Hence,
     9950     * we can entirely ignore what the nested-hypervisor wants to load here.
     9951     */
     9952
     9953    /*
     9954     * Exception bitmap.
     9955     *
     9956     * We could remove #UD from the guest bitmap and merge it with the nested-guest
     9957     * bitmap here (and avoid doing anything while exporting nested-guest state), but to
      9958     * keep the code more flexible if intercepting exceptions becomes more dynamic in
      9959     * the future, we do it as part of exporting the nested-guest state.
     9960     */
     9961    uint32_t const u32XcptBitmap = pVmcsNstGst->u32XcptBitmap | pVmcsInfoGst->u32XcptBitmap;
     9962
     9963    /*
     9964     * CR0/CR4 guest/host mask.
     9965     *
     9966     * Modifications by the nested-guest to CR0/CR4 bits owned by the host and the guest
     9967     * must cause VM-exits, so we need to merge them here.
     9968     */
     9969    uint64_t const u64Cr0Mask = pVmcsNstGst->u64Cr0Mask.u | pVmcsInfoGst->u64Cr0Mask;
     9970    uint64_t const u64Cr4Mask = pVmcsNstGst->u64Cr4Mask.u | pVmcsInfoGst->u64Cr4Mask;
     9971
     9972    /*
     9973     * Page-fault error-code mask and match.
     9974     *
     9975     * Although we require unrestricted guest execution (and thereby nested-paging) for
     9976     * hardware-assisted VMX execution of nested-guests and thus the outer guest doesn't
     9977     * normally intercept #PFs, it might intercept them for debugging purposes.
     9978     *
     9979     * If the outer guest is not intercepting #PFs, we can use the nested-guest #PF
     9980     * filters. If the outer guest is intercepting #PFs we must intercept all #PFs.
     9981     */
     9982    uint32_t u32XcptPFMask;
     9983    uint32_t u32XcptPFMatch;
     9984    if (!(pVmcsInfoGst->u32XcptBitmap & RT_BIT(X86_XCPT_PF)))
     9985    {
     9986        u32XcptPFMask  = pVmcsNstGst->u32XcptPFMask;
     9987        u32XcptPFMatch = pVmcsNstGst->u32XcptPFMatch;
     9988    }
     9989    else
     9990    {
     9991        u32XcptPFMask  = 0;
     9992        u32XcptPFMatch = 0;
     9993    }
     9994
     9995    /*
     9996     * Pause-Loop exiting.
     9997     */
     9998    uint32_t const cPleGapTicks    = RT_MIN(pVM->hm.s.vmx.cPleGapTicks,    pVmcsNstGst->u32PleGap);
     9999    uint32_t const cPleWindowTicks = RT_MIN(pVM->hm.s.vmx.cPleWindowTicks, pVmcsNstGst->u32PleWindow);
     10000
     10001    /*
     10002     * I/O Bitmap.
     10003     *
     10004     * We do not use the I/O bitmap that may be provided by the guest hypervisor as we
     10005     * always intercept all I/O port accesses.
     10006     */
     10007    Assert(u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
     10008
     10009    /*
     10010     * APIC-access page.
     10011     *
     10012     * The APIC-access page address has already been initialized while setting up the
     10013     * nested-guest VMCS. In theory, even if the guest-physical address is invalid, it
      10014     * should not be of any consequence to the host or to the guest for that matter, but
     10015     * we only accept valid addresses verified by the VMLAUNCH/VMRESUME instruction
     10016     * emulation to keep it simple.
     10017     */
     10018
     10019    /*
     10020     * Virtual-APIC page and TPR threshold.
     10021     *
     10022     * We shall use the host-physical address of the virtual-APIC page in guest memory directly.
     10023     * For this reason, we can access the virtual-APIC page of the nested-guest only using
     10024     * PGM physical handlers as we must not assume a kernel virtual-address mapping exists and
      10025     * requesting PGM for a mapping could be expensive/resource-intensive (PGM mapping cache).
     10026     */
     10027    RTHCPHYS       HCPhysVirtApic  = NIL_RTHCPHYS;
     10028    uint32_t const u32TprThreshold = pVmcsNstGst->u32TprThreshold;
     10029    if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
     10030    {
     10031        int rc = PGMPhysGCPhys2HCPhys(pVM, pVmcsNstGst->u64AddrVirtApic.u, &HCPhysVirtApic);
     10032
     10033        /*
     10034         * If the guest hypervisor has loaded crap into the virtual-APIC page field
     10035         * we would fail to obtain a valid host-physical address for its guest-physical
     10036         * address.
     10037         *
     10038         * We currently do not support this scenario. Maybe in the future if there is a
     10039         * pressing need we can explore making this particular set of conditions work.
     10040         * Right now we just cause a VM-entry failure.
     10041         *
     10042         * This has already been checked by VMLAUNCH/VMRESUME instruction emulation,
      10043         * so it should not really fail at the moment.
     10044         */
     10045        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     10046    }
     10047    else
     10048    {
     10049        /*
      10050         * We must make sure CR8 reads/writes cause VM-exits when TPR shadowing is not
     10051         * used by the guest hypervisor. Preventing MMIO accesses to the physical APIC will
     10052         * be taken care of by EPT/shadow paging.
     10053         */
     10054        if (pVM->hm.s.fAllow64BitGuests)
     10055        {
     10056            u32ProcCtls |= VMX_PROC_CTLS_CR8_STORE_EXIT
     10057                        |  VMX_PROC_CTLS_CR8_LOAD_EXIT;
     10058        }
     10059    }
     10060
     10061    /*
     10062     * Validate basic assumptions.
     10063     */
     10064    PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
     10065    Assert(pVM->hm.s.vmx.fAllowUnrestricted);
     10066    Assert(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
     10067    Assert(hmGetVmxActiveVmcsInfo(pVCpu) == pVmcsInfoNstGst);
     10068
     10069    /*
     10070     * Commit it to the nested-guest VMCS.
     10071     */
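    /* Editor's note: each field below is written only when it differs from the
       value cached in pVmcsInfoNstGst, sparing VMWRITEs on nested-guest runs
       where the guest hypervisor left the controls unchanged. */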
     10072    int rc = VINF_SUCCESS;
     10073    if (pVmcsInfoNstGst->u32PinCtls != u32PinCtls)
     10074        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, u32PinCtls);
     10075    if (pVmcsInfoNstGst->u32ProcCtls != u32ProcCtls)
     10076        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, u32ProcCtls);
     10077    if (pVmcsInfoNstGst->u32ProcCtls2 != u32ProcCtls2)
     10078        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, u32ProcCtls2);
     10079    if (pVmcsInfoNstGst->u32XcptBitmap != u32XcptBitmap)
     10080        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
     10081    if (pVmcsInfoNstGst->u64Cr0Mask != u64Cr0Mask)
     10082        rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
     10083    if (pVmcsInfoNstGst->u64Cr4Mask != u64Cr4Mask)
     10084        rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
     10085    if (pVmcsInfoNstGst->u32XcptPFMask != u32XcptPFMask)
     10086        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, u32XcptPFMask);
     10087    if (pVmcsInfoNstGst->u32XcptPFMatch != u32XcptPFMatch)
     10088        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, u32XcptPFMatch);
     10089    if (   !(u32ProcCtls  & VMX_PROC_CTLS_PAUSE_EXIT)
     10090        &&  (u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
     10091    {
     10092        Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
     10093        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP,    cPleGapTicks);
     10094        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, cPleWindowTicks);
     10095    }
     10096    if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
     10097    {
     10098        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
     10099        rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
     10100    }
     10101    AssertRCReturn(rc, rc);
     10102
     10103    /*
     10104     * Update the nested-guest VMCS cache.
     10105     */
     10106    pVmcsInfoNstGst->u32PinCtls     = u32PinCtls;
     10107    pVmcsInfoNstGst->u32ProcCtls    = u32ProcCtls;
     10108    pVmcsInfoNstGst->u32ProcCtls2   = u32ProcCtls2;
     10109    pVmcsInfoNstGst->u32XcptBitmap  = u32XcptBitmap;
     10110    pVmcsInfoNstGst->u64Cr0Mask     = u64Cr0Mask;
     10111    pVmcsInfoNstGst->u64Cr4Mask     = u64Cr4Mask;
     10112    pVmcsInfoNstGst->u32XcptPFMask  = u32XcptPFMask;
     10113    pVmcsInfoNstGst->u32XcptPFMatch = u32XcptPFMatch;
     10114    pVmcsInfoNstGst->HCPhysVirtApic = HCPhysVirtApic;
     10115
     10116    /*
     10117     * MSR bitmap.
     10118     *
     10119     * The MSR bitmap address has already been initialized while setting up the
     10120     * nested-guest VMCS, here we need to merge the MSR bitmaps.
     10121     */
     10122    if (u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     10123        hmR0VmxMergeMsrBitmapNested(pVCpu, pVmcsInfoNstGst, pVmcsInfoGst);
     10124
     10125    return VINF_SUCCESS;
     10126}
     10127#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    848210128
    848310129
     
    850510151 *
    850610152 * @param   pVCpu           The cross context virtual CPU structure.
    8507  * @param   pVmxTransient   Pointer to the VMX transient structure.
    8508  * @param   fStepping       Set if called from hmR0VmxRunGuestCodeStep().  Makes
    8509  *                          us ignore some of the reasons for returning to
    8510  *                          ring-3, and return VINF_EM_DBG_STEPPED if event
    8511  *                          dispatching took place.
     10153 * @param   pVmxTransient   The VMX-transient structure.
     10154 * @param   fStepping       Whether we are single-stepping the guest in the
     10155 *                          hypervisor debugger. Makes us ignore some of the reasons
     10156 *                          for returning to ring-3, and return VINF_EM_DBG_STEPPED
     10157 *                          if event dispatching took place.
    851210158 */
    851310159static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping)
     
    851610162
    851710163#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
    8518     if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    8519     {
    8520         Log2(("hmR0VmxPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
    8521         RT_NOREF3(pVCpu, pVmxTransient, fStepping);
     10164    if (pVmxTransient->fIsNestedGuest)
     10165    {
     10166        RT_NOREF2(pVCpu, fStepping);
     10167        Log2Func(("Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
    852210168        return VINF_EM_RESCHEDULE_REM;
    852310169    }
     
    853310179    VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, fStepping);
    853410180    if (rcStrict == VINF_SUCCESS)
    8535     { /* FFs doesn't get set all the time. */ }
     10181    { /* FFs don't get set all the time. */ }
    853610182    else
    853710183        return rcStrict;
    853810184
     10185#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     10186    /*
     10187     * Switch to the nested-guest VMCS as we may have transitioned into executing
     10188     * the nested-guest without leaving ring-0. Otherwise, if we came from ring-3
     10189     * we would load the nested-guest VMCS while entering the VMX ring-0 session.
     10190     *
     10191     * We do this as late as possible to minimize (though not completely remove)
      10192     * clearing/loading the VMCS again due to premature trips to ring-3 above.
     10193     */
     10194    if (pVmxTransient->fIsNestedGuest)
     10195    {
     10196        if (!pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs)
     10197        {
     10198            /*
      10199             * Ensure we have synced everything from the guest VMCS and also flag
      10200             * that we need to export the full (nested) guest-CPU context to the
     10201             * nested-guest VMCS.
     10202             */
     10203            HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     10204            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);
     10205
     10206            RTCCUINTREG const fEFlags = ASMIntDisableFlags();
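            /* Editor's note: interrupts stay disabled across the switch so that
               clearing/loading the VMCS and updating fSwitchedToNstGstVmcs below
               appear atomic with respect to preemption on this host CPU. */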
     10207            int rc = hmR0VmxSwitchVmcs(&pVCpu->hm.s.vmx.VmcsInfo, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
     10208            if (RT_LIKELY(rc == VINF_SUCCESS))
     10209            {
     10210                pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = true;
     10211                ASMSetFlags(fEFlags);
     10212                pVmxTransient->pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
     10213
     10214                /*
     10215                 * We use a different VM-exit MSR-store area for the nested-guest. Hence,
     10216                 * flag that we need to update the host MSR values there. Even if we decide
     10217                 * in the future to share the VM-exit MSR-store area page with the guest,
     10218                 * if its content differs, we would have to update the host MSRs anyway.
     10219                 */
     10220                pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
     10221                Assert(!pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer);   /** @todo NSTVMX: Paranoia remove later. */
     10222            }
     10223            else
     10224            {
     10225                ASMSetFlags(fEFlags);
     10226                return rc;
     10227            }
     10228        }
     10229
     10230        /*
     10231         * Merge guest VMCS controls with the nested-guest VMCS controls.
     10232         *
     10233         * Even if we have not executed the guest prior to this (e.g. when resuming
     10234         * from a saved state), we should be okay with merging controls as we
     10235         * initialize the guest VMCS controls as part of VM setup phase.
     10236         */
     10237        if (!pVCpu->hm.s.vmx.fMergedNstGstCtls)
     10238        {
     10239            int rc = hmR0VmxMergeVmcsNested(pVCpu);
     10240            AssertRCReturn(rc, rc);
     10241            pVCpu->hm.s.vmx.fMergedNstGstCtls = true;
     10242        }
     10243    }
     10244#endif
     10245
    853910246    /*
    854010247     * Virtualize memory-mapped accesses to the physical APIC (may take locks).
      10248     * We look at the guest VMCS controls here as we always set them when supported by
      10249     * the physical CPU. Looking at the nested-guest controls here is not possible
      10250     * because they are not merged yet.
    854110251     */
    854210252    PVM pVM = pVCpu->CTX_SUFF(pVM);
    8543     if (   !pVCpu->hm.s.vmx.u64MsrApicBase
    8544         && (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
     10253    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     10254    if (   !pVCpu->hm.s.vmx.u64GstMsrApicBase
     10255        && (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    854510256        && PDMHasApic(pVM))
    854610257    {
     
    854910260    }
    855010261
     10262    /*
     10263     * Evaluate events to be injected into the guest.
     10264     *
     10265     * Events in TRPM can be injected without inspecting the guest state.
     10266     * If any new events (interrupts/NMI) are pending currently, we try to set up the
      10267     * guest to cause a VM-exit the next time it is ready to receive the event.
     10268     */
    855110269    if (TRPMHasTrap(pVCpu))
    855210270        hmR0VmxTrpmTrapToPendingEvent(pVCpu);
    8553     uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu);
     10271
     10272    uint32_t fIntrState;
     10273    rcStrict = hmR0VmxEvaluatePendingEvent(pVCpu, pVmxTransient, &fIntrState);
     10274
     10275#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     10276    /*
      10277     * If evaluating pending events failed (unlikely), or if we were preparing to run a
      10278     * nested-guest but performed a nested-guest VM-exit while doing so, we should bail.
     10279     */
     10280    if (   rcStrict != VINF_SUCCESS
     10281        || (    pVmxTransient->fIsNestedGuest
     10282            && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)))
     10283        return rcStrict;
     10284#endif
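
Event evaluation follows the standard VT-x pattern: inject the event now if the guest can accept it, otherwise arm an interrupt-window (or NMI-window) VM-exit so we regain control the moment it becomes deliverable. A self-contained sketch of the interrupt case; the constant values follow the Intel SDM but the names are local to this example:

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_INT_STATE_BLOCK_STI    UINT32_C(0x1)  /* interruptibility: blocking by STI */
    #define SKETCH_INT_STATE_BLOCK_MOVSS  UINT32_C(0x2)  /* interruptibility: blocking by MOV SS */
    #define SKETCH_PROC_CTLS_INT_WINDOW   UINT32_C(0x4)  /* proc-based ctls: interrupt-window exiting */

    /* Returns the (possibly updated) processor-based controls: if a pending
       interrupt cannot be delivered now, request a VM-exit as soon as it can be. */
    static uint32_t sketchEvaluateIntWindow(bool fIntPending, bool fGuestIf, uint32_t fIntrState, uint32_t fProcCtls)
    {
        bool const fBlocked = !fGuestIf
                           || (fIntrState & (SKETCH_INT_STATE_BLOCK_STI | SKETCH_INT_STATE_BLOCK_MOVSS));
        if (fIntPending && fBlocked)
            fProcCtls |= SKETCH_PROC_CTLS_INT_WINDOW;
        return fProcCtls;
    }
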
    855410285
    855510286    /*
     
    855710288     * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
    855810289     * also result in triple-faulting the VM.
    8559      */
    8560     rcStrict = hmR0VmxInjectPendingEvent(pVCpu, fIntrState, fStepping);
     10290     *
     10291     * The above does not apply when executing a nested-guest (since unrestricted guest execution
      10292     * is a requirement); regardless, doing it avoids duplicating code elsewhere.
     10293     */
     10294    rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping);
    856110295    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    856210296    { /* likely */ }
     
    858710321        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
    858810322    }
     10323
     10324#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     10325    /* Paranoia. */
     10326    Assert(!pVmxTransient->fIsNestedGuest || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
     10327#endif
    858910328
    859010329    /*
     
    860210341     * CPU migration.
    860310342     *
    8604      * If we are injecting events to a real-on-v86 mode guest, we will have to update
    8605      * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
    8606      * Hence, loading of the guest state needs to be done -after- injection of events.
    8607      */
    8608     rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu);
     10343     * If we are injecting events to a real-on-v86 mode guest, we would have updated RIP and some segment
     10344     * registers. Hence, loading of the guest state needs to be done -after- injection of events.
     10345     */
     10346    rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pVmxTransient);
    860910347    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    861010348    { /* likely */ }
     
    861810356     * We disable interrupts so that we don't miss any interrupts that would flag preemption
    861910357     * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
    8620      * preemption disabled for a while.  Since this is purly to aid the
     10358     * preemption disabled for a while.  Since this is purely to aid the
    862110359     * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
    862210360     * disable interrupt on NT.
     
    866610404
    866710405/**
    8668  * Prepares to run guest code in VT-x and we've committed to doing so. This
    8669  * means there is no backing out to ring-3 or anywhere else at this
    8670  * point.
     10406 * Final preparations before executing guest code using hardware-assisted VMX.
     10407 *
     10408 * We can no longer get preempted to a different host CPU and there are no returns
     10409 * to ring-3. We ignore any errors that may happen from this point (e.g. VMWRITE
      10410 * failures); this function is not intended to fail except on unrecoverable hardware
     10411 * errors.
    867110412 *
    867210413 * @param   pVCpu           The cross context virtual CPU structure.
    8673  * @param   pVmxTransient   Pointer to the VMX transient structure.
     10414 * @param   pVmxTransient   The VMX-transient structure.
    867410415 *
    867510416 * @remarks Called with preemption disabled.
     
    868110422    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    868210423    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     10424    Assert(!pVCpu->hm.s.Event.fPending);
    868310425
    868410426    /*
     
    868810430    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    868910431
    8690     PVM pVM = pVCpu->CTX_SUFF(pVM);
     10432    PVM          pVM       = pVCpu->CTX_SUFF(pVM);
     10433    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     10434
    869110435    if (!CPUMIsGuestFPUStateActive(pVCpu))
    869210436    {
     
    869910443
    870010444    /*
    8701      * Lazy-update of the host MSRs values in the auto-load/store MSR area.
    8702      */
    8703     if (   !pVCpu->hm.s.vmx.fUpdatedHostMsrs
    8704         && pVCpu->hm.s.vmx.cMsrs > 0)
    8705         hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
    8706 
    8707     /*
    870810445     * Re-save the host state bits as we may've been preempted (only happens when
    8709      * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
    8710      * Note that the 64-on-32 switcher saves the (64-bit) host state into the VMCS and
    8711      * if we change the switcher back to 32-bit, we *must* save the 32-bit host state here.
    8712      * See @bugref{8432}.
     10446     * thread-context hooks are used or when the VM start function changes).
     10447     * The 64-on-32 switcher saves the (64-bit) host state into the VMCS and if we
     10448     * changed the switcher back to 32-bit, we *must* save the 32-bit host state here,
     10449     * see @bugref{8432}.
     10450     *
     10451     * This may also happen when switching to/from a nested-guest VMCS without leaving
     10452     * ring-0.
    871310453     */
    871410454    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
     
    872410464     */
    872510465    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
    8726         hmR0VmxExportSharedState(pVCpu);
     10466        hmR0VmxExportSharedState(pVCpu, pVmxTransient);
    872710467    AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
    872810468
    8729     /* Store status of the shared guest-host state at the time of VM-entry. */
     10469    /*
     10470     * Store status of the shared guest/host debug state at the time of VM-entry.
     10471     */
    873010472#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    873110473    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
     
    874210484
    874310485    /*
    8744      * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
    8745      */
    8746     if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    8747         pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR];
    8748 
    8749     PHMPHYSCPU pHostCpu     = hmR0GetCurrentCpu();
    8750     RTCPUID    idCurrentCpu = pHostCpu->idCpu;
    8751     if (   pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
     10486     * Always cache the TPR-shadow if the virtual-APIC page exists, thereby skipping
     10487     * more than one conditional check. The post-run side of our code shall determine
      10488     * if it needs to sync the virtual APIC TPR with the TPR-shadow.
     10489     */
     10490    if (pVmcsInfo->pbVirtApic)
     10491        pVmxTransient->u8GuestTpr = pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR];
     10492
     10493    /*
     10494     * Update the host MSRs values in the VM-exit MSR-load area.
     10495     */
     10496    if (!pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs)
     10497    {
     10498        if (pVmcsInfo->cExitMsrLoad > 0)
     10499            hmR0VmxUpdateAutoLoadHostMsrs(pVCpu, pVmcsInfo);
     10500        pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = true;
     10501    }
     10502
     10503    /*
     10504     * Evaluate if we need to intercept guest RDTSC/P accesses. Set up the
     10505     * VMX-preemption timer based on the next virtual sync clock deadline.
     10506     */
     10507    PHMPHYSCPU pHostCpu        = hmR0GetCurrentCpu();
     10508    RTCPUID const idCurrentCpu = pHostCpu->idCpu;
     10509    if (   !pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer
    875210510        || idCurrentCpu != pVCpu->hm.s.idLastCpu)
    875310511    {
    8754         hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
    8755         pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
     10512        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient);
     10513        pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true;
    875610514    }
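
Updating the TSC offsetting and preemption timer essentially converts the time left until the next virtual sync deadline into preemption-timer ticks, which count at the TSC rate shifted right by IA32_VMX_MISC[4:0]. A hedged arithmetic sketch with illustrative names:

    #include <stdint.h>

    /* Convert nanoseconds-to-deadline into a 32-bit VMX-preemption timer value.
       The timer decrements once every 2^cRateShift TSC ticks. Beware that the
       multiplication can overflow for very distant deadlines; real code clamps. */
    static uint32_t sketchPreemptTimerTicks(uint64_t cNsToDeadline, uint64_t uTscHz, uint8_t cRateShift)
    {
        uint64_t const cTscTicks   = cNsToDeadline * uTscHz / UINT64_C(1000000000);
        uint64_t const cTimerTicks = cTscTicks >> cRateShift;
        return cTimerTicks < UINT32_MAX ? (uint32_t)cTimerTicks : UINT32_MAX;
    }
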
    875710515
    875810516    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
    8759     hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu);                     /* Invalidate the appropriate guest entries from the TLB. */
     10517    hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu, pVmcsInfo);          /* Invalidate the appropriate guest entries from the TLB. */
    876010518    Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
    876110519    pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu;      /* Update the error reporting info. with the current host CPU. */
     
    876310521    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
    876410522
    8765     TMNotifyStartOfExecution(pVCpu);                            /* Finally, notify TM to resume its clocks as we're about
    8766                                                                    to start executing. */
    8767 
    8768     /*
    8769      * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
    8770      */
    8771     if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)
    8772     {
    8773         if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
    8774         {
    8775             bool fMsrUpdated;
    8776             hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
    8777             int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
    8778                                              &fMsrUpdated);
    8779             AssertRC(rc2);
    8780             Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
    8781             /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
    8782             pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
     10523    TMNotifyStartOfExecution(pVCpu);                            /* Notify TM to resume its clocks when TSC is tied to execution,
      10524                                                                   as we're about to start executing the guest. */
     10525
     10526    /*
     10527     * Load the guest TSC_AUX MSR when we are not intercepting RDTSCP.
     10528     *
      10529     * This is done this late because updating the TSC offsetting/preemption timer above
     10530     * figures out if we can skip intercepting RDTSCP by calculating the number of
     10531     * host CPU ticks till the next virtual sync deadline (for the dynamic case).
     10532     */
     10533    if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)
     10534    {
     10535        if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
     10536        {
     10537            hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_TSC_AUX);
     10538            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu),
     10539                                                true /* fSetReadWrite */, true /* fUpdateHostMsr */);
     10540            AssertRC(rc);
    878310541        }
    878410542        else
    8785         {
    8786             hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
    8787             Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
    8788         }
    8789     }
    8790 
    8791     if (pVM->cpum.ro.GuestFeatures.fIbrs)
    8792     {
    8793         bool fMsrUpdated;
    8794         hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS);
    8795         int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */,
    8796                                              &fMsrUpdated);
    8797         AssertRC(rc2);
    8798         Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
    8799         /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
    8800         pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
     10543            hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX);
    880110544    }
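
The auto-load/store MSR helpers used above operate on MSR areas whose entry layout is fixed by the Intel SDM (section 24.7.2): a 32-bit MSR index, 32 reserved bits, and a 64-bit value. A sketch of an add-or-update operation on such an area; a real caller must also ensure the area has room and VMWRITE the updated count to the matching VMCS field:

    #include <stdint.h>

    typedef struct SKETCHAUTOMSR
    {
        uint32_t u32Msr;
        uint32_t u32Reserved;
        uint64_t u64Value;
    } SKETCHAUTOMSR;

    /* Returns the new entry count (unchanged if the MSR was already present). */
    static uint32_t sketchAutoMsrAddOrUpdate(SKETCHAUTOMSR *paMsrs, uint32_t cMsrs, uint32_t idMsr, uint64_t uValue)
    {
        uint32_t i;
        for (i = 0; i < cMsrs; i++)
            if (paMsrs[i].u32Msr == idMsr)
                break;                       /* update the existing slot */
        paMsrs[i].u32Msr      = idMsr;       /* assumes capacity for one more entry */
        paMsrs[i].u32Reserved = 0;
        paMsrs[i].u64Value    = uValue;
        return i == cMsrs ? cMsrs + 1 : cMsrs;
    }
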
    880210545
    880310546#ifdef VBOX_STRICT
    8804     hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
    8805     hmR0VmxCheckHostEferMsr(pVCpu);
    8806     AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
     10547    hmR0VmxCheckAutoLoadStoreMsrs(pVCpu, pVmcsInfo);
     10548    hmR0VmxCheckHostEferMsr(pVCpu, pVmcsInfo);
     10549    AssertRC(hmR0VmxCheckVmcsCtls(pVCpu, pVmcsInfo));
    880710550#endif
     10551
    880810552#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
    8809     if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
    8810     {
    8811         uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu);
    8812         if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
    8813             Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
    8814     }
     10553    /** @todo r=ramshankar: We can now probably use iemVmxVmentryCheckGuestState here.
     10554     *        Add a PVMXMSRS parameter to it, so that IEM can look at the host MSRs. */
     10555    uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
     10556    if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
     10557        Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
    881510558#endif
    881610559}
     
    881810561
    881910562/**
    8820  * Performs some essential restoration of state after running guest code in
    8821  * VT-x.
     10563 * First C routine invoked after running guest code using hardware-assisted VMX.
    882210564 *
    882310565 * @param   pVCpu           The cross context virtual CPU structure.
    8824  * @param   pVmxTransient   Pointer to the VMX transient structure.
     10566 * @param   pVmxTransient   The VMX-transient structure.
    882510567 * @param   rcVMRun         Return code of VMLAUNCH/VMRESUME.
    882610568 *
     
    883210574static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
    883310575{
    8834     uint64_t const uHostTsc = ASMReadTSC();
    8835     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     10576    uint64_t const uHostTsc = ASMReadTSC();                     /** @todo We can do a lot better here, see @bugref{9180#c38}. */
    883610577
    883710578    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
     
    884210583    pVmxTransient->fVectoringDoublePF  = false;                 /* Vectoring double page-fault needs to be determined later. */
    884310584
    8844     if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
    8845         TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVCpu->hm.s.vmx.Ctls.u64TscOffset);
     10585    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     10586    if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
     10587    {
     10588        uint64_t uGstTsc;
     10589        if (!pVmxTransient->fIsNestedGuest)
     10590            uGstTsc = uHostTsc + pVmcsInfo->u64TscOffset;
     10591        else
     10592        {
     10593            uint64_t const uNstGstTsc = uHostTsc + pVmcsInfo->u64TscOffset;
     10594            uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc);
     10595        }
     10596        TMCpuTickSetLastSeen(pVCpu, uGstTsc);                           /* Update TM with the guest TSC. */
     10597    }
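
Put differently: with TSC offsetting active, the TSC visible in non-root mode is the host TSC plus the offset in the current VMCS; when a nested-guest was running, that offset is the merged one, so the nested-guest's own offset must be subtracted again to recover the outer guest's TSC for TM. A minimal sketch of the arithmetic:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t sketchGuestTsc(uint64_t uHostTsc, uint64_t offVmcs, uint64_t offNstGst, bool fNestedGuest)
    {
        uint64_t const uSeenTsc = uHostTsc + offVmcs;       /* TSC as read in non-root mode */
        return fNestedGuest ? uSeenTsc - offNstGst : uSeenTsc;
    }
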
    884610598
    884710599    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
    8848     TMNotifyEndOfExecution(pVCpu);                                    /* Notify TM that the guest is no longer running. */
    8849     Assert(!ASMIntAreEnabled());
     10600    TMNotifyEndOfExecution(pVCpu);                                      /* Notify TM that the guest is no longer running. */
    885010601    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    885110602
    885210603#if HC_ARCH_BITS == 64
    8853     pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED;   /* Host state messed up by VT-x, we must restore. */
     10604    pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED;     /* Some host state messed up by VMX needs restoring. */
    885410605#endif
    885510606#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    8856     /* The 64-on-32 switcher maintains fVmcsState on its own and we need to leave it alone here. */
    8857     if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
    8858         pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;      /* Use VMRESUME instead of VMLAUNCH in the next run. */
     10607    /* The 64-on-32 switcher maintains VMCS-launch state on its own
     10608       and we need to leave it alone here. */
     10609    if (pVmcsInfo->pfnStartVM != VMXR0SwitcherStartVM64)
     10610        pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;      /* Use VMRESUME instead of VMLAUNCH in the next run. */
    885910611#else
    8860     pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;          /* Use VMRESUME instead of VMLAUNCH in the next run. */
     10612    pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;          /* Use VMRESUME instead of VMLAUNCH in the next run. */
    886110613#endif
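
The launch-state update reflects a VMX rule: a current VMCS in the clear launch state must be entered with VMLAUNCH, and only afterwards with VMRESUME. A sketch assuming hypothetical instruction wrappers:

    #include <stdbool.h>

    extern int sketchVmlaunch(void);
    extern int sketchVmresume(void);

    static int sketchEnterGuest(bool *pfVmcsLaunched)
    {
        int rc = *pfVmcsLaunched ? sketchVmresume() : sketchVmlaunch();
        if (rc == 0)
            *pfVmcsLaunched = true;     /* subsequent entries must use VMRESUME */
        return rc;
    }
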
    886210614#ifdef VBOX_STRICT
    8863     hmR0VmxCheckHostEferMsr(pVCpu);                                   /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
     10615    hmR0VmxCheckHostEferMsr(pVCpu, pVmcsInfo);                          /* Verify that the host EFER MSR wasn't modified. */
    886410616#endif
    8865     ASMSetFlags(pVmxTransient->fEFlags);                              /* Enable interrupts. */
    8866 
    8867     /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
     10617    Assert(!ASMIntAreEnabled());
     10618    ASMSetFlags(pVmxTransient->fEFlags);                                /* Enable interrupts. */
     10619    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     10620
     10621    /*
     10622     * Save the basic VM-exit reason and check if the VM-entry failed.
     10623     * See Intel spec. 24.9.1 "Basic VM-exit Information".
     10624     */
    886810625    uint32_t uExitReason;
    8869     int rc  = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
    8870     rc     |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
     10626    int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
    887110627    AssertRC(rc);
    887210628    pVmxTransient->uExitReason    = VMX_EXIT_REASON_BASIC(uExitReason);
    887310629    pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
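
Per Intel SDM 24.9.1, the 32-bit exit-reason field packs the basic exit reason into bits 15:0 and flags a failed VM-entry in bit 31, which is what the two macros above extract. As a self-contained sketch:

    #include <stdbool.h>
    #include <stdint.h>

    static inline uint16_t sketchExitReasonBasic(uint32_t uExitReason)
    {
        return (uint16_t)(uExitReason & UINT32_C(0xffff));
    }

    static inline bool sketchExitEntryFailed(uint32_t uExitReason)
    {
        return (uExitReason & UINT32_C(0x80000000)) != 0;
    }
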
    887410630
    8875     if (rcVMRun == VINF_SUCCESS)
     10631    /*
     10632     * Check if VMLAUNCH/VMRESUME succeeded.
     10633     * If this failed, we cause a guru meditation and cease further execution.
     10634     */
     10635    if (RT_LIKELY(rcVMRun == VINF_SUCCESS))
    887610636    {
    887710637        /*
     
    889210652                         UINT64_MAX, uHostTsc);
    889310653
    8894         if (!pVmxTransient->fVMEntryFailed)
     10654        if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
    889510655        {
    889610656            VMMRZCallRing3Enable(pVCpu);
     
    890010660
    890110661#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
    8902             rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     10662            rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    890310663            AssertRC(rc);
    890410664#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
    8905             rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_RFLAGS);
     10665            rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_RFLAGS);
    890610666            AssertRC(rc);
    890710667#else
     
    891010670             * injecting events on re-entry.
    891110671             *
    8912              * We don't import CR0 (when Unrestricted guest execution is unavailable) despite
     10672             * We don't import CR0 (when unrestricted guest execution is unavailable) despite
    891310673             * checking for real-mode while exporting the state because all bits that cause
    891410674             * mode changes wrt CR0 are intercepted.
    891510675             */
    8916             rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_HM_VMX_INT_STATE);
     10676            rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_HM_VMX_INT_STATE);
    891710677            AssertRC(rc);
    891810678#endif
     
    892110681             * Sync the TPR shadow with our APIC state.
    892210682             */
    8923             if (   (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    8924                 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR])
     10683            if (   !pVmxTransient->fIsNestedGuest
     10684                && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
    892510685            {
    8926                 rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]);
    8927                 AssertRC(rc);
    8928                 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
     10686                Assert(pVmcsInfo->pbVirtApic);
     10687                if (pVmxTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR])
     10688                {
     10689                    rc = APICSetTpr(pVCpu, pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]);
     10690                    AssertRC(rc);
     10691                    ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
     10692                }
    892910693            }
    893010694
     
    894110705
    894210706/**
    8943  * Runs the guest code using VT-x the normal way.
     10707 * Runs the guest code using hardware-assisted VMX the normal way.
    894410708 *
    894510709 * @returns VBox status code.
    894610710 * @param   pVCpu       The cross context virtual CPU structure.
    8947  *
    8948  * @note    Mostly the same as hmR0VmxRunGuestCodeStep().
    8949  */
    8950 static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu)
    8951 {
     10711 * @param   pcLoops     Pointer to the number of executed loops.
     10712 */
     10713static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu, uint32_t *pcLoops)
     10714{
     10715    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
     10716    Assert(pcLoops);
     10717    Assert(*pcLoops <= cMaxResumeLoops);
     10718
    895210719    VMXTRANSIENT VmxTransient;
    8953     VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
     10720    RT_ZERO(VmxTransient);
     10721    VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     10722
     10723    /* Paranoia. */
     10724    Assert(VmxTransient.pVmcsInfo == &pVCpu->hm.s.vmx.VmcsInfo);
     10725    Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
     10726
    895410727    VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
    8955     uint32_t     cLoops = 0;
    8956 
    8957     for (;; cLoops++)
     10728    for (;;)
    895810729    {
    895910730        Assert(!HMR0SuspendPending());
    896010731        HMVMX_ASSERT_CPU_SAFE(pVCpu);
    8961 
    8962         /* Preparatory work for running guest code, this may force us to return
    8963            to ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
    896410732        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     10733
     10734        /*
      10735         * Preparatory work for running guest code; this may force us to
     10736         * return to ring-3.
     10737         *
     10738         * Warning! This bugger disables interrupts on VINF_SUCCESS!
     10739         */
    896510740        rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
    896610741        if (rcStrict != VINF_SUCCESS)
    896710742            break;
    896810743
     10744        /* Interrupts are disabled at this point! */
    896910745        hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
    8970         int rcRun = hmR0VmxRunGuest(pVCpu);
    8971 
    8972         /* Restore any residual host-state and save any bits shared between host
    8973            and guest into the guest-CPU state.  Re-enables interrupts! */
     10746        int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
    897410747        hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
    8975 
    8976         /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
     10748        /* Interrupts are re-enabled at this point! */
     10749
     10750        /*
     10751         * Check for errors with running the VM (VMLAUNCH/VMRESUME).
     10752         */
    897710753        if (RT_SUCCESS(rcRun))
    897810754        { /* very likely */ }
     
    898410760        }
    898510761
    8986         /* Profile the VM-exit. */
     10762        /*
     10763         * Profile the VM-exit.
     10764         */
    898710765        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    898810766        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
     
    899310771        VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
    899410772
    8995         /* Handle the VM-exit. */
     10773        /*
     10774         * Handle the VM-exit.
     10775         */
    899610776#ifdef HMVMX_USE_FUNCTION_TABLE
    899710777        rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, &VmxTransient);
    899810778#else
    8999         rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient, VmxTransient.uExitReason);
     10779        rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient);
    900010780#endif
    900110781        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
    900210782        if (rcStrict == VINF_SUCCESS)
    900310783        {
    9004             if (cLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
    9005                 continue; /* likely */
     10784            if (++(*pcLoops) <= cMaxResumeLoops)
     10785                continue;
    900610786            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
    900710787            rcStrict = VINF_EM_RAW_INTERRUPT;
     
    901610796#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    901710797/**
    9018  * Runs the nested-guest code using VT-x the normal way.
     10798 * Runs the nested-guest code using hardware-assisted VMX.
    901910799 *
    902010800 * @returns VBox status code.
    902110801 * @param   pVCpu       The cross context virtual CPU structure.
    9022  * @sa      hmR0VmxRunGuestCodeNormal.
    9023  */
    9024 static VBOXSTRICTRC hmR0VmxRunGuestCodeNested(PVMCPU pVCpu)
    9025 {
    9026     RT_NOREF(pVCpu);
    9027     return VERR_NOT_IMPLEMENTED;
     10802 * @param   pcLoops     Pointer to the number of executed loops.
     10803 *
     10804 * @sa      hmR0VmxRunGuestCodeNormal().
     10805 */
     10806static VBOXSTRICTRC hmR0VmxRunGuestCodeNested(PVMCPU pVCpu, uint32_t *pcLoops)
     10807{
     10808    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
     10809    Assert(pcLoops);
     10810    Assert(*pcLoops <= cMaxResumeLoops);
     10811
     10812    VMXTRANSIENT VmxTransient;
     10813    RT_ZERO(VmxTransient);
     10814    VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     10815    VmxTransient.fIsNestedGuest = true;
     10816
     10817    VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
     10818    for (;;)
     10819    {
     10820        Assert(!HMR0SuspendPending());
     10821        HMVMX_ASSERT_CPU_SAFE(pVCpu);
     10822        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     10823
     10824        /*
      10825         * Preparatory work for running nested-guest code; this may force us to
     10826         * return to ring-3.
     10827         *
     10828         * Warning! This bugger disables interrupts on VINF_SUCCESS!
     10829         */
     10830        rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
     10831        if (rcStrict != VINF_SUCCESS)
     10832            break;
     10833
     10834        /* Interrupts are disabled at this point! */
     10835        hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
     10836        int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
     10837        hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
     10838        /* Interrupts are re-enabled at this point! */
     10839
     10840        /*
     10841         * Check for errors with running the VM (VMLAUNCH/VMRESUME).
     10842         */
     10843        if (RT_SUCCESS(rcRun))
     10844        { /* very likely */ }
     10845        else
     10846        {
     10847            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
     10848            hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
     10849            return rcRun;
     10850        }
     10851
     10852        /*
     10853         * Profile the VM-exit.
     10854         */
     10855        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
     10856        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
     10857        STAM_COUNTER_INC(&pVCpu->hm.s.paStatNestedExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
     10858        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
     10859        HMVMX_START_EXIT_DISPATCH_PROF();
     10860
     10861        VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
     10862
     10863        /*
     10864         * Handle the VM-exit.
     10865         */
     10866        rcStrict = hmR0VmxHandleExitNested(pVCpu, &VmxTransient);
     10867        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
     10868        if (   rcStrict == VINF_SUCCESS
     10869            && CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
     10870        {
     10871            if (++(*pcLoops) <= cMaxResumeLoops)
     10872                continue;
     10873            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     10874            rcStrict = VINF_EM_RAW_INTERRUPT;
     10875        }
     10876        break;
     10877    }
     10878
     10879    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     10880    return rcStrict;
    902810881}
    902910882#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
     
    909810951 * @param   pVCpu           The cross context virtual CPU structure of the
    909910952 *                          calling EMT.
    9100  * @param   pDbgState       The structure to initialize.
    9101  */
    9102 static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
     10953 * @param   pVmxTransient   The VMX-transient structure.
     10954 * @param   pDbgState       The debug state to initialize.
     10955 */
     10956static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    910310957{
    910410958    pDbgState->uRipStart            = pVCpu->cpum.GstCtx.rip;
     
    911410968    pDbgState->fCpe2Extra           = 0;
    911510969    pDbgState->bmXcptExtra          = 0;
    9116     pDbgState->fProcCtlsInitial     = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
    9117     pDbgState->fProcCtls2Initial    = pVCpu->hm.s.vmx.Ctls.u32ProcCtls2;
    9118     pDbgState->bmXcptInitial        = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap;
     10970    pDbgState->fProcCtlsInitial     = pVmxTransient->pVmcsInfo->u32ProcCtls;
     10971    pDbgState->fProcCtls2Initial    = pVmxTransient->pVmcsInfo->u32ProcCtls2;
     10972    pDbgState->bmXcptInitial        = pVmxTransient->pVmcsInfo->u32XcptBitmap;
    911910973}
    912010974
     
    912810982 * latter case.
    912910983 *
    9130  * @param   pVCpu       The cross context virtual CPU structure.
    9131  * @param   pDbgState   The debug state.
    9132  */
    9133 static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
     10984 * @param   pVCpu           The cross context virtual CPU structure.
     10985 * @param   pVmxTransient   The VMX-transient structure.
     10986 * @param   pDbgState       The debug state.
     10987 */
     10988static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    913410989{
    913510990    /*
     
    914010995     *       there should be no stale data in pCtx at this point.
    914110996     */
    9142     if (   (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
    9143         || (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & pDbgState->fCpe1Unwanted))
    9144     {
    9145         pVCpu->hm.s.vmx.Ctls.u32ProcCtls   |= pDbgState->fCpe1Extra;
    9146         pVCpu->hm.s.vmx.Ctls.u32ProcCtls   &= ~pDbgState->fCpe1Unwanted;
    9147         VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
    9148         Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls));
     10997    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     10998    if (   (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
     10999        || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
     11000    {
     11001        pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
     11002        pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
     11003        VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
     11004        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
    914911005        pDbgState->fModifiedProcCtls   = true;
    915011006    }
    915111007
    9152     if ((pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
    9153     {
    9154         pVCpu->hm.s.vmx.Ctls.u32ProcCtls2  |= pDbgState->fCpe2Extra;
    9155         VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVCpu->hm.s.vmx.Ctls.u32ProcCtls2);
    9156         Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls2));
     11008    if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
     11009    {
     11010        pVmcsInfo->u32ProcCtls2  |= pDbgState->fCpe2Extra;
     11011        VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
     11012        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
    915711013        pDbgState->fModifiedProcCtls2  = true;
    915811014    }
    915911015
    9160     if ((pVCpu->hm.s.vmx.Ctls.u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
    9161     {
    9162         pVCpu->hm.s.vmx.Ctls.u32XcptBitmap |= pDbgState->bmXcptExtra;
    9163         VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.Ctls.u32XcptBitmap);
    9164         Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32XcptBitmap));
     11016    if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
     11017    {
     11018        pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
     11019        VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
     11020        Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
    916511021        pDbgState->fModifiedXcptBitmap = true;
    916611022    }
    916711023
    9168     if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.Ctls.u32Cr0Mask != 0)
    9169     {
    9170         pVCpu->hm.s.vmx.Ctls.u32Cr0Mask = 0;
    9171         VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, 0);
     11024    if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
     11025    {
     11026        pVmcsInfo->u64Cr0Mask = 0;
     11027        VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, 0);
    917211028        Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
    917311029    }
    917411030
    9175     if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.Ctls.u32Cr4Mask != 0)
    9176     {
    9177         pVCpu->hm.s.vmx.Ctls.u32Cr4Mask = 0;
    9178         VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, 0);
     11031    if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
     11032    {
     11033        pVmcsInfo->u64Cr4Mask = 0;
     11034        VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, 0);
    917911035        Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
    918011036    }
     11037
     11038    NOREF(pVCpu);
    918111039}
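
The helper just above, together with hmR0VmxRunDebugStateInit() and hmR0VmxRunDebugStateRevert() (below), implements a save-override-restore pattern around guest execution. In miniature, as a sketch rather than the actual helpers:

    #include <stdint.h>

    static void sketchWithExtraCtls(uint32_t *pfCtls, uint32_t fExtra, void (*pfnRunGuest)(void))
    {
        uint32_t const fInitial = *pfCtls;  /* init: remember the initial control value */
        *pfCtls |= fExtra;                  /* apply: OR in what the debugger needs */
        pfnRunGuest();                      /* execute the guest */
        *pfCtls = fInitial;                 /* revert: restore for the next run */
    }
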
    918211040
     
    918711045 *
    918811046 * @returns Strict VBox status code (i.e. informational status codes too).
    9189  * @param   pVCpu       The cross context virtual CPU structure.
    9190  * @param   pDbgState   The debug state.
    9191  * @param   rcStrict    The return code from executing the guest using single
    9192  *                      stepping.
    9193  */
    9194 static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)
     11047 * @param   pVCpu           The cross context virtual CPU structure.
     11048 * @param   pVmxTransient   The VMX-transient structure.
     11049 * @param   pDbgState       The debug state.
     11050 * @param   rcStrict        The return code from executing the guest using single
     11051 *                          stepping.
     11052 */
     11053static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
     11054                                               VBOXSTRICTRC rcStrict)
    919511055{
    919611056    /*
     
    919811058     * next time around.
    919911059     */
     11060    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     11061
    920011062    /* We reload the initial value, trigger what we can of recalculations the
    920111063       next time around.  From the looks of things, that's all that's required atm. */
     
    920611068        int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
    920711069        AssertRCReturn(rc2, rc2);
    9208         pVCpu->hm.s.vmx.Ctls.u32ProcCtls = pDbgState->fProcCtlsInitial;
     11070        pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
    920911071    }
    921011072
     
    921211074       cached value and reload the field. */
    921311075    if (   pDbgState->fModifiedProcCtls2
    9214         && pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 != pDbgState->fProcCtls2Initial)
     11076        && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
    921511077    {
    921611078        int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
    921711079        AssertRCReturn(rc2, rc2);
    9218         pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 = pDbgState->fProcCtls2Initial;
     11080        pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
    921911081    }
    922011082
     
    922211084       reloading and partial recalculation the next time around. */
    922311085    if (pDbgState->fModifiedXcptBitmap)
    9224         pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = pDbgState->bmXcptInitial;
     11086        pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
    922511087
    922611088    return rcStrict;
     
    923511097 *
    923611098 * @param   pVCpu           The cross context virtual CPU structure.
     11099 * @param   pVmxTransient   The VMX-transient structure. May update
     11100 *                          fUpdatedTscOffsettingAndPreemptTimer.
    923711101 * @param   pDbgState       The debug state.
    9238  * @param   pVmxTransient   Pointer to the VMX transient structure.  May update
    9239  *                          fUpdateTscOffsettingAndPreemptTimer.
    9240  */
    9241 static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient)
     11102 */
     11103static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    924211104{
    924311105    /*
     
    938811250        || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
    938911251    {
    9390         int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
     11252        int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
     11253                                                                        | CPUMCTX_EXTRN_APIC_TPR);
    939111254        AssertRC(rc);
    939211255
     
    950811371    {
    950911372        pVCpu->hm.s.fDebugWantRdTscExit ^= true;
    9510         pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
     11373        pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    951111374    }
    951211375
     
    952811391 * @returns Strict VBox status code (i.e. informational status codes too).
    952911392 * @param   pVCpu           The cross context virtual CPU structure.
    9530  * @param   pVmxTransient   Pointer to the VMX-transient structure.
     11393 * @param   pVmxTransient   The VMX-transient structure.
    953111394 * @param   uExitReason     The VM-exit reason.
    953211395 *
     
    974011603    {
    974111604        hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    9742         hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     11605        hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    974311606        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    974411607        switch (enmEvent1)
     
    988611749        && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
    988711750    {
    9888         HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     11751        hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    988911752        VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
    989011753        if (rcStrict != VINF_SUCCESS)
     
    989411757             && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
    989511758    {
    9896         HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     11759        hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    989711760        VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
    989811761        if (rcStrict != VINF_SUCCESS)
     
    991311776 * @returns Strict VBox status code (i.e. informational status codes too).
    991411777 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    9915  * @param   pVmxTransient   Pointer to the VMX-transient structure.
     11778 * @param   pVmxTransient   The VMX-transient structure.
    991611779 * @param   pDbgState       The debug state.
    991711780 */
     
    992711790    {
    992811791        hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    9929         int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     11792        int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    993011793        AssertRC(rc);
    993111794        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
     
    1001011873            case VMX_EXIT_XRSTORS:
    1001111874            {
    10012                 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     11875                int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    1001311876                AssertRCReturn(rc, rc);
    1001411877                if (   pVCpu->cpum.GstCtx.rip    != pDbgState->uRipStart
     
    1005811921
    1005911922/**
    10060  * Single steps guest code using VT-x.
     11923 * Single steps guest code using hardware-assisted VMX.
     11924 *
      11925 * This is -not- the same as the guest single-stepping itself (say, using EFLAGS.TF);
      11926 * rather, it is single-stepping through the hypervisor debugger.
    1006111927 *
    1006211928 * @returns Strict VBox status code (i.e. informational status codes too).
    1006311929 * @param   pVCpu       The cross context virtual CPU structure.
     11930 * @param   pcLoops     Pointer to the number of executed loops.
    1006411931 *
    1006511932 * @note    Mostly the same as hmR0VmxRunGuestCodeNormal().
    1006611933 */
    10067 static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu)
    10068 {
     11934static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu, uint32_t *pcLoops)
     11935{
     11936    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
     11937    Assert(pcLoops);
     11938    Assert(*pcLoops <= cMaxResumeLoops);
     11939
    1006911940    VMXTRANSIENT VmxTransient;
    10070     VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
     11941    RT_ZERO(VmxTransient);
     11942    VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    1007111943
    1007211944    /* Set HMCPU indicators.  */
     
    1007811950    /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps.  */
    1007911951    VMXRUNDBGSTATE DbgState;
    10080     hmR0VmxRunDebugStateInit(pVCpu, &DbgState);
    10081     hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
     11952    hmR0VmxRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);
     11953    hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
    1008211954
    1008311955    /*
     
    1008511957     */
    1008611958    VBOXSTRICTRC rcStrict  = VERR_INTERNAL_ERROR_5;
    10087     for (uint32_t cLoops = 0; ; cLoops++)
     11959    for (;;)
    1008811960    {
    1008911961        Assert(!HMR0SuspendPending());
    1009011962        HMVMX_ASSERT_CPU_SAFE(pVCpu);
     11963        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    1009111964        bool fStepping = pVCpu->hm.s.fSingleInstruction;
    1009211965
      11966        /* Set up VM-execution controls that the next two calls can respond to. */
     11967        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
     11968
    1009311969        /*
    10094          * Preparatory work for running guest code, this may force us to return
    10095          * to ring-3.  This bugger disables interrupts on VINF_SUCCESS!
     11970         * Preparatory work for running guest code, this may force us to
     11971         * return to ring-3.
     11972         *
     11973         * Warning! This bugger disables interrupts on VINF_SUCCESS!
    1009611974         */
    10097         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    10098         hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Set up execute controls the next to can respond to. */
    1009911975        rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, fStepping);
    1010011976        if (rcStrict != VINF_SUCCESS)
    1010111977            break;
    1010211978
     11979        /* Interrupts are disabled at this point! */
    1010311980        hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
    10104         hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */
     11981
     11982        /* Override any obnoxious code in the above two calls. */
     11983        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
    1010511984
    1010611985        /*
    10107          * Now we can run the guest code.
     11986         * Finally execute the guest.
    1010811987         */
    10109         int rcRun = hmR0VmxRunGuest(pVCpu);
    10110 
    10111         /*
    10112          * Restore any residual host-state and save any bits shared between host
    10113          * and guest into the guest-CPU state.  Re-enables interrupts!
    10114          */
     11988        int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
     11989
    1011511990        hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
     11991        /* Interrupts are re-enabled at this point! */
    1011611992
    1011711993        /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
     
    1014112017        if (rcStrict != VINF_SUCCESS)
    1014212018            break;
    10143         if (cLoops > pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
     12019        if (++(*pcLoops) > cMaxResumeLoops)
    1014412020        {
    1014512021            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     
    1015412030        if (fStepping)
    1015512031        {
    10156             int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     12032            int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    1015712033            AssertRC(rc);
    1015812034            if (   pVCpu->cpum.GstCtx.rip    != DbgState.uRipStart
     
    1016912045         */
    1017012046        if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
    10171             hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
     12047            hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
    1017212048    }
    1017312049
     
    1017712053    if (pVCpu->hm.s.fClearTrapFlag)
    1017812054    {
    10179         int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
     12055        int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
    1018012056        AssertRC(rc);
    1018112057        pVCpu->hm.s.fClearTrapFlag = false;
     
    1018712063
    1018812064    /*
    10189      * Restore VM-exit control settings as we may not reenter this function the
     12065     * Restore VM-exit control settings as we may not re-enter this function the
    1019012066     * next time around.
    1019112067     */
    10192     rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &DbgState, rcStrict);
     12068    rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
    1019312069
    1019412070    /* Restore HMCPU indicators. */
     
    1034312219
    1034412220/**
    10345  * Runs the guest code using VT-x.
     12221 * Runs the guest using hardware-assisted VMX.
    1034612222 *
    1034712223 * @returns Strict VBox status code (i.e. informational status codes too).
     
    1035812234
    1035912235    VBOXSTRICTRC rcStrict;
     12236    uint32_t cLoops = 0;
    1036012237#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1036112238    bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(pCtx);
     
    1036912246            && !DBGFIsStepping(pVCpu)
    1037012247            && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
    10371             rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu);
     12248            rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, &cLoops);
    1037212249        else
    10373             rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu);
     12250            rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, &cLoops);
    1037412251    }
    1037512252#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     
    1037812255
    1037912256    if (rcStrict == VINF_VMX_VMLAUNCH_VMRESUME)
    10380         rcStrict = hmR0VmxRunGuestCodeNested(pVCpu);
     12257        rcStrict = hmR0VmxRunGuestCodeNested(pVCpu, &cLoops);
    1038112258#endif
    1038212259
     
    1039912276
    1040012277#ifndef HMVMX_USE_FUNCTION_TABLE
    10401 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
     12278/**
     12279 * Handles a guest VM-exit from hardware-assisted VMX execution.
     12280 *
     12281 * @returns Strict VBox status code (i.e. informational status codes too).
     12282 * @param   pVCpu           The cross context virtual CPU structure.
     12283 * @param   pVmxTransient   The VMX-transient structure.
     12284 */
     12285DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1040212286{
    1040312287#ifdef DEBUG_ramshankar
     
    1041412298# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
    1041512299#endif
     12300    uint32_t const rcReason = pVmxTransient->uExitReason;
    1041612301    switch (rcReason)
    1041712302    {
     
    1049112376
    1049212377        case VMX_EXIT_ENCLS:
    10493         case VMX_EXIT_RDSEED: /* only spurious VM-exits, so undefined */
     12378        case VMX_EXIT_RDSEED:
    1049412379        case VMX_EXIT_PML_FULL:
    1049512380        default:
     
    1049912384}
    1050012385#endif /* !HMVMX_USE_FUNCTION_TABLE */
     12386
     12387
     12388#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     12389/**
     12390 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
     12391 *
     12392 * @returns Strict VBox status code (i.e. informational status codes too).
     12393 * @param   pVCpu           The cross context virtual CPU structure.
     12394 * @param   pVmxTransient   The VMX-transient structure.
     12395 */
     12396DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     12397{
     12398    uint32_t const rcReason = pVmxTransient->uExitReason;
     12399    switch (rcReason)
     12400    {
     12401        case VMX_EXIT_EPT_MISCONFIG:
     12402        case VMX_EXIT_EPT_VIOLATION:
     12403        case VMX_EXIT_IO_INSTR:
     12404        case VMX_EXIT_CPUID:
     12405        case VMX_EXIT_RDTSC:
     12406        case VMX_EXIT_RDTSCP:
     12407        case VMX_EXIT_APIC_ACCESS:
     12408        case VMX_EXIT_XCPT_OR_NMI:
     12409        case VMX_EXIT_MOV_CRX:
     12410        case VMX_EXIT_EXT_INT:
     12411        case VMX_EXIT_INT_WINDOW:
     12412        case VMX_EXIT_TPR_BELOW_THRESHOLD:
     12413        case VMX_EXIT_MWAIT:
     12414        case VMX_EXIT_MONITOR:
     12415        case VMX_EXIT_TASK_SWITCH:
     12416        case VMX_EXIT_PREEMPT_TIMER:
     12417        case VMX_EXIT_RDMSR:
     12418        case VMX_EXIT_WRMSR:
     12419        case VMX_EXIT_VMCALL:
     12420        case VMX_EXIT_MOV_DRX:
     12421        case VMX_EXIT_HLT:
     12422        case VMX_EXIT_INVD:
     12423        case VMX_EXIT_INVLPG:
     12424        case VMX_EXIT_RSM:
     12425        case VMX_EXIT_MTF:
     12426        case VMX_EXIT_PAUSE:
     12427        case VMX_EXIT_GDTR_IDTR_ACCESS:
     12428        case VMX_EXIT_LDTR_TR_ACCESS:
     12429        case VMX_EXIT_WBINVD:
     12430        case VMX_EXIT_XSETBV:
     12431        case VMX_EXIT_RDRAND:
     12432        case VMX_EXIT_INVPCID:
     12433        case VMX_EXIT_GETSEC:
     12434        case VMX_EXIT_RDPMC:
     12435#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     12436        case VMX_EXIT_VMCLEAR:
     12437        case VMX_EXIT_VMLAUNCH:
     12438        case VMX_EXIT_VMPTRLD:
     12439        case VMX_EXIT_VMPTRST:
     12440        case VMX_EXIT_VMREAD:
     12441        case VMX_EXIT_VMRESUME:
     12442        case VMX_EXIT_VMWRITE:
     12443        case VMX_EXIT_VMXOFF:
     12444        case VMX_EXIT_VMXON:
     12445#endif
     12446        case VMX_EXIT_TRIPLE_FAULT:
     12447        case VMX_EXIT_NMI_WINDOW:
     12448        case VMX_EXIT_INIT_SIGNAL:
     12449        case VMX_EXIT_SIPI:
     12450        case VMX_EXIT_IO_SMI:
     12451        case VMX_EXIT_SMI:
     12452        case VMX_EXIT_ERR_MSR_LOAD:
     12453        case VMX_EXIT_ERR_INVALID_GUEST_STATE:
     12454        case VMX_EXIT_ERR_MACHINE_CHECK:
     12455
     12456        case VMX_EXIT_INVEPT:
     12457        case VMX_EXIT_INVVPID:
     12458        case VMX_EXIT_VMFUNC:
     12459        case VMX_EXIT_XSAVES:
     12460        case VMX_EXIT_XRSTORS:
     12461
     12462        case VMX_EXIT_ENCLS:
     12463        case VMX_EXIT_RDSEED:
     12464        case VMX_EXIT_PML_FULL:
     12465        default:
     12466            return hmR0VmxExitErrUndefined(pVCpu, pVmxTransient);
     12467    }
     12468#undef VMEXIT_CALL_RET
     12469}
     12470#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
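
hmR0VmxHandleExitNested is still scaffolding at this point: every exit reason in the switch falls through to hmR0VmxExitErrUndefined until the per-exit nested handlers land. When they do, each handler typically has to decide whether the guest hypervisor asked to intercept the event (reflect the VM-exit to it) or whether the exit is ours to handle. A hedged sketch of that decision, with invented names (NSTCTX, handleNestedHlt etc. are not VirtualBox identifiers; the HLT-exiting bit position matches the VMX primary processor-based controls):

/* Hedged sketch, invented names: deciding whether a nested-guest exit
 * belongs to the guest hypervisor (reflect it) or to us (handle it). */
#include <stdint.h>

typedef struct NSTCTX
{
    uint32_t fProcCtls; /* guest hypervisor's processor-based controls */
} NSTCTX;

#define CTL_HLT_EXIT (UINT32_C(1) << 7) /* "HLT exiting" control bit */

static int reflectToGuestHypervisor(NSTCTX *pCtx) { (void)pCtx; return 1; }
static int handleHltOurselves(NSTCTX *pCtx)       { (void)pCtx; return 0; }

static int handleNestedHlt(NSTCTX *pCtx)
{
    if (pCtx->fProcCtls & CTL_HLT_EXIT)
        return reflectToGuestHypervisor(pCtx); /* L1 asked for this exit */
    return handleHltOurselves(pCtx);           /* treat like an L1 exit  */
}
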
    1050112471
    1050212472
     
    1051712487        AssertPtr((a_pVmxTransient)); \
    1051812488        Assert((a_pVmxTransient)->fVMEntryFailed == false); \
     12489        Assert((a_pVmxTransient)->pVmcsInfo); \
    1051912490        Assert(ASMIntAreEnabled()); \
    1052012491        HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
     
    1056712538 * @returns VBox status code, no informational status codes.
    1056812539 * @param   pVCpu           The cross context virtual CPU structure.
    10569  * @param   pVmxTransient   Pointer to the VMX transient structure.
     12540 * @param   pVmxTransient   The VMX-transient structure.
    1057012541 *
    1057112542 * @remarks No-long-jump zone!!!
     
    1057412545{
    1057512546    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    10576     rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
     12547    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    1057712548    AssertRCReturn(rc, rc);
    1057812549
     
    1058312554
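
The hunk above reworks the RIP-advance helper to import RIP/RFLAGS through the per-VMCS info structure. The underlying operation is the usual one after emulating an instruction that caused an unconditional VM-exit: add the VM-exit instruction length to RIP and clear RFLAGS.RF, as hardware would at an instruction boundary. A minimal illustrative sketch (MINICTX and advanceGuestRip are invented names):

/* Illustrative sketch (invented names): advance the guest RIP past the
 * instruction that caused the VM-exit and clear RFLAGS.RF. */
#include <stdint.h>

typedef struct MINICTX
{
    uint64_t rip;
    uint64_t rflags;
} MINICTX;

static void advanceGuestRip(MINICTX *pCtx, uint32_t cbInstr)
{
    pCtx->rip    += cbInstr;                 /* skip the exiting instruction  */
    pCtx->rflags &= ~(UINT64_C(1) << 16);    /* clear RF (resume flag), bit 16 */
}
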
    1058412555/**
    10585  * Tries to determine what part of the guest-state VT-x has deemed as invalid
    10586  * and update error record fields accordingly.
    10587  *
    10588  * @return VMX_IGS_* return codes.
    10589  * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
    10590  *         wrong with the guest state.
    10591  *
    10592  * @param   pVCpu   The cross context virtual CPU structure.
    10593  *
    10594  * @remarks This function assumes our cache of the VMCS controls
    10595  *          are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
    10596  */
    10597 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu)
    10598 {
    10599 #define HMVMX_ERROR_BREAK(err)              { uError = (err); break; }
    10600 #define HMVMX_CHECK_BREAK(expr, err)        if (!(expr)) { \
    10601                                                 uError = (err); \
    10602                                                 break; \
    10603                                             } else do { } while (0)
    10604 
    10605     int        rc;
    10606     PVM        pVM = pVCpu->CTX_SUFF(pVM);
    10607     PCPUMCTX   pCtx = &pVCpu->cpum.GstCtx;
    10608     uint32_t   uError = VMX_IGS_ERROR;
    10609     uint32_t   u32Val;
    10610     bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
    10611 
    10612     do
    10613     {
      12556 * Handles a condition that occurred while delivering an event through the guest
     12557 * IDT.
     12558 *
     12559 * @returns Strict VBox status code (i.e. informational status codes too).
     12560 * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
     12561 * @retval  VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
      12562 *          to continue execution of the guest, which will deliver the \#DF.
     12563 * @retval  VINF_EM_RESET if we detected a triple-fault condition.
     12564 * @retval  VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
     12565 *
     12566 * @param   pVCpu           The cross context virtual CPU structure.
     12567 * @param   pVmxTransient   The VMX-transient structure.
     12568 *
     12569 * @remarks No-long-jump zone!!!
     12570 */
     12571static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     12572{
     12573    uint32_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
     12574
     12575    int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
     12576    rc2    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     12577    AssertRCReturn(rc2, rc2);
     12578
     12579    VBOXSTRICTRC  rcStrict  = VINF_SUCCESS;
     12580    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     12581    if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
     12582    {
     12583        uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
     12584        uint32_t const uIdtVector     = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
     12585
    1061412586        /*
    10615          * CR0.
      12587         * If the event was a software interrupt (generated with INT n), a software exception
      12588         * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
      12589         * can handle the VM-exit and continue guest execution, which will re-execute the
      12590         * instruction rather than re-inject the exception. Re-injecting the exception can cause
      12591         * premature trips to ring-3 before injection and involves TRPM, which currently has no
      12592         * way of recording that these exceptions were caused by these instructions (ICEBP's #DB
      12593         * poses the problem).
    1061612594         */
    10617         uint32_t       fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
    10618         uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
    10619         /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
    10620            See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
    10621         if (fUnrestrictedGuest)
    10622             fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
    10623 
    10624         uint32_t u32GuestCr0;
    10625         rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0);
    10626         AssertRCBreak(rc);
    10627         HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
    10628         HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
    10629         if (   !fUnrestrictedGuest
    10630             &&  (u32GuestCr0 & X86_CR0_PG)
    10631             && !(u32GuestCr0 & X86_CR0_PE))
    10632         {
    10633             HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
     12595        IEMXCPTRAISE     enmRaise;
     12596        IEMXCPTRAISEINFO fRaiseInfo;
     12597        if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
     12598            || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
     12599            || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
     12600        {
     12601            enmRaise   = IEMXCPTRAISE_REEXEC_INSTR;
     12602            fRaiseInfo = IEMXCPTRAISEINFO_NONE;
     12603        }
     12604        else if (VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
     12605        {
     12606            uint32_t const uExitVectorType  = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
     12607            uint32_t const fIdtVectorFlags  = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
     12608            uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
      12609            /** @todo Change AssertMsgReturn to just AssertMsg later. */
     12610            AssertMsgReturn(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT,
     12611                            ("Unexpected VM-exit interruption vector type %#x!\n", uExitVectorType), VERR_VMX_IPE_5);
     12612
     12613            enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
     12614
     12615            /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */
     12616            if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
     12617            {
     12618                pVmxTransient->fVectoringPF = true;
     12619                enmRaise = IEMXCPTRAISE_PREV_EVENT;
     12620            }
     12621        }
     12622        else
     12623        {
     12624            /*
     12625             * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
     12626             * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
     12627             * It is sufficient to reflect the original event to the guest after handling the VM-exit.
     12628             */
     12629            Assert(   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
     12630                   || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
     12631                   || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
     12632            enmRaise   = IEMXCPTRAISE_PREV_EVENT;
     12633            fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    1063412634        }
    1063512635
    1063612636        /*
    10637          * CR4.
     12637         * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
     12638         * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
     12639         * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
     12640         * subsequent VM-entry would fail.
     12641         *
     12642         * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
    1063812643         */
    10639         uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
    10640         uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
    10641 
    10642         uint32_t u32GuestCr4;
    10643         rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4);
    10644         AssertRCBreak(rc);
    10645         HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
    10646         HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
    10647 
     12644        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
     12645            && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
     12646            && (   enmRaise   == IEMXCPTRAISE_PREV_EVENT
     12647                || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF))
     12648            && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
     12649        {
     12650            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     12651        }
     12652
     12653        switch (enmRaise)
     12654        {
     12655            case IEMXCPTRAISE_CURRENT_XCPT:
     12656            {
     12657                Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n",
     12658                          pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));
     12659                Assert(rcStrict == VINF_SUCCESS);
     12660                break;
     12661            }
     12662
     12663            case IEMXCPTRAISE_PREV_EVENT:
     12664            {
     12665                uint32_t u32ErrCode;
     12666                if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
     12667                {
     12668                    rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
     12669                    AssertRCReturn(rc2, rc2);
     12670                    u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
     12671                }
     12672                else
     12673                    u32ErrCode = 0;
     12674
     12675                /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */
     12676                STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
     12677                hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
     12678                                       0 /* cbInstr */, u32ErrCode, pVCpu->cpum.GstCtx.cr2);
     12679
     12680                Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
     12681                          pVCpu->hm.s.Event.u32ErrCode));
     12682                Assert(rcStrict == VINF_SUCCESS);
     12683                break;
     12684            }
     12685
     12686            case IEMXCPTRAISE_REEXEC_INSTR:
     12687                Assert(rcStrict == VINF_SUCCESS);
     12688                break;
     12689
     12690            case IEMXCPTRAISE_DOUBLE_FAULT:
     12691            {
     12692                /*
      12693                 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
      12694                 * second #PF as a guest #PF (and not a shadow #PF) that needs to be converted into a #DF.
     12695                 */
     12696                if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
     12697                {
     12698                    pVmxTransient->fVectoringDoublePF = true;
     12699                    Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
      12700                              pVCpu->cpum.GstCtx.cr2));
     12701                    rcStrict = VINF_SUCCESS;
     12702                }
     12703                else
     12704                {
     12705                    STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
     12706                    hmR0VmxSetPendingXcptDF(pVCpu);
     12707                    Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
     12708                              uIdtVector, uExitVector));
     12709                    rcStrict = VINF_HM_DOUBLE_FAULT;
     12710                }
     12711                break;
     12712            }
     12713
     12714            case IEMXCPTRAISE_TRIPLE_FAULT:
     12715            {
     12716                Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
     12717                rcStrict = VINF_EM_RESET;
     12718                break;
     12719            }
     12720
     12721            case IEMXCPTRAISE_CPU_HANG:
     12722            {
     12723                Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
     12724                rcStrict = VERR_EM_GUEST_CPU_HANG;
     12725                break;
     12726            }
     12727
     12728            default:
     12729            {
     12730                AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
     12731                rcStrict = VERR_VMX_IPE_2;
     12732                break;
     12733            }
     12734        }
     12735    }
     12736    else if (   VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
     12737             && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
     12738             && uExitVector != X86_XCPT_DF
     12739             && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
     12740    {
    1064812741        /*
    10649          * IA32_DEBUGCTL MSR.
      12742         * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
     12743         * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
      12744         * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
    1065012745         */
    10651         uint64_t u64Val;
    10652         rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
    10653         AssertRCBreak(rc);
    10654         if (   (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
    10655             && (u64Val & 0xfffffe3c))                           /* Bits 31:9, bits 5:2 MBZ. */
    10656         {
    10657             HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
    10658         }
    10659         uint64_t u64DebugCtlMsr = u64Val;
    10660 
    10661 #ifdef VBOX_STRICT
    10662         rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
    10663         AssertRCBreak(rc);
    10664         Assert(u32Val == pVCpu->hm.s.vmx.Ctls.u32EntryCtls);
    10665 #endif
    10666         bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
    10667 
    10668         /*
    10669          * RIP and RFLAGS.
    10670          */
    10671         uint32_t u32Eflags;
    10672 #if HC_ARCH_BITS == 64
    10673         rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
    10674         AssertRCBreak(rc);
    10675         /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
    10676         if (   !fLongModeGuest
    10677             || !pCtx->cs.Attr.n.u1Long)
    10678         {
    10679             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
    10680         }
    10681         /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
    10682          *        must be identical if the "IA-32e mode guest" VM-entry
    10683          *        control is 1 and CS.L is 1. No check applies if the
    10684          *        CPU supports 64 linear-address bits. */
    10685 
    10686         /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
    10687         rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
    10688         AssertRCBreak(rc);
    10689         HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)),                     /* Bit 63:22, Bit 15, 5, 3 MBZ. */
    10690                           VMX_IGS_RFLAGS_RESERVED);
    10691         HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1);       /* Bit 1 MB1. */
    10692         u32Eflags = u64Val;
    10693 #else
    10694         rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
    10695         AssertRCBreak(rc);
    10696         HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED);          /* Bit 31:22, Bit 15, 5, 3 MBZ. */
    10697         HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1);    /* Bit 1 MB1. */
    10698 #endif
    10699 
    10700         if (   fLongModeGuest
    10701             || (   fUnrestrictedGuest
    10702                 && !(u32GuestCr0 & X86_CR0_PE)))
    10703         {
    10704             HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
    10705         }
    10706 
    10707         uint32_t u32EntryInfo;
    10708         rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
    10709         AssertRCBreak(rc);
    10710         if (   VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
    10711             && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
    10712         {
    10713             HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
    10714         }
    10715 
    10716         /*
    10717          * 64-bit checks.
    10718          */
    10719 #if HC_ARCH_BITS == 64
    10720         if (fLongModeGuest)
    10721         {
    10722             HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
    10723             HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
    10724         }
    10725 
    10726         if (   !fLongModeGuest
    10727             && (u32GuestCr4 & X86_CR4_PCIDE))
    10728         {
    10729             HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
    10730         }
    10731 
    10732         /** @todo CR3 field must be such that bits 63:52 and bits in the range
    10733          *        51:32 beyond the processor's physical-address width are 0. */
    10734 
    10735         if (   (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
    10736             && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
    10737         {
    10738             HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
    10739         }
    10740 
    10741         rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
    10742         AssertRCBreak(rc);
    10743         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
    10744 
    10745         rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
    10746         AssertRCBreak(rc);
    10747         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
    10748 #endif
    10749 
    10750         /*
    10751          * PERF_GLOBAL MSR.
    10752          */
    10753         if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
    10754         {
    10755             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
    10756             AssertRCBreak(rc);
    10757             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
    10758                               VMX_IGS_PERF_GLOBAL_MSR_RESERVED);        /* Bits 63:35, bits 31:2 MBZ. */
    10759         }
    10760 
    10761         /*
    10762          * PAT MSR.
    10763          */
    10764         if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
    10765         {
    10766             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
    10767             AssertRCBreak(rc);
    10768             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
    10769             for (unsigned i = 0; i < 8; i++)
    10770             {
    10771                 uint8_t u8Val = (u64Val & 0xff);
    10772                 if (   u8Val != 0 /* UC */
    10773                     && u8Val != 1 /* WC */
    10774                     && u8Val != 4 /* WT */
    10775                     && u8Val != 5 /* WP */
    10776                     && u8Val != 6 /* WB */
    10777                     && u8Val != 7 /* UC- */)
    10778                 {
    10779                     HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
    10780                 }
    10781                 u64Val >>= 8;
    10782             }
    10783         }
    10784 
    10785         /*
    10786          * EFER MSR.
    10787          */
    10788         if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
    10789         {
    10790             Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
    10791             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
    10792             AssertRCBreak(rc);
    10793             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
    10794                               VMX_IGS_EFER_MSR_RESERVED);               /* Bits 63:12, bit 9, bits 7:1 MBZ. */
    10795             HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(  pVCpu->hm.s.vmx.Ctls.u32EntryCtls
    10796                                                                            & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
    10797                               VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
    10798             /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
    10799              *        iemVmxVmentryCheckGuestState(). */
    10800             HMVMX_CHECK_BREAK(   fUnrestrictedGuest
    10801                               || !(u32GuestCr0 & X86_CR0_PG)
    10802                               || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
    10803                               VMX_IGS_EFER_LMA_LME_MISMATCH);
    10804         }
    10805 
    10806         /*
    10807          * Segment registers.
    10808          */
    10809         HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    10810                           || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
    10811         if (!(u32Eflags & X86_EFL_VM))
    10812         {
    10813             /* CS */
    10814             HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
    10815             HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
    10816             HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
    10817             HMVMX_CHECK_BREAK(   (pCtx->cs.u32Limit & 0xfff) == 0xfff
    10818                               || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
    10819             HMVMX_CHECK_BREAK(   !(pCtx->cs.u32Limit & 0xfff00000)
    10820                               || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
    10821             /* CS cannot be loaded with NULL in protected mode. */
    10822             HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
    10823             HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
    10824             if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
    10825                 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
    10826             else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
    10827                 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
    10828             else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
    10829                 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
    10830             else
    10831                 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
    10832 
    10833             /* SS */
    10834             HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
    10835                               || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
    10836             HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
    10837             if (   !(pCtx->cr0 & X86_CR0_PE)
    10838                 || pCtx->cs.Attr.n.u4Type == 3)
    10839             {
    10840                 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
    10841             }
    10842             if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
    10843             {
    10844                 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
    10845                 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
    10846                 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
    10847                 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
    10848                 HMVMX_CHECK_BREAK(   (pCtx->ss.u32Limit & 0xfff) == 0xfff
    10849                                   || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
    10850                 HMVMX_CHECK_BREAK(   !(pCtx->ss.u32Limit & 0xfff00000)
    10851                                   || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
    10852             }
    10853 
    10854             /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmenReg(). */
    10855             if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
    10856             {
    10857                 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
    10858                 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
    10859                 HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
    10860                                   || pCtx->ds.Attr.n.u4Type > 11
    10861                                   || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
    10862                 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
    10863                 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
    10864                 HMVMX_CHECK_BREAK(   (pCtx->ds.u32Limit & 0xfff) == 0xfff
    10865                                   || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
    10866                 HMVMX_CHECK_BREAK(   !(pCtx->ds.u32Limit & 0xfff00000)
    10867                                   || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
    10868                 HMVMX_CHECK_BREAK(   !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    10869                                   || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
    10870             }
    10871             if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
    10872             {
    10873                 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
    10874                 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
    10875                 HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
    10876                                   || pCtx->es.Attr.n.u4Type > 11
    10877                                   || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
    10878                 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
    10879                 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
    10880                 HMVMX_CHECK_BREAK(   (pCtx->es.u32Limit & 0xfff) == 0xfff
    10881                                   || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
    10882                 HMVMX_CHECK_BREAK(   !(pCtx->es.u32Limit & 0xfff00000)
    10883                                   || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
    10884                 HMVMX_CHECK_BREAK(   !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    10885                                   || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
    10886             }
    10887             if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
    10888             {
    10889                 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
    10890                 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
    10891                 HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
    10892                                   || pCtx->fs.Attr.n.u4Type > 11
    10893                                   || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
    10894                 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
    10895                 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
    10896                 HMVMX_CHECK_BREAK(   (pCtx->fs.u32Limit & 0xfff) == 0xfff
    10897                                   || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
    10898                 HMVMX_CHECK_BREAK(   !(pCtx->fs.u32Limit & 0xfff00000)
    10899                                   || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
    10900                 HMVMX_CHECK_BREAK(   !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    10901                                   || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
    10902             }
    10903             if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
    10904             {
    10905                 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
    10906                 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
    10907                 HMVMX_CHECK_BREAK(   pVM->hm.s.vmx.fUnrestrictedGuest
    10908                                   || pCtx->gs.Attr.n.u4Type > 11
    10909                                   || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
    10910                 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
    10911                 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
    10912                 HMVMX_CHECK_BREAK(   (pCtx->gs.u32Limit & 0xfff) == 0xfff
    10913                                   || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
    10914                 HMVMX_CHECK_BREAK(   !(pCtx->gs.u32Limit & 0xfff00000)
    10915                                   || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
    10916                 HMVMX_CHECK_BREAK(   !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
    10917                                   || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
    10918             }
    10919             /* 64-bit capable CPUs. */
    10920 #if HC_ARCH_BITS == 64
    10921             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
    10922             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
    10923             HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    10924                               || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
    10925             HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
    10926             HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
    10927                               VMX_IGS_LONGMODE_SS_BASE_INVALID);
    10928             HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
    10929                               VMX_IGS_LONGMODE_DS_BASE_INVALID);
    10930             HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
    10931                               VMX_IGS_LONGMODE_ES_BASE_INVALID);
    10932 #endif
    10933         }
    10934         else
    10935         {
    10936             /* V86 mode checks. */
    10937             uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
    10938             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    10939             {
    10940                 u32CSAttr = 0xf3;   u32SSAttr = 0xf3;
    10941                 u32DSAttr = 0xf3;   u32ESAttr = 0xf3;
    10942                 u32FSAttr = 0xf3;   u32GSAttr = 0xf3;
    10943             }
    10944             else
    10945             {
    10946                 u32CSAttr = pCtx->cs.Attr.u;   u32SSAttr = pCtx->ss.Attr.u;
    10947                 u32DSAttr = pCtx->ds.Attr.u;   u32ESAttr = pCtx->es.Attr.u;
    10948                 u32FSAttr = pCtx->fs.Attr.u;   u32GSAttr = pCtx->gs.Attr.u;
    10949             }
    10950 
    10951             /* CS */
    10952             HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
    10953             HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
    10954             HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
    10955             /* SS */
    10956             HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
    10957             HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
    10958             HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
    10959             /* DS */
    10960             HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
    10961             HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
    10962             HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
    10963             /* ES */
    10964             HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
    10965             HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
    10966             HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
    10967             /* FS */
    10968             HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
    10969             HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
    10970             HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
    10971             /* GS */
    10972             HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
    10973             HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
    10974             HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
    10975             /* 64-bit capable CPUs. */
    10976 #if HC_ARCH_BITS == 64
    10977             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
    10978             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
    10979             HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    10980                               || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
    10981             HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
    10982             HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
    10983                               VMX_IGS_LONGMODE_SS_BASE_INVALID);
    10984             HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
    10985                               VMX_IGS_LONGMODE_DS_BASE_INVALID);
    10986             HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
    10987                               VMX_IGS_LONGMODE_ES_BASE_INVALID);
    10988 #endif
    10989         }
    10990 
    10991         /*
    10992          * TR.
    10993          */
    10994         HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
    10995         /* 64-bit capable CPUs. */
    10996 #if HC_ARCH_BITS == 64
    10997         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
    10998 #endif
    10999         if (fLongModeGuest)
    11000         {
    11001             HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11,           /* 64-bit busy TSS. */
    11002                               VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
    11003         }
    11004         else
    11005         {
    11006             HMVMX_CHECK_BREAK(   pCtx->tr.Attr.n.u4Type == 3          /* 16-bit busy TSS. */
    11007                               || pCtx->tr.Attr.n.u4Type == 11,        /* 32-bit busy TSS.*/
    11008                               VMX_IGS_TR_ATTR_TYPE_INVALID);
    11009         }
    11010         HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
    11011         HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
    11012         HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED);   /* Bits 11:8 MBZ. */
    11013         HMVMX_CHECK_BREAK(   (pCtx->tr.u32Limit & 0xfff) == 0xfff
    11014                           || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
    11015         HMVMX_CHECK_BREAK(   !(pCtx->tr.u32Limit & 0xfff00000)
    11016                           || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
    11017         HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
    11018 
    11019         /*
    11020          * GDTR and IDTR.
    11021          */
    11022 #if HC_ARCH_BITS == 64
    11023         rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
    11024         AssertRCBreak(rc);
    11025         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
    11026 
    11027         rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
    11028         AssertRCBreak(rc);
    11029         HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
    11030 #endif
    11031 
    11032         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
    11033         AssertRCBreak(rc);
    11034         HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID);      /* Bits 31:16 MBZ. */
    11035 
    11036         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
    11037         AssertRCBreak(rc);
    11038         HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID);      /* Bits 31:16 MBZ. */
    11039 
    11040         /*
    11041          * Guest Non-Register State.
    11042          */
    11043         /* Activity State. */
    11044         uint32_t u32ActivityState;
    11045         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
    11046         AssertRCBreak(rc);
    11047         HMVMX_CHECK_BREAK(   !u32ActivityState
    11048                           || (u32ActivityState & RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
    11049                              VMX_IGS_ACTIVITY_STATE_INVALID);
    11050         HMVMX_CHECK_BREAK(   !(pCtx->ss.Attr.n.u2Dpl)
    11051                           || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
    11052         uint32_t u32IntrState;
    11053         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
    11054         AssertRCBreak(rc);
    11055         if (   u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
    11056             || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    11057         {
    11058             HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
    11059         }
    11060 
    11061         /** @todo Activity state and injecting interrupts. Left as a todo since we
    11062          *        currently don't use activity states but ACTIVE. */
    11063 
    11064         HMVMX_CHECK_BREAK(   !(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
    11065                           || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
    11066 
    11067         /* Guest interruptibility-state. */
    11068         HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
    11069         HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
    11070                                        != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
    11071                           VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
    11072         HMVMX_CHECK_BREAK(   (u32Eflags & X86_EFL_IF)
    11073                           || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
    11074                           VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
    11075         if (VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo))
    11076         {
    11077             if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
    11078             {
    11079                 HMVMX_CHECK_BREAK(   !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    11080                                   && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
    11081                                   VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
    11082             }
    11083             else if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
    11084             {
    11085                 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
    11086                                   VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
    11087                 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
    11088                                   VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
    11089             }
    11090         }
    11091         /** @todo Assumes the processor is not in SMM. */
    11092         HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
    11093                           VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
    11094         HMVMX_CHECK_BREAK(   !(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
    11095                           || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
    11096                              VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
    11097         if (   (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
    11098             && VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
    11099             && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
    11100         {
    11101             HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI),
    11102                               VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
    11103         }
    11104 
    11105         /* Pending debug exceptions. */
    11106 #if HC_ARCH_BITS == 64
    11107         rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
    11108         AssertRCBreak(rc);
    11109         /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
    11110         HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
    11111         u32Val = u64Val;    /* For pending debug exceptions checks below. */
    11112 #else
    11113         rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u32Val);
    11114         AssertRCBreak(rc);
    11115         /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
    11116         HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
    11117 #endif
    11118 
    11119         if (   (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    11120             || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
    11121             || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
    11122         {
    11123             if (   (u32Eflags & X86_EFL_TF)
    11124                 && !(u64DebugCtlMsr & RT_BIT_64(1)))    /* Bit 1 is IA32_DEBUGCTL.BTF. */
    11125             {
    11126                 /* Bit 14 is PendingDebug.BS. */
    11127                 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
    11128             }
    11129             if (   !(u32Eflags & X86_EFL_TF)
    11130                 || (u64DebugCtlMsr & RT_BIT_64(1)))     /* Bit 1 is IA32_DEBUGCTL.BTF. */
    11131             {
    11132                 /* Bit 14 is PendingDebug.BS. */
    11133                 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
    11134             }
    11135         }
    11136 
    11137         /* VMCS link pointer. */
    11138         rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
    11139         AssertRCBreak(rc);
    11140         if (u64Val != UINT64_C(0xffffffffffffffff))
    11141         {
    11142             HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
    11143             /** @todo Bits beyond the processor's physical-address width MBZ. */
    11144             /** @todo 32-bit located in memory referenced by value of this field (as a
    11145              *        physical address) must contain the processor's VMCS revision ID. */
    11146             /** @todo SMM checks. */
    11147         }
    11148 
    11149         /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
    11150          *        not using Nested Paging? */
    11151         if (   pVM->hm.s.fNestedPaging
    11152             && !fLongModeGuest
    11153             && CPUMIsGuestInPAEModeEx(pCtx))
    11154         {
    11155             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
    11156             AssertRCBreak(rc);
    11157             HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
    11158 
    11159             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
    11160             AssertRCBreak(rc);
    11161             HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
    11162 
    11163             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
    11164             AssertRCBreak(rc);
    11165             HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
    11166 
    11167             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
    11168             AssertRCBreak(rc);
    11169             HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
    11170         }
    11171 
    11172         /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
    11173         if (uError == VMX_IGS_ERROR)
    11174             uError = VMX_IGS_REASON_NOT_FOUND;
    11175     } while (0);
    11176 
    11177     pVCpu->hm.s.u32HMError = uError;
    11178     return uError;
    11179 
    11180 #undef HMVMX_ERROR_BREAK
    11181 #undef HMVMX_CHECK_BREAK
     12746        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     12747        {
     12748            Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n",
     12749                      VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
     12750            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     12751        }
     12752    }
     12753
     12754    Assert(   rcStrict == VINF_SUCCESS  || rcStrict == VINF_HM_DOUBLE_FAULT
     12755           || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
     12756    return rcStrict;
    1118212757}
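
The heart of hmR0VmxCheckExitDueToEventDelivery is the IEMEvaluateRecursiveXcpt classification: given the event that was being delivered and the exception the delivery provoked, decide whether to re-raise the original event, deliver the new exception, or escalate to #DF or a triple fault. A simplified sketch of the x86 escalation rules it encodes (evaluateRecursiveXcpt here is an invented stand-in, not the IEM implementation):

/* Simplified sketch: #DE and #TS..#GP are contributory; two contributory
 * faults, or a #PF followed by a contributory fault or another #PF,
 * escalate to #DF; a fault while delivering #DF is a triple fault. */
enum Raise { RAISE_CURRENT_XCPT, RAISE_DOUBLE_FAULT, RAISE_TRIPLE_FAULT };

static int isContributory(unsigned uVector)
{
    return uVector == 0 /* #DE */ || (uVector >= 10 && uVector <= 13 /* #TS..#GP */);
}

static enum Raise evaluateRecursiveXcpt(unsigned uPrevVector, unsigned uCurVector)
{
    if (uPrevVector == 8) /* fault while delivering #DF */
        return RAISE_TRIPLE_FAULT;
    if (   (isContributory(uPrevVector) && isContributory(uCurVector))
        || (uPrevVector == 14 /* #PF */ && (uCurVector == 14 || isContributory(uCurVector))))
        return RAISE_DOUBLE_FAULT;
    return RAISE_CURRENT_XCPT; /* benign combination */
}
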
    1118312758
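
Worth calling out separately are the two block-by-NMI fixups in the function above. If the exit interrupted NMI delivery and we are going to re-deliver the NMI, block-by-NMI must be cleared first or the next VM-entry fails its guest-state checks; conversely, if IRET faulted inside the guest NMI handler, block-by-NMI must be set so NMIs stay blocked until IRET is restarted. A sketch with invented names (NMISTATE, fixupNmiBlocking):

/* Sketch with invented names: the two block-by-NMI fixups above. */
#include <stdbool.h>

typedef struct NMISTATE
{
    bool fBlockedByNmi; /* mirrors the guest interruptibility bit */
} NMISTATE;

static void fixupNmiBlocking(NMISTATE *pState, bool fReDeliveringNmi, bool fIretFaultedInNmiHandler)
{
    if (fReDeliveringNmi)
        pState->fBlockedByNmi = false; /* re-delivery re-establishes blocking itself */
    else if (fIretFaultedInNmiHandler)
        pState->fBlockedByNmi = true;  /* IRET restarts; keep NMIs blocked meanwhile */
}
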
     
    1121212787    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
    1121312788
     12789    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1121412790    int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    1121512791    AssertRCReturn(rc, rc);
    1121612792
    1121712793    uint32_t uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
    11218     Assert(   !(pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
     12794    Assert(   !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
    1121912795           && uIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
    1122012796    Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
     
    1130012876                {
    1130112877                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
    11302                     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     12878                    if (pVmcsInfo->RealMode.fRealOnV86Active)
    1130312879                    {
    1130412880                        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
     
    1130612882                        Assert(CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx));
    1130712883
    11308                         rc  = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
     12884                        rc  = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
    1130912885                        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1131012886                        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     
    1134712923
    1134812924    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
    11349     hmR0VmxClearIntWindowExitVmcs(pVCpu);
     12925    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     12926    int rc = hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
     12927    AssertRCReturn(rc, rc);
    1135012928
    1135112929    /* Evaluate and deliver pending events and resume guest execution. */
     
    1136112939{
    1136212940    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11363     if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)))
     12941
     12942    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     12943    if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
    1136412944    {
    1136512945        AssertMsgFailed(("Unexpected NMI-window exit.\n"));
     
    1137312953     * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
    1137412954     */
    11375     uint32_t fIntrState = 0;
     12955    uint32_t fIntrState;
    1137612956    int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
    1137712957    AssertRCReturn(rc, rc);
     
    1138812968
    1138912969    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */
    11390     hmR0VmxClearNmiWindowExitVmcs(pVCpu);
     12970    rc = hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
     12971    AssertRCReturn(rc, rc);
    1139112972
    1139212973    /* Evaluate and deliver pending events and resume guest execution. */
     
    1142513006     * Get the state we need and update the exit history entry.
    1142613007     */
     13008    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1142713009    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    11428     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
     13010    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    1142913011    AssertRCReturn(rc, rc);
    1143013012
     
    1145213034         * Frequent exit or something needing probing.  Get state and call EMHistoryExec.
    1145313035         */
    11454         int rc2 = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     13036        int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1145513037        AssertRCReturn(rc2, rc2);
    1145613038
     
    1147513057{
    1147613058    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11477     int rc  = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR4);
     13059
     13060    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13061    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
    1147813062    AssertRCReturn(rc, rc);
    1147913063
     
    1149213076{
    1149313077    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11494     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     13078
     13079    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13080    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1149513081    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1149613082    AssertRCReturn(rc, rc);
     
    1149913085    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1150013086    {
    11501         /* If we get a spurious VM-exit when offsetting is enabled,
    11502            we must reset offsetting on VM-reentry. See @bugref{6634}. */
    11503         if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
    11504             pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
     13087        /* If we get a spurious VM-exit when TSC offsetting is enabled,
     13088           we must reset offsetting on VM-entry. See @bugref{6634}. */
     13089        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
     13090            pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    1150513091        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1150613092    }
     
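Note the polarity flip in these RDTSC/RDTSCP hunks: fUpdateTscOffsettingAndPreemptTimer was set to true to request recomputation, while the renamed fUpdatedTscOffsettingAndPreemptTimer is cleared to false to invalidate a previously programmed offset, i.e. the flag now records whether the offset is current rather than whether work is pending. A sketch of how a run loop could consume the new flag; the loop structure and helper name are hypothetical, not the actual HMVMXR0 code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct TRANSIENT
    {
        bool     fUpdatedTscOffsettingAndPreemptTimer; /* true = VMCS offset is current */
        uint64_t u64TscOffset;                         /* last value pushed to the VMCS */
    } TRANSIENT;

    /* Hypothetical per-iteration preamble: recompute the offset only when a
       previous exit (RDTSC, TSC MSR write, preemption-timer expiry) cleared
       the "updated" flag. */
    static void update_tsc_offsetting_if_needed(TRANSIENT *pVmxTransient, uint64_t uNewOffset)
    {
        if (!pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer)
        {
            pVmxTransient->u64TscOffset = uNewOffset;   /* would be a VMCS write */
            pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true;
        }
    }

    int main(void)
    {
        TRANSIENT Transient = { false, 0 };
        update_tsc_offsetting_if_needed(&Transient, 0x1000);    /* recomputes        */
        Transient.fUpdatedTscOffsettingAndPreemptTimer = false; /* spurious exit     */
        update_tsc_offsetting_if_needed(&Transient, 0x2000);    /* recomputes again  */
        printf("offset=%#llx\n", (unsigned long long)Transient.u64TscOffset);
        return 0;
    }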
    1152013106{
    1152113107    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11522     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
     13108
     13109    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13110    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
    1152313111    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1152413112    AssertRCReturn(rc, rc);
     
    1152713115    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1152813116    {
    11529         /* If we get a spurious VM-exit when offsetting is enabled,
     13117        /* If we get a spurious VM-exit when TSC offsetting is enabled,
    1153013118           we must reset offsetting on VM-reentry. See @bugref{6634}. */
    11531         if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
    11532             pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
     13119        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
     13120            pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    1153313121        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1153413122    }
     
    1154813136{
    1154913137    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11550     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
     13138
     13139    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13140    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4    | CPUMCTX_EXTRN_CR0
     13141                                                     | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
    1155113142    AssertRCReturn(rc, rc);
    1155213143
    11553     PVM      pVM  = pVCpu->CTX_SUFF(pVM);
    1155413144    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    11555     rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
     13145    rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    1155613146    if (RT_LIKELY(rc == VINF_SUCCESS))
    1155713147    {
     
    1157813168    if (EMAreHypercallInstructionsEnabled(pVCpu))
    1157913169    {
    11580         int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
    11581                                                  | CPUMCTX_EXTRN_SS  | CPUMCTX_EXTRN_CS     | CPUMCTX_EXTRN_EFER);
     13170        PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13171        int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
     13172                                                         | CPUMCTX_EXTRN_SS  | CPUMCTX_EXTRN_CS     | CPUMCTX_EXTRN_EFER);
    1158213173        AssertRCReturn(rc, rc);
    1158313174
     
    1161913210    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
    1162013211
     13212    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1162113213    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1162213214    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    11623     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     13215    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1162413216    AssertRCReturn(rc, rc);
    1162513217
     
    1164613238{
    1164713239    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11648     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
     13240
     13241    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13242    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
    1164913243    AssertRCReturn(rc, rc);
    1165013244
    11651     PVM      pVM  = pVCpu->CTX_SUFF(pVM);
    1165213245    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    11653     rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
     13246    rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    1165413247    if (RT_LIKELY(rc == VINF_SUCCESS))
    1165513248        rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
     
    1165913252        rc = VERR_EM_INTERPRETER;
    1166013253    }
    11661     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
    1166213254    return rc;
    1166313255}
     
    1167013262{
    1167113263    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11672     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
     13264
     13265    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13266    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
    1167313267    AssertRCReturn(rc, rc);
    1167413268
    11675     PVM      pVM  = pVCpu->CTX_SUFF(pVM);
    1167613269    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    11677     VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
     13270    VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    1167813271    rc = VBOXSTRICTRC_VAL(rc2);
    1167913272    if (RT_LIKELY(   rc == VINF_SUCCESS
     
    1169413287    AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
    1169513288              ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
    11696     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
    1169713289    return rc;
    1169813290}
     
    1180113393{
    1180213394    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    11803     Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_HLT_EXIT);
    1180413395
    1180513396    int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    11806     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    1180713397    AssertRCReturn(rc, rc);
    1180813398
     13399    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);            /* Advancing the RIP above should've imported eflags. */
    1180913400    if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx))    /* Requires eflags. */
    1181013401        rc = VINF_SUCCESS;
     
    1181213403        rc = VINF_EM_HALT;
    1181313404
    11814     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    1181513405    if (rc != VINF_SUCCESS)
    1181613406        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
     
    1183213422
    1183313423/**
    11834  * VM-exit handler for expiry of the VMX preemption timer.
     13424 * VM-exit handler for expiry of the VMX-preemption timer.
    1183513425 */
    1183613426HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     
    1183813428    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1183913429
    11840     /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
    11841     pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
     13430    /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
     13431    pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    1184213432
    1184313433    /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
     
    1185613446    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1185713447
     13448    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1185813449    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    11859     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
     13450    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
    1186013451    AssertRCReturn(rc, rc);
    1186113452
     
    1188813479HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1188913480{
    11890     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     13481    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13482    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1189113483    AssertRCReturn(rc, rc);
    11892     rc = hmR0VmxCheckVmcsCtls(pVCpu);
     13484
     13485    rc = hmR0VmxCheckVmcsCtls(pVCpu, pVmcsInfo);
    1189313486    if (RT_FAILURE(rc))
    1189413487        return rc;
    1189513488
    11896     uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu);
     13489    uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
    1189713490    NOREF(uInvalidReason);
    1189813491
    1189913492#ifdef VBOX_STRICT
    11900     uint32_t       fIntrState;
    11901     RTHCUINTREG    uHCReg;
    11902     uint64_t       u64Val;
    11903     uint32_t       u32Val;
    11904 
     13493    uint32_t    fIntrState;
     13494    RTHCUINTREG uHCReg;
     13495    uint64_t    u64Val;
     13496    uint32_t    u32Val;
    1190513497    rc  = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
    1190613498    rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
     
    1190913501    AssertRCReturn(rc, rc);
    1191013502
    11911     Log4(("uInvalidReason                             %u\n", uInvalidReason));
     13503    Log4(("uInvalidReason                             %u\n",     uInvalidReason));
    1191213504    Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO    %#RX32\n", pVmxTransient->uEntryIntInfo));
    1191313505    Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE    %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
     
    1192913521
    1193013522    hmR0DumpRegs(pVCpu);
    11931 #else
    11932     NOREF(pVmxTransient);
    1193313523#endif
    1193413524
     
    1198213572    /* By default, we don't enable VMX_PROC_CTLS2_DESCRIPTOR_TABLE_EXIT. */
    1198313573    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
    11984     if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT)
     13574    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13575    if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT)
    1198513576        return VERR_EM_INTERPRETER;
    1198613577    AssertMsgFailed(("Unexpected XDTR access\n"));
     
    1199713588
    1199813589    /* By default, we don't enable VMX_PROC_CTLS2_RDRAND_EXIT. */
    11999     if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT)
     13590    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13591    if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT)
    1200013592        return VERR_EM_INTERPRETER;
    1200113593    AssertMsgFailed(("Unexpected RDRAND exit\n"));
     
    1201513607     * MSRs required.  That would require changes to IEM and possibly CPUM too.
    1201613608     * (Should probably do it in lazy fashion from CPUMAllMsrs.cpp). */
    12017     uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
     13609    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     13610    uint32_t const idMsr    = pVCpu->cpum.GstCtx.ecx;
     13611    uint64_t       fImport  = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
     13612    switch (idMsr)
     13613    {
     13614        case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
     13615        case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
     13616    }
     13617
    1201813618    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    12019     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
    12020     switch (idMsr)
    12021     {
    12022         /* The FS and GS base MSRs are not part of the above all-MSRs mask. */
    12023         case MSR_K8_FS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_FS); break;
    12024         case MSR_K8_GS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_GS); break;
    12025     }
     13619    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
    1202613620    AssertRCReturn(rc, rc);
    1202713621
     
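Instead of issuing a second import call for the FS/GS base as the removed lines did, the new RDMSR path composes the complete fImport mask up front and performs a single guest-state import. A standalone sketch of that mask-composition idiom; the EXTRN_* values are illustrative placeholders (the real CPUMCTX_EXTRN_* constants differ), only the MSR indices are architectural:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative extern-state flags; the real CPUMCTX_EXTRN_* values differ. */
    #define EXTRN_BASE_MASK   0x0001u   /* stands in for IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK */
    #define EXTRN_ALL_MSRS    0x0002u
    #define EXTRN_FS          0x0004u
    #define EXTRN_GS          0x0008u

    #define MSR_K8_FS_BASE    0xc0000100u
    #define MSR_K8_GS_BASE    0xc0000101u

    /* Compose the full import mask before the (single) import call. */
    static uint64_t build_import_mask(uint32_t idMsr)
    {
        uint64_t fImport = EXTRN_BASE_MASK | EXTRN_ALL_MSRS;
        switch (idMsr)
        {
            /* FS/GS base live in the segment registers, not the all-MSRs mask. */
            case MSR_K8_FS_BASE: fImport |= EXTRN_FS; break;
            case MSR_K8_GS_BASE: fImport |= EXTRN_GS; break;
        }
        return fImport;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)build_import_mask(MSR_K8_FS_BASE));
        printf("%#llx\n", (unsigned long long)build_import_mask(0x10 /* IA32_TSC */));
        return 0;
    }

Building the mask first means one VMCS/state round-trip regardless of which MSR is being read, rather than an extra import tacked on in the switch.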
    1202913623
    1203013624#ifdef VBOX_STRICT
    12031     if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    12032     {
    12033         if (   hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr)
     13625    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     13626    {
     13627        if (   hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
    1203413628            && idMsr != MSR_K6_EFER)
    1203513629        {
     
    1203913633        if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    1204013634        {
    12041             VMXMSREXITREAD  enmRead;
    12042             VMXMSREXITWRITE enmWrite;
    12043             int rc2 = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite);
    12044             AssertRCReturn(rc2, rc2);
    12045             if (enmRead == VMXMSREXIT_PASSTHRU_READ)
     13635            Assert(pVmcsInfo->pvMsrBitmap);
     13636            uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
     13637            if (fMsrpm & VMXMSRPM_ALLOW_RD)
    1204613638            {
    1204713639                AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
     
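HMGetVmxMsrPermission() changes shape here: the removed form filled two enum out-parameters (VMXMSREXITREAD/VMXMSREXITWRITE) and returned a status, while the new form returns a single flags word tested with VMXMSRPM_ALLOW_RD / VMXMSRPM_ALLOW_WR. A sketch of the new caller style under simplified definitions; get_msr_permission() and its toy permission rule are stand-ins for the real bitmap lookup:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative permission flags, mirroring VMXMSRPM_ALLOW_RD/_WR. */
    #define MSRPM_ALLOW_RD  (1u << 0)
    #define MSRPM_ALLOW_WR  (1u << 1)

    /* Simplified stand-in: a real implementation would index the MSR-bitmap
       page by idMsr; here a toy rule grants read-only access to even MSRs. */
    static uint32_t get_msr_permission(const void *pvMsrBitmap, uint32_t idMsr)
    {
        (void)pvMsrBitmap;
        return (idMsr & 1) ? 0 : MSRPM_ALLOW_RD;
    }

    int main(void)
    {
        uint8_t abBitmap[4096] = { 0 };  /* stand-in for pVmcsInfo->pvMsrBitmap */
        uint32_t fMsrpm = get_msr_permission(abBitmap, 0xc0000100 /* FS base */);
        if (fMsrpm & MSRPM_ALLOW_RD)
            printf("read passes through (no exit expected)\n");
        if (!(fMsrpm & MSRPM_ALLOW_WR))
            printf("write intercepted (exit expected)\n");
        return 0;
    }

Returning flags lets one call answer both the read and the write question with no failure path, which is why the old AssertRCReturn disappears from the callers.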
    1208013672     * MSRs required.  That would require changes to IEM and possibly CPUM too.
    1208113673     * (Should probably do it in lazy fashion from CPUMAllMsrs.cpp). */
    12082     uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
     13674    uint32_t const idMsr    = pVCpu->cpum.GstCtx.ecx;
     13675    uint64_t       fImport  = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
     13676
     13677    /*
     13678     * The FS and GS base MSRs are not part of the above all-MSRs mask.
     13679     * Although we don't need to fetch the base as it will be overwritten shortly, while
     13680     * loading guest-state we would also load the entire segment register including limit
     13681     * and attributes and thus we need to load them here.
     13682     */
     13683    switch (idMsr)
     13684    {
     13685        case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
     13686        case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
     13687    }
     13688
     13689    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1208313690    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    12084     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
    12085                                              | CPUMCTX_EXTRN_ALL_MSRS);
    12086     switch (idMsr)
    12087     {
    12088         /*
    12089          * The FS and GS base MSRs are not part of the above all-MSRs mask.
    12090          *
    12091          * Although we don't need to fetch the base as it will be overwritten shortly, while
    12092          * loading guest-state we would also load the entire segment register including limit
    12093          * and attributes and thus we need to load them here.
    12094          */
    12095         case MSR_K8_FS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_FS); break;
    12096         case MSR_K8_GS_BASE: rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_GS); break;
    12097     }
     13691    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
    1209813692    AssertRCReturn(rc, rc);
    1209913693
     
    1211313707        {
    1211413708            /*
    12115              * We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
    12116              * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before IEM changes it.
     13709             * We've already saved the APIC related guest-state (TPR) in post-run phase.
     13710             * When full APIC register virtualization is implemented we'll have to make
     13711             * sure APIC state is saved from the VMCS before IEM changes it.
    1211713712             */
    1211813713            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
    1211913714        }
    1212013715        else if (idMsr == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
    12121             pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
     13716            pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    1212213717        else if (idMsr == MSR_K6_EFER)
    1212313718        {
    1212413719            /*
    12125              * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
    12126              * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
    12127              * the other bits as well, SCE and NXE. See @bugref{7368}.
     13720             * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
     13721             * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
     13722             * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
    1212813723             */
    12129             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS
    12130                                                      | HM_CHANGED_VMX_EXIT_CTLS);
     13724            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
    1213113725        }
    1213213726
    1213313727        /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
    12134         if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
     13728        if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
    1213513729        {
    1213613730            switch (idMsr)
     
    1214413738                default:
    1214513739                {
    12146                     if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
     13740                    if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
    1214713741                        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    1214813742                    else if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
     
    1216413758                case MSR_K8_GS_BASE:
    1216513759                {
    12166                     AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
     13760                    uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
     13761                    Assert(fMsrpm == (VMXMSRPM_ALLOW_RD | VMXMSRPM_ALLOW_WR));
     13762
     13763                    uint32_t u32Proc;
     13764                    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Proc);
     13765                    AssertRC(rc);
     13766                    Assert(u32Proc == pVmcsInfo->u32ProcCtls);
     13767                    Assert(u32Proc & VMX_PROC_CTLS_USE_MSR_BITMAPS);
     13768
     13769                    AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32 fMsrpm=%#RX32\n", idMsr, fMsrpm));
    1216713770                    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1216813771                }
     
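The new strict-build block above re-reads VMX_VMCS32_CTRL_PROC_EXEC from hardware and asserts it still matches the cached pVmcsInfo->u32ProcCtls, catching any drift between the software shadow and the active VMCS. A standalone model of that consistency check; the global standing in for the hardware VMCS and vmcs_read32() are simplifications, while the bit position of the MSR-bitmap control is the architectural one:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PROC_CTLS_USE_MSR_BITMAPS  (1u << 28)  /* architectural bit for this control */

    static uint32_t g_uHwProcCtls;   /* models the value held by the hardware VMCS */

    static int vmcs_read32(uint32_t uField, uint32_t *puVal)
    {
        (void)uField;
        *puVal = g_uHwProcCtls;
        return 0;
    }

    int main(void)
    {
        uint32_t const uCachedProcCtls = PROC_CTLS_USE_MSR_BITMAPS; /* pVmcsInfo->u32ProcCtls */
        g_uHwProcCtls = uCachedProcCtls;                            /* in sync */

        uint32_t u32Proc = 0;
        int rc = vmcs_read32(0x4002 /* VMX_VMCS32_CTRL_PROC_EXEC */, &u32Proc);
        assert(rc == 0);
        assert(u32Proc == uCachedProcCtls);           /* shadow must match hardware   */
        assert(u32Proc & PROC_CTLS_USE_MSR_BITMAPS);  /* bitmaps must be in use here  */
        printf("cached controls match the VMCS\n");
        return 0;
    }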
    1217113774                default:
    1217213775                {
    12173                     if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
     13776                    if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
    1217413777                    {
    12175                         /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */
     13778                        /* EFER MSR writes are always intercepted. */
    1217613779                        if (idMsr != MSR_K6_EFER)
    1217713780                        {
     
    1218413787                    if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
    1218513788                    {
    12186                         VMXMSREXITREAD  enmRead;
    12187                         VMXMSREXITWRITE enmWrite;
    12188                         int rc2 = HMGetVmxMsrPermission(pVCpu->hm.s.vmx.pvMsrBitmap, idMsr, &enmRead, &enmWrite);
    12189                         AssertRCReturn(rc2, rc2);
    12190                         if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
     13789                        Assert(pVmcsInfo->pvMsrBitmap);
     13790                        uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
     13791                        if (fMsrpm & VMXMSRPM_ALLOW_WR)
    1219113792                        {
    1219213793                            AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
     
    1223113832{
    1223213833    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    12233     Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
    12234 
    12235     /*
    12236      * The TPR shadow would've been synced with the APIC TPR in hmR0VmxPostRunGuest(). We'll re-evaluate
    12237      * pending interrupts and inject them before the next VM-entry so we can just continue execution here.
     13834    Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
     13835
     13836    /*
     13837     * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
     13838     * We'll re-evaluate pending interrupts and inject them before the next VM
     13839     * entry so we can just continue execution here.
    1223813840     */
    1223913841    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
     
    1225613858    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
    1225713859
     13860    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1225813861    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1225913862    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    12260     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     13863    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1226113864    AssertRCReturn(rc, rc);
    1226213865
    1226313866    VBOXSTRICTRC rcStrict;
    12264     PVM pVM  = pVCpu->CTX_SUFF(pVM);
     13867    PVM pVM = pVCpu->CTX_SUFF(pVM);
    1226513868    RTGCUINTPTR const uExitQual = pVmxTransient->uExitQual;
    1226613869    uint32_t const uAccessType  = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
     
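The MOV-CRx dispatch in the hunks that follow branches on fields packed into the exit qualification: CR number, access type, and the general-purpose register involved. A standalone decode sketch; the field layout (bits 3:0 CR number, bits 5:4 access type, bits 11:8 GP register) follows the Intel SDM, and the VMX_EXIT_QUAL_CRX_* macros seen in the diff perform the same extraction:

    #include <stdint.h>
    #include <stdio.h>

    /* Exit-qualification layout for control-register accesses (SDM Vol. 3, 27.2.1). */
    #define QUAL_CRX_NUM(a_uQual)     ((uint32_t)((a_uQual) & 0xf))         /* bits 3:0  */
    #define QUAL_CRX_ACCESS(a_uQual)  ((uint32_t)(((a_uQual) >> 4) & 0x3))  /* bits 5:4  */
    #define QUAL_CRX_GENREG(a_uQual)  ((uint32_t)(((a_uQual) >> 8) & 0xf))  /* bits 11:8 */

    #define CRX_ACCESS_WRITE  0u  /* MOV to CRx   */
    #define CRX_ACCESS_READ   1u  /* MOV from CRx */

    int main(void)
    {
        /* Example: MOV CR8, RAX encodes CR number 8, access "write", register 0. */
        uint64_t const uExitQual = 8u | (CRX_ACCESS_WRITE << 4) | (0u << 8);
        printf("cr=%u access=%s genreg=%u\n",
               QUAL_CRX_NUM(uExitQual),
               QUAL_CRX_ACCESS(uExitQual) == CRX_ACCESS_WRITE ? "write" : "read",
               QUAL_CRX_GENREG(uExitQual));
        return 0;
    }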
    1234013943                {
    1234113944                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
    12342                     Assert(!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
     13945                    Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
    1234313946                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    1234413947                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
     
    1236013963            /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
    1236113964            Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8
    12362                    || !(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
     13965                   || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
    1236313966
    1236413967            rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
     
    1244314046
    1244414047    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     14048    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1244514049    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1244614050    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    12447     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER);
    12448     /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
     14051    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
     14052                                                     | CPUMCTX_EXTRN_EFER);
     14053    /* EFER MSR also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
    1244914054    AssertRCReturn(rc, rc);
    1245014055
     
    1249114096             * interpreting the instruction.
    1249214097             */
    12493             Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     14098            Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    1249414099            AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
    1249514100            bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
     
    1252714132             * IN/OUT - I/O instruction.
    1252814133             */
    12529             Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     14134            Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    1253014135            uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
    1253114136            Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
     
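The string and non-string I/O paths above both start from the same packed exit qualification. A standalone sketch of the decode; the layout (bits 2:0 access width, bit 3 direction, bit 4 string, bit 5 REP, bits 31:16 port) is the architectural one from the SDM, while the real code additionally masks the operand with the s_aIOOpAnd width table seen in the hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* Exit-qualification layout for I/O instructions (SDM Vol. 3, 27.2.1). */
    #define QUAL_IO_WIDTH(a_u)   ((uint32_t)((a_u) & 0x7))            /* 0=byte, 1=word, 3=dword */
    #define QUAL_IO_IS_IN(a_u)   ((uint32_t)(((a_u) >> 3) & 1))       /* direction: 1 = IN       */
    #define QUAL_IO_IS_STR(a_u)  ((uint32_t)(((a_u) >> 4) & 1))       /* INS/OUTS                */
    #define QUAL_IO_IS_REP(a_u)  ((uint32_t)(((a_u) >> 5) & 1))       /* REP prefix              */
    #define QUAL_IO_PORT(a_u)    ((uint32_t)(((a_u) >> 16) & 0xffff))

    int main(void)
    {
        /* Example: "OUT 0x80, AL" -> byte-sized write to port 0x80, non-string. */
        uint64_t const uExitQual = ((uint64_t)0x80 << 16)  /* port */
                                 | (0u << 3)               /* OUT  */
                                 | 0u;                     /* byte */
        printf("port=%#x width=%u %s %s\n",
               QUAL_IO_PORT(uExitQual), QUAL_IO_WIDTH(uExitQual),
               QUAL_IO_IS_IN(uExitQual)  ? "in"  : "out",
               QUAL_IO_IS_STR(uExitQual) ? "str" : "non-str");
        return 0;
    }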
    1257614181             * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
    1257714182             */
    12578             rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);
     14183            rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
    1257914184            AssertRCReturn(rc, rc);
    1258014185
     
    1264814253         * Frequent exit or something needing probing.  Get state and call EMHistoryExec.
    1264914254         */
    12650         int rc2 = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     14255        int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1265114256        AssertRCReturn(rc2, rc2);
    1265214257        STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
     
    1272914334{
    1273014335    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    12731     Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
    12732     pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
    12733     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
     14336
     14337    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14338    pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
     14339    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    1273414340    AssertRCReturn(rc, rc);
    12735     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
    1273614341    return VINF_EM_DBG_STEPPED;
    1273714342}
     
    1274414349{
    1274514350    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    12746 
    1274714351    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
    1274814352
     
    1276614370
    1276714371    /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
    12768     int rc  = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     14372    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14373    int rc  = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1276914374    rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1277014375    AssertRCReturn(rc, rc);
     
    1277814383        case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
    1277914384        {
    12780             AssertMsg(   !(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
     14385            AssertMsg(   !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    1278114386                      || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
    1278214387                      ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
    1278314388
    12784             RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase;   /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
     14389            RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64GstMsrApicBase;    /* Always up-to-date, as it is not part of the VMCS. */
    1278514390            GCPhys &= PAGE_BASE_GC_MASK;
    1278614391            GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
     
    1283214437    }
    1283314438
     14439    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1283414440    if (   !pVCpu->hm.s.fSingleInstruction
    1283514441        && !pVmxTransient->fWasHyperDebugStateActive)
    1283614442    {
    1283714443        Assert(!DBGFIsStepping(pVCpu));
    12838         Assert(pVCpu->hm.s.vmx.Ctls.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
     14444        Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
    1283914445
    1284014446        /* Don't intercept MOV DRx any more. */
    12841         pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
    12842         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
     14447        pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
     14448        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
    1284314449        AssertRCReturn(rc, rc);
    1284414450
     
    1286714473
    1286814474    /*
    12869      * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
     14475     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER MSR, CS.
     14476     * The EFER MSR is always up-to-date.
    1287014477     * Update the segment registers and DR7 from the CPU.
    1287114478     */
    1287214479    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1287314480    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    12874     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
     14481    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
    1287514482    AssertRCReturn(rc, rc);
    12876     Log4Func(("CS:RIP=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
     14483    Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
    1287714484
    1287814485    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    1293714544     */
    1293814545    RTGCPHYS GCPhys;
     14546    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1293914547    int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
    12940     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     14548    rc    |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1294114549    AssertRCReturn(rc, rc);
    1294214550
     
    1297314581         * Frequent exit or something needing probing.  Get state and call EMHistoryExec.
    1297414582         */
    12975         int rc2 = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     14583        int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1297614584        AssertRCReturn(rc2, rc2);
    1297714585
     
    1301514623
    1301614624    RTGCPHYS GCPhys;
     14625    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1301714626    int rc  = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
    1301814627    rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    13019     rc     |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     14628    rc     |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1302014629    AssertRCReturn(rc, rc);
    1302114630
     
    1303814647    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1303914648
    13040     Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
     14649    Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x cs:rip=%#04x:%#RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
    1304114650              pCtx->cs.Sel, pCtx->rip));
    1304214651
     
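Before logging and forwarding, the EPT-violation handler folds qualification bits into a #PF-style error code (the uErrorCode in the hunk above). A simplified standalone rendition follows; the qualification and error-code bit positions are architectural, but the mapping shown is a reduction of the full handler logic:

    #include <stdint.h>
    #include <stdio.h>

    /* EPT-violation exit-qualification bits (SDM Vol. 3, 27.2.1). */
    #define EPT_QUAL_WRITE         (1u << 1)   /* access was a data write         */
    #define EPT_QUAL_INSTR_FETCH   (1u << 2)   /* access was an instruction fetch */
    #define EPT_QUAL_READABLE      (1u << 3)   /* the page was readable           */

    /* x86 #PF error-code bits. */
    #define TRAP_PF_P    (1u << 0)
    #define TRAP_PF_RW   (1u << 1)
    #define TRAP_PF_ID   (1u << 4)

    /* Turn an EPT-violation qualification into a #PF-style error code,
       roughly what the handler computes before logging. */
    static uint32_t ept_qual_to_pf_errcode(uint64_t uExitQual)
    {
        uint32_t uErrorCode = 0;
        if (uExitQual & EPT_QUAL_INSTR_FETCH)
            uErrorCode |= TRAP_PF_ID;
        if (uExitQual & EPT_QUAL_WRITE)
            uErrorCode |= TRAP_PF_RW;
        if (uExitQual & EPT_QUAL_READABLE)   /* page present but access denied */
            uErrorCode |= TRAP_PF_P;
        return uErrorCode;
    }

    int main(void)
    {
        printf("%#x\n", ept_qual_to_pf_errcode(EPT_QUAL_WRITE));                           /* 0x2  */
        printf("%#x\n", ept_qual_to_pf_errcode(EPT_QUAL_INSTR_FETCH | EPT_QUAL_READABLE)); /* 0x11 */
        return 0;
    }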
    1307614685    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
    1307714686
    13078     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0);
     14687    int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
    1307914688    AssertRCReturn(rc, rc);
    1308014689
     
    1310614715    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
    1310714716
    13108     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     14717    int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1310914718    AssertRCReturn(rc, rc);
    1311014719
     
    1318714796        VMMRZCallRing3Enable(pVCpu);
    1318814797
    13189         rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7);
     14798        rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
    1319014799        AssertRCReturn(rc, rc);
    1319114800
     
    1323914848static int hmR0VmxHandleMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
    1324014849{
    13241     Log(("hmR0VmxHandleMesaDrvGp: at %04x:%08RX64 rcx=%RX64 rbx=%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
     14850    LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
    1324214851    RT_NOREF(pCtx);
    1324314852
     
    1331014919
    1331114920    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    13312     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     14921    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14922    if (pVmcsInfo->RealMode.fRealOnV86Active)
    1331314923    { /* likely */ }
    1331414924    else
     
    1331714927        Assert(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv);
    1331814928#endif
    13319         /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
     14929        /* If the guest is not in real-mode or we have unrestricted guest execution support, reflect #GP to the guest. */
    1332014930        int rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    1332114931        rc     |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    1332214932        rc     |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13323         rc     |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     14933        rc     |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1332414934        AssertRCReturn(rc, rc);
    13325         Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,
     14935        Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
    1332614936                  pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
    1332714937
     
    1333814948    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
    1333914949
    13340     int rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     14950    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1334114951    AssertRCReturn(rc, rc);
    1334214952
     
    1335014960             * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
    1335114961             */
    13352             pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
     14962            pVmcsInfo->RealMode.fRealOnV86Active = false;
    1335314963            if (HMCanExecuteVmxGuest(pVCpu, pCtx))
    1335414964            {
    13355                 Log4Func(("Mode changed but guest still suitable for executing using VT-x\n"));
     14965                Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
    1335614966                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    1335714967            }
     
    1338514995    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1338614996#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    13387     AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active,
     14997    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14998    AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfo->RealMode.fRealOnV86Active,
    1338814999              ("uVector=%#x u32XcptBitmap=%#X32\n",
    13389                VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.Ctls.u32XcptBitmap));
     15000               VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
     15001    NOREF(pVmcsInfo);
    1339015002#endif
    1339115003
     
    1339815010
    1339915011#ifdef DEBUG_ramshankar
    13400     rc |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    13401     uint8_t uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    13402     Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip));
     15012    rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     15013    Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n",
     15014         VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pCtx->cs.Sel, pCtx->rip));
    1340315015#endif
    1340415016
     
    1345315065
    1345415066    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    13455     rc = HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     15067    rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1345615068    AssertRCReturn(rc, rc);
    1345715069
     
    1350715119
    1350815120#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    13509 /** @name Nested-guest VM-exit handlers.
     15121/** @name VMX instruction handlers.
    1351015122 * @{
    1351115123 */
    1351215124/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    13513 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Nested-guest VM-exit handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
     15125/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VMX instructions VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    1351415126/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    1351515127
     
    1352215134
    1352315135    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13524     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    13525                                              | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     15136    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     15137                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1352615138    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    1352715139    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     
    1355715169    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1355815170
     15171    /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
     15172       otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
    1355915173    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13560     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
     15174    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1356115175    AssertRCReturn(rc, rc);
    1356215176
     
    1358215196
    1358315197    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13584     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    13585                                              | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     15198    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     15199                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1358615200    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    1358715201    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     
    1361815232
    1361915233    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13620     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    13621                                              | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     15234    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     15235                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1362215236    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    1362315237    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     
    1363615250    VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
    1363715251    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    13638         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
     15252        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1363915253    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1364015254    {
     
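Each of these VMX-instruction handlers gathers the instruction length, instruction information, and exit qualification into an ExitInfo bundle and hands it to the matching IEMExecDecoded* routine. A sketch of that fill-and-dispatch pattern with a hypothetical trimmed-down structure; the real VMXVEXITINFO carries more fields, and the exit-reason value below is illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical, trimmed-down exit-info bundle; the real VMXVEXITINFO
       carries additional fields (guest-linear address, second qual, ...). */
    typedef struct EXITINFO
    {
        uint32_t uReason;     /* VM-exit reason                      */
        uint32_t cbInstr;     /* VM-exit instruction length          */
        uint32_t uInstrInfo;  /* VM-exit instruction information     */
        uint64_t u64Qual;     /* exit qualification (memory operand) */
    } EXITINFO;

    /* Models the dispatch into the instruction emulator. */
    static int exec_decoded(const EXITINFO *pExitInfo)
    {
        printf("reason=%u cbInstr=%u instrinfo=%#x qual=%#llx\n",
               pExitInfo->uReason, pExitInfo->cbInstr, pExitInfo->uInstrInfo,
               (unsigned long long)pExitInfo->u64Qual);
        return 0; /* VINF_SUCCESS */
    }

    int main(void)
    {
        EXITINFO ExitInfo;
        memset(&ExitInfo, 0, sizeof(ExitInfo));
        ExitInfo.uReason    = 22;     /* illustrative exit-reason value        */
        ExitInfo.cbInstr    = 5;      /* from the instruction-length field     */
        ExitInfo.uInstrInfo = 0;      /* from the instruction-info field       */
        ExitInfo.u64Qual    = 0x1000; /* memory-operand address/displacement   */
        return exec_decoded(&ExitInfo);
    }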
    1365415268
    1365515269    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13656     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    13657                                              | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     15270    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     15271                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1365815272    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    1365915273    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     
    1367315287    VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
    1367415288    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    13675         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
     15289        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1367615290    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1367715291    {
     
    1369015304    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1369115305
     15306    /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
     15307       otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
    1369215308    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13693     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
     15309    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1369415310    AssertRCReturn(rc, rc);
    1369515311
     
    1371515331
    1371615332    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13717     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    13718                                              | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     15333    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     15334                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1371915335    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    1372015336    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     
    1375215368
    1375315369    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13754     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR4 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
     15370    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
     15371                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    1375515372    AssertRCReturn(rc, rc);
    1375615373
     
    1375915376    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
    1376015377    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    13761     {
    13762         /* VMXOFF changes the internal hwvirt. state but not anything that's visible to the guest other than RIP. */
    1376315378        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
    13764     }
    1376515379    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1376615380    {
     
    1378015394
    1378115395    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13782     rc    |= HMVMX_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
    13783                                              | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     15396    rc    |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
     15397                                                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
    1378415398    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    1378515399    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h

    r77481 r78220  
    4646VMMR0DECL(int)          VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);
    4747VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu);
    48 DECLASM(int)            VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);
    49 DECLASM(int)            VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);
     48DECLASM(int)            VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);
     49DECLASM(int)            VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);
    5050
    5151# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    52 DECLASM(int)            VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);
     52DECLASM(int)            VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);
    5353VMMR0DECL(int)          VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cbParam, uint32_t *paParam);
    5454# endif
     
    6161{
    6262    Assert(idxCache <= VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX);
    63     *pVal = pVCpu->hm.s.vmx.VmcsBatchCache.Read.aFieldVal[idxCache];
     63    *pVal = pVCpu->hm.s.vmx.VmcsCache.Read.aFieldVal[idxCache];
    6464    return VINF_SUCCESS;
    6565}
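The VMXVMCSBATCHCACHE to VMXVMCSCACHE rename and the accessor just above show the read-cache idea: a fixed set of VMCS fields is read in one batch around VM-exit and afterwards fetched by index without further VMREADs. A standalone sketch of such a cache; the types, the index bound, and the error handling are simplified stand-ins for the real structures:

    #include <stdint.h>
    #include <stdio.h>

    #define VMCS_CACHE_MAX_IDX  7   /* illustrative; mirrors VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX */

    typedef struct VMCSCACHE
    {
        struct
        {
            uint64_t aFieldVal[VMCS_CACHE_MAX_IDX + 1];  /* batched VMREAD results */
        } Read;
    } VMCSCACHE;

    /* Index-based fetch: no VMREAD, just a bounds-checked array access,
       matching the shape of VMXReadCachedVmcsEx() in the hunk above. */
    static int read_cached_vmcs(const VMCSCACHE *pCache, uint32_t idxCache, uint64_t *pVal)
    {
        if (idxCache > VMCS_CACHE_MAX_IDX)
            return -1;  /* would be an assertion in the real code */
        *pVal = pCache->Read.aFieldVal[idxCache];
        return 0;       /* VINF_SUCCESS */
    }

    int main(void)
    {
        VMCSCACHE Cache = { { { 0 } } };
        Cache.Read.aFieldVal[3] = 0xdeadbeef;  /* filled after a VM-exit in the real flow */
        uint64_t uVal = 0;
        int rc = read_cached_vmcs(&Cache, 3, &uVal);
        printf("rc=%d val=%#llx\n", rc, (unsigned long long)uVal);
        return 0;
    }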
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r76993 r78220  
    326326};
    327327
     328/** Saved state field descriptors for VMX nested hardware-virtualization
     329 *  VMCS. */
     330static const SSMFIELD g_aVmxHwvirtVmcs[] =
     331{
     332    SSMFIELD_ENTRY(       VMXVVMCS, u32VmcsRevId),
     333    SSMFIELD_ENTRY(       VMXVVMCS, enmVmxAbort),
     334    SSMFIELD_ENTRY(       VMXVVMCS, fVmcsState),
     335    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au8Padding0),
     336    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved0),
     337
     338    SSMFIELD_ENTRY(       VMXVVMCS, u16Vpid),
     339    SSMFIELD_ENTRY(       VMXVVMCS, u16PostIntNotifyVector),
     340    SSMFIELD_ENTRY(       VMXVVMCS, u16EptpIndex),
     341    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved0),
     342
     343    SSMFIELD_ENTRY(       VMXVVMCS, GuestEs),
     344    SSMFIELD_ENTRY(       VMXVVMCS, GuestCs),
     345    SSMFIELD_ENTRY(       VMXVVMCS, GuestSs),
     346    SSMFIELD_ENTRY(       VMXVVMCS, GuestDs),
     347    SSMFIELD_ENTRY(       VMXVVMCS, GuestFs),
     348    SSMFIELD_ENTRY(       VMXVVMCS, GuestGs),
     349    SSMFIELD_ENTRY(       VMXVVMCS, GuestLdtr),
     350    SSMFIELD_ENTRY(       VMXVVMCS, GuestTr),
     351    SSMFIELD_ENTRY(       VMXVVMCS, u16GuestIntStatus),
     352    SSMFIELD_ENTRY(       VMXVVMCS, u16PmlIndex),
     353    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved1[8]),
     354
     355    SSMFIELD_ENTRY(       VMXVVMCS, HostEs),
     356    SSMFIELD_ENTRY(       VMXVVMCS, HostCs),
     357    SSMFIELD_ENTRY(       VMXVVMCS, HostSs),
     358    SSMFIELD_ENTRY(       VMXVVMCS, HostDs),
     359    SSMFIELD_ENTRY(       VMXVVMCS, HostFs),
     360    SSMFIELD_ENTRY(       VMXVVMCS, HostGs),
     361    SSMFIELD_ENTRY(       VMXVVMCS, HostTr),
     362    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved2),
     363
     364    SSMFIELD_ENTRY(       VMXVVMCS, u32PinCtls),
     365    SSMFIELD_ENTRY(       VMXVVMCS, u32ProcCtls),
     366    SSMFIELD_ENTRY(       VMXVVMCS, u32XcptBitmap),
     367    SSMFIELD_ENTRY(       VMXVVMCS, u32XcptPFMask),
     368    SSMFIELD_ENTRY(       VMXVVMCS, u32XcptPFMatch),
     369    SSMFIELD_ENTRY(       VMXVVMCS, u32Cr3TargetCount),
     370    SSMFIELD_ENTRY(       VMXVVMCS, u32ExitCtls),
     371    SSMFIELD_ENTRY(       VMXVVMCS, u32ExitMsrStoreCount),
     372    SSMFIELD_ENTRY(       VMXVVMCS, u32ExitMsrLoadCount),
     373    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryCtls),
     374    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryMsrLoadCount),
     375    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryIntInfo),
     376    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryXcptErrCode),
     377    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryInstrLen),
     378    SSMFIELD_ENTRY(       VMXVVMCS, u32TprThreshold),
     379    SSMFIELD_ENTRY(       VMXVVMCS, u32ProcCtls2),
     380    SSMFIELD_ENTRY(       VMXVVMCS, u32PleGap),
     381    SSMFIELD_ENTRY(       VMXVVMCS, u32PleWindow),
     382    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved1),
     383
     384    SSMFIELD_ENTRY(       VMXVVMCS, u32RoVmInstrError),
     385    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitReason),
     386    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitIntInfo),
     387    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitIntErrCode),
     388    SSMFIELD_ENTRY(       VMXVVMCS, u32RoIdtVectoringInfo),
     389    SSMFIELD_ENTRY(       VMXVVMCS, u32RoIdtVectoringErrCode),
     390    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitInstrLen),
     391    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitInstrInfo),
     392    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32RoReserved2),
     393
     394    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestEsLimit),
     395    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestCsLimit),
     396    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestSsLimit),
     397    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestDsLimit),
     398    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestFsLimit),
     399    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestGsLimit),
     400    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestLdtrLimit),
     401    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestTrLimit),
     402    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestGdtrLimit),
     403    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestIdtrLimit),
     404    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestEsAttr),
     405    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestCsAttr),
     406    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestSsAttr),
     407    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestDsAttr),
     408    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestFsAttr),
     409    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestGsAttr),
     410    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestLdtrAttr),
     411    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestTrAttr),
     412    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestIntrState),
     413    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestActivityState),
     414    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestSmBase),
     415    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestSysenterCS),
     416    SSMFIELD_ENTRY(       VMXVVMCS, u32PreemptTimer),
     417    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved3),
     418
     419    SSMFIELD_ENTRY(       VMXVVMCS, u32HostSysenterCs),
     420    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved4),
     421
     422    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrIoBitmapA),
     423    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrIoBitmapB),
     424    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrMsrBitmap),
     425    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrExitMsrStore),
     426    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrExitMsrLoad),
     427    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrEntryMsrLoad),
     428    SSMFIELD_ENTRY(       VMXVVMCS, u64ExecVmcsPtr),
     429    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrPml),
     430    SSMFIELD_ENTRY(       VMXVVMCS, u64TscOffset),
     431    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrVirtApic),
     432    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrApicAccess),
     433    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrPostedIntDesc),
     434    SSMFIELD_ENTRY(       VMXVVMCS, u64VmFuncCtls),
     435    SSMFIELD_ENTRY(       VMXVVMCS, u64EptpPtr),
     436    SSMFIELD_ENTRY(       VMXVVMCS, u64EoiExitBitmap0),
     437    SSMFIELD_ENTRY(       VMXVVMCS, u64EoiExitBitmap1),
     438    SSMFIELD_ENTRY(       VMXVVMCS, u64EoiExitBitmap2),
     439    SSMFIELD_ENTRY(       VMXVVMCS, u64EoiExitBitmap3),
     440    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrEptpList),
     441    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrVmreadBitmap),
     442    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrVmwriteBitmap),
     443    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrXcptVeInfo),
     444    SSMFIELD_ENTRY(       VMXVVMCS, u64XssBitmap),
     445    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrEnclsBitmap),
     446    SSMFIELD_ENTRY(       VMXVVMCS, u64TscMultiplier),
     447    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved0),
     448
     449    SSMFIELD_ENTRY(       VMXVVMCS, u64RoGuestPhysAddr),
     450    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved1),
     451
     452    SSMFIELD_ENTRY(       VMXVVMCS, u64VmcsLinkPtr),
     453    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestDebugCtlMsr),
     454    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPatMsr),
     455    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestEferMsr),
     456    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPerfGlobalCtlMsr),
     457    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPdpte0),
     458    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPdpte1),
     459    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPdpte2),
     460    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPdpte3),
     461    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestBndcfgsMsr),
     462    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved2),
     463
     464    SSMFIELD_ENTRY(       VMXVVMCS, u64HostPatMsr),
     465    SSMFIELD_ENTRY(       VMXVVMCS, u64HostEferMsr),
     466    SSMFIELD_ENTRY(       VMXVVMCS, u64HostPerfGlobalCtlMsr),
     467    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved3),
     468
     469    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr0Mask),
     470    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr4Mask),
     471    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr0ReadShadow),
     472    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr4ReadShadow),
     473    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr3Target0),
     474    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr3Target1),
     475    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr3Target2),
     476    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr3Target3),
     477    SSMFIELD_ENTRY(       VMXVVMCS, au64Reserved4),
     478
     479    SSMFIELD_ENTRY(       VMXVVMCS, u64RoExitQual),
     480    SSMFIELD_ENTRY(       VMXVVMCS, u64RoIoRcx),
     481    SSMFIELD_ENTRY(       VMXVVMCS, u64RoIoRsi),
     482    SSMFIELD_ENTRY(       VMXVVMCS, u64RoIoRdi),
     483    SSMFIELD_ENTRY(       VMXVVMCS, u64RoIoRip),
     484    SSMFIELD_ENTRY(       VMXVVMCS, u64RoGuestLinearAddr),
     485    SSMFIELD_ENTRY(       VMXVVMCS, au64Reserved5),
     486
     487    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestCr0),
     488    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestCr3),
     489    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestCr4),
     490    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestEsBase),
     491    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestCsBase),
     492    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestSsBase),
     493    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestDsBase),
     494    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestFsBase),
     495    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestGsBase),
     496    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestLdtrBase),
     497    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestTrBase),
     498    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestGdtrBase),
     499    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestIdtrBase),
     500    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestDr7),
     501    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestRsp),
     502    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestRip),
     503    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestRFlags),
     504    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPendingDbgXcpt),
     505    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestSysenterEsp),
     506    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestSysenterEip),
     507    SSMFIELD_ENTRY(       VMXVVMCS, au64Reserved6),
     508
     509    SSMFIELD_ENTRY(       VMXVVMCS, u64HostCr0),
     510    SSMFIELD_ENTRY(       VMXVVMCS, u64HostCr3),
     511    SSMFIELD_ENTRY(       VMXVVMCS, u64HostCr4),
     512    SSMFIELD_ENTRY(       VMXVVMCS, u64HostFsBase),
     513    SSMFIELD_ENTRY(       VMXVVMCS, u64HostGsBase),
     514    SSMFIELD_ENTRY(       VMXVVMCS, u64HostTrBase),
     515    SSMFIELD_ENTRY(       VMXVVMCS, u64HostGdtrBase),
     516    SSMFIELD_ENTRY(       VMXVVMCS, u64HostIdtrBase),
     517    SSMFIELD_ENTRY(       VMXVVMCS, u64HostSysenterEsp),
     518    SSMFIELD_ENTRY(       VMXVVMCS, u64HostSysenterEip),
     519    SSMFIELD_ENTRY(       VMXVVMCS, u64HostRsp),
     520    SSMFIELD_ENTRY(       VMXVVMCS, u64HostRip),
     521    SSMFIELD_ENTRY(       VMXVVMCS, au64Reserved7),
     522    SSMFIELD_ENTRY_TERM()
     523};
     524
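The table above is a positional field map for the virtual VMCS: SSMFIELD_ENTRY members are serialized, SSMFIELD_ENTRY_IGNORE members are skipped, and SSMFIELD_ENTRY_TERM() ends the walk. A minimal sketch of how such a table is consumed, matching the SSMR3PutStructEx call shape used in the save path later in this changeset (the wrapper name is illustrative, not from the source):

    /* Sketch only: serialize a virtual VMCS via the descriptor table above. */
    static int vmxSaveVirtVmcs(PSSMHANDLE pSSM, PVMXVVMCS pVmcs) /* illustrative name */
    {
        return SSMR3PutStructEx(pSSM, pVmcs, sizeof(VMXVVMCS), 0 /* fFlags */,
                                g_aVmxHwvirtVmcs, NULL /* pvUser */);
    }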
    328525/** Saved state field descriptors for CPUMCTX. */
    329526static const SSMFIELD g_aCpumX87Fields[] =
     
    9281125    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    9291126    {
    930         PVMCPU pVCpu = &pVM->aCpus[i];
    931         if (pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3)
    932         {
    933             SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3, VMX_V_VMCS_PAGES);
    934             pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3 = NULL;
    935         }
    936         if (pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3)
    937         {
    938             SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3, VMX_V_VMCS_PAGES);
    939             pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3 = NULL;
    940         }
    941         if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3)
    942         {
    943             SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3, VMX_V_VIRT_APIC_PAGES);
    944             pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3 = NULL;
    945         }
    946         if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3)
    947         {
    948             SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES);
    949             pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3 = NULL;
    950         }
    951         if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3)
    952         {
    953             SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES);
    954             pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3 = NULL;
    955         }
    956         if (pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3)
    957         {
    958             SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
    959             pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3 = NULL;
    960         }
    961         if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3)
    962         {
    963             SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_PAGES);
    964             pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3 = NULL;
    965         }
    966         if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3)
    967         {
    968             SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES);
    969             pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3 = NULL;
     1127        PVMCPU   pVCpu = &pVM->aCpus[i];
     1128        PCPUMCTX pCtx  = &pVCpu->cpum.s.Guest;
     1129
     1130        if (pCtx->hwvirt.vmx.pVmcsR3)
     1131        {
     1132            SUPR3ContFree(pCtx->hwvirt.vmx.pVmcsR3, VMX_V_VMCS_PAGES);
     1133            pCtx->hwvirt.vmx.pVmcsR3 = NULL;
     1134        }
     1135        if (pCtx->hwvirt.vmx.pShadowVmcsR3)
     1136        {
     1137            SUPR3ContFree(pCtx->hwvirt.vmx.pShadowVmcsR3, VMX_V_VMCS_PAGES);
     1138            pCtx->hwvirt.vmx.pShadowVmcsR3 = NULL;
     1139        }
     1140        if (pCtx->hwvirt.vmx.pvVmreadBitmapR3)
     1141        {
     1142            SUPR3ContFree(pCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES);
     1143            pCtx->hwvirt.vmx.pvVmreadBitmapR3 = NULL;
     1144        }
     1145        if (pCtx->hwvirt.vmx.pvVmwriteBitmapR3)
     1146        {
     1147            SUPR3ContFree(pCtx->hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES);
     1148            pCtx->hwvirt.vmx.pvVmwriteBitmapR3 = NULL;
     1149        }
     1150        if (pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3)
     1151        {
     1152            SUPR3ContFree(pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
     1153            pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3 = NULL;
     1154        }
     1155        if (pCtx->hwvirt.vmx.pExitMsrStoreAreaR3)
     1156        {
     1157            SUPR3ContFree(pCtx->hwvirt.vmx.pExitMsrStoreAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
     1158            pCtx->hwvirt.vmx.pExitMsrStoreAreaR3 = NULL;
     1159        }
     1160        if (pCtx->hwvirt.vmx.pExitMsrLoadAreaR3)
     1161        {
     1162            SUPR3ContFree(pCtx->hwvirt.vmx.pExitMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
     1163            pCtx->hwvirt.vmx.pExitMsrLoadAreaR3 = NULL;
     1164        }
     1165        if (pCtx->hwvirt.vmx.pvMsrBitmapR3)
     1166        {
     1167            SUPR3ContFree(pCtx->hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_PAGES);
     1168            pCtx->hwvirt.vmx.pvMsrBitmapR3 = NULL;
     1169        }
     1170        if (pCtx->hwvirt.vmx.pvIoBitmapR3)
     1171        {
     1172            SUPR3ContFree(pCtx->hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES);
     1173            pCtx->hwvirt.vmx.pvIoBitmapR3 = NULL;
    9701174        }
    9711175    }
     
    9821186{
    9831187    int rc = VINF_SUCCESS;
    984     LogRel(("CPUM: Allocating %u pages for the nested-guest VMCS and related structures\n",
    985             pVM->cCpus * (  VMX_V_VMCS_PAGES + VMX_V_VIRT_APIC_PAGES + VMX_V_VMREAD_VMWRITE_BITMAP_PAGES * 2
    986                           + VMX_V_AUTOMSR_AREA_PAGES)));
      1188    uint32_t const cPages = (2 * VMX_V_VMCS_PAGES)
      1189                          + VMX_V_VIRT_APIC_PAGES
      1190                          + (2 * VMX_V_VMREAD_VMWRITE_BITMAP_PAGES)
      1191                          + (3 * VMX_V_AUTOMSR_AREA_PAGES)
      1192                          + VMX_V_MSR_BITMAP_PAGES
      1193                          + (VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES);
     1194    LogRel(("CPUM: Allocating %u pages for the nested-guest VMCS and related structures\n", pVM->cCpus * cPages));
    9871195    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    9881196    {
    989         PVMCPU pVCpu = &pVM->aCpus[i];
    990         pVCpu->cpum.s.Guest.hwvirt.enmHwvirt = CPUMHWVIRT_VMX;
     1197        PVMCPU   pVCpu = &pVM->aCpus[i];
     1198        PCPUMCTX pCtx  = &pVCpu->cpum.s.Guest;
     1199        pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_VMX;
    9911200
    9921201        /*
     
    9941203         */
    9951204        Assert(VMX_V_VMCS_PAGES == 1);
    996         Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3);
    997         rc = SUPR3PageAllocEx(VMX_V_VMCS_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3,
    998                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR0, NULL /* paPages */);
    999         if (RT_FAILURE(rc))
    1000         {
    1001             Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3);
     1205        pCtx->hwvirt.vmx.pVmcsR3 = (PVMXVVMCS)SUPR3ContAlloc(VMX_V_VMCS_PAGES,
     1206                                                             &pCtx->hwvirt.vmx.pVmcsR0,
     1207                                                             &pCtx->hwvirt.vmx.HCPhysVmcs);
     1208        if (pCtx->hwvirt.vmx.pVmcsR3)
     1209        { /* likely */ }
     1210        else
     1211        {
    10021212            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMCS\n", pVCpu->idCpu, VMX_V_VMCS_PAGES));
    10031213            break;
     
    10081218         */
    10091219        Assert(VMX_V_VMCS_PAGES == 1);
    1010         Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3);
    1011         rc = SUPR3PageAllocEx(VMX_V_VMCS_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3,
    1012                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR0, NULL /* paPages */);
    1013         if (RT_FAILURE(rc))
    1014         {
    1015             Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3);
     1220        pCtx->hwvirt.vmx.pShadowVmcsR3 = (PVMXVVMCS)SUPR3ContAlloc(VMX_V_VMCS_PAGES,
     1221                                                                   &pCtx->hwvirt.vmx.pShadowVmcsR0,
     1222                                                                   &pCtx->hwvirt.vmx.HCPhysShadowVmcs);
     1223        if (pCtx->hwvirt.vmx.pShadowVmcsR3)
     1224        { /* likely */ }
     1225        else
     1226        {
    10161227            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's shadow VMCS\n", pVCpu->idCpu, VMX_V_VMCS_PAGES));
    1017             break;
    1018         }
    1019 
    1020         /*
    1021          * Allocate the Virtual-APIC page.
    1022          */
    1023         Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3);
    1024         rc = SUPR3PageAllocEx(VMX_V_VIRT_APIC_PAGES, 0 /* fFlags */, &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3,
    1025                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR0, NULL /* paPages */);
    1026         if (RT_FAILURE(rc))
    1027         {
    1028             Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3);
    1029             LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's Virtual-APIC page\n", pVCpu->idCpu,
    1030                     VMX_V_VIRT_APIC_PAGES));
    10311228            break;
    10321229        }
     
    10351232         * Allocate the VMREAD-bitmap.
    10361233         */
    1037         Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3);
    1038         rc = SUPR3PageAllocEx(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES, 0 /* fFlags */, &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3,
    1039                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR0, NULL /* paPages */);
    1040         if (RT_FAILURE(rc))
    1041         {
    1042             Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3);
     1234        pCtx->hwvirt.vmx.pvVmreadBitmapR3 = SUPR3ContAlloc(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES,
     1235                                                           &pCtx->hwvirt.vmx.pvVmreadBitmapR0,
     1236                                                           &pCtx->hwvirt.vmx.HCPhysVmreadBitmap);
     1237        if (pCtx->hwvirt.vmx.pvVmreadBitmapR3)
     1238        { /* likely */ }
     1239        else
     1240        {
    10431241            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMREAD-bitmap\n", pVCpu->idCpu,
    10441242                    VMX_V_VMREAD_VMWRITE_BITMAP_PAGES));
     
     10491247         * Allocate the VMWRITE-bitmap.
    10501248         */
    1051         Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3);
    1052         rc = SUPR3PageAllocEx(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES, 0 /* fFlags */,
    1053                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3,
    1054                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR0, NULL /* paPages */);
    1055         if (RT_FAILURE(rc))
    1056         {
    1057             Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3);
     1249        pCtx->hwvirt.vmx.pvVmwriteBitmapR3 = SUPR3ContAlloc(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES,
     1250                                                            &pCtx->hwvirt.vmx.pvVmwriteBitmapR0,
     1251                                                            &pCtx->hwvirt.vmx.HCPhysVmwriteBitmap);
     1252        if (pCtx->hwvirt.vmx.pvVmwriteBitmapR3)
     1253        { /* likely */ }
     1254        else
     1255        {
    10581256            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMWRITE-bitmap\n", pVCpu->idCpu,
    10591257                    VMX_V_VMREAD_VMWRITE_BITMAP_PAGES));
     
    10621260
    10631261        /*
    1064          * Allocate the MSR auto-load/store area.
     1262         * Allocate the VM-entry MSR-load area.
    10651263         */
    1066         Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3);
    1067         rc = SUPR3PageAllocEx(VMX_V_AUTOMSR_AREA_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3,
    1068                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR0, NULL /* paPages */);
    1069         if (RT_FAILURE(rc))
    1070         {
    1071             Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3);
    1072             LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's auto-load/store MSR area\n", pVCpu->idCpu,
     1264        pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3 = (PVMXAUTOMSR)SUPR3ContAlloc(VMX_V_AUTOMSR_AREA_PAGES,
     1265                                                                           &pCtx->hwvirt.vmx.pEntryMsrLoadAreaR0,
     1266                                                                           &pCtx->hwvirt.vmx.HCPhysEntryMsrLoadArea);
     1267        if (pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3)
     1268        { /* likely */ }
     1269        else
     1270        {
     1271            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VM-entry MSR-load area\n", pVCpu->idCpu,
     1272                    VMX_V_AUTOMSR_AREA_PAGES));
     1273            break;
     1274        }
     1275
     1276        /*
     1277         * Allocate the VM-exit MSR-store area.
     1278         */
     1279        pCtx->hwvirt.vmx.pExitMsrStoreAreaR3 = (PVMXAUTOMSR)SUPR3ContAlloc(VMX_V_AUTOMSR_AREA_PAGES,
     1280                                                                           &pCtx->hwvirt.vmx.pExitMsrStoreAreaR0,
     1281                                                                           &pCtx->hwvirt.vmx.HCPhysExitMsrStoreArea);
     1282        if (pCtx->hwvirt.vmx.pExitMsrStoreAreaR3)
     1283        { /* likely */ }
     1284        else
     1285        {
     1286            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VM-exit MSR-store area\n", pVCpu->idCpu,
     1287                    VMX_V_AUTOMSR_AREA_PAGES));
     1288            break;
     1289        }
     1290
     1291        /*
     1292         * Allocate the VM-exit MSR-load area.
     1293         */
     1294        pCtx->hwvirt.vmx.pExitMsrLoadAreaR3 = (PVMXAUTOMSR)SUPR3ContAlloc(VMX_V_AUTOMSR_AREA_PAGES,
     1295                                                                          &pCtx->hwvirt.vmx.pExitMsrLoadAreaR0,
     1296                                                                          &pCtx->hwvirt.vmx.HCPhysExitMsrLoadArea);
     1297        if (pCtx->hwvirt.vmx.pExitMsrLoadAreaR3)
     1298        { /* likely */ }
     1299        else
     1300        {
     1301            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VM-exit MSR-load area\n", pVCpu->idCpu,
    10731302                    VMX_V_AUTOMSR_AREA_PAGES));
    10741303            break;
     
    10781307         * Allocate the MSR bitmap.
    10791308         */
    1080         Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3);
    1081         rc = SUPR3PageAllocEx(VMX_V_MSR_BITMAP_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3,
    1082                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR0, NULL /* paPages */);
    1083         if (RT_FAILURE(rc))
    1084         {
    1085             Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3);
     1309        pCtx->hwvirt.vmx.pvMsrBitmapR3 = SUPR3ContAlloc(VMX_V_MSR_BITMAP_PAGES,
     1310                                                        &pCtx->hwvirt.vmx.pvMsrBitmapR0,
     1311                                                        &pCtx->hwvirt.vmx.HCPhysMsrBitmap);
     1312        if (pCtx->hwvirt.vmx.pvMsrBitmapR3)
     1313        { /* likely */ }
     1314        else
     1315        {
    10861316            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's MSR bitmap\n", pVCpu->idCpu,
    10871317                    VMX_V_MSR_BITMAP_PAGES));
     
    10921322         * Allocate the I/O bitmaps (A and B).
    10931323         */
    1094         Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3);
    1095         rc = SUPR3PageAllocEx(VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES, 0 /* fFlags */,
    1096                               (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3,
    1097                               &pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR0, NULL /* paPages */);
    1098         if (RT_FAILURE(rc))
    1099         {
    1100             Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3);
     1324        pCtx->hwvirt.vmx.pvIoBitmapR3 = SUPR3ContAlloc(VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES,
     1325                                                       &pCtx->hwvirt.vmx.pvIoBitmapR0,
     1326                                                       &pCtx->hwvirt.vmx.HCPhysIoBitmap);
     1327        if (pCtx->hwvirt.vmx.pvIoBitmapR3)
     1328        { /* likely */ }
     1329        else
     1330        {
    11011331            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's I/O bitmaps\n", pVCpu->idCpu,
    11021332                    VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES));
    11031333            break;
    11041334        }
     1335
     1336        /*
     1337         * Zero out all allocated pages (should compress well for saved-state).
     1338         */
     1339        memset(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs),               0, VMX_V_VMCS_SIZE);
     1340        memset(pCtx->hwvirt.vmx.CTX_SUFF(pShadowVmcs),         0, VMX_V_VMCS_SIZE);
     1341        memset(pCtx->hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),      0, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     1342        memset(pCtx->hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),     0, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     1343        memset(pCtx->hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea),   0, VMX_V_AUTOMSR_AREA_SIZE);
     1344        memset(pCtx->hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea),   0, VMX_V_AUTOMSR_AREA_SIZE);
     1345        memset(pCtx->hwvirt.vmx.CTX_SUFF(pExitMsrLoadArea),    0, VMX_V_AUTOMSR_AREA_SIZE);
     1346        memset(pCtx->hwvirt.vmx.CTX_SUFF(pvMsrBitmap),         0, VMX_V_MSR_BITMAP_SIZE);
     1347        memset(pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap),          0, VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE);
    11051348    }
    11061349
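The substance of the free and alloc hunks above is the allocator switch: SUPR3PageAllocEx/SUPR3PageFreeEx are replaced by SUPR3ContAlloc/SUPR3ContFree, which also hand back the host-physical address of the allocation alongside the ring-3 and ring-0 mappings. Hardware-assisted nested VT-x needs those HCPhys values to feed physical addresses to the CPU. A minimal sketch of the pattern, using the call shapes visible above:

    /* Sketch only: contiguous page allocation yielding all three addresses. */
    RTR0PTR  R0Ptr;
    RTHCPHYS HCPhys;
    void    *pvR3 = SUPR3ContAlloc(VMX_V_VMCS_PAGES, &R0Ptr, &HCPhys);
    if (pvR3)
    {
        /* ... hand HCPhys to the hardware, use pvR3/R0Ptr from software ... */
        SUPR3ContFree(pvR3, VMX_V_VMCS_PAGES);
    }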
     
    14541697
    14551698
    1456 #if 0
    14571699/**
    14581700 * Checks whether the given guest CPU VMX features are compatible with the provided
     
    14681710static bool cpumR3AreVmxCpuFeaturesCompatible(PVM pVM, PCCPUMFEATURES pBase, PCCPUMFEATURES pGst)
    14691711{
    1470     if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM))
     1712    if (cpumR3IsHwAssistNstGstExecAllowed(pVM))
    14711713    {
    14721714        uint64_t const fBase = ((uint64_t)pBase->fVmxInsOutInfo         <<  0) | ((uint64_t)pBase->fVmxExtIntExit         <<  1)
     
    15371779
    15381780        if ((fBase | fGst) != fBase)
     1781        {
     1782            LogRel(("CPUM: Host VMX features are incompatible with those from the saved state. fBase=%#RX64 fGst=%#RX64\n",
     1783                    fBase, fGst));
    15391784            return false;
     1785        }
    15401786        return true;
    15411787    }
    15421788    return true;
    15431789}
    1544 #endif
    15451790
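The re-enabled compatibility check boils down to a bitmask subset test: the VMX features exploded from the saved state (fGst) must not set any bit absent from the host-derived baseline (fBase). A small worked illustration (the values are invented for the example):

    uint64_t const fBase   = UINT64_C(0x07);          /* baseline: bits 0,1,2 set  */
    uint64_t const fGst    = UINT64_C(0x05);          /* saved state: bits 0,2 set */
    bool     const fCompat = (fBase | fGst) == fBase; /* true: fGst is a subset    */
    /* fGst = 0x0d would give fBase | fGst = 0x0f != 0x07, i.e. incompatible. */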
    15461791
     
    23362581            SSMR3PutBool(pSSM,   pGstCtx->hwvirt.fGif);
    23372582        }
     2583        if (pVM->cpum.s.GuestFeatures.fVmx)
     2584        {
     2585            Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
     2586            SSMR3PutGCPhys(pSSM,   pGstCtx->hwvirt.vmx.GCPhysVmxon);
     2587            SSMR3PutGCPhys(pSSM,   pGstCtx->hwvirt.vmx.GCPhysVmcs);
     2588            SSMR3PutGCPhys(pSSM,   pGstCtx->hwvirt.vmx.GCPhysShadowVmcs);
     2589            SSMR3PutU32(pSSM,      (uint32_t)pGstCtx->hwvirt.vmx.enmDiag);
     2590            SSMR3PutU32(pSSM,      (uint32_t)pGstCtx->hwvirt.vmx.enmAbort);
     2591            SSMR3PutU32(pSSM,      pGstCtx->hwvirt.vmx.uAbortAux);
     2592            SSMR3PutBool(pSSM,     pGstCtx->hwvirt.vmx.fInVmxRootMode);
     2593            SSMR3PutBool(pSSM,     pGstCtx->hwvirt.vmx.fInVmxNonRootMode);
     2594            SSMR3PutBool(pSSM,     pGstCtx->hwvirt.vmx.fInterceptEvents);
     2595            SSMR3PutBool(pSSM,     pGstCtx->hwvirt.vmx.fNmiUnblockingIret);
     2596            SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
     2597            SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
     2598            SSMR3PutMem(pSSM,      pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     2599            SSMR3PutMem(pSSM,      pGstCtx->hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     2600            SSMR3PutMem(pSSM,      pGstCtx->hwvirt.vmx.pEntryMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
     2601            SSMR3PutMem(pSSM,      pGstCtx->hwvirt.vmx.pExitMsrStoreAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
     2602            SSMR3PutMem(pSSM,      pGstCtx->hwvirt.vmx.pExitMsrLoadAreaR3,  VMX_V_AUTOMSR_AREA_SIZE);
     2603            SSMR3PutMem(pSSM,      pGstCtx->hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_SIZE);
     2604            SSMR3PutMem(pSSM,      pGstCtx->hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE);
     2605            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.uFirstPauseLoopTick);
     2606            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.uPrevPauseTick);
     2607            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.uEntryTick);
     2608            SSMR3PutU16(pSSM,      pGstCtx->hwvirt.vmx.offVirtApicWrite);
     2609            SSMR3PutBool(pSSM,     pGstCtx->hwvirt.vmx.fVirtNmiBlocking);
     2610            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64FeatCtrl);
     2611            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64Basic);
     2612            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.PinCtls.u);
     2613            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.ProcCtls.u);
     2614            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.ProcCtls2.u);
     2615            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.ExitCtls.u);
     2616            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.EntryCtls.u);
     2617            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.TruePinCtls.u);
     2618            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.TrueProcCtls.u);
     2619            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.TrueEntryCtls.u);
     2620            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.TrueExitCtls.u);
     2621            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64Misc);
     2622            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed0);
     2623            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed1);
     2624            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed0);
     2625            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed1);
     2626            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64VmcsEnum);
     2627            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64VmFunc);
     2628            SSMR3PutU64(pSSM,      pGstCtx->hwvirt.vmx.Msrs.u64EptVpidCaps);
     2629        }
    23382630        SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
    23392631        SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
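Every SSMR3Put* added in this block is mirrored, in the same order and width, by an SSMR3Get* in the load path further down; the saved-state stream is positional within a unit, so any asymmetry corrupts every field that follows. A minimal sketch of the required pairing:

    /* Save side ... */
    SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uEntryTick);
    /* ... and the load side, which must match it field for field. */
    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uEntryTick);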
     
    23682660     * Validate version.
    23692661     */
    2370     if (    uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_SVM
     2662    if (    uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM
     2663        &&  uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_SVM
    23712664        &&  uVersion != CPUM_SAVED_STATE_VERSION_XSAVE
    23722665        &&  uVersion != CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT
     
    25772870                    }
    25782871                }
    2579                 /** @todo NSTVMX: Load VMX state. */
     2872                if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM)
     2873                {
     2874                    if (pVM->cpum.s.GuestFeatures.fVmx)
     2875                    {
     2876                        Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
     2877                        SSMR3GetGCPhys(pSSM,   &pGstCtx->hwvirt.vmx.GCPhysVmxon);
     2878                        SSMR3GetGCPhys(pSSM,   &pGstCtx->hwvirt.vmx.GCPhysVmcs);
     2879                        SSMR3GetGCPhys(pSSM,   &pGstCtx->hwvirt.vmx.GCPhysShadowVmcs);
     2880                        SSMR3GetU32(pSSM,       (uint32_t *)&pGstCtx->hwvirt.vmx.enmDiag);
     2881                        SSMR3GetU32(pSSM,       (uint32_t *)&pGstCtx->hwvirt.vmx.enmAbort);
     2882                        SSMR3GetU32(pSSM,      &pGstCtx->hwvirt.vmx.uAbortAux);
     2883                        SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.vmx.fInVmxRootMode);
     2884                        SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.vmx.fInVmxNonRootMode);
     2885                        SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.vmx.fInterceptEvents);
     2886                        SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.vmx.fNmiUnblockingIret);
     2887                        SSMR3GetStructEx(pSSM,  pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
     2888                        SSMR3GetStructEx(pSSM,  pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
     2889                        SSMR3GetMem(pSSM,       pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     2890                        SSMR3GetMem(pSSM,       pGstCtx->hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     2891                        SSMR3GetMem(pSSM,       pGstCtx->hwvirt.vmx.pEntryMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
     2892                        SSMR3GetMem(pSSM,       pGstCtx->hwvirt.vmx.pExitMsrStoreAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
     2893                        SSMR3GetMem(pSSM,       pGstCtx->hwvirt.vmx.pExitMsrLoadAreaR3,  VMX_V_AUTOMSR_AREA_SIZE);
     2894                        SSMR3GetMem(pSSM,       pGstCtx->hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_SIZE);
     2895                        SSMR3GetMem(pSSM,       pGstCtx->hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE);
     2896                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.uFirstPauseLoopTick);
     2897                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.uPrevPauseTick);
     2898                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.uEntryTick);
     2899                        SSMR3GetU16(pSSM,      &pGstCtx->hwvirt.vmx.offVirtApicWrite);
     2900                        SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.vmx.fVirtNmiBlocking);
     2901                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64FeatCtrl);
     2902                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64Basic);
     2903                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.PinCtls.u);
     2904                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.ProcCtls.u);
     2905                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.ProcCtls2.u);
     2906                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.ExitCtls.u);
     2907                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.EntryCtls.u);
     2908                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.TruePinCtls.u);
     2909                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.TrueProcCtls.u);
     2910                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.TrueEntryCtls.u);
     2911                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.TrueExitCtls.u);
     2912                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64Misc);
     2913                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed0);
     2914                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed1);
     2915                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed0);
     2916                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed1);
     2917                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64VmcsEnum);
     2918                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64VmFunc);
     2919                        SSMR3GetU64(pSSM,      &pGstCtx->hwvirt.vmx.Msrs.u64EptVpidCaps);
     2920                    }
     2921                }
    25802922            }
    25812923            else
     
    26783020
    26793021    /*
    2680      * Guest CPUIDs.
     3022     * Guest CPUIDs (and VMX MSR features).
    26813023     */
    26823024    if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2)
     
    26843026        CPUMMSRS GuestMsrs;
    26853027        RT_ZERO(GuestMsrs);
    2686         if (pVM->cpum.s.GuestFeatures.fVmx)
     3028
     3029        CPUMFEATURES BaseFeatures;
     3030        bool const fVmxGstFeat = pVM->cpum.s.GuestFeatures.fVmx;
     3031        if (fVmxGstFeat)
     3032        {
     3033            /*
     3034             * At this point the MSRs in the guest CPU-context are loaded with the guest VMX MSRs from the saved state.
      3035             * However, the VMX sub-features have not been exploded yet, so cache the base (host-derived) VMX features
     3036             * here so we can compare them for compatibility after exploding guest features.
     3037             */
     3038            BaseFeatures = pVM->cpum.s.GuestFeatures;
     3039
     3040            /* Use the VMX MSR features from the saved state while exploding guest features. */
    26873041            GuestMsrs.hwvirt.vmx = pVM->aCpus[0].cpum.s.Guest.hwvirt.vmx.Msrs;
    2688         return cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
     3042        }
     3043
     3044        /* Load CPUID and explode guest features. */
     3045        rc = cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
     3046        if (fVmxGstFeat)
     3047        {
     3048            /*
     3049             * Check if the exploded VMX features from the saved state are compatible with the host-derived features
      3050             * we cached earlier (above). This is required when using hardware-assisted nested-guest execution with
     3051             * VMX features presented to the guest.
     3052             */
     3053            bool const fIsCompat = cpumR3AreVmxCpuFeaturesCompatible(pVM, &BaseFeatures, &pVM->cpum.s.GuestFeatures);
     3054            if (!fIsCompat)
     3055                return VERR_CPUM_INVALID_HWVIRT_FEAT_COMBO;
     3056        }
     3057        return rc;
    26893058    }
    26903059    return cpumR3LoadCpuIdPre32(pVM, pSSM, uVersion);
     
    37164085        pHlp->pfnPrintf(pHlp, "  uFirstPauseLoopTick        = %RX64\n",     pCtx->hwvirt.vmx.uFirstPauseLoopTick);
    37174086        pHlp->pfnPrintf(pHlp, "  uPrevPauseTick             = %RX64\n",     pCtx->hwvirt.vmx.uPrevPauseTick);
    3718         pHlp->pfnPrintf(pHlp, "  uVmentryTick               = %RX64\n",     pCtx->hwvirt.vmx.uVmentryTick);
     4087        pHlp->pfnPrintf(pHlp, "  uEntryTick                 = %RX64\n",     pCtx->hwvirt.vmx.uEntryTick);
    37194088        pHlp->pfnPrintf(pHlp, "  offVirtApicWrite           = %#RX16\n",    pCtx->hwvirt.vmx.offVirtApicWrite);
     4089        pHlp->pfnPrintf(pHlp, "  fVirtNmiBlocking           = %RTbool\n",   pCtx->hwvirt.vmx.fVirtNmiBlocking);
    37204090        pHlp->pfnPrintf(pHlp, "  VMCS cache:\n");
    37214091        cpumR3InfoVmxVmcs(pHlp, pCtx->hwvirt.vmx.pVmcsR3, "  " /* pszPrefix */);
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r77611 r78220  
    21462146        {
    21472147            rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
    2148             Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
    2149             UPDATE_RC();
     2148            if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
     2149                UPDATE_RC();
    21502150        }
    21512151
     
    21692169        {
    21702170            rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
    2171             if (rc2 == VINF_VMX_INTERCEPT_NOT_ACTIVE)
    2172                 rc2 = VINF_SUCCESS;
    2173             UPDATE_RC();
     2171            if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
     2172                UPDATE_RC();
    21742173        }
    21752174
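Both EM.cpp hunks converge on the same guard: VINF_VMX_INTERCEPT_NOT_ACTIVE means no VM-exit actually occurred, so the code now leaves the aggregated status untouched instead of asserting the case away (APIC-write hunk) or rewriting it to VINF_SUCCESS before merging (preemption-timer hunk). The resulting shape:

    rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
    if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)   /* a real VM-exit (or error)  */
        UPDATE_RC();                            /* merge into the pending rc  */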
     
    21982197             * NMIs (take priority over external interrupts).
    21992198             */
    2200             Assert(!HMR3IsEventPending(pVCpu));
    22012199            if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
    22022200                && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r77591 r78220  
    734734        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF,            "/HM/CPU%d/Exit/Trap/Gst/#XF", "Guest #XF (extended math fault, SIMD FPU) exception.");
    735735        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk,        "/HM/CPU%d/Exit/Trap/Gst/Other", "Other guest exceptions.");
    736         HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt,                "/HM/CPU%d/Exit/Instr/Hlt", "HLT instruction.");
    737         HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr,              "/HM/CPU%d/Exit/Instr/Rdmsr", "RDMSR instruction.");
    738         HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr,              "/HM/CPU%d/Exit/Instr/Wrmsr", "WRMSR instruction.");
    739         HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait,              "/HM/CPU%d/Exit/Instr/Mwait", "MWAIT instruction.");
    740         HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor,            "/HM/CPU%d/Exit/Instr/Monitor", "MONITOR instruction.");
     736        HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr,              "/HM/CPU%d/Exit/Instr/Rdmsr", "MSR read.");
     737        HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr,              "/HM/CPU%d/Exit/Instr/Wrmsr", "MSR write.");
    741738        HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite,           "/HM/CPU%d/Exit/Instr/DR-Write", "Debug register write.");
    742739        HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead,            "/HM/CPU%d/Exit/Instr/DR-Read", "Debug register read.");
     
    753750        HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts,               "/HM/CPU%d/Exit/Instr/CLTS", "CLTS instruction.");
    754751        HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw,               "/HM/CPU%d/Exit/Instr/LMSW", "LMSW instruction.");
    755         HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli,                "/HM/CPU%d/Exit/Instr/Cli", "CLI instruction.");
    756         HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti,                "/HM/CPU%d/Exit/Instr/Sti", "STI instruction.");
    757         HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf,              "/HM/CPU%d/Exit/Instr/Pushf", "PUSHF instruction.");
    758         HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf,               "/HM/CPU%d/Exit/Instr/Popf", "POPF instruction.");
    759         HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret,               "/HM/CPU%d/Exit/Instr/Iret", "IRET instruction.");
    760         HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt,                "/HM/CPU%d/Exit/Instr/Int", "INT instruction.");
    761752        HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess,         "/HM/CPU%d/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access.");
    762         HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite,            "/HM/CPU%d/Exit/IO/Write", "I/O write.");
    763         HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead,             "/HM/CPU%d/Exit/IO/Read", "I/O read.");
    764         HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite,      "/HM/CPU%d/Exit/IO/WriteString", "String I/O write.");
    765         HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead,       "/HM/CPU%d/Exit/IO/ReadString", "String I/O read.");
    766         HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow,          "/HM/CPU%d/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts again.");
     753        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite,            "/HM/CPU%d/Exit/Instr/IO/Write", "I/O write.");
     754        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead,             "/HM/CPU%d/Exit/Instr/IO/Read", "I/O read.");
     755        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite,      "/HM/CPU%d/Exit/Instr/IO/WriteString", "String I/O write.");
     756        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead,       "/HM/CPU%d/Exit/Instr/IO/ReadString", "String I/O read.");
     757        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow,          "/HM/CPU%d/Exit/IntWindow", "Interrupt-window exit. Guest is ready to receive interrupts.");
    767758        HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt,             "/HM/CPU%d/Exit/ExtInt", "Physical maskable interrupt (host).");
    768759#endif
     
    772763        HM_REG_COUNTER(&pVCpu->hm.s.StatExitTprBelowThreshold,  "/HM/CPU%d/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
    773764        HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch,         "/HM/CPU%d/Exit/TaskSwitch", "Task switch.");
    774         HM_REG_COUNTER(&pVCpu->hm.s.StatExitMtf,                "/HM/CPU%d/Exit/MonitorTrapFlag", "Monitor Trap Flag.");
    775765        HM_REG_COUNTER(&pVCpu->hm.s.StatExitApicAccess,         "/HM/CPU%d/Exit/ApicAccess", "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
    776766
     
    961951        PVMCPU pVCpu = &pVM->aCpus[i];
    962952
    963         PVMXVMCSBATCHCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsBatchCache;
     953        PVMXVMCSCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsCache;
    964954        strcpy((char *)pVmcsCache->aMagic, "VMCSCACHE Magic");
    965955        pVmcsCache->uMagic = UINT64_C(0xdeadbeefdeadbeef);
     
    14931483    LogRel(("HM: Max resume loops                  = %u\n", pVM->hm.s.cMaxResumeLoops));
    14941484    LogRel(("HM: Host CR4                          = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
    1495     LogRel(("HM: Host EFER                         = %#RX64\n", pVM->hm.s.vmx.u64HostEfer));
     1485    LogRel(("HM: Host EFER                         = %#RX64\n", pVM->hm.s.vmx.u64HostMsrEfer));
    14961486    LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL          = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl));
    14971487
     
    15271517    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    15281518    {
    1529         LogRel(("HM: VCPU%3d: MSR bitmap physaddr      = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
    1530         LogRel(("HM: VCPU%3d: VMCS physaddr            = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
    1531     }
     1519        PCVMXVMCSINFO pVmcsInfo = &pVM->aCpus[i].hm.s.vmx.VmcsInfo;
     1520        LogRel(("HM: VCPU%3d: MSR bitmap physaddr      = %#RHp\n", i, pVmcsInfo->HCPhysMsrBitmap));
     1521        LogRel(("HM: VCPU%3d: VMCS physaddr            = %#RHp\n", i, pVmcsInfo->HCPhysVmcs));
     1522    }
     1523#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     1524    if (pVM->cpum.ro.GuestFeatures.fVmx)
     1525    {
     1526        LogRel(("HM: Nested-guest:\n"));
     1527        for (VMCPUID i = 0; i < pVM->cCpus; i++)
     1528        {
     1529            PCVMXVMCSINFO pVmcsInfoNstGst = &pVM->aCpus[i].hm.s.vmx.VmcsInfoNstGst;
     1530            LogRel(("HM: VCPU%3d:   MSR bitmap physaddr    = %#RHp\n", i, pVmcsInfoNstGst->HCPhysMsrBitmap));
     1531            LogRel(("HM: VCPU%3d:   VMCS physaddr          = %#RHp\n", i, pVmcsInfoNstGst->HCPhysVmcs));
     1532        }
     1533    }
     1534#endif
    15321535
    15331536    /*
     
    16811684    else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
    16821685    {
    1683         if (pVM->hm.s.vmx.u64HostEfer & MSR_K6_EFER_NXE)
     1686        if (pVM->hm.s.vmx.u64HostMsrEfer & MSR_K6_EFER_NXE)
    16841687            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    16851688        else
     
    19741977            pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
    19751978        }
     1979# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
     1980        if (pVCpu->hm.s.paStatNestedExitReason)
     1981        {
     1982            MMHyperFree(pVM, pVCpu->hm.s.paStatNestedExitReason);
     1983            pVCpu->hm.s.paStatNestedExitReason   = NULL;
     1984            pVCpu->hm.s.paStatNestedExitReasonR0 = NIL_RTR0PTR;
     1985        }
     1986# endif
    19761987#endif
    19771988
    19781989#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    1979         memset(pVCpu->hm.s.vmx.VmcsBatchCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VmcsBatchCache.aMagic));
    1980         pVCpu->hm.s.vmx.VmcsBatchCache.uMagic = 0;
    1981         pVCpu->hm.s.vmx.VmcsBatchCache.uPos = 0xffffffff;
     1990        memset(pVCpu->hm.s.vmx.VmcsCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VmcsCache.aMagic));
     1991        pVCpu->hm.s.vmx.VmcsCache.uMagic = 0;
     1992        pVCpu->hm.s.vmx.VmcsCache.uPos = 0xffffffff;
    19821993#endif
    19831994    }
     
    19952006VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
    19962007{
    1997     /* Sync. entire state on VM reset R0-reentry. It's safe to reset
     2008    /* Sync. entire state on VM reset ring-0 re-entry. It's safe to reset
    19982009       the HM flags here, all other EMTs are in ring-3. See VMR3Reset(). */
    19992010    pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
    20002011
    2001     pVCpu->hm.s.fActive               = false;
    2002     pVCpu->hm.s.Event.fPending        = false;
    2003     pVCpu->hm.s.vmx.fWasInRealMode    = true;
    2004     pVCpu->hm.s.vmx.u64MsrApicBase    = 0;
    2005     pVCpu->hm.s.vmx.fSwitchedTo64on32 = false;
     2012    pVCpu->hm.s.fActive                        = false;
     2013    pVCpu->hm.s.Event.fPending                 = false;
     2014    pVCpu->hm.s.vmx.u64GstMsrApicBase          = 0;
     2015    pVCpu->hm.s.vmx.VmcsInfo.fSwitchedTo64on32 = false;
     2016    pVCpu->hm.s.vmx.VmcsInfo.fWasInRealMode    = true;
     2017#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     2018    if (pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmx)
     2019    {
     2020        pVCpu->hm.s.vmx.VmcsInfoNstGst.fSwitchedTo64on32 = false;
     2021        pVCpu->hm.s.vmx.VmcsInfoNstGst.fWasInRealMode    = true;
     2022    }
     2023#endif
    20062024
    20072025    /* Reset the contents of the read cache. */
    2008     PVMXVMCSBATCHCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsBatchCache;
     2026    PVMXVMCSCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsCache;
    20092027    for (unsigned j = 0; j < pVmcsCache->Read.cValidEntries; j++)
    20102028        pVmcsCache->Read.aFieldVal[j] = 0;
     
    28472865
    28482866/**
    2849  * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
    2850  *
    2851  * @returns true if an internal event is pending, otherwise false.
    2852  * @param   pVCpu       The cross context virtual CPU structure.
    2853  */
    2854 VMMR3_INT_DECL(bool) HMR3IsEventPending(PVMCPU pVCpu)
    2855 {
    2856     return HMIsEnabled(pVCpu->pVMR3)
    2857         && pVCpu->hm.s.Event.fPending;
    2858 }
    2859 
    2860 
    2861 /**
    28622867 * Checks if the VMX-preemption timer is being used.
    28632868 *
     
    28842889    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    28852890    {
    2886         PVMCPU pVCpu = &pVM->aCpus[i];
     2891        PVMCPU        pVCpu = &pVM->aCpus[i];
     2892        PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
     2893        bool const    fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs;
    28872894        switch (iStatusCode)
    28882895        {
     
    28932900
    28942901            case VERR_VMX_INVALID_VMCS_PTR:
     2902            {
    28952903                LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
    2896                 LogRel(("HM: CPU[%u] Current pointer      %#RGp vs %#RGp\n", i, pVCpu->hm.s.vmx.LastError.u64VmcsPhys,
    2897                                                                                 pVCpu->hm.s.vmx.HCPhysVmcs));
     2904                LogRel(("HM: CPU[%u] %s VMCS active\n", i, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
     2905                LogRel(("HM: CPU[%u] Current pointer      %#RHp vs %#RHp\n", i, pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs,
     2906                                                                                pVmcsInfo->HCPhysVmcs));
    28982907                LogRel(("HM: CPU[%u] Current VMCS version %#x\n", i, pVCpu->hm.s.vmx.LastError.u32VmcsRev));
    28992908                LogRel(("HM: CPU[%u] Entered Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
    29002909                LogRel(("HM: CPU[%u] Current Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
    29012910                break;
     2911            }
    29022912
    29032913            case VERR_VMX_UNABLE_TO_START_VM:
    29042914                LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
     2915                LogRel(("HM: CPU[%u] %s VMCS active\n", i, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
    29052916                LogRel(("HM: CPU[%u] Instruction error    %#x\n", i, pVCpu->hm.s.vmx.LastError.u32InstrError));
    29062917                LogRel(("HM: CPU[%u] Exit reason          %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));
     
    29142925                else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
    29152926                {
    2916                     LogRel(("HM: CPU[%u] PinCtls          %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32PinCtls));
     2927                    LogRel(("HM: CPU[%u] PinCtls              %#RX32\n", i, pVmcsInfo->u32PinCtls));
    29172928                    {
    2918                         uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32PinCtls;
     2929                        uint32_t const u32Val = pVmcsInfo->u32PinCtls;
    29192930                        HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT );
    29202931                        HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT     );
     
    29232934                        HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT   );
    29242935                    }
    2925                     LogRel(("HM: CPU[%u] ProcCtls         %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ProcCtls));
     2936                    LogRel(("HM: CPU[%u] ProcCtls             %#RX32\n", i, pVmcsInfo->u32ProcCtls));
    29262937                    {
    2927                         uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
     2938                        uint32_t const u32Val = pVmcsInfo->u32ProcCtls;
    29282939                        HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT   );
    29292940                        HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING);
     
    29482959                        HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS);
    29492960                    }
    2950                     LogRel(("HM: CPU[%u] ProcCtls2        %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ProcCtls2));
     2961                    LogRel(("HM: CPU[%u] ProcCtls2            %#RX32\n", i, pVmcsInfo->u32ProcCtls2));
    29512962                    {
    2952                         uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32ProcCtls2;
     2963                        uint32_t const u32Val = pVmcsInfo->u32ProcCtls2;
    29532964                        HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS  );
    29542965                        HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT               );
     
    29742985                        HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING       );
    29752986                    }
    2976                     LogRel(("HM: CPU[%u] EntryCtls        %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32EntryCtls));
     2987                    LogRel(("HM: CPU[%u] EntryCtls            %#RX32\n", i, pVmcsInfo->u32EntryCtls));
    29772988                    {
    2978                         uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32EntryCtls;
     2989                        uint32_t const u32Val = pVmcsInfo->u32EntryCtls;
    29792990                        HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG         );
    29802991                        HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST   );
     
    29852996                        HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR      );
    29862997                    }
    2987                     LogRel(("HM: CPU[%u] ExitCtls         %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ExitCtls));
     2998                    LogRel(("HM: CPU[%u] ExitCtls             %#RX32\n", i, pVmcsInfo->u32ExitCtls));
    29882999                    {
    2989                         uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32ExitCtls;
     3000                        uint32_t const u32Val = pVmcsInfo->u32ExitCtls;
    29903001                        HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG            );
    29913002                        HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE  );
     
    29983009                        HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER    );
    29993010                    }
    3000                     LogRel(("HM: CPU[%u] HCPhysMsrBitmap  %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysMsrBitmap));
    3001                     LogRel(("HM: CPU[%u] HCPhysGuestMsr   %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysGuestMsr));
    3002                     LogRel(("HM: CPU[%u] HCPhysHostMsr    %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysHostMsr));
    3003                     LogRel(("HM: CPU[%u] cMsrs            %u\n",     i, pVCpu->hm.s.vmx.cMsrs));
     3011                    LogRel(("HM: CPU[%u] HCPhysMsrBitmap      %#RHp\n",  i, pVmcsInfo->HCPhysMsrBitmap));
     3012                    LogRel(("HM: CPU[%u] HCPhysGuestMsrLoad   %#RHp\n",  i, pVmcsInfo->HCPhysGuestMsrLoad));
     3013                    LogRel(("HM: CPU[%u] HCPhysGuestMsrStore  %#RHp\n",  i, pVmcsInfo->HCPhysGuestMsrStore));
     3014                    LogRel(("HM: CPU[%u] HCPhysHostMsrLoad    %#RHp\n",  i, pVmcsInfo->HCPhysHostMsrLoad));
     3015                    LogRel(("HM: CPU[%u] cEntryMsrLoad        %u\n",     i, pVmcsInfo->cEntryMsrLoad));
     3016                    LogRel(("HM: CPU[%u] cExitMsrStore        %u\n",     i, pVmcsInfo->cExitMsrStore));
     3017                    LogRel(("HM: CPU[%u] cExitMsrLoad         %u\n",     i, pVmcsInfo->cExitMsrLoad));
    30043018                }
    30053019                /** @todo Log VM-entry event injection control fields
     
    32493263        if (pVM->hm.s.vmx.fSupported)
    32503264        {
    3251             bool const fRealOnV86Active = pVCpu->hm.s.vmx.RealMode.fRealOnV86Active;
     3265            PCVMXVMCSINFO pVmcsInfo         = hmGetVmxActiveVmcsInfo(pVCpu);
     3266            bool const    fRealOnV86Active  = pVmcsInfo->RealMode.fRealOnV86Active;
     3267            bool const    fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs;
     3268
      3269            pHlp->pfnPrintf(pHlp, "  %s VMCS active\n", fNstGstVmcsActive ? "Nested-guest" : "Guest");
    32523270            pHlp->pfnPrintf(pHlp, "  Real-on-v86 active = %RTbool\n", fRealOnV86Active);
    32533271            if (fRealOnV86Active)
    32543272            {
    3255                 pHlp->pfnPrintf(pHlp, "    EFlags  = %#x\n", pVCpu->hm.s.vmx.RealMode.Eflags.u32);
    3256                 pHlp->pfnPrintf(pHlp, "    Attr CS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrCS.u);
    3257                 pHlp->pfnPrintf(pHlp, "    Attr SS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrSS.u);
    3258                 pHlp->pfnPrintf(pHlp, "    Attr DS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrDS.u);
    3259                 pHlp->pfnPrintf(pHlp, "    Attr ES = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrES.u);
    3260                 pHlp->pfnPrintf(pHlp, "    Attr FS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrFS.u);
    3261                 pHlp->pfnPrintf(pHlp, "    Attr GS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrGS.u);
     3273                pHlp->pfnPrintf(pHlp, "    EFlags  = %#x\n", pVmcsInfo->RealMode.Eflags.u32);
     3274                pHlp->pfnPrintf(pHlp, "    Attr CS = %#x\n", pVmcsInfo->RealMode.AttrCS.u);
     3275                pHlp->pfnPrintf(pHlp, "    Attr SS = %#x\n", pVmcsInfo->RealMode.AttrSS.u);
     3276                pHlp->pfnPrintf(pHlp, "    Attr DS = %#x\n", pVmcsInfo->RealMode.AttrDS.u);
     3277                pHlp->pfnPrintf(pHlp, "    Attr ES = %#x\n", pVmcsInfo->RealMode.AttrES.u);
     3278                pHlp->pfnPrintf(pHlp, "    Attr FS = %#x\n", pVmcsInfo->RealMode.AttrFS.u);
     3279                pHlp->pfnPrintf(pHlp, "    Attr GS = %#x\n", pVmcsInfo->RealMode.AttrGS.u);
    32623280            }
    32633281        }
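
For reference, with the nested-guest VMCS current, the info handler's output now begins along these lines (illustrative values, derived directly from the format strings above):

    Nested-guest VMCS active
    Real-on-v86 active = false
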
  • trunk/src/VBox/VMM/include/CPUMInternal.h

    r76678 r78220  
    121121 * @{ */
    122122/** The current saved state version. */
    123 #define CPUM_SAVED_STATE_VERSION                CPUM_SAVED_STATE_VERSION_HWVIRT_SVM
     123#define CPUM_SAVED_STATE_VERSION                CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM
     124/** The saved state version including VMX hardware virtualization state (IEM-only
     125 *  execution). */
     126#define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM 19
    124127/** The saved state version including SVM hardware virtualization state. */
    125128#define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM     18
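
Saved-state loaders must gate the new VMX hardware-virtualization state on this version bump. A minimal sketch of the load-side check, assuming a hypothetical helper cpumR3LoadHwvirtVmxState (the actual loader code is not part of this hunk):

    /* Hedged sketch: only states >= version 19 carry the VMX hwvirt state;
     * cpumR3LoadHwvirtVmxState is a hypothetical helper name. */
    if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM)
    {
        int rc = cpumR3LoadHwvirtVmxState(pSSM, pVCpu);
        AssertRCReturn(rc, rc);
    }
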
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r76553 r78220  
    253253    alignb 8
    254254    .Guest.hwvirt.svm.HCPhysVmcb             RTHCPHYS_RES 1
    255     .Guest.hwvirt.svm.u64Padding0            resq         19
     255    .Guest.hwvirt.svm.au64Padding0           resq         33
    256256    .Guest.hwvirt.enmHwvirt                  resd         1
    257257    .Guest.hwvirt.fGif                       resb         1
     
    543543    alignb 8
    544544    .Hyper.hwvirt.svm.HCPhysVmcb             RTHCPHYS_RES 1
    545     .Hyper.hwvirt.svm.u64Padding0            resq         19
     545    .Hyper.hwvirt.svm.au64Padding0           resq         33
    546546    .Hyper.hwvirt.enmHwvirt                  resd         1
    547547    .Hyper.hwvirt.fGif                       resb         1
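
The padding grows from 19 to 33 qwords because the hwvirt state in CPUMCTX gained members in this changeset, and the assembly layout must stay byte-for-byte in sync with the C structure. A compile-time guard of roughly this shape (illustrative; the real cross-checks live in the tstVMStruct test cases) would catch drift:

    /* Illustrative guard: the assembly padding must match the C union's size. */
    AssertCompileMemberSize(CPUMCTX, hwvirt.svm.au64Padding0, 33 * sizeof(uint64_t));
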
  • trunk/src/VBox/VMM/include/HMInternal.h

    r77716 r78220  
    142142#define HM_CHANGED_KEEPER_STATE_MASK             UINT64_C(0xffff000000000000)
    143143
     144/** @todo r=ramshankar: Remove "GUEST" from XCPT_INTERCEPTS. */
    144145#define HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS     UINT64_C(0x0001000000000000)
    145146#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           UINT64_C(0x0002000000000000)
    146147#define HM_CHANGED_VMX_GUEST_LAZY_MSRS           UINT64_C(0x0004000000000000)
    147 #define HM_CHANGED_VMX_ENTRY_CTLS                UINT64_C(0x0008000000000000)
    148 #define HM_CHANGED_VMX_EXIT_CTLS                 UINT64_C(0x0010000000000000)
    149 #define HM_CHANGED_VMX_MASK                      UINT64_C(0x001f000000000000)
     148#define HM_CHANGED_VMX_ENTRY_EXIT_CTLS           UINT64_C(0x0008000000000000)
     149#define HM_CHANGED_VMX_MASK                      UINT64_C(0x000f000000000000)
    150150#define HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE   (  HM_CHANGED_GUEST_DR_MASK \
    151151                                                  | HM_CHANGED_VMX_GUEST_LAZY_MSRS)
    152152
     153/** @todo r=ramshankar: Remove "GUEST" from XCPT_INTERCEPTS. */
    153154#define HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS     UINT64_C(0x0001000000000000)
    154155#define HM_CHANGED_SVM_MASK                      UINT64_C(0x0001000000000000)
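
With the separate entry and exit bits folded into HM_CHANGED_VMX_ENTRY_EXIT_CTLS, a call site that previously dirtied the two flags individually now marks both control sets with a single atomic OR of the per-VCPU dirty mask (fCtxChanged, shown further down). An illustrative call site:

    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
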
     
    405406
    406407/**
     408 * HM event.
     409 *
     410 * VT-x and AMD-V common event injection structure.
     411 */
     412typedef struct HMEVENT
     413{
     414    /** Whether the event is pending. */
     415    uint32_t        fPending;
     416    /** The error-code associated with the event. */
     417    uint32_t        u32ErrCode;
     418    /** The length of the instruction in bytes (only relevant for software
     419     *  interrupts or software exceptions). */
     420    uint32_t        cbInstr;
     421    /** Alignment. */
     422    uint32_t        u32Padding;
     423    /** The encoded event (VM-entry interruption-information for VT-x or EVENTINJ
     424     *  for SVM). */
     425    uint64_t        u64IntInfo;
     426    /** Guest virtual address if this is a page-fault event. */
     427    RTGCUINTPTR     GCPtrFaultAddress;
     428} HMEVENT;
     429/** Pointer to a HMEVENT struct. */
     430typedef HMEVENT *PHMEVENT;
     431/** Pointer to a const HMEVENT struct. */
     432typedef const HMEVENT *PCHMEVENT;
     433AssertCompileSizeAlignment(HMEVENT, 8);
     434
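
To make the encoding concrete, here is a hedged sketch of queueing a page-fault event in this common format; the helper name is hypothetical, and the bit layout follows the VT-x VM-entry interruption-information format:

    /* Hypothetical helper (not VBox source): queue a #PF in the common format. */
    static void hmSetPendingXcptPF(PHMEVENT pEvent, uint32_t uErrCode, RTGCUINTPTR GCPtrFault)
    {
        uint32_t uIntInfo = 14;            /* bits 0-7:  vector (#PF = 14) */
        uIntInfo |= 3 << 8;                /* bits 8-10: type (hardware exception) */
        uIntInfo |= RT_BIT(11);            /* bit 11:    error code valid */
        uIntInfo |= RT_BIT(31);            /* bit 31:    event valid */
        pEvent->fPending          = true;
        pEvent->u64IntInfo        = uIntInfo;
        pEvent->u32ErrCode        = uErrCode;
        pEvent->cbInstr           = 0;     /* only software ints/exceptions need this */
        pEvent->GCPtrFaultAddress = GCPtrFault;
    }
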
     435/**
    407436 * HM VM Instance data.
    408437 * Changes to this must be checked against the padding of the hm union in VM!
     
    522551        uint64_t                    u64HostSmmMonitorCtl;
    523552        /** Host EFER value (set by ring-0 VMX init) */
    524         uint64_t                    u64HostEfer;
     553        uint64_t                    u64HostMsrEfer;
    525554        /** Whether the CPU supports VMCS fields for swapping EFER. */
    526555        bool                        fSupportsVmcsEfer;
     
    600629
    601630/* Maximum number of cached entries. */
    602 #define VMX_VMCS_BATCH_CACHE_MAX_ENTRY              128
     631#define VMX_VMCS_CACHE_MAX_ENTRY                    128
    603632
    604633/**
    605634 * Cache of a VMCS for batch reads or writes.
    606635 */
    607 typedef struct VMXVMCSBATCHCACHE
     636typedef struct VMXVMCSCACHE
    608637{
    609638#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    625654        uint32_t    cValidEntries;
    626655        uint32_t    uAlignment;
    627         uint32_t    aField[VMX_VMCS_BATCH_CACHE_MAX_ENTRY];
    628         uint64_t    aFieldVal[VMX_VMCS_BATCH_CACHE_MAX_ENTRY];
     656        uint32_t    aField[VMX_VMCS_CACHE_MAX_ENTRY];
     657        uint64_t    aFieldVal[VMX_VMCS_CACHE_MAX_ENTRY];
    629658    } Write;
    630659    struct
     
    632661        uint32_t    cValidEntries;
    633662        uint32_t    uAlignment;
    634         uint32_t    aField[VMX_VMCS_BATCH_CACHE_MAX_ENTRY];
    635         uint64_t    aFieldVal[VMX_VMCS_BATCH_CACHE_MAX_ENTRY];
     663        uint32_t    aField[VMX_VMCS_CACHE_MAX_ENTRY];
     664        uint64_t    aFieldVal[VMX_VMCS_CACHE_MAX_ENTRY];
    636665    } Read;
    637666#ifdef VBOX_STRICT
     
    659688    } ScratchPad;
    660689#endif
    661 } VMXVMCSBATCHCACHE;
    662 /** Pointer to VMXVMCSBATCHCACHE. */
    663 typedef VMXVMCSBATCHCACHE *PVMXVMCSBATCHCACHE;
    664 AssertCompileSizeAlignment(VMXVMCSBATCHCACHE, 8);
     690} VMXVMCSCACHE;
     691/** Pointer to VMXVMCSCACHE. */
     692typedef VMXVMCSCACHE *PVMXVMCSCACHE;
     693AssertCompileSizeAlignment(VMXVMCSCACHE, 8);
    665694
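
Apart from the rename, the cache's operation is unchanged: fields queued into the Write arrays are flushed as real VMWRITEs later. A minimal sketch of the queueing step (the function name is illustrative):

    /* Illustrative: queue a deferred VMWRITE into the batch cache. */
    static void vmxVmcsCacheQueueWrite(PVMXVMCSCACHE pCache, uint32_t idxField, uint64_t u64Val)
    {
        uint32_t const i = pCache->Write.cValidEntries;
        AssertReturnVoid(i < VMX_VMCS_CACHE_MAX_ENTRY);
        pCache->Write.aField[i]     = idxField;
        pCache->Write.aFieldVal[i]  = u64Val;
        pCache->Write.cValidEntries = i + 1;
    }
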
    666695/**
     
    674703 * @param   pVCpu       Pointer to the cross context per-CPU structure.
    675704 */
    676 typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);
     705typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu);
    677706/** Pointer to a VMX StartVM function. */
    678707typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;
     
    682711/** Pointer to a SVM VMRun function. */
    683712typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
    684 
    685 /**
    686  * Cache of certain VMCS fields during execution of a guest or nested-guest.
    687  */
    688 typedef struct VMXVMCSCTLSCACHE
    689 {
    690     /** Cache of pin-based VM-execution controls. */
    691     uint32_t                    u32PinCtls;
    692     /** Cache of processor-based VM-execution controls. */
    693     uint32_t                    u32ProcCtls;
    694     /** Cache of secondary processor-based VM-execution controls. */
    695     uint32_t                    u32ProcCtls2;
    696     /** Cache of VM-entry controls. */
    697     uint32_t                    u32EntryCtls;
    698     /** Cache of VM-exit controls. */
    699     uint32_t                    u32ExitCtls;
    700     /** Cache of CR0 mask. */
    701     uint32_t                    u32Cr0Mask;
    702     /** Cache of CR4 mask. */
    703     uint32_t                    u32Cr4Mask;
    704     /** Cache of exception bitmap. */
    705     uint32_t                    u32XcptBitmap;
    706     /** Cache of TSC offset. */
    707     uint64_t                    u64TscOffset;
    708 } VMXVMCSCTLSCACHE;
    709 /** Pointer to a VMXVMCSCTLSCACHE struct. */
    710 typedef VMXVMCSCTLSCACHE *PVMXVMCSCTLSCACHE;
    711 /** Pointer to a  VMXVMCSCTLSCACHE struct. */
    712 typedef const VMXVMCSCTLSCACHE *PCVMXVMCSCTLSCACHE;
    713 AssertCompileSizeAlignment(VMXVMCSCTLSCACHE, 8);
    714713
    715714/**
     
    721720typedef struct VMXVMCSINFO
    722721{
     722    /** @name VMLAUNCH/VMRESUME information.
     723     * @{ */
     724    /** Ring-0 pointer to the hardware-assisted VMX execution function. */
     725    PFNHMVMXSTARTVM             pfnStartVM;
     726#if HC_ARCH_BITS == 32
     727    uint32_t                    u32Alignment0;
     728#endif
     729    /** @} */
     730
    723731    /** @name VMCS and related data structures.
    724732     *  @{ */
     
    732740    /** Host-physical address of the virtual APIC page. */
    733741    RTHCPHYS                    HCPhysVirtApic;
    734     /** Padding. */
     742    /** Alignment. */
    735743    R0PTRTYPE(void *)           pvAlignment0;
    736744    /** Host-virtual address of the virtual-APIC page. */
     
    744752    R0PTRTYPE(void *)           pvMsrBitmap;
    745753
    746     /** Host-physical address of the VM-entry MSR-load and VM-exit MSR-store area. */
    747     RTHCPHYS                    HCPhysGuestMsr;
    748     /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area. */
    749     RTR0MEMOBJ                  hMemObjGuestMsr;
    750     /** Host-virtual address of the VM-entry MSR-load and VM-exit MSR-store area. */
    751     R0PTRTYPE(void *)           pvGuestMsr;
     754    /** Host-physical address of the VM-entry MSR-load area. */
     755    RTHCPHYS                    HCPhysGuestMsrLoad;
     756    /** R0 memory object of the VM-entry MSR-load area. */
     757    RTR0MEMOBJ                  hMemObjGuestMsrLoad;
     758    /** Host-virtual address of the VM-entry MSR-load area. */
     759    R0PTRTYPE(void *)           pvGuestMsrLoad;
     760
     761    /** Host-physical address of the VM-exit MSR-store area. */
     762    RTHCPHYS                    HCPhysGuestMsrStore;
     763    /** R0 memory object of the VM-exit MSR-store area. */
     764    RTR0MEMOBJ                  hMemObjGuestMsrStore;
     765    /** Host-virtual address of the VM-exit MSR-store area. */
     766    R0PTRTYPE(void *)           pvGuestMsrStore;
    752767
    753768    /** Host-physical address of the VM-exit MSR-load area. */
    754     RTHCPHYS                    HCPhysHostMsr;
     769    RTHCPHYS                    HCPhysHostMsrLoad;
    755770    /** R0 memory object for the VM-exit MSR-load area. */
    756     RTR0MEMOBJ                  hMemObjHostMsr;
     771    RTR0MEMOBJ                  hMemObjHostMsrLoad;
    757772    /** Host-virtual address of the VM-exit MSR-load area. */
    758     R0PTRTYPE(void *)           pvHostMsr;
     773    R0PTRTYPE(void *)           pvHostMsrLoad;
    759774
    760775    /** Host-physical address of the EPTP. */
    761776    RTHCPHYS                    HCPhysEPTP;
     777    /** Number of guest MSRs in the VM-entry MSR-load area. */
     778    uint32_t                    cEntryMsrLoad;
     779    /** Number of guest MSRs in the VM-exit MSR-store area. */
     780    uint32_t                    cExitMsrStore;
     781    /** Number of host MSRs in the VM-exit MSR-load area. */
     782    uint32_t                    cExitMsrLoad;
     783    /** Padding. */
     784    uint32_t                    u32Padding0;
    762785    /** @} */
    763786
    764787    /** @name Auxiliary information.
    765788     * @{ */
    766     /** Number of guest/host MSR pairs in the auto-load/store area. */
    767     uint32_t                    cMsrs;
    768     /** The VMCS state, see VMX_V_VMCS_STATE_XXX. */
     789    /** The VMCS launch state, see VMX_V_VMCS_LAUNCH_STATE_XXX. */
    769790    uint32_t                    fVmcsState;
     791    /** Set if guest was executing in real mode (extra checks). */
     792    bool                        fWasInRealMode;
     793    /** Set if the guest switched to 64-bit mode on a 32-bit host. */
     794    bool                        fSwitchedTo64on32;
     795    /** Padding. */
     796    bool                        afPadding0[2];
    770797    /** @} */
    771798
     
    782809    /** VM-exit controls. */
    783810    uint32_t                    u32ExitCtls;
    784     /** CR0 guest/host mask. */
    785     uint32_t                    u32Cr0Mask;
    786     /** CR4 guset/host mask. */
    787     uint32_t                    u32Cr4Mask;
    788811    /** Exception bitmap. */
    789812    uint32_t                    u32XcptBitmap;
     813    /** CR0 guest/host mask. */
     814    uint64_t                    u64Cr0Mask;
     815    /** CR4 guest/host mask. */
     816    uint64_t                    u64Cr4Mask;
     817    /** Page-fault exception error-code mask. */
     818    uint32_t                    u32XcptPFMask;
     819    /** Page-fault exception error-code match. */
     820    uint32_t                    u32XcptPFMatch;
    790821    /** TSC offset. */
    791822    uint64_t                    u64TscOffset;
     823    /** VMCS link pointer. */
     824    uint64_t                    u64VmcsLinkPtr;
    792825    /** @} */
    793826
     827    /** @name Real-mode emulation state.
     828     * @{ */
     829    struct
     830    {
     831        X86DESCATTR             AttrCS;
     832        X86DESCATTR             AttrDS;
     833        X86DESCATTR             AttrES;
     834        X86DESCATTR             AttrFS;
     835        X86DESCATTR             AttrGS;
     836        X86DESCATTR             AttrSS;
     837        X86EFLAGS               Eflags;
     838        bool                    fRealOnV86Active;
     839        bool                    afPadding1[3];
     840    } RealMode;
     841    /** @} */
     842
    794843    /** Padding. */
    795     uint64_t                    u64Padding[4];
     844    uint64_t                    au64Padding[2];
    796845} VMXVMCSINFO;
    797846/** Pointer to a VMXVMCSINFO struct. */
    798847typedef VMXVMCSINFO *PVMXVMCSINFO;
    799 /** Pointer to a VMXVMCSINFO struct. */
     848/** Pointer to a const VMXVMCSINFO struct. */
    800849typedef const VMXVMCSINFO *PCVMXVMCSINFO;
    801850AssertCompileSizeAlignment(VMXVMCSINFO, 8);
     851AssertCompileMemberAlignment(VMXVMCSINFO, fVmcsState, 8);
     852AssertCompileMemberAlignment(VMXVMCSINFO, u32PinCtls, 8);
     853AssertCompileMemberAlignment(VMXVMCSINFO, u64VmcsLinkPtr, 8);
     854AssertCompileMemberAlignment(VMXVMCSINFO, pvMsrBitmap, 8);
    802855
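
Because the combined guest MSR area is now split, an auto-swapped guest MSR gets one slot in the VM-entry load area and one in the VM-exit store area, with the counters updated in tandem. A hedged sketch, assuming the 16-byte VMXAUTOMSR entry format (u32Msr, u32Reserved, u64Value) and omitting duplicate and capacity checks:

    /* Hedged sketch: register a guest MSR for auto load-on-entry/store-on-exit. */
    static void vmxAddAutoLoadStoreMsr(PVMXVMCSINFO pVmcsInfo, uint32_t idMsr, uint64_t uGuestValue)
    {
        PVMXAUTOMSR pEntryLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
        PVMXAUTOMSR pExitStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
        uint32_t const i = pVmcsInfo->cEntryMsrLoad;

        pEntryLoad[i].u32Msr      = idMsr;
        pEntryLoad[i].u32Reserved = 0;
        pEntryLoad[i].u64Value    = uGuestValue;
        pExitStore[i].u32Msr      = idMsr;   /* store slot mirrors the load slot */
        pExitStore[i].u32Reserved = 0;
        pExitStore[i].u64Value    = 0;       /* filled in by the CPU on VM-exit */

        pVmcsInfo->cEntryMsrLoad = i + 1;
        pVmcsInfo->cExitMsrStore = i + 1;
    }
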
    803856/**
     
    857910    uint64_t                    fCtxChanged;
    858911    /** Host's TSC_AUX MSR (used when RDTSCP doesn't cause VM-exits). */
    859     uint64_t                    u64HostTscAux;
     912    uint64_t                    u64HostTscAux;  /** @todo r=ramshankar: Can be removed and put in SVMTRANSIENT instead! */
    860913
    861914    union /* no tag! */
     
    864917        struct
    865918        {
    866             /** Ring 0 handlers for VT-x. */
    867             PFNHMVMXSTARTVM             pfnStartVM;
    868 #if HC_ARCH_BITS == 32
     919            /** @name Guest information.
     920             * @{ */
     921            /** Guest VMCS information. */
     922            VMXVMCSINFO                 VmcsInfo;
     923            /** Nested-guest VMCS information. */
     924            VMXVMCSINFO                 VmcsInfoNstGst;
     925            /** Whether the nested-guest VMCS was the last current VMCS. */
     926            bool                        fSwitchedToNstGstVmcs;
     927            /** Whether the static guest VMCS controls has been merged with the
     928             *  nested-guest VMCS controls. */
     929            bool                        fMergedNstGstCtls;
     930            /** Alignment. */
     931            bool                        afAlignment0[6];
     932            /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
     933            uint64_t                    u64GstMsrApicBase;
     934            /** VMCS cache for batched vmread/vmwrites. */
     935            VMXVMCSCACHE                VmcsCache;
     936            /** @} */
     937
     938            /** @name Host information.
     939             * @{ */
     940            /** Host LSTAR MSR to restore lazily while leaving VT-x. */
     941            uint64_t                    u64HostMsrLStar;
     942            /** Host STAR MSR to restore lazily while leaving VT-x. */
     943            uint64_t                    u64HostMsrStar;
     944            /** Host SF_MASK MSR to restore lazily while leaving VT-x. */
     945            uint64_t                    u64HostMsrSfMask;
     946            /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */
     947            uint64_t                    u64HostMsrKernelGsBase;
     948            /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */
     949            uint32_t                    fLazyMsrs;
     950            /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */
     951            bool                        fUpdatedHostAutoMsrs;
     952            /** Alignment. */
     953            uint8_t                     au8Alignment0[3];
     954            /** Which host-state bits to restore before being preempted. */
     955            uint32_t                    fRestoreHostFlags;
     956            /** Alignment. */
    869957            uint32_t                    u32Alignment0;
    870 #endif
    871 
    872             /** Cache of the executing guest (or nested-guest) VMCS control fields. */
    873             VMXVMCSCTLSCACHE            Ctls;
    874             /** Cache of guest (level 1) VMCS control fields when executing a nested-guest
    875              *  (level 2). */
    876             VMXVMCSCTLSCACHE            Level1Ctls;
    877 
    878             /** Physical address of the VM control structure (VMCS). */
    879             RTHCPHYS                    HCPhysVmcs;
    880             /** R0 memory object for the VM control structure (VMCS). */
    881             RTR0MEMOBJ                  hMemObjVmcs;
    882             /** Virtual address of the VM control structure (VMCS). */
    883             R0PTRTYPE(void *)           pvVmcs;
    884 
    885             /** Physical address of the virtual APIC page for TPR caching. */
    886             RTHCPHYS                    HCPhysVirtApic;
    887             /** Padding. */
    888             R0PTRTYPE(void *)           pvAlignment0;
    889             /** Virtual address of the virtual APIC page for TPR caching. */
    890             R0PTRTYPE(uint8_t *)        pbVirtApic;
    891 
    892             /** Physical address of the MSR bitmap. */
    893             RTHCPHYS                    HCPhysMsrBitmap;
    894             /** R0 memory object for the MSR bitmap. */
    895             RTR0MEMOBJ                  hMemObjMsrBitmap;
    896             /** Virtual address of the MSR bitmap. */
    897             R0PTRTYPE(void *)           pvMsrBitmap;
    898 
    899             /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
    900              *  for guest MSRs). */
    901             RTHCPHYS                    HCPhysGuestMsr;
    902             /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
    903              *  (used for guest MSRs). */
    904             RTR0MEMOBJ                  hMemObjGuestMsr;
    905             /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
    906              *  for guest MSRs). */
    907             R0PTRTYPE(void *)           pvGuestMsr;
    908 
    909             /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
    910             RTHCPHYS                    HCPhysHostMsr;
    911             /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
    912             RTR0MEMOBJ                  hMemObjHostMsr;
    913             /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
    914             R0PTRTYPE(void *)           pvHostMsr;
    915 
    916             /** Physical address of the current EPTP. */
    917             RTHCPHYS                    HCPhysEPTP;
    918 
    919             /** Number of guest/host MSR pairs in the auto-load/store area. */
    920             uint32_t                    cMsrs;
    921             /** Whether the host MSR values are up-to-date in the auto-load/store area. */
    922             bool                        fUpdatedHostMsrs;
    923             uint8_t                     au8Alignment0[3];
    924 
    925             /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
    926             uint64_t                    u64HostLStarMsr;
    927             /** Host STAR MSR value to restore lazily while leaving VT-x. */
    928             uint64_t                    u64HostStarMsr;
    929             /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
    930             uint64_t                    u64HostSFMaskMsr;
    931             /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
    932             uint64_t                    u64HostKernelGSBaseMsr;
    933             /** A mask of which MSRs have been swapped and need restoration. */
    934             uint32_t                    fLazyMsrs;
    935             uint32_t                    u32Alignment1;
    936 
    937             /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
    938             uint64_t                    u64MsrApicBase;
    939 
    940             /** VMCS cache for batched vmread/vmwrites. */
    941             VMXVMCSBATCHCACHE           VmcsBatchCache;
    942 
    943             /** Real-mode emulation state. */
    944             struct
    945             {
    946                 X86DESCATTR             AttrCS;
    947                 X86DESCATTR             AttrDS;
    948                 X86DESCATTR             AttrES;
    949                 X86DESCATTR             AttrFS;
    950                 X86DESCATTR             AttrGS;
    951                 X86DESCATTR             AttrSS;
    952                 X86EFLAGS               Eflags;
    953                 bool                    fRealOnV86Active;
    954             } RealMode;
    955 
     958            /** The host-state restoration structure. */
     959            VMXRESTOREHOST              RestoreHost;
     960            /** @} */
     961
     962            /** @name Error reporting and diagnostics.
     963             * @{ */
    956964            /** VT-x error-reporting (mainly for ring-3 propagation). */
    957965            struct
    958966            {
    959                 uint64_t                u64VmcsPhys;
     967                RTHCPHYS                HCPhysCurrentVmcs;
    960968                uint32_t                u32VmcsRev;
    961969                uint32_t                u32InstrError;
     
    965973                RTCPUID                 idCurrentCpu;
    966974            } LastError;
    967 
    968             /** Current state of the VMCS. */
    969             uint32_t                    fVmcsState;
    970             /** Which host-state bits to restore before being preempted. */
    971             uint32_t                    fRestoreHostFlags;
    972             /** The host-state restoration structure. */
    973             VMXRESTOREHOST              RestoreHost;
    974 
    975             /** Set if guest was executing in real mode (extra checks). */
    976             bool                        fWasInRealMode;
    977             /** Set if guest switched to 64-bit mode on a 32-bit host. */
    978             bool                        fSwitchedTo64on32;
    979             /** Padding. */
    980             uint8_t                     au8Alignment1[6];
     975            /** @} */
    981976        } vmx;
    982977
     
    10231018
    10241019    /** Event injection state. */
    1025     struct
    1026     {
    1027         uint32_t                    fPending;
    1028         uint32_t                    u32ErrCode;
    1029         uint32_t                    cbInstr;
    1030         uint32_t                    u32Padding; /**< Explicit alignment padding. */
    1031         uint64_t                    u64IntInfo;
    1032         RTGCUINTPTR                 GCPtrFaultAddress;
    1033     } Event;
     1020    HMEVENT                 Event;
    10341021
    10351022    /** The PAE PDPEs used with Nested Paging (only valid when
     
    10871074    STAMCOUNTER             StatExitGuestXF;
    10881075    STAMCOUNTER             StatExitGuestXcpUnk;
    1089     STAMCOUNTER             StatExitCli;
    1090     STAMCOUNTER             StatExitSti;
    1091     STAMCOUNTER             StatExitPushf;
    1092     STAMCOUNTER             StatExitPopf;
    1093     STAMCOUNTER             StatExitIret;
    1094     STAMCOUNTER             StatExitInt;
    1095     STAMCOUNTER             StatExitHlt;
    10961076    STAMCOUNTER             StatExitDRxWrite;
    10971077    STAMCOUNTER             StatExitDRxRead;
     
    11101090    STAMCOUNTER             StatExitClts;
    11111091    STAMCOUNTER             StatExitXdtrAccess;
    1112     STAMCOUNTER             StatExitMwait;
    1113     STAMCOUNTER             StatExitMonitor;
    11141092    STAMCOUNTER             StatExitLmsw;
    11151093    STAMCOUNTER             StatExitIOWrite;
     
    11231101    STAMCOUNTER             StatExitTprBelowThreshold;
    11241102    STAMCOUNTER             StatExitTaskSwitch;
    1125     STAMCOUNTER             StatExitMtf;
    11261103    STAMCOUNTER             StatExitApicAccess;
    11271104    STAMCOUNTER             StatExitReasonNpf;
     
    12041181
    12051182#ifdef IN_RING0
    1206 VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void);
    1207 VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPU pVCpu);
     1183VMMR0_INT_DECL(PHMPHYSCPU)  hmR0GetCurrentCpu(void);
     1184VMMR0_INT_DECL(int)         hmR0EnterCpu(PVMCPU pVCpu);
    12081185
    12091186# ifdef VBOX_STRICT
    1210 VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu);
    1211 VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
     1187VMMR0_INT_DECL(void)        hmR0DumpRegs(PVMCPU pVCpu);
     1188VMMR0_INT_DECL(void)        hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
    12121189# endif
    12131190
    12141191# ifdef VBOX_WITH_KERNEL_USING_XMM
    1215 DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSBATCHCACHE pVmcsCache, PVM pVM, PVMCPU pVCpu,
    1216                                    PFNHMVMXSTARTVM pfnStartVM);
    1217 DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
    1218                                PFNHMSVMVMRUN pfnVMRun);
     1192DECLASM(int)                hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pVmcsCache, PVM pVM,
     1193                                                  PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
     1194DECLASM(int)                hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
     1195                                              PFNHMSVMVMRUN pfnVMRun);
    12191196# endif
    12201197#endif /* IN_RING0 */
    12211198
    1222 VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCPU pVCpu);
     1199VMM_INT_DECL(int)           hmEmulateSvmMovTpr(PVMCPU pVCpu);
     1200
     1201VMM_INT_DECL(PVMXVMCSINFO)  hmGetVmxActiveVmcsInfo(PVMCPU pVCpu);
    12231202
    12241203/** @} */
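
Given fSwitchedToNstGstVmcs and the two VMXVMCSINFO instances in HMCPU, a plausible shape for hmGetVmxActiveVmcsInfo is simply the following (a sketch inferred from this changeset's fields, not necessarily the committed body):

    VMM_INT_DECL(PVMXVMCSINFO) hmGetVmxActiveVmcsInfo(PVMCPU pVCpu)
    {
        if (!pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs)
            return &pVCpu->hm.s.vmx.VmcsInfo;        /* guest VMCS */
        return &pVCpu->hm.s.vmx.VmcsInfoNstGst;      /* nested-guest VMCS */
    }
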
  • trunk/src/VBox/VMM/include/HMInternal.mac

    r77481 r78220  
    2121
    2222;Maximum number of cached entries.
    23 %define VMX_VMCS_BATCH_CACHE_MAX_ENTRY                             128
     23%define VMX_VMCS_CACHE_MAX_ENTRY                             128
    2424
    2525; Structure for storing read and write VMCS actions.
    26 struc VMXVMCSBATCHCACHE
     26struc VMXVMCSCACHE
    2727%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    2828    .aMagic                   resb    16
     
    3939    .Write.cValidEntries      resd    1
    4040    .Write.uAlignment         resd    1
    41     .Write.aField             resd    VMX_VMCS_BATCH_CACHE_MAX_ENTRY
    42     .Write.aFieldVal          resq    VMX_VMCS_BATCH_CACHE_MAX_ENTRY
     41    .Write.aField             resd    VMX_VMCS_CACHE_MAX_ENTRY
     42    .Write.aFieldVal          resq    VMX_VMCS_CACHE_MAX_ENTRY
    4343    .Read.cValidEntries       resd    1
    4444    .Read.uAlignment          resd    1
    45     .Read.aField              resd    VMX_VMCS_BATCH_CACHE_MAX_ENTRY
    46     .Read.aFieldVal           resq    VMX_VMCS_BATCH_CACHE_MAX_ENTRY
     45    .Read.aField              resd    VMX_VMCS_CACHE_MAX_ENTRY
     46    .Read.aFieldVal           resq    VMX_VMCS_CACHE_MAX_ENTRY
    4747%ifdef VBOX_STRICT
    4848    .TestIn.HCPhysCpuPage     resq    1
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r77609 r78220  
    159159    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR0);
    160160    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR3);
    161     GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR0);
    162     GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR3);
    163161    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR0);
    164162    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR3);
    165163    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR0);
    166164    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR3);
    167     GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pAutoMsrAreaR0);
    168     GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pAutoMsrAreaR3);
     165    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pEntryMsrLoadAreaR0);
     166    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pEntryMsrLoadAreaR3);
     167    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrStoreAreaR0);
     168    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrStoreAreaR3);
     169    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrLoadAreaR0);
     170    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrLoadAreaR3);
    169171    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvMsrBitmapR0);
    170172    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvMsrBitmapR3);
     
    173175    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uFirstPauseLoopTick);
    174176    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uPrevPauseTick);
    175     GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uVmentryTick);
     177    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uEntryTick);
    176178    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.offVirtApicWrite);
    177179    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fVirtNmiBlocking);
    178180    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.Msrs);
     181    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysVmcs);
     182    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysShadowVmcs);
     183    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysVmreadBitmap);
     184    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysVmwriteBitmap);
     185    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysEntryMsrLoadArea);
     186    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysExitMsrStoreArea);
     187    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysExitMsrLoadArea);
     188    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysMsrBitmap);
     189    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysIoBitmap);
    179190    GEN_CHECK_OFF(CPUMCTX, hwvirt.enmHwvirt);
    180191    GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r76553 r78220  
    428428    CHECK_MEMBER_ALIGNMENT(HM, aPatches, 8);
    429429    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx, 8);
    430     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.pfnStartVM, 8);
    431     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVmcs, 8);
     430    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfo, 8);
     431    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfo.pfnStartVM, 8);
     432    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfoNstGst, 8);
     433    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfoNstGst.pfnStartVM, 8);
     434    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.RestoreHost, 8);
    432435    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.LastError, 8);
    433     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.RestoreHost, 8);
    434436    CHECK_MEMBER_ALIGNMENT(HMCPU, svm, 8);
    435437    CHECK_MEMBER_ALIGNMENT(HMCPU, svm.pfnVMRun, 8);
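
Each of these checks verifies a member's offset at test time; conceptually, a single check reduces to something like the following (illustrative shape only; the real macro also reports which member failed):

    /* Illustrative expansion of CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfo, 8): */
    if (RT_UOFFSETOF(HMCPU, vmx.VmcsInfo) & (8 - 1))
        rc++; /* count the misaligned member as a failure */
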