VirtualBox

Changeset 78220 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Apr 20, 2019 4:08:44 AM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
130157
Message:

VMM: Nested VMX: bugref:9180 Hardware-assisted nested VT-x infrastructure changes and VM-entry implementation.

Location:
trunk/src/VBox/VMM/VMMAll
Files:
5 edited

Legend:

Unchanged lines are prefixed with ' ', added lines with '+', removed lines with '-'; runs of elided unchanged lines are marked '…'.
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r77712 → r78220)

 
 /**
- * Applies the TSC offset of a nested-guest if any and returns the new TSC
- * value for the guest (or nested-guest).
+ * Applies the TSC offset of a nested-guest if any and returns the TSC value for the
+ * nested-guest.
  *
  * @returns The TSC offset after applying any nested-guest TSC offset.
…
  * @param   uTicks      The guest TSC.
  *
- * @sa      HMApplySvmNstGstTscOffset.
+ * @sa      CPUMRemoveNestedGuestTscOffset.
  */
 VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
…
     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     {
+        /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMHasGuestSvmVmcbCached to save a call. */
         if (!HMHasGuestSvmVmcbCached(pVCpu))
         {
…
         }
         return HMApplySvmNstGstTscOffset(pVCpu, uTicks);
+    }
+#else
+    RT_NOREF(pVCpu);
+#endif
+    return uTicks;
+}
+
+
+/**
+ * Removes the TSC offset of a nested-guest if any and returns the TSC value for the
+ * guest.
+ *
+ * @returns The TSC offset after removing any nested-guest TSC offset.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   uTicks      The nested-guest TSC.
+ *
+ * @sa      CPUMApplyNestedGuestTscOffset.
+ */
+VMM_INT_DECL(uint64_t) CPUMRemoveNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
+{
+#ifndef IN_RC
+    PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+    if (CPUMIsGuestInVmxNonRootMode(pCtx))
+    {
+        PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
+        if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
+            return uTicks - pVmcs->u64TscOffset.u;
+        return uTicks;
+    }
+
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    {
+        /** @todo r=bird: Bake HMApplySvmNstGstTscOffset into HMRemoveSvmNstGstTscOffset to save a call. */
+        if (!HMHasGuestSvmVmcbCached(pVCpu))
+        {
+            PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+            return uTicks - pVmcb->ctrl.u64TSCOffset;
+        }
+        return HMRemoveSvmNstGstTscOffset(pVCpu, uTicks);
     }
 #else
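
The new remove-side helpers are the exact inverses of the apply-side ones: VM-entry adds the nested-guest TSC offset to the guest TSC, and every exit path must subtract the same offset to recover the guest value. A minimal standalone sketch of that round trip in plain C (the struct and flag are illustrative stand-ins for the VMCS fields, not the VMM types):

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative stand-in for the TSC-offsetting fields of a VMCS. */
    typedef struct TSCOFFSETCTX
    {
        uint64_t u64TscOffset;      /* Two's complement offset, per the VMX rules. */
        int      fUseTscOffsetting; /* Mirrors VMX_PROC_CTLS_USE_TSC_OFFSETTING. */
    } TSCOFFSETCTX;

    /* Nested-guest TSC = guest TSC + offset (the apply direction). */
    static uint64_t applyTscOffset(const TSCOFFSETCTX *pCtx, uint64_t uTicks)
    {
        return pCtx->fUseTscOffsetting ? uTicks + pCtx->u64TscOffset : uTicks;
    }

    /* Guest TSC = nested-guest TSC - offset (the new remove direction). */
    static uint64_t removeTscOffset(const TSCOFFSETCTX *pCtx, uint64_t uTicks)
    {
        return pCtx->fUseTscOffsetting ? uTicks - pCtx->u64TscOffset : uTicks;
    }

    int main(void)
    {
        TSCOFFSETCTX Ctx = { UINT64_C(0xffffffffffff0000) /* negative offset */, 1 };
        uint64_t const uGuestTsc = UINT64_C(50000000);
        /* Unsigned wrap-around makes apply/remove exact inverses for any offset. */
        assert(removeTscOffset(&Ctx, applyTscOffset(&Ctx, uGuestTsc)) == uGuestTsc);
        return 0;
    }
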

  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp (r77589 → r78220)

      */
     if (enmGuestMode == PGMMODE_REAL)
-        pVCpu->hm.s.vmx.fWasInRealMode = true;
+    {
+        PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
+        pVmcsInfo->fWasInRealMode = true;
+    }
 
 # ifdef IN_RING0
…
             fChanged |= HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS;
         else
-            fChanged |= HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS;
+            fChanged |= HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS | HM_CHANGED_VMX_ENTRY_EXIT_CTLS;
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fChanged);
     }

  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp (r77902 → r78220)

  *          using hardware-assisted SVM.
  *
- * @note    If you make any changes to this function, please check if
- *          hmR0SvmNstGstUndoTscOffset() needs adjusting.
- *
- * @sa      CPUMApplyNestedGuestTscOffset(), hmR0SvmNstGstUndoTscOffset().
+ * @sa      CPUMRemoveNestedGuestTscOffset, HMRemoveSvmNstGstTscOffset.
  */
 VMM_INT_DECL(uint64_t) HMApplySvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks)
…
     Assert(pVmcbNstGstCache->fCacheValid);
     return uTicks + pVmcbNstGstCache->u64TSCOffset;
+}
+
+
+/**
+ * Removes the TSC offset of an SVM nested-guest if any and returns the new TSC
+ * value for the guest.
+ *
+ * @returns The TSC offset after removing any nested-guest TSC offset.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   uTicks      The nested-guest TSC.
+ *
+ * @remarks This function looks at the VMCB cache rather than directly at the
+ *          nested-guest VMCB. The latter may have been modified for executing
+ *          using hardware-assisted SVM.
+ *
+ * @sa      CPUMApplyNestedGuestTscOffset, HMApplySvmNstGstTscOffset.
+ */
+VMM_INT_DECL(uint64_t) HMRemoveSvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks)
+{
+    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    Assert(pVmcbNstGstCache->fCacheValid);
+    return uTicks - pVmcbNstGstCache->u64TSCOffset;
 }
 

  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r77745 → r78220)

                  * (all sorts of RPL & DPL assumptions).
                  */
-                if (pVCpu->hm.s.vmx.fWasInRealMode)
+                PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
+                if (pVmcsInfo->fWasInRealMode)
                 {
                     if (!CPUMIsGuestInV86ModeEx(pCtx))
…
 
 /**
- * Gets the permission bits for the specified MSR in the specified MSR bitmap.
- *
- * @returns VBox status code.
+ * Gets the read and write permission bits for an MSR in an MSR bitmap.
+ *
+ * @returns VMXMSRPM_XXX - the MSR permission.
  * @param   pvMsrBitmap     Pointer to the MSR bitmap.
- * @param   idMsr           The MSR.
- * @param   penmRead        Where to store the read permissions. Optional, can be
- *                          NULL.
- * @param   penmWrite       Where to store the write permissions. Optional, can be
- *                          NULL.
- */
-VMM_INT_DECL(int) HMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr, PVMXMSREXITREAD penmRead,
-                                        PVMXMSREXITWRITE penmWrite)
-{
-    AssertPtrReturn(pvMsrBitmap, VERR_INVALID_PARAMETER);
-
-    int32_t iBit;
-    uint8_t const *pbMsrBitmap = (uint8_t *)pvMsrBitmap;
+ * @param   idMsr           The MSR to get permissions for.
+ *
+ * @sa      hmR0VmxSetMsrPermission.
+ */
+VMM_INT_DECL(uint32_t) HMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr)
+{
+    AssertPtrReturn(pvMsrBitmap, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
+
+    uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
 
     /*
…
      *
      * A bit corresponding to an MSR within the above range causes a VM-exit
-     * if the bit is 1 on executions of RDMSR/WRMSR.
-     *
-     * If an MSR falls out of the MSR range, it always cause a VM-exit.
+     * if the bit is 1 on executions of RDMSR/WRMSR.  If an MSR falls out of
+     * the MSR range, it always cause a VM-exit.
      *
      * See Intel spec. 24.6.9 "MSR-Bitmap Address".
      */
-    if (idMsr <= 0x00001fff)
-        iBit = idMsr;
-    else if (   idMsr >= 0xc0000000
-             && idMsr <= 0xc0001fff)
-    {
-        iBit = (idMsr - 0xc0000000);
-        pbMsrBitmap += 0x400;
+    uint32_t const offBitmapRead  = 0;
+    uint32_t const offBitmapWrite = 0x800;
+    uint32_t       offMsr;
+    uint32_t       iBit;
+    if (idMsr <= UINT32_C(0x00001fff))
+    {
+        offMsr = 0;
+        iBit   = idMsr;
+    }
+    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
+    {
+        offMsr = 0x400;
+        iBit   = idMsr - UINT32_C(0xc0000000);
     }
     else
     {
-        if (penmRead)
-            *penmRead = VMXMSREXIT_INTERCEPT_READ;
-        if (penmWrite)
-            *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
-        Log(("CPUMVmxGetMsrPermission: Warning! Out of range MSR %#RX32\n", idMsr));
-        return VINF_SUCCESS;
-    }
-
-    /* Validate the MSR bit position. */
-    Assert(iBit <= 0x1fff);
-
-    /* Get the MSR read permissions. */
-    if (penmRead)
-    {
-        if (ASMBitTest(pbMsrBitmap, iBit))
-            *penmRead = VMXMSREXIT_INTERCEPT_READ;
-        else
-            *penmRead = VMXMSREXIT_PASSTHRU_READ;
-    }
-
-    /* Get the MSR write permissions. */
-    if (penmWrite)
-    {
-        if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
-            *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
-        else
-            *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
-    }
-
-    return VINF_SUCCESS;
+        LogFunc(("Warning! Out of range MSR %#RX32\n", idMsr));
+        return VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR;
+    }
+
+    /*
+     * Get the MSR read permissions.
+     */
+    uint32_t fRet;
+    uint32_t const offMsrRead = offBitmapRead + offMsr;
+    Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
+    if (ASMBitTest(pbMsrBitmap + offMsrRead, iBit))
+        fRet = VMXMSRPM_EXIT_RD;
+    else
+        fRet = VMXMSRPM_ALLOW_RD;
+
+    /*
+     * Get the MSR write permissions.
+     */
+    uint32_t const offMsrWrite = offBitmapWrite + offMsr;
+    Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
+    if (ASMBitTest(pbMsrBitmap + offMsrWrite, iBit))
+        fRet |= VMXMSRPM_EXIT_WR;
+    else
+        fRet |= VMXMSRPM_ALLOW_WR;
+
+    Assert(VMXMSRPM_IS_FLAG_VALID(fRet));
+    return fRet;
 }
 
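
The geometry the rewritten function relies on comes straight from the comment above: one 4 KB bitmap with read bits at offset 0 and write bits at offset 0x800, where each half keeps the low MSR range (0x00000000..0x00001fff) at +0 and the high range (0xc0000000..0xc0001fff) at +0x400. A standalone sketch of the same lookup against a plain byte buffer (the MSRPM_* flags are illustrative stand-ins for the VMXMSRPM_XXX values):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MSRPM_EXIT_RD  1u   /* illustrative stand-ins for VMXMSRPM_EXIT_RD/_WR */
    #define MSRPM_EXIT_WR  2u

    /* Returns a combination of MSRPM_EXIT_RD/WR; both set for out-of-range MSRs. */
    static uint32_t msrBitmapPermission(uint8_t const *pbBitmap /* 4096 bytes */, uint32_t idMsr)
    {
        uint32_t offMsr, iBit;
        if (idMsr <= UINT32_C(0x00001fff))
        {
            offMsr = 0;                 /* low MSR range */
            iBit   = idMsr;
        }
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
        {
            offMsr = 0x400;             /* high MSR range */
            iBit   = idMsr - UINT32_C(0xc0000000);
        }
        else
            return MSRPM_EXIT_RD | MSRPM_EXIT_WR;   /* outside both ranges: always intercept */

        uint32_t fPerm = 0;
        if (pbBitmap[offMsr + (iBit >> 3)] & (1u << (iBit & 7)))           /* read bits at 0x000 */
            fPerm |= MSRPM_EXIT_RD;
        if (pbBitmap[0x800 + offMsr + (iBit >> 3)] & (1u << (iBit & 7)))   /* write bits at 0x800 */
            fPerm |= MSRPM_EXIT_WR;
        return fPerm;
    }

    int main(void)
    {
        uint8_t abBitmap[4096];
        memset(abBitmap, 0, sizeof(abBitmap));
        /* Intercept WRMSR to a high-range MSR, e.g. 0xc0000101. */
        abBitmap[0x800 + 0x400 + (0x101 >> 3)] |= 1u << (0x101 & 7);
        printf("perm=%#x\n", msrBitmapPermission(abBitmap, UINT32_C(0xc0000101))); /* perm=0x2 */
        return 0;
    }
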
…
  * @param   cbAccess        The size of the I/O access in bytes (1, 2 or 4 bytes).
  */
-VMM_INT_DECL(bool) HMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort,
-                                              uint8_t cbAccess)
+VMM_INT_DECL(bool) HMGetVmxIoBitmapPermission(void const *pvIoBitmapA, void const *pvIoBitmapB, uint16_t uPort, uint8_t cbAccess)
 {
     Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
…
     LogRel(("uFirstPauseLoopTick        = %RX64\n",     pCtx->hwvirt.vmx.uFirstPauseLoopTick));
     LogRel(("uPrevPauseTick             = %RX64\n",     pCtx->hwvirt.vmx.uPrevPauseTick));
-    LogRel(("uVmentryTick               = %RX64\n",     pCtx->hwvirt.vmx.uVmentryTick));
+    LogRel(("uEntryTick                 = %RX64\n",     pCtx->hwvirt.vmx.uEntryTick));
     LogRel(("offVirtApicWrite           = %#RX16\n",    pCtx->hwvirt.vmx.offVirtApicWrite));
+    LogRel(("fVirtNmiBlocking           = %RTbool\n",   pCtx->hwvirt.vmx.fVirtNmiBlocking));
     LogRel(("VMCS cache:\n"));
…
 }
 
+
+/**
+ * Gets the active (in use) VMCS info. object for the specified VCPU.
+ *
+ * This is either the guest or nested-guest VMCS and need not necessarily pertain to
+ * the "current" VMCS (in the VMX definition of the term). For instance, if the
+ * VM-entry failed due to an invalid-guest state, we may have "cleared" the VMCS
+ * while returning to ring-3. The VMCS info. object for that VMCS would still be
+ * active and returned so that we could dump the VMCS fields to ring-3 for
+ * diagnostics. This function is thus only used to distinguish between the
+ * nested-guest or guest VMCS.
+ *
+ * @returns The active VMCS information.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ *
+ * @thread  EMT.
+ * @remarks This function may be called with preemption or interrupts disabled!
+ */
+VMM_INT_DECL(PVMXVMCSINFO) hmGetVmxActiveVmcsInfo(PVMCPU pVCpu)
+{
+    if (!pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs)
+        return &pVCpu->hm.s.vmx.VmcsInfo;
+    return &pVCpu->hm.s.vmx.VmcsInfoNstGst;
+}
+
+
+/**
+ * Converts a VMX event type into an appropriate TRPM event type.
+ *
+ * @returns TRPM event.
+ * @param   uIntInfo    The VMX event.
+ */
+VMM_INT_DECL(TRPMEVENT) HMVmxEventToTrpmEventType(uint32_t uIntInfo)
+{
+    TRPMEVENT enmTrapType;
+    uint8_t const uType   = VMX_ENTRY_INT_INFO_TYPE(uIntInfo);
+    uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(uIntInfo);
+
+    switch (uType)
+    {
+        case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
+           enmTrapType = TRPM_HARDWARE_INT;
+           break;
+
+        case VMX_ENTRY_INT_INFO_TYPE_NMI:
+        case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
+            enmTrapType = TRPM_TRAP;
+            break;
+
+        case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:  /* INT1 (ICEBP). */
+            Assert(uVector == X86_XCPT_DB); NOREF(uVector);
+            enmTrapType = TRPM_SOFTWARE_INT;
+            break;
+
+        case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:       /* INT3 (#BP) and INTO (#OF) */
+            Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF); NOREF(uVector);
+            enmTrapType = TRPM_SOFTWARE_INT;
+            break;
+
+        case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
+            enmTrapType = TRPM_SOFTWARE_INT;
+            break;
+
+        case VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT:   /* Shouldn't really happen. */
+        default:
+            AssertMsgFailed(("Invalid trap type %#x\n", uType));
+            enmTrapType = TRPM_32BIT_HACK;
+            break;
+    }
+
+    return enmTrapType;
+}
+
+
+#ifndef IN_RC
+# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/**
+ * Notification callback for when a VM-exit happens outside VMX R0 code (e.g. in
+ * IEM).
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    Pointer to the guest-CPU context.
+ */
+VMM_INT_DECL(void) HMNotifyVmxNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    NOREF(pCtx);
+    pVCpu->hm.s.vmx.fMergedNstGstCtls = false;
+}
+# endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
+#endif /* IN_RC */
+

  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h (r77899 → r78220)

         return VERR_VMX_VMEXIT_FAILED; \
     } while (0)
-
-/** Enables/disables IEM-only EM execution policy in and from ring-3.   */
-# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
-#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcStrictRet) \
-    do { \
-        Log(("%s: Enabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
-        int rcSched = EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true); \
-        if (rcSched != VINF_SUCCESS) \
-            iemSetPassUpStatus(pVCpu, rcSched); \
-        return (a_rcStrictRet); \
-    } while (0)
-
-#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcStrictRet) \
-    do { \
-        Log(("%s: Disabling IEM-only EM execution policy!\n", (a_pszLogPrefix))); \
-        int rcSched = EMR3SetExecutionPolicy((a_pVCpu)->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false); \
-        if (rcSched != VINF_SUCCESS) \
-            iemSetPassUpStatus(pVCpu, rcSched); \
-        return (a_rcStrictRet); \
-    } while (0)
-# else
-#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcStrictRet)   do { return (a_rcRet); } while (0)
-#  define IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(a_pVCpu, a_pszLogPrefix, a_rcStrictRet)  do { return (a_rcRet); } while (0)
-# endif
 
…
      * PreemptTimerShift = 5
      * VmcsPreemptTimer  = 2 (i.e. need to decrement by 1 every 2 * RT_BIT(5) = 20000 TSC ticks)
-     * VmentryTick       = 50000 (TSC at time of VM-entry)
+     * EntryTick         = 50000 (TSC at time of VM-entry)
      *
      * CurTick   Delta    PreemptTimerVal
…
     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
     uint64_t const uCurTick        = TMCpuTickGetNoCheck(pVCpu);
-    uint64_t const uVmentryTick    = pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick;
-    uint64_t const uDelta          = uCurTick - uVmentryTick;
+    uint64_t const uEntryTick      = pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick;
+    uint64_t const uDelta          = uCurTick - uEntryTick;
     uint32_t const uVmcsPreemptVal = pVmcs->u32PreemptTimer;
     uint32_t const uPreemptTimer   = uVmcsPreemptVal
…
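
Architecturally, the VMX-preemption timer counts down by one each time a fixed low bit of the TSC changes, i.e. once every 2^PreemptTimerShift TSC ticks, so its value at any instant is the VMCS value minus (elapsed TSC >> shift), saturating at zero. The expression above is cut off by the changeset viewer; the sketch below is a self-contained rendering of that architectural rule (with its own numbers, not the example table from the comment):

    #include <assert.h>
    #include <stdint.h>

    /* Current VMX-preemption timer value given the VMCS value, the TSC at
       VM-entry, the current TSC, and the timer rate shift (a simplification
       of what this function computes; names are illustrative). */
    static uint32_t calcPreemptTimer(uint32_t uVmcsPreemptVal, uint64_t uEntryTick,
                                     uint64_t uCurTick, unsigned cShift)
    {
        uint64_t const uDelta      = uCurTick - uEntryTick;  /* TSC ticks since VM-entry */
        uint64_t const cTimerTicks = uDelta >> cShift;       /* timer ticks elapsed */
        return cTimerTicks < uVmcsPreemptVal ? uVmcsPreemptVal - (uint32_t)cTimerTicks : 0;
    }

    int main(void)
    {
        /* With PreemptTimerShift = 5 the timer ticks once every 32 TSC ticks. */
        assert(calcPreemptTimer(2, 50000, 50000, 5) == 2);   /* no time elapsed */
        assert(calcPreemptTimer(2, 50000, 50033, 5) == 1);   /* 33 TSC ticks: one timer tick */
        assert(calcPreemptTimer(2, 50000, 50200, 5) == 0);   /* long past: saturates at 0 */
        return 0;
    }
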
 
 /**
- * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
+ * Saves the guest MSRs into the VM-exit MSR-store area as part of VM-exit.
  *
  * @returns VBox status code.
…
         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
 
-    PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
+    PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea);
     Assert(pMsr);
     for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
…
     }
 
-    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
-    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
-                                      pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), cMsrs * sizeof(VMXAUTOMSR));
+    RTGCPHYS const GCPhysVmExitMsrStoreArea = pVmcs->u64AddrExitMsrStore.u;
+    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmExitMsrStoreArea,
+                                      pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea), cMsrs * sizeof(VMXAUTOMSR));
     if (RT_SUCCESS(rc))
     { /* likely */ }
     else
     {
-        AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
+        AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
     }
…
 
 /**
- * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
+ * Loads the host MSRs from the VM-exit MSR-load area as part of VM-exit.
  *
  * @returns VBox status code.
…
         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
 
-    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea));
-    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
-    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
-                                     GCPhysAutoMsrArea, cMsrs * sizeof(VMXAUTOMSR));
+    RTGCPHYS const GCPhysVmExitMsrLoadArea = pVmcs->u64AddrExitMsrLoad.u;
+    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrLoadArea),
+                                     GCPhysVmExitMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
     if (RT_SUCCESS(rc))
     {
-        PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
+        PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrLoadArea);
         Assert(pMsr);
         for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
…
     else
     {
-        AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
+        AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrLoadArea, rc));
         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
     }
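
Both VM-exit MSR areas above are copied as cMsrs * sizeof(VMXAUTOMSR) bytes of guest physical memory. Each entry of such an area is the 16-byte record the Intel spec defines for the MSR-load/store areas (32-bit MSR index, 32-bit reserved field, 64-bit value). A sketch of that layout and of addressing entry i in the guest table (struct name, field names and the base address are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* 16-byte MSR table entry per the Intel spec's VM-exit MSR-store/load area
       format (mirrors VirtualBox's VMXAUTOMSR; names here are illustrative). */
    typedef struct MSRENTRY
    {
        uint32_t u32Msr;        /* MSR index */
        uint32_t u32Reserved;   /* must be zero */
        uint64_t u64Value;      /* MSR data */
    } MSRENTRY;

    int main(void)
    {
        uint64_t const GCPhysMsrArea = UINT64_C(0x12340000);  /* hypothetical u64AddrExitMsrStore.u */
        uint32_t const idxMsr        = 3;
        /* Guest-physical address of entry 3: base + 3 * 16. */
        printf("entry %u at %#llx (entry size %zu)\n", idxMsr,
               (unsigned long long)(GCPhysMsrArea + idxMsr * sizeof(MSRENTRY)), sizeof(MSRENTRY));
        return 0;
    }
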
…
     pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
 
-    /* Revert any IEM-only nested-guest execution policy if it was set earlier, otherwise return rcStrict. */
-    IEM_VMX_R3_EXECPOLICY_IEM_ALL_DISABLE_RET(pVCpu, "VM-exit", rcStrict);
+#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+    /* Revert any IEM-only nested-guest execution policy, otherwise return rcStrict. */
+    Log(("vmexit: Disabling IEM-only EM execution policy!\n"));
+    int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
+    if (rcSched != VINF_SUCCESS)
+        iemSetPassUpStatus(pVCpu, rcSched);
+#  endif
+    return VINF_SUCCESS;
 # endif
 }
…
  * @param   offReg      The offset of the register being read.
  */
-DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
-{
-    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
-    uint8_t  const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
-    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
-    uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
+IEM_STATIC uint32_t iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+
+    uint32_t uReg;
+    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
+    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
+    if (RT_FAILURE(rc))
+    {
+        AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
+                         GCPhysVirtApic));
+        uReg = 0;
+    }
     return uReg;
 }
…
  * @param   offReg      The offset of the register being read.
  */
-DECLINLINE(uint64_t) iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
-{
-    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
-    uint8_t  const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
-    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
-    uint64_t const uReg = *(const uint64_t *)(pbVirtApic + offReg);
+IEM_STATIC uint64_t iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+
+    uint64_t uReg;
+    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
+    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
+    if (RT_FAILURE(rc))
+    {
+        AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
+                         GCPhysVirtApic));
+        uReg = 0;
+    }
     return uReg;
 }
…
  * @param   uReg        The register value to write.
  */
-DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
-{
-    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
-    uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
-    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
-    *(uint32_t *)(pbVirtApic + offReg) = uReg;
+IEM_STATIC void iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
+    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
+    if (RT_FAILURE(rc))
+    {
+        AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
+                         GCPhysVirtApic));
+    }
 }
 
…
  * @param   uReg        The register value to write.
  */
-DECLINLINE(void) iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
-{
-    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
-    uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
-    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
-    *(uint64_t *)(pbVirtApic + offReg) = uReg;
+IEM_STATIC void iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
+    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
+    if (RT_FAILURE(rc))
+    {
+        AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
+                         GCPhysVirtApic));
+    }
 }
 
…
  * @remarks This is based on our APIC device code.
  */
-DECLINLINE(void) iemVmxVirtApicSetVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
-{
-    Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
-    uint8_t       *pbBitmap     = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
-    uint16_t const offVector    = (uVector & UINT32_C(0xe0)) >> 1;
-    uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
-    ASMAtomicBitSet(pbBitmap + offVector, idxVectorBit);
+IEM_STATIC void iemVmxVirtApicSetVectorInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    uint32_t uReg;
+    uint16_t const offVector      = (uVector & UINT32_C(0xe0)) >> 1;
+    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
+    if (RT_SUCCESS(rc))
+    {
+        uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
+        uReg |= RT_BIT(idxVectorBit);
+        rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
+        if (RT_FAILURE(rc))
+        {
+            AssertMsgFailed(("Failed to set vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
+                             uVector, offReg, GCPhysVirtApic));
+        }
+    }
+    else
+    {
+        AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
+                         uVector, offReg, GCPhysVirtApic));
+    }
 }
 
…
  * @remarks This is based on our APIC device code.
  */
-DECLINLINE(void) iemVmxVirtApicClearVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
-{
-    Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
-    uint8_t       *pbBitmap     = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
-    uint16_t const offVector    = (uVector & UINT32_C(0xe0)) >> 1;
-    uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
-    ASMAtomicBitClear(pbBitmap + offVector, idxVectorBit);
+IEM_STATIC void iemVmxVirtApicClearVectorInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
+{
+    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
+    Assert(pVmcs);
+    uint32_t uReg;
+    uint16_t const offVector      = (uVector & UINT32_C(0xe0)) >> 1;
+    RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
+    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
+    if (RT_SUCCESS(rc))
+    {
+        uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
+        uReg &= ~RT_BIT(idxVectorBit);
+        rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
+        if (RT_FAILURE(rc))
+        {
+            AssertMsgFailed(("Failed to clear vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
+                             uVector, offReg, GCPhysVirtApic));
+        }
+    }
+    else
+    {
+        AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
+                         uVector, offReg, GCPhysVirtApic));
+    }
 }
 
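
The offset math shared by the helpers above encodes the xAPIC 256-bit register layout: ISR, TMR and IRR are each stored as eight 32-bit dwords at a 16-byte stride, so a vector's flag sits at byte offset ((uVector & 0xe0) >> 1) from the first dword, bit (uVector & 0x1f). A standalone sketch of that index calculation:

    #include <assert.h>
    #include <stdint.h>

    /* Byte offset (from the start of ISR0/TMR0/IRR0) and bit index for a vector
       in a 256-bit xAPIC register: 8 dwords, 16-byte stride between dwords. */
    static void vectorToRegBit(uint8_t uVector, uint16_t *poffVector, uint16_t *pidxBit)
    {
        *poffVector = (uint16_t)((uVector & UINT32_C(0xe0)) >> 1); /* (vector / 32) * 16 */
        *pidxBit    = (uint16_t)(uVector & UINT32_C(0x1f));        /* vector % 32 */
    }

    int main(void)
    {
        uint16_t off, bit;
        vectorToRegBit(0x41, &off, &bit);    /* vector 0x41 = 65 */
        assert(off == 0x20 && bit == 1);     /* dword 2 (bytes 0x20..0x23), bit 1 */
        vectorToRegBit(0xff, &off, &bit);
        assert(off == 0x70 && bit == 31);    /* last dword, top bit */
        return 0;
    }
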
…
     Assert(offReg < XAPIC_OFF_END + 4);
     Assert(pidxHighestBit);
+    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
 
     /*
…
 
     uint8_t uVector = uSvi;
-    iemVmxVirtApicClearVector(pVCpu, XAPIC_OFF_ISR0, uVector);
+    iemVmxVirtApicClearVectorInReg(pVCpu, XAPIC_OFF_ISR0, uVector);
 
     uVector = 0;
…
     uint8_t const uVector = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
     Log2(("self_ipi_virt: uVector=%#x\n", uVector));
-    iemVmxVirtApicSetVector(pVCpu, XAPIC_OFF_IRR0, uVector);
+    iemVmxVirtApicSetVectorInReg(pVCpu, XAPIC_OFF_IRR0, uVector);
     uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
     uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
…
             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
 
-        /* Read the Virtual-APIC page. */
-        Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
-        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
-                                         GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
-        if (RT_SUCCESS(rc))
-        { /* likely */ }
-        else
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
-
         /* TPR threshold without virtual-interrupt delivery. */
         if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
…
 
         /* TPR threshold and VTPR. */
-        uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
-        uint8_t const  u8VTpr     = *(pbVirtApic + XAPIC_OFF_TPR);
         if (   !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
-            && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
-            && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
-            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
+            && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
+        {
+            /* Read the VTPR from the virtual-APIC page. */
+            uint8_t u8VTpr;
+            int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &u8VTpr, GCPhysVirtApic + XAPIC_OFF_TPR, sizeof(u8VTpr));
+            if (RT_SUCCESS(rc))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
+
+            /* Bits 3:0 of the TPR-threshold must not be greater than bits 7:4 of VTPR. */
+            if ((uint8_t)RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) <= (u8VTpr & 0xf0))
+            { /* likely */ }
+            else
+                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
+        }
     }
     else
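
The new VTPR check enforces the Intel VM-entry rule that bits 3:0 of the TPR threshold must not exceed bits 7:4 of VTPR. A worked sketch of the rule written out directly on the two nibbles (the constraint being tested, not the VMM's exact expression):

    #include <assert.h>
    #include <stdint.h>

    /* Intel rule: VM-entry fails if TPR-threshold[3:0] > VTPR[7:4].
       Returns nonzero when the configuration is valid. */
    static int tprThresholdValid(uint32_t u32TprThreshold, uint8_t u8VTpr)
    {
        uint8_t const uThreshold = u32TprThreshold & 0xf;  /* bits 3:0 */
        uint8_t const uVTprHi    = u8VTpr >> 4;            /* bits 7:4, the priority class */
        return uThreshold <= uVTprHi;
    }

    int main(void)
    {
        assert( tprThresholdValid(0x5, 0x60));  /* threshold 5, VTPR class 6: OK   */
        assert(!tprThresholdValid(0x7, 0x60));  /* threshold 7, VTPR class 6: fail */
        return 0;
    }
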
     
…
 
 /**
- * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
+ * Loads the guest MSRs from the VM-entry MSR-load area as part of VM-entry.
  *
  * @returns VBox status code.
…
     }
 
-    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
-    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
-                                     GCPhysAutoMsrArea, cMsrs * sizeof(VMXAUTOMSR));
+    RTGCPHYS const GCPhysVmEntryMsrLoadArea = pVmcs->u64AddrEntryMsrLoad.u;
+    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea),
+                                     GCPhysVmEntryMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
     if (RT_SUCCESS(rc))
     {
-        PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
+        PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea);
         Assert(pMsr);
         for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
…
     else
     {
-        AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
+        AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysVmEntryMsrLoadArea, rc));
         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
     }
…
     if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
     {
-        uint64_t const uVmentryTick = TMCpuTickGetNoCheck(pVCpu);
-        pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick = uVmentryTick;
+        uint64_t const uEntryTick = TMCpuTickGetNoCheck(pVCpu);
+        pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick = uEntryTick;
         VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
 
-        Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uVmentryTick));
+        Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uEntryTick));
     }
     else
…
           break;
 
+        case VMX_ENTRY_INT_INFO_TYPE_NMI:
+        case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
+            enmTrapType = TRPM_TRAP;
+            break;
+
         case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
             enmTrapType = TRPM_SOFTWARE_INT;
             break;
 
-        case VMX_ENTRY_INT_INFO_TYPE_NMI:
-        case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:  /* ICEBP. */
         case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:       /* #BP and #OF */
-        case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
-            enmTrapType = TRPM_TRAP;
+            Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
+            enmTrapType = TRPM_SOFTWARE_INT;
+            break;
+
+        case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:  /* #DB (INT1/ICEBP). */
+            Assert(uVector == X86_XCPT_DB);
+            enmTrapType = TRPM_SOFTWARE_INT;
             break;
 
…
         TRPMSetErrorCode(pVCpu, uErrCode);
 
-    if (   uType   == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
-        && uVector == X86_XCPT_PF)
+    if (   enmTrapType == TRPM_TRAP
+        && uVector     == X86_XCPT_PF)
         TRPMSetFaultAddress(pVCpu, GCPtrFaultAddress);
-    else if (   uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
-             || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
-             || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
-    {
-        AssertMsg(   uType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
-                  || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
-                  ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uType));
+    else if (enmTrapType == TRPM_SOFTWARE_INT)
         TRPMSetInstrLength(pVCpu, cbInstr);
-    }
 
     return VINF_SUCCESS;
…
                                 {
                                     /* Reschedule to IEM-only execution of the nested-guest or return VINF_SUCCESS. */
-                                    IEM_VMX_R3_EXECPOLICY_IEM_ALL_ENABLE_RET(pVCpu, pszInstr, VINF_SUCCESS);
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+                                    Log(("%s: Enabling IEM-only EM execution policy!\n", pszInstr));
+                                    int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
+                                    if (rcSched != VINF_SUCCESS)
+                                        iemSetPassUpStatus(pVCpu, rcSched);
+# endif
+                                    return VINF_SUCCESS;
                                 }
 
…
     {
         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
+        uint32_t fMsrpm = HMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr);
         if (uExitReason == VMX_EXIT_RDMSR)
-        {
-            VMXMSREXITREAD enmRead;
-            int rc = HMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
-                                             NULL /* penmWrite */);
-            AssertRC(rc);
-            if (enmRead == VMXMSREXIT_INTERCEPT_READ)
-                return true;
-        }
-        else
-        {
-            VMXMSREXITWRITE enmWrite;
-            int rc = HMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
-                                             &enmWrite);
-            AssertRC(rc);
-            if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
-                return true;
-        }
-        return false;
+            return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_RD);
+        return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_WR);
     }
 