VirtualBox

Changeset 73606 in vbox for trunk/src/VBox/VMM


Timestamp: Aug 10, 2018 7:38:56 AM
Author: vboxsync
Message:

VMM: Nested VMX: bugref:9180 Various bits:

  • IEM: Started the VMXON, VMXOFF implementation; use IEM_OPCODE_GET_NEXT_RM.
  • IEM: Fixed INVPCID C impl, removed unused IEMExecDecodedInvpcid.
  • IEM: Updated iemCImpl_load_CrX to check for CR0/CR4 fixed bits in VMX.
  • IEM: Updated offModRm to reset/re-initialize where needed.
  • CPUM: Added VMX root mode, non-root mode and other bits, and updated a few places where they're used.
  • HM: Started adding fine-grained VMX instruction failure diagnostics.
  • HM: Made VM instruction error an enum.
  • HM: Added HMVMXAll.cpp for all context VMX code.
  • Ensure building with VBOX_WITH_NESTED_HWVIRT_[SVM|VMX] does the right thing based on host CPU.
  • CPUM: Added dumping of nested-VMX CPUMCTX state.
  • HMVMXR0: Added memory operand decoding.
  • HMVMXR0: VMX instruction privilege checks (CR0/CR4 read shadows are not consulted, so we need to do these checks ourselves).
  • HM: Added some more bit-field representations.
  • Recompiler: Refuse to run when in nested-VMX guest code.
Location: trunk/src/VBox/VMM
Files: 1 added, 20 edited

  • trunk/src/VBox/VMM/Makefile.kmk

    r73348 → r73606

@@ -175,4 +175,5 @@
         VMMAll/HMAll.cpp \
         VMMAll/HMSVMAll.cpp \
+        VMMAll/HMVMXAll.cpp \
         VMMAll/IEMAll.cpp \
         VMMAll/IEMAllAImpl.asm \
@@ -508,5 +509,5 @@
 VMMRC_DEFS      = IN_VMM_RC IN_RT_RC IN_DIS DIS_CORE_ONLY VBOX_WITH_RAW_MODE VBOX_WITH_RAW_MODE_NOT_R0 IN_SUP_RC \
        $(VMM_COMMON_DEFS)
-VMMRC_DEFS := $(filter-out VBOX_WITH_NESTED_HWVIRT_SVM,$(VMMRC_DEFS))
+VMMRC_DEFS := $(filter-out VBOX_WITH_NESTED_HWVIRT_SVM VBOX_WITH_NESTED_HWVIRT_VMX,$(VMMRC_DEFS))
 ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
  VMMRC_DEFS    += VMM_R0_SWITCH_STACK
@@ -570,6 +571,7 @@
         VMMAll/GIMAllHv.cpp \
         VMMAll/GIMAllKvm.cpp \
-        VMMAll/HMAll.cpp \
-        VMMAll/HMSVMAll.cpp \
+        VMMAll/HMAll.cpp \
+        VMMAll/HMSVMAll.cpp \
+        VMMAll/HMVMXAll.cpp \
         VMMAll/MMAll.cpp \
         VMMAll/MMAllHyper.cpp \
@@ -718,4 +720,5 @@
         VMMAll/HMAll.cpp \
         VMMAll/HMSVMAll.cpp \
+        VMMAll/HMVMXAll.cpp \
         VMMAll/IEMAll.cpp \
         VMMAll/IEMAllAImpl.asm \
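
    Note: VMMRC (the raw-mode context) now filters out both nested-hardware-virtualization defines, so the new HMVMXAll.cpp and other nested-VMX code compile only in ring-0 and ring-3. A minimal sketch of the guard pattern the later diffs use (the function body here is hypothetical):

        #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
        /* Never compiled in raw-mode context builds: VMMRC_DEFS filters the define out. */
        static void hmVmxNestedDemo(void)
        {
            /* ... nested-VMX-only code ... */
        }
        #endif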
  • trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp

    r73431 → r73606

@@ -300,10 +300,24 @@
 
 
+/**
+ * Get MSR_IA32_SMM_MONITOR_CTL value for IEM and cpumMsrRd_Ia32SmmMonitorCtl.
+ *
+ * @returns The MSR_IA32_SMM_MONITOR_CTL value.
+ * @param   pVCpu           The cross context per CPU structure.
+ */
+VMM_INT_DECL(uint64_t) CPUMGetGuestIa32SmmMonitorCtl(PVMCPU pVCpu)
+{
+    /* We do not support dual-monitor treatment for SMI and SMM. */
+    /** @todo SMM. */
+    RT_NOREF(pVCpu);
+    return 0;
+}
+
+
 /** @callback_method_impl{FNCPUMRDMSR} */
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32SmmMonitorCtl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
-    /** @todo SMM. */
-    *puValue = 0;
+    *puValue = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
     return VINF_SUCCESS;
 }
@@ -1288,21 +1302,35 @@
 
 
-/** @callback_method_impl{FNCPUMRDMSR} */
-static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxBasic(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
-{
-    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
+/**
+ * Gets IA32_VMX_BASIC for IEM and cpumMsrRd_Ia32VmxBasic.
+ *
+ * @returns IA32_VMX_BASIC value.
+ * @param   pVCpu           The cross context per CPU structure.
+ */
+VMM_INT_DECL(uint64_t) CPUMGetGuestIa32VmxBasic(PVMCPU pVCpu)
+{
     PCCPUMFEATURES pGuestFeatures = &pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures;
+    uint64_t uVmxMsr;
     if (pGuestFeatures->fVmx)
     {
-        *puValue = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,         VMX_V_VMCS_REVISION_ID        )
-                 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,       VMX_V_VMCS_SIZE               )
-                 | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH,  VMX_V_VMCS_PHYSADDR_4G_LIMIT  )
-                 | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,        0                             )
-                 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,   VMX_BASIC_MEM_TYPE_WB         )
-                 | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS,   pGuestFeatures->fVmxInsOutInfo)
-                 | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS,       0                             );
+        uVmxMsr = RT_BF_MAKE(VMX_BF_BASIC_VMCS_ID,         VMX_V_VMCS_REVISION_ID        )
+                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_SIZE,       VMX_V_VMCS_SIZE               )
+                | RT_BF_MAKE(VMX_BF_BASIC_PHYSADDR_WIDTH,  VMX_V_VMCS_PHYSADDR_4G_LIMIT  )
+                | RT_BF_MAKE(VMX_BF_BASIC_DUAL_MON,        0                             )
+                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_MEM_TYPE,   VMX_BASIC_MEM_TYPE_WB         )
+                | RT_BF_MAKE(VMX_BF_BASIC_VMCS_INS_OUTS,   pGuestFeatures->fVmxInsOutInfo)
+                | RT_BF_MAKE(VMX_BF_BASIC_TRUE_CTLS,       0                             );
     }
     else
-        *puValue = 0;
+        uVmxMsr = 0;
+    return uVmxMsr;
+}
+
+
+/** @callback_method_impl{FNCPUMRDMSR} */
+static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32VmxBasic(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
+{
+    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
+    *puValue = CPUMGetGuestIa32VmxBasic(pVCpu);
     return VINF_SUCCESS;
 }
@@ -5101,8 +5129,8 @@
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Gim(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
 {
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
     /* Raise #GP(0) like a physical CPU would since the nested-hypervisor hasn't intercept these MSRs. */
-    PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
-    if (CPUMIsGuestInNestedHwVirtMode(pCtx))
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest)
+        || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
         return VERR_CPUM_RAISE_GP_0;
 #endif
@@ -5114,8 +5142,8 @@
 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_Gim(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
 {
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
     /* Raise #GP(0) like a physical CPU would since the nested-hypervisor hasn't intercept these MSRs. */
-    PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
-    if (CPUMIsGuestInNestedHwVirtMode(pCtx))
+    if (   CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.s.Guest)
+        || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest))
         return VERR_CPUM_RAISE_GP_0;
 #endif
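
    Note: RT_BF_MAKE packs each value into the bit position named by the VMX_BF_BASIC_XXX field definitions, and the matching RT_BF_GET pulls it back out. A minimal decode sketch for the value built above (the local variable names are illustrative):

        /* Decode the IA32_VMX_BASIC value reported to the guest. */
        uint64_t const uVmxBasic    = CPUMGetGuestIa32VmxBasic(pVCpu);
        uint32_t const uVmcsRevId   = RT_BF_GET(uVmxBasic, VMX_BF_BASIC_VMCS_ID);        /* VMCS revision identifier. */
        uint32_t const cbVmcs       = RT_BF_GET(uVmxBasic, VMX_BF_BASIC_VMCS_SIZE);      /* VMXON/VMCS region size.   */
        bool     const fVmcsBelow4G = RT_BF_GET(uVmxBasic, VMX_BF_BASIC_PHYSADDR_WIDTH); /* VMCS addresses limited to 4GB. */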
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r73395 → r73606

@@ -439,58 +439,4 @@
 
 /**
- * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
- * incorrect code bytes may be fetched after a world-switch".
- *
- * @param   pu32Family      Where to store the CPU family (can be NULL).
- * @param   pu32Model       Where to store the CPU model (can be NULL).
- * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
- * @returns true if the erratum applies, false otherwise.
- */
-VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
-{
-    /*
-     * Erratum 170 which requires a forced TLB flush for each world switch:
-     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
-     *
-     * All BH-G1/2 and DH-G1/2 models include a fix:
-     * Athlon X2:   0x6b 1/2
-     *              0x68 1/2
-     * Athlon 64:   0x7f 1
-     *              0x6f 2
-     * Sempron:     0x7f 1/2
-     *              0x6f 2
-     *              0x6c 2
-     *              0x7c 2
-     * Turion 64:   0x68 2
-     */
-    uint32_t u32Dummy;
-    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
-    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
-    u32BaseFamily = (u32Version >> 8) & 0xf;
-    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
-    u32Model      = ((u32Version >> 4) & 0xf);
-    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
-    u32Stepping   = u32Version & 0xf;
-
-    bool fErratumApplies = false;
-    if (   u32Family == 0xf
-        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
-        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
-    {
-        fErratumApplies = true;
-    }
-
-    if (pu32Family)
-        *pu32Family   = u32Family;
-    if (pu32Model)
-        *pu32Model    = u32Model;
-    if (pu32Stepping)
-        *pu32Stepping = u32Stepping;
-
-    return fErratumApplies;
-}
-
-
-/**
  * Sets or clears the single instruction flag.
  *
@@ -541,94 +487,4 @@
     else
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
-}
-
-
-/**
- * VMX nested-guest VM-exit handler.
- *
- * @param   pVCpu              The cross context virtual CPU structure.
- * @param   uBasicExitReason   The basic exit reason.
- */
-VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
-{
-    RT_NOREF2(pVCpu, uBasicExitReason);
-}
-
-
-/**
- * Gets a copy of the VMX host MSRs that were read by HM during ring-0
- * initialization.
- *
- * @return VBox status code.
- * @param   pVM        The cross context VM structure.
- * @param   pVmxMsrs   Where to store the VMXMSRS struct (only valid when
- *                     VINF_SUCCESS is returned).
- *
- * @remarks Caller needs to take care not to call this function too early. Call
- *          after HM initialization is fully complete.
- */
-VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
-{
-    AssertPtrReturn(pVM,      VERR_INVALID_PARAMETER);
-    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
-    if (pVM->hm.s.vmx.fSupported)
-    {
-        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
-        return VINF_SUCCESS;
-    }
-    return VERR_VMX_NOT_SUPPORTED;
-}
-
-
-/**
- * Gets the specified VMX host MSR that was read by HM during ring-0
- * initialization.
- *
- * @return VBox status code.
- * @param   pVM        The cross context VM structure.
- * @param   idMsr      The MSR.
- * @param   puValue    Where to store the MSR value (only updated when VINF_SUCCESS
- *                     is returned).
- *
- * @remarks Caller needs to take care not to call this function too early. Call
- *          after HM initialization is fully complete.
- */
-VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
-{
-    AssertPtrReturn(pVM,     VERR_INVALID_PARAMETER);
-    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);
-
-    if (!pVM->hm.s.vmx.fSupported)
-        return VERR_VMX_NOT_SUPPORTED;
-
-    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
-    switch (idMsr)
-    {
-        case MSR_IA32_FEATURE_CONTROL:         *puValue =  pVmxMsrs->u64FeatCtrl;      break;
-        case MSR_IA32_VMX_BASIC:               *puValue =  pVmxMsrs->u64Basic;         break;
-        case MSR_IA32_VMX_PINBASED_CTLS:       *puValue =  pVmxMsrs->PinCtls.u;        break;
-        case MSR_IA32_VMX_PROCBASED_CTLS:      *puValue =  pVmxMsrs->ProcCtls.u;       break;
-        case MSR_IA32_VMX_PROCBASED_CTLS2:     *puValue =  pVmxMsrs->ProcCtls2.u;      break;
-        case MSR_IA32_VMX_EXIT_CTLS:           *puValue =  pVmxMsrs->ExitCtls.u;       break;
-        case MSR_IA32_VMX_ENTRY_CTLS:          *puValue =  pVmxMsrs->EntryCtls.u;      break;
-        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:  *puValue =  pVmxMsrs->TruePinCtls.u;    break;
-        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *puValue =  pVmxMsrs->TrueProcCtls.u;   break;
-        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:     *puValue =  pVmxMsrs->TrueEntryCtls.u;  break;
-        case MSR_IA32_VMX_TRUE_EXIT_CTLS:      *puValue =  pVmxMsrs->TrueExitCtls.u;   break;
-        case MSR_IA32_VMX_MISC:                *puValue =  pVmxMsrs->u64Misc;          break;
-        case MSR_IA32_VMX_CR0_FIXED0:          *puValue =  pVmxMsrs->u64Cr0Fixed0;     break;
-        case MSR_IA32_VMX_CR0_FIXED1:          *puValue =  pVmxMsrs->u64Cr0Fixed1;     break;
-        case MSR_IA32_VMX_CR4_FIXED0:          *puValue =  pVmxMsrs->u64Cr4Fixed0;     break;
-        case MSR_IA32_VMX_CR4_FIXED1:          *puValue =  pVmxMsrs->u64Cr4Fixed1;     break;
-        case MSR_IA32_VMX_VMCS_ENUM:           *puValue =  pVmxMsrs->u64VmcsEnum;      break;
-        case MSR_IA32_VMX_VMFUNC:              *puValue =  pVmxMsrs->u64VmFunc;        break;
-        case MSR_IA32_VMX_EPT_VPID_CAP:        *puValue =  pVmxMsrs->u64EptVpidCaps;   break;
-        default:
-        {
-            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
-            return VERR_NOT_FOUND;
-        }
-    }
-    return VINF_SUCCESS;
-}
 }
 
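
    Note: the VMX helpers removed above presumably move into the new all-context HMVMXAll.cpp (the "1 added" file, whose contents this view does not show). Assuming the signature stays as removed here, a hedged usage sketch of HMVmxGetHostMsr:

        /* Illustrative only: query a VMX capability MSR cached by HM during ring-0 init. */
        uint64_t uVmxMisc = 0;
        int rc = HMVmxGetHostMsr(pVM, MSR_IA32_VMX_MISC, &uVmxMisc);
        if (RT_SUCCESS(rc))
            LogRel(("IA32_VMX_MISC: %#RX64\n", uVmxMisc));
        else
            Assert(rc == VERR_VMX_NOT_SUPPORTED || rc == VERR_NOT_FOUND);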
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r73287 → r73606

@@ -25,6 +25,4 @@
 #include <VBox/vmm/apic.h>
 #include <VBox/vmm/gim.h>
-#include <VBox/vmm/hm.h>
 #include <VBox/vmm/iem.h>
 #include <VBox/vmm/vm.h>
-#include <VBox/vmm/hm_svm.h>
@@ -243,5 +241,60 @@
 }
 
+
+/**
+ * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
+ * incorrect code bytes may be fetched after a world-switch".
+ *
+ * @param   pu32Family      Where to store the CPU family (can be NULL).
+ * @param   pu32Model       Where to store the CPU model (can be NULL).
+ * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
+ * @returns true if the erratum applies, false otherwise.
+ */
+VMM_INT_DECL(int) HMSvmIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
+{
+    /*
+     * Erratum 170 which requires a forced TLB flush for each world switch:
+     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
+     *
+     * All BH-G1/2 and DH-G1/2 models include a fix:
+     * Athlon X2:   0x6b 1/2
+     *              0x68 1/2
+     * Athlon 64:   0x7f 1
+     *              0x6f 2
+     * Sempron:     0x7f 1/2
+     *              0x6f 2
+     *              0x6c 2
+     *              0x7c 2
+     * Turion 64:   0x68 2
+     */
+    uint32_t u32Dummy;
+    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
+    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
+    u32BaseFamily = (u32Version >> 8) & 0xf;
+    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
+    u32Model      = ((u32Version >> 4) & 0xf);
+    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
+    u32Stepping   = u32Version & 0xf;
+
+    bool fErratumApplies = false;
+    if (   u32Family == 0xf
+        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
+        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
+    {
+        fErratumApplies = true;
+    }
+
+    if (pu32Family)
+        *pu32Family   = u32Family;
+    if (pu32Model)
+        *pu32Model    = u32Model;
+    if (pu32Stepping)
+        *pu32Stepping = u32Stepping;
+
+    return fErratumApplies;
+}
+
 #endif /* !IN_RC */
+
 
 /**
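
    Note: the decode above follows the CPUID leaf 1 EAX layout: stepping in bits 3:0, base model in bits 7:4 (with the extended model from bits 19:16 prepended when the base family is 0xf), and base family in bits 11:8 (with the extended family from bits 27:20 added when the base family is 0xf). A worked example with an illustrative EAX value:

        /* Hypothetical CPUID(1).EAX for an Athlon 64 with model 0x6f, stepping 2. */
        uint32_t const u32Version    = 0x00060ff2;
        uint32_t const u32BaseFamily = (u32Version >> 8) & 0xf;                                      /* 0xf  */
        uint32_t const u32Family     = u32BaseFamily
                                     + (u32BaseFamily == 0xf ? (u32Version >> 20) & 0x7f : 0);       /* 0xf  */
        uint32_t const u32Model      = ((u32Version >> 4) & 0xf)
                                     | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0xf : 0) << 4); /* 0x6f */
        uint32_t const u32Stepping   = u32Version & 0xf;                                             /* 2    */
        /* Model 0x6f at stepping >= 2 is on the fixed list above, so the erratum does not apply. */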
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r73555 → r73606

@@ -388,22 +388,24 @@
  * Check the common VMX instruction preconditions.
  */
-#define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
+#define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
     do { \
-    { \
         if (!IEM_IS_VMX_ENABLED(a_pVCpu)) \
         { \
-            Log((RT_STR(a_Instr) ": CR4.VMXE not enabled -> #UD\n")); \
+            Log((a_szInstr ": CR4.VMXE not enabled -> #UD\n")); \
+            (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_Vmxe; \
            return iemRaiseUndefinedOpcode(a_pVCpu); \
         } \
         if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
         { \
-            Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
+            Log((a_szInstr ": Real or v8086 mode -> #UD\n")); \
+            (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_RealOrV86Mode; \
            return iemRaiseUndefinedOpcode(a_pVCpu); \
         } \
         if (IEM_IS_LONG_MODE(a_pVCpu) && !IEM_IS_64BIT_CODE(a_pVCpu)) \
         { \
-            Log((RT_STR(a_Instr) ": Long mode without 64-bit code segment -> #UD\n")); \
+            Log((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
+            (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_LongModeCS; \
            return iemRaiseUndefinedOpcode(a_pVCpu); \
         } \
-} while (0)
+    } while (0)
 
@@ -413,7 +415,19 @@
 # define IEM_IS_VMX_ENABLED(a_pVCpu)                         (CPUMIsGuestVmxEnabled(IEM_GET_CTX(a_pVCpu)))
 
+/**
+ * Check if the guest has entered VMX root operation.
+ */
+#define IEM_IS_VMX_ROOT_MODE(a_pVCpu)                        (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(pVCpu)))
+
+/**
+ * Check if the guest has entered VMX non-root operation.
+ */
+#define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu)                    (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
+
 #else
-# define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr)       do { } while (0)
+# define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix)  do { } while (0)
 # define IEM_IS_VMX_ENABLED(a_pVCpu)                         (false)
+# define IEM_IS_VMX_ROOT_MODE(a_pVCpu)                       (false)
+# define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu)                   (false)
 
 #endif
@@ -938,4 +952,9 @@
 #endif
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+IEM_STATIC VBOXSTRICTRC     iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, PCVMXEXITINSTRINFO pExitInstrInfo,
+                                        RTGCPTR GCPtrDisp);
+#endif
+
 /**
  * Sets the pass up status.
@@ -1037,4 +1056,5 @@
     pVCpu->iem.s.uRexReg            = 127;
     pVCpu->iem.s.uRexB              = 127;
+    pVCpu->iem.s.offModRm           = 127;
     pVCpu->iem.s.uRexIndex          = 127;
     pVCpu->iem.s.iEffSeg            = 127;
@@ -1196,4 +1216,5 @@
     pVCpu->iem.s.cbOpcode           = 0;
 #endif
+    pVCpu->iem.s.offModRm           = 0;
     pVCpu->iem.s.cActiveMappings    = 0;
     pVCpu->iem.s.iNextMapping       = 0;
@@ -1306,4 +1327,5 @@
     pVCpu->iem.s.offOpcode          = 0;
 #endif
+    pVCpu->iem.s.offModRm           = 0;
     Assert(pVCpu->iem.s.cActiveMappings == 0);
     pVCpu->iem.s.iNextMapping       = 0;
@@ -2434,5 +2456,5 @@
 # ifdef IEM_WITH_CODE_TLB
     uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
-    pVCpu->iem.s.offModRm = offOpcode;
+    pVCpu->iem.s.offModRm  = offBuf;
     uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
     if (RT_LIKELY(   pbBuf != NULL
@@ -2443,5 +2465,5 @@
     }
 # else
-    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
+    uintptr_t offOpcode   = pVCpu->iem.s.offOpcode;
     pVCpu->iem.s.offModRm = offOpcode;
     if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
@@ -2468,5 +2490,5 @@
     do \
     { \
-        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pu8)); \
+        VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
         if (rcStrict2 == VINF_SUCCESS) \
         { /* likely */ } \
@@ -5523,5 +5545,6 @@
             /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
             Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
-            if (!CPUMIsGuestInNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
+            if (   !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
+                && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
                 return VERR_EM_GUEST_CPU_HANG;
         }
@@ -8083,4 +8106,6 @@
             if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
                 return VINF_SUCCESS;
+            /** @todo We should probably raise #SS(0) here if segment is SS; see AMD spec.
+             *        4.12.2 "Data Limit Checks in 64-bit Mode". */
             return iemRaiseGeneralProtectionFault0(pVCpu);
         }
@@ -12547,4 +12572,20 @@
     } while (0)
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+/** This instruction raises an \#UD in real and V8086 mode or when not using a
+ *  64-bit code segment when in long mode (applicable to all VMX instructions
+ *  except VMCALL). */
+# define IEMOP_HLP_VMX_INSTR() \
+    do \
+    { \
+        if (   !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
+            && (  !IEM_IS_LONG_MODE(pVCpu) \
+                || IEM_IS_64BIT_CODE(pVCpu))) \
+        { /* likely */ } \
+        else \
+            return IEMOP_RAISE_INVALID_OPCODE(); \
+    } while (0)
+#endif
+
 /** The instruction is not available in 64-bit mode, throw \#UD if we're in
  * 64-bit mode. */
@@ -15096,26 +15137,4 @@
 
 /**
- * Interface for HM and EM to emulate the INVPCID instruction.
- *
- * @param   pVCpu               The cross context virtual CPU structure.
- * @param   cbInstr             The instruction length in bytes.
- * @param   uType               The invalidation type.
- * @param   GCPtrInvpcidDesc    The effective address of the INVPCID descriptor.
- *
- * @remarks In ring-0 not all of the state needs to be synced in.
- */
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
-{
-    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
-
-    iemInitExec(pVCpu, false /*fBypassHandlers*/);
-    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
-    Assert(!pVCpu->iem.s.cActiveMappings);
-    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
-}
-
-
-
-/**
  * Interface for HM and EM to emulate the CPUID instruction.
  *
@@ -15498,4 +15517,53 @@
 
 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
+
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+/**
+ * Interface for HM and EM to emulate the VMXOFF instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   cbInstr     The instruction length in bytes.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
+    Assert(!pVCpu->iem.s.cActiveMappings);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+
+
+/**
+ * Interface for HM and EM to emulate the VMXON instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
+ * @param   cbInstr         The instruction length in bytes.
+ * @param   GCPtrVmxon      The linear address of the VMXON pointer.
+ * @param   uExitInstrInfo  The VM-exit instruction information field.
+ * @param   GCPtrDisp       The displacement field for @a GCPtrVmxon if any.
+ * @thread  EMT(pVCpu)
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, uint32_t uExitInstrInfo,
+                                               RTGCPTR GCPtrDisp)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
+    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    PCVMXEXITINSTRINFO pExitInstrInfo = (PCVMXEXITINSTRINFO)&uExitInstrInfo;
+    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, GCPtrVmxon, pExitInstrInfo, GCPtrDisp);
+    if (pVCpu->iem.s.cActiveMappings)
+        iemMemRollback(pVCpu);
+    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
+}
+
+#endif
+
 #ifdef IN_RING3
 
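
    Note: the new a_InsDiagPrefix parameter relies on token pasting; each failure path appends a suffix such as _Vmxe to the per-instruction prefix to select a member of the new VMX diagnostics enum. Partial expansion for the VMXOFF caller shown later in this changeset:

        /* IEM_VMX_INSTR_COMMON_CHECKS(pVCpu, "vmxoff", kVmxVInstrDiag_Vmxoff) expands (first check only) to: */
        if (!IEM_IS_VMX_ENABLED(pVCpu))
        {
            Log(("vmxoff" ": CR4.VMXE not enabled -> #UD\n"));
            (pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Vmxe; /* a_InsDiagPrefix##_Vmxe */
            return iemRaiseUndefinedOpcode(pVCpu);
        }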
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r73435 → r73606

@@ -5335,4 +5335,16 @@
             }
 
+            /* Check for bits that must remain set in VMX operation. */
+            if (IEM_IS_VMX_ROOT_MODE(pVCpu))
+            {
+                uint32_t const uCr0Fixed0 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest ?
+                                            VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
+                if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
+                {
+                    Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
+                    return iemRaiseGeneralProtectionFault0(pVCpu);
+                }
+            }
+
             /** @todo check reserved PDPTR bits as AMD states. */
 
@@ -5548,4 +5560,15 @@
                 IEM_SVM_UPDATE_NRIP(pVCpu);
                 IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
+            }
+
+            /* Check for bits that must remain set in VMX operation. */
+            if (IEM_IS_VMX_ROOT_MODE(pVCpu))
+            {
+                uint32_t const uCr4Fixed0 = VMX_V_CR4_FIXED0;
+                if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
+                {
+                    Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
+                    return iemRaiseGeneralProtectionFault0(pVCpu);
+                }
             }
 
@@ -5935,8 +5958,9 @@
  * Implements INVPCID.
  *
+ * @param   iEffSeg              The segment of the invpcid descriptor.
+ * @param   GCPtrInvpcidDesc     The address of invpcid descriptor.
  * @param   uInvpcidType         The invalidation type.
- * @param   GCPtrInvpcidDesc     The effective address of invpcid descriptor.
  * @remarks Updates the RIP.
  */
-IEM_CIMPL_DEF_2(iemCImpl_invpcid, uint64_t, uInvpcidType, RTGCPTR, GCPtrInvpcidDesc)
+IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint8_t, uInvpcidType)
 {
@@ -5967,5 +5991,5 @@
      */
     RTUINT128U uDesc;
-    VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, pVCpu->iem.s.iEffSeg, GCPtrInvpcidDesc);
+    VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
     if (rcStrict == VINF_SUCCESS)
     {
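
    Note: the fixed-bits test is a plain mask check: every bit set in the fixed0 mask must also be set in the new control-register value. A worked example, assuming for illustration that VMX_V_CR0_FIXED0 is X86_CR0_PE | X86_CR0_NE | X86_CR0_PG:

        uint64_t const uCr0Fixed0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;   /* assumed mask: 0x80000021 */
        uint64_t const uNewCrX    = X86_CR0_PE | X86_CR0_NE;                /* guest attempts to clear PG */
        if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)                           /* 0x21 != 0x80000021 */
            return iemRaiseGeneralProtectionFault0(pVCpu);                  /* -> #GP(0), as above */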
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r72469 → r73606

@@ -22,5 +22,5 @@
 IEM_CIMPL_DEF_0(iemCImpl_vmcall)
 {
-    /** @todo intercept. */
+    /** @todo NSTVMX: intercept. */
 
     /* Join forces with vmmcall. */
@@ -28,2 +28,330 @@
 }
 
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+/**
+ * Implements VMSucceed for VMX instruction success.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+DECLINLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
+{
+    pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
+}
+
+
+/**
+ * Implements VMFailInvalid for VMX instruction failure.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+DECLINLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
+{
+    pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
+    pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
+}
+
+
+/**
+ * Implements VMFailValid for VMX instruction failure.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   enmInsErr   The VM instruction error.
+ */
+DECLINLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
+{
+    if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs))
+    {
+        pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
+        pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
+        /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
+        RT_NOREF(enmInsErr);
+    }
+}
+
+
+/**
+ * Implements VMFail for VMX instruction failure.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   enmInsErr   The VM instruction error.
+ */
+DECLINLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
+{
+    if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs))
+    {
+        iemVmxVmFailValid(pVCpu, enmInsErr);
+        /** @todo Set VM-instruction error field in the current virtual-VMCS.  */
+    }
+    else
+        iemVmxVmFailInvalid(pVCpu);
+}
+
+
+/**
+ * VMXON instruction execution worker.
+ *
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   cbInstr         The instruction length.
+ * @param   GCPtrVmxon      The linear address of the VMXON pointer.
+ * @param   ExitInstrInfo   The VM-exit instruction information field.
+ * @param   GCPtrDisp       The displacement field for @a GCPtrVmxon if any.
+ *
+ * @remarks Common VMX instruction checks are already expected to by the caller,
+ *          i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
+ */
+IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, PCVMXEXITINSTRINFO pExitInstrInfo,
+                                    RTGCPTR GCPtrDisp)
+{
+#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF5(pVCpu, cbInstr, GCPtrVmxon, pExitInstrInfo, GCPtrDisp);
+    return VINF_EM_RAW_EMULATE_INSTR;
+#else
+    if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
+    {
+        /* CPL. */
+        if (pVCpu->iem.s.uCpl > 0)
+        {
+            Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cpl;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* A20M (A20 Masked) mode. */
+        if (!PGMPhysIsA20Enabled(pVCpu))
+        {
+            Log(("vmxon: A20M mode -> #GP(0)\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_A20M;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* CR0 fixed bits. */
+        bool const     fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
+        uint64_t const uCr0Fixed0         = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
+        if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
+        {
+            Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr0Fixed0;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* CR4 fixed bits. */
+        if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
+        {
+            Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr4Fixed0;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* Feature control MSR's LOCK and VMXON bits. */
+        uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
+        if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
+        {
+            Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_MsrFeatCtl;
+            return iemRaiseGeneralProtectionFault0(pVCpu);
+        }
+
+        /* Get the VMXON pointer from the location specified by the source memory operand. */
+        RTGCPHYS GCPhysVmxon;
+        VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, pExitInstrInfo->InvVmxXsaves.iSegReg, GCPtrVmxon);
+        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
+        {
+            Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrMap;
+            return rcStrict;
+        }
+
+        /* VMXON region pointer alignment. */
+        if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
+        {
+            Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAlign;
+            iemVmxVmFailInvalid(pVCpu);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
+           restriction imposed by our implementation. */
+        if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
+        {
+            Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAbnormal;
+            iemVmxVmFailInvalid(pVCpu);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        /* Read the VMCS revision ID from the VMXON region. */
+        VMXVMCSREVID VmcsRevId;
+        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
+        if (RT_FAILURE(rc))
+        {
+            Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrReadPhys;
+            return rc;
+        }
+
+        /* Physical-address width. */
+        uint64_t const uMsrBasic = CPUMGetGuestIa32VmxBasic(pVCpu);
+        if (   RT_BF_GET(uMsrBasic, VMX_BF_BASIC_PHYSADDR_WIDTH)
+            && RT_HI_U32(GCPhysVmxon))
+        {
+            Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrWidth;
+            iemVmxVmFailInvalid(pVCpu);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
+        if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
+        {
+            /* Revision ID mismatch. */
+            if (!VmcsRevId.n.fIsShadowVmcs)
+            {
+                Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
+                     VmcsRevId.n.u31RevisionId));
+                pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmcsRevId;
+                iemVmxVmFailInvalid(pVCpu);
+                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+                return VINF_SUCCESS;
+            }
+
+            /* Shadow VMCS disallowed. */
+            Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
+            pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_ShadowVmcs;
+            iemVmxVmFailInvalid(pVCpu);
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+            return VINF_SUCCESS;
+        }
+
+        /*
+         * Record that we're in VMX operation, block INIT, block and disable A20M.
+         */
+        pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon    = GCPhysVmxon;
+        pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
+        /** @todo NSTVMX: init. current VMCS pointer with ~0. */
+        /** @todo NSTVMX: clear address-range monitoring. */
+        /** @todo NSTVMX: Intel PT. */
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Success;
+        iemVmxVmSucceed(pVCpu);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+        return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
+# else
+        return VINF_SUCCESS;
+# endif
+    }
+    else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
+    {
+        RT_NOREF(GCPtrDisp);
+        /** @todo NSTVMX: intercept. */
+    }
+
+    Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
+
+    /* CPL. */
+    if (pVCpu->iem.s.uCpl > 0)
+    {
+        Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRootCpl;
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    /* VMXON when already in VMX root mode. */
+    iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
+    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRoot;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
+#endif
+}
+
+
+/**
+ * Implements 'VMXON'.
+ */
+IEM_CIMPL_DEF_1(iemCImpl_vmxon, RTGCPTR, GCPtrVmxon)
+{
+    /** @todo NSTVMX: Parse ModR/M, SIB, disp.  */
+    RTGCPTR GCPtrDisp = 0;
+    VMXEXITINSTRINFO ExitInstrInfo;
+    ExitInstrInfo.u = 0;
+    ExitInstrInfo.InvVmxXsaves.u2Scaling       = 0;
+    ExitInstrInfo.InvVmxXsaves.u3AddrSize      = pVCpu->iem.s.enmEffAddrMode;
+    ExitInstrInfo.InvVmxXsaves.fIsRegOperand   = 0;
+    ExitInstrInfo.InvVmxXsaves.iSegReg         = pVCpu->iem.s.iEffSeg;
+    ExitInstrInfo.InvVmxXsaves.iIdxReg         = 0;
+    ExitInstrInfo.InvVmxXsaves.fIdxRegInvalid  = 0;
+    ExitInstrInfo.InvVmxXsaves.iBaseReg        = 0;
+    ExitInstrInfo.InvVmxXsaves.fBaseRegInvalid = 0;
+    ExitInstrInfo.InvVmxXsaves.iReg2           = 0;
+    return iemVmxVmxon(pVCpu, cbInstr, GCPtrVmxon, &ExitInstrInfo, GCPtrDisp);
+}
+
+
+/**
+ * Implements 'VMXOFF'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    IEM_VMX_INSTR_COMMON_CHECKS(pVCpu, "vmxoff", kVmxVInstrDiag_Vmxoff);
+    if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
+    {
+        Log(("vmxoff: Not in VMX root mode -> #GP(0)\n"));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_VmxRoot;
+        return iemRaiseUndefinedOpcode(pVCpu);
+    }
+
+    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
+    {
+        /** @todo NSTVMX: intercept. */
+    }
+
+    /* CPL. */
+    if (pVCpu->iem.s.uCpl > 0)
+    {
+        Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
+        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Cpl;
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    /* Dual monitor treatment of SMIs and SMM. */
+    uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
+    if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
+    {
+        iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Record that we're no longer in VMX root operation, block INIT, block and disable A20M.
+     */
+    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
+    Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
+
+    /** @todo NSTVMX: Unblock INIT. */
+    if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
+    { /** @todo NSTVMX: Unblock SMI. */ }
+    /** @todo NSTVMX: Unblock and enable A20M. */
+    /** @todo NSTVMX: Clear address-range monitoring. */
+
+    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Success;
+    iemVmxVmSucceed(pVCpu);
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
+#  else
+    return VINF_SUCCESS;
+#  endif
+# endif
+}
+
+#endif
+
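
    Note: the three helpers above implement the Intel SDM status-flag conventions for VMX instructions; nothing else in RFLAGS is touched. Summarized, with an illustrative check a caller could make:

        /* VMsucceed:     CF=0, PF=0, AF=0, ZF=0, SF=0, OF=0
           VMfailInvalid: CF=1, others 0   (no current VMCS)
           VMfailValid:   ZF=1, others 0   (error code -> VM-instruction error field) */
        uint32_t const fEfl    = pVCpu->cpum.GstCtx.eflags.u32;
        bool const     fFailed = RT_BOOL(fEfl & (X86_EFL_CF | X86_EFL_ZF));   /* illustrative */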
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h

    r70612 → r73606

@@ -317,20 +317,24 @@
         if (pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT)
         {
-            IEM_MC_BEGIN(2, 0);
-            IEM_MC_ARG(uint64_t, uInvpcidType,     0);
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t,  iEffSeg,          0);
             IEM_MC_ARG(RTGCPTR,  GCPtrInvpcidDesc, 1);
+            IEM_MC_ARG(uint64_t, uInvpcidType,     2);
             IEM_MC_FETCH_GREG_U64(uInvpcidType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
             IEM_MC_CALC_RM_EFF_ADDR(GCPtrInvpcidDesc, bRm, 0);
-            IEM_MC_CALL_CIMPL_2(iemCImpl_invpcid, uInvpcidType, GCPtrInvpcidDesc);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invpcid, iEffSeg, GCPtrInvpcidDesc, uInvpcidType);
             IEM_MC_END();
         }
         else
         {
-            IEM_MC_BEGIN(2, 0);
-            IEM_MC_ARG(uint32_t, uInvpcidType,     0);
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_ARG(uint8_t,  iEffSeg,          0);
             IEM_MC_ARG(RTGCPTR,  GCPtrInvpcidDesc, 1);
+            IEM_MC_ARG(uint32_t, uInvpcidType,     2);
             IEM_MC_FETCH_GREG_U32(uInvpcidType, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
             IEM_MC_CALC_RM_EFF_ADDR(GCPtrInvpcidDesc, bRm, 0);
-            IEM_MC_CALL_CIMPL_2(iemCImpl_invpcid, uInvpcidType, GCPtrInvpcidDesc);
+            IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+            IEM_MC_CALL_CIMPL_3(iemCImpl_invpcid, iEffSeg, GCPtrInvpcidDesc, uInvpcidType);
             IEM_MC_END();
         }
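
    Note: iEffSeg now travels explicitly into the C implementation, which fetches the 16-byte INVPCID descriptor from iEffSeg:GCPtrInvpcidDesc. Per the Intel SDM the descriptor holds the PCID in bits 11:0 (bits 63:12 must be zero) and a linear address in bits 127:64; a decode sketch using IPRT's RTUINT128U:

        RTUINT128U uDesc;                                   /* filled by iemMemFetchDataU128() as in the C impl */
        uint16_t const uPcid       = uDesc.s.Lo & 0xfff;    /* bits  11:0  - PCID */
        uint64_t const uReserved   = uDesc.s.Lo >> 12;      /* bits  63:12 - must be zero */
        uint64_t const GCPtrLinear = uDesc.s.Hi;            /* bits 127:64 - linear address */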
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r72522 → r73606

@@ -264,4 +264,12 @@
 
 /** Opcode 0x0f 0x01 /0. */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+FNIEMOP_DEF(iemOp_Grp7_vmxoff)
+{
+    IEMOP_MNEMONIC(vmxoff, "vmxoff");
+    IEMOP_HLP_DONE_DECODING();
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmxoff);
+}
+#else
 FNIEMOP_DEF(iemOp_Grp7_vmxoff)
 {
@@ -269,4 +277,5 @@
     return IEMOP_RAISE_INVALID_OPCODE();
 }
+#endif
 
 
@@ -8418,5 +8427,20 @@
 
 /** Opcode 0xf3 0x0f 0xc7 !11/6. */
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+FNIEMOP_DEF_1(iemOp_Grp9_vmxon_Mq, uint8_t, bRm)
+{
+    IEMOP_MNEMONIC(vmxon, "vmxon");
+    IEMOP_HLP_VMX_INSTR();
+    IEM_MC_BEGIN(1, 0);
+    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 0);
+    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+    IEMOP_HLP_DONE_DECODING();
+    IEM_MC_CALL_CIMPL_1(iemCImpl_vmxon, GCPtrEffSrc);
+    IEM_MC_END();
+    return VINF_SUCCESS;
+}
+#else
 FNIEMOP_UD_STUB_1(iemOp_Grp9_vmxon_Mq, uint8_t, bRm);
+#endif
 
 /** Opcode [0xf3] 0x0f 0xc7 !11/7. */
@@ -8464,5 +8488,5 @@
 FNIEMOP_DEF(iemOp_Grp9)
 {
-    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_RM(&bRm);
     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
         /* register, register */
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r73293 → r73606

@@ -30,4 +30,5 @@
 #include <VBox/vmm/iom.h>
 #include <VBox/vmm/tm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/gim.h>
 #include <VBox/vmm/apic.h>
@@ -711,5 +712,5 @@
     uint32_t u32Model;
     uint32_t u32Stepping;
-    if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
+    if (HMSvmIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
     {
         Log4Func(("AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h

    r72967 → r73606

@@ -21,9 +21,5 @@
 #include <VBox/cdefs.h>
 #include <VBox/types.h>
-#include <VBox/vmm/em.h>
-#include <VBox/vmm/stam.h>
-#include <VBox/dis.h>
 #include <VBox/vmm/hm.h>
-#include <VBox/vmm/pgm.h>
 #include <VBox/vmm/hm_svm.h>
 
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r73437 → r73606

@@ -32,4 +32,5 @@
 #include <VBox/vmm/selm.h>
 #include <VBox/vmm/tm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/gim.h>
 #include <VBox/vmm/apic.h>
@@ -196,98 +197,60 @@
 {
     /** The host's rflags/eflags. */
-    RTCCUINTREG     fEFlags;
+    RTCCUINTREG         fEFlags;
 #if HC_ARCH_BITS == 32
-    uint32_t        u32Alignment0;
+    uint32_t            u32Alignment0;
 #endif
     /** The guest's TPR value used for TPR shadowing. */
-    uint8_t         u8GuestTpr;
+    uint8_t             u8GuestTpr;
     /** Alignment. */
-    uint8_t         abAlignment0[7];
+    uint8_t             abAlignment0[7];
 
     /** The basic VM-exit reason. */
-    uint16_t        uExitReason;
+    uint16_t            uExitReason;
     /** Alignment. */
-    uint16_t        u16Alignment0;
+    uint16_t            u16Alignment0;
     /** The VM-exit interruption error code. */
-    uint32_t        uExitIntErrorCode;
+    uint32_t            uExitIntErrorCode;
     /** The VM-exit exit code qualification. */
-    uint64_t        uExitQualification;
+    uint64_t            uExitQual;
 
     /** The VM-exit interruption-information field. */
-    uint32_t        uExitIntInfo;
+    uint32_t            uExitIntInfo;
     /** The VM-exit instruction-length field. */
-    uint32_t        cbInstr;
+    uint32_t            cbInstr;
     /** The VM-exit instruction-information field. */
-    union
-    {
-        /** Plain unsigned int representation. */
-        uint32_t    u;
-        /** INS and OUTS information. */
-        struct
-        {
-            uint32_t    u7Reserved0 : 7;
-            /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
-            uint32_t    u3AddrSize  : 3;
-            uint32_t    u5Reserved1 : 5;
-            /** The segment register (X86_SREG_XXX). */
-            uint32_t    iSegReg     : 3;
-            uint32_t    uReserved2  : 14;
-        } StrIo;
-        /** INVEPT, INVVPID, INVPCID information.  */
-        struct
-        {
-            /** Scaling; 0=no scaling, 1=scale-by-2, 2=scale-by-4, 3=scale-by-8. */
-            uint32_t    u2Scaling     : 2;
-            uint32_t    u5Reserved0   : 5;
-            /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
-            uint32_t    u3AddrSize    : 3;
-            uint32_t    u1Reserved0   : 1;
-            uint32_t    u4Reserved0   : 4;
-            /** The segment register (X86_SREG_XXX). */
-            uint32_t    iSegReg       : 3;
-            /** The index register (X86_GREG_XXX). */
-            uint32_t    iIdxReg       : 4;
-            /** Set if index register is invalid. */
-            uint32_t    fIdxRegValid  : 1;
-            /** The base register (X86_GREG_XXX). */
-            uint32_t    iBaseReg      : 4;
-            /** Set if base register is invalid. */
-            uint32_t    fBaseRegValid : 1;
-            /** Register 2 (X86_GREG_XXX). */
-            uint32_t    iReg2         : 4;
-        } Inv;
-    }               ExitInstrInfo;
+    VMXEXITINSTRINFO    ExitInstrInfo;
     /** Whether the VM-entry failed or not. */
-    bool            fVMEntryFailed;
+    bool                fVMEntryFailed;
     /** Alignment. */
-    uint8_t         abAlignment1[3];
+    uint8_t             abAlignment1[3];
 
     /** The VM-entry interruption-information field. */
-    uint32_t        uEntryIntInfo;
+    uint32_t            uEntryIntInfo;
     /** The VM-entry exception error code field. */
-    uint32_t        uEntryXcptErrorCode;
+    uint32_t            uEntryXcptErrorCode;
     /** The VM-entry instruction length field. */
-    uint32_t        cbEntryInstr;
+    uint32_t            cbEntryInstr;
 
     /** IDT-vectoring information field. */
-    uint32_t        uIdtVectoringInfo;
+    uint32_t            uIdtVectoringInfo;
     /** IDT-vectoring error code. */
-    uint32_t        uIdtVectoringErrorCode;
+    uint32_t            uIdtVectoringErrorCode;
 
     /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
-    uint32_t        fVmcsFieldsRead;
+    uint32_t            fVmcsFieldsRead;
 
     /** Whether the guest debug state was active at the time of VM-exit. */
-    bool            fWasGuestDebugStateActive;
+    bool                fWasGuestDebugStateActive;
     /** Whether the hyper debug state was active at the time of VM-exit. */
-    bool            fWasHyperDebugStateActive;
+    bool                fWasHyperDebugStateActive;
     /** Whether TSC-offsetting should be setup before VM-entry. */
-    bool            fUpdateTscOffsettingAndPreemptTimer;
+    bool                fUpdateTscOffsettingAndPreemptTimer;
     /** Whether the VM-exit was caused by a page-fault during delivery of a
      *  contributory exception or a page-fault. */
-    bool            fVectoringDoublePF;
+    bool                fVectoringDoublePF;
     /** Whether the VM-exit was caused by a page-fault during delivery of an
      *  external interrupt or NMI. */
-    bool            fVectoringPF;
+    bool                fVectoringPF;
 } VMXTRANSIENT;
 AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason,               sizeof(uint64_t));
@@ -404,4 +367,15 @@
 static FNVMXEXITHANDLER     hmR0VmxExitRdpmc;
 static FNVMXEXITHANDLER     hmR0VmxExitVmcall;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+static FNVMXEXITHANDLER     hmR0VmxExitVmclear;
+static FNVMXEXITHANDLER     hmR0VmxExitVmlaunch;
+static FNVMXEXITHANDLER     hmR0VmxExitVmptrld;
+static FNVMXEXITHANDLER     hmR0VmxExitVmptrst;
+static FNVMXEXITHANDLER     hmR0VmxExitVmread;
+static FNVMXEXITHANDLER     hmR0VmxExitVmresume;
+static FNVMXEXITHANDLER     hmR0VmxExitVmwrite;
+static FNVMXEXITHANDLER     hmR0VmxExitVmxoff;
+static FNVMXEXITHANDLER     hmR0VmxExitVmxon;
+#endif
 static FNVMXEXITHANDLER     hmR0VmxExitRdtsc;
 static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
@@ -473,4 +447,15 @@
  /* 17  VMX_EXIT_RSM                     */  hmR0VmxExitRsm,
  /* 18  VMX_EXIT_VMCALL                  */  hmR0VmxExitVmcall,
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ /* 19  VMX_EXIT_VMCLEAR                 */  hmR0VmxExitVmclear,
+ /* 20  VMX_EXIT_VMLAUNCH                */  hmR0VmxExitVmlaunch,
+ /* 21  VMX_EXIT_VMPTRLD                 */  hmR0VmxExitVmptrld,
+ /* 22  VMX_EXIT_VMPTRST                 */  hmR0VmxExitVmptrst,
+ /* 23  VMX_EXIT_VMREAD                  */  hmR0VmxExitVmread,
+ /* 24  VMX_EXIT_VMRESUME                */  hmR0VmxExitVmresume,
+ /* 25  VMX_EXIT_VMWRITE                 */  hmR0VmxExitVmwrite,
+ /* 26  VMX_EXIT_VMXOFF                  */  hmR0VmxExitVmxoff,
+ /* 27  VMX_EXIT_VMXON                   */  hmR0VmxExitVmxon,
+#else
  /* 19  VMX_EXIT_VMCLEAR                 */  hmR0VmxExitSetPendingXcptUD,
  /* 20  VMX_EXIT_VMLAUNCH                */  hmR0VmxExitSetPendingXcptUD,
@@ -482,4 +467,5 @@
  /* 26  VMX_EXIT_VMXOFF                  */  hmR0VmxExitSetPendingXcptUD,
  /* 27  VMX_EXIT_VMXON                   */  hmR0VmxExitSetPendingXcptUD,
+#endif
  /* 28  VMX_EXIT_MOV_CRX                 */  hmR0VmxExitMovCRx,
  /* 29  VMX_EXIT_MOV_DRX                 */  hmR0VmxExitMovDRx,
@@ -719,9 +705,9 @@
  * @param   pVmxTransient   Pointer to the VMX transient structure.
  */
-DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+DECLINLINE(int) hmR0VmxReadExitQualVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
 {
     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
     {
-        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
+        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual); NOREF(pVCpu);
         AssertRCReturn(rc, rc);
         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
@@ -4999,5 +4985,5 @@
             int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
             rc    |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
-            rc    |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+            rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
             AssertRC(rc);
 
@@ -5009,3 +4995,2 @@
                 Log4(("uExitReason        %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
                      pVmxTransient->uExitReason));
-                Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
     4997                Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));
    50124998                Log4(("InstrError         %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
    50134999                if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
     
    57885774DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu)
    57895775{
    5790     uint32_t const u32IntInfo = X86_XCPT_DF | VMX_EXIT_INT_INFO_VALID
    5791                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)
    5792                               | VMX_EXIT_INT_INFO_ERROR_CODE_VALID;
     5776    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DF)
     5777                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5778                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
     5779                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    57935780    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo,  0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    57945781}
     
    58025789DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu)
    58035790{
    5804     uint32_t const u32IntInfo  = X86_XCPT_UD | VMX_EXIT_INT_INFO_VALID
    5805                                | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
     5791    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
     5792                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5793                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
     5794                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    58065795    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    58075796}
     
    58155804DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu)
    58165805{
    5817     uint32_t const u32IntInfo = X86_XCPT_DB | VMX_EXIT_INT_INFO_VALID
    5818                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
     5806    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DB)
     5807                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5808                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
     5809                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    58195810    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    58205811}
     
    58305821DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, uint32_t cbInstr)
    58315822{
    5832     uint32_t const u32IntInfo  = X86_XCPT_OF | VMX_EXIT_INT_INFO_VALID
    5833                                | (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
     5823    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_OF)
     5824                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_SW_INT)
     5825                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
     5826                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    58345827    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    58355828}
     5829
     5830
     5831/**
     5832 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
     5833 *
     5834 * @param   pVCpu           The cross context virtual CPU structure.
     5835 * @param   u32ErrCode      The error code for the general-protection exception.
     5836 */
     5837DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, uint32_t u32ErrCode)
     5838{
     5839    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
     5840                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5841                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
     5842                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     5843    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
     5844}
     5845
     5846
     5847/**
     5848 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
     5849 *
     5850 * @param   pVCpu           The cross context virtual CPU structure.
     5851 * @param   u32ErrCode      The error code for the stack exception.
     5852 */
     5853DECLINLINE(void) hmR0VmxSetPendingXcptSS(PVMCPU pVCpu, uint32_t u32ErrCode)
     5854{
     5855    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_SS)
     5856                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5857                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
     5858                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     5859    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
     5860}
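
These RT_BF_MAKE conversions all build the same 32-bit VM-entry interruption-information value the old shift-and-or code produced. A minimal sketch of what hmR0VmxSetPendingXcptGP() composes, assuming the Intel-documented field layout (vector in bits 7:0, type in bits 10:8, deliver-error-code in bit 11, valid in bit 31) and using plain shifts instead of the VBox bit-field macros:

    uint32_t const uVector  = 13;                  /* X86_XCPT_GP */
    uint32_t const uType    = 3;                   /* hardware exception */
    uint32_t const uIntInfo = uVector              /* bits 7:0  - vector */
                            | (uType << 8)         /* bits 10:8 - type */
                            | (UINT32_C(1) << 11)  /* bit 11    - deliver error code */
                            | (UINT32_C(1) << 31); /* bit 31    - valid */
    /* uIntInfo == 0x80000b0d, matching the RT_BF_MAKE composition above. */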
     5861
     5862
     5863#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     5864
     5865/**
     5866 * Decodes the memory operand of a VM-exit due to instruction execution.
     5867 *
     5868 * For instructions with two operands, the second operand is usually found in the
     5869 * VM-exit qualification field.
     5870 *
     5871 * @returns Strict VBox status code (i.e. informational status codes too).
     5872 * @retval  VINF_SUCCESS if the operand was successfully decoded.
     5873 * @retval  VINF_HM_PENDING_XCPT if an exception was raised while decoding the
     5874 *          operand.
     5875 * @param   pVCpu           The cross context virtual CPU structure.
     5876 * @param   pExitInstrInfo  Pointer to the VM-exit instruction information.
     5877 * @param   fIsWrite        Whether the operand is a destination memory operand
     5878 *                          (i.e. writeable memory location) or not.
     5879 * @param   GCPtrDisp       The instruction displacement field, if any. For
     5880 *                          RIP-relative addressing pass RIP + displacement here.
     5881 * @param   pGCPtrMem       Where to store the destination memory operand.
     5882 */
     5883static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, PCVMXEXITINSTRINFO pExitInstrInfo, RTGCPTR GCPtrDisp, bool fIsWrite,
     5884                                            PRTGCPTR pGCPtrMem)
     5885{
     5886    Assert(pExitInstrInfo);
     5887    Assert(pGCPtrMem);
     5888    Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
     5889
     5890    static uint64_t const s_auAddrSizeMasks[]   = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
     5891    static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
     5892    AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
     5893
     5894    uint8_t const   uAddrSize     =  pExitInstrInfo->InvVmxXsaves.u3AddrSize;
     5895    uint8_t const   iSegReg       =  pExitInstrInfo->InvVmxXsaves.iSegReg;
     5896    bool const      fIdxRegValid  = !pExitInstrInfo->InvVmxXsaves.fIdxRegInvalid;
     5897    uint8_t const   iIdxReg       =  pExitInstrInfo->InvVmxXsaves.iIdxReg;
     5898    uint8_t const   uScale        =  pExitInstrInfo->InvVmxXsaves.u2Scaling;
     5899    bool const      fBaseRegValid = !pExitInstrInfo->InvVmxXsaves.fBaseRegInvalid;
     5900    uint8_t const   iBaseReg      =  pExitInstrInfo->InvVmxXsaves.iBaseReg;
     5901    bool const      fIsMemOperand = !pExitInstrInfo->InvVmxXsaves.fIsRegOperand;
     5902    bool const      fIsLongMode   =  CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
     5903
     5904    /*
     5905     * Validate instruction information.
      5906     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
     5907     */
     5908    AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
     5909                          ("Invalid address size. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_1);
     5910    AssertLogRelMsgReturn(iSegReg  < X86_SREG_COUNT,
     5911                          ("Invalid segment register. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_2);
     5912    AssertLogRelMsgReturn(fIsMemOperand,
     5913                          ("Expected memory operand. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_3);
     5914
     5915    /*
     5916     * Compute the complete effective address.
     5917     *
     5918     * See AMD instruction spec. 1.4.2 "SIB Byte Format"
     5919     * See AMD spec. 4.5.2 "Segment Registers".
     5920     */
     5921    RTGCPTR GCPtrMem  = GCPtrDisp;
     5922    if (fBaseRegValid)
     5923        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
     5924    if (fIdxRegValid)
     5925        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
     5926
     5927    RTGCPTR const GCPtrOff = GCPtrMem;
     5928    if (   !fIsLongMode
     5929        || iSegReg >= X86_SREG_FS)
     5930        GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
     5931    GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
     5932
     5933    /*
     5934     * Validate effective address.
     5935     * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
     5936     */
     5937    uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
     5938    Assert(cbAccess > 0);
     5939    if (fIsLongMode)
     5940    {
     5941        if (X86_IS_CANONICAL(GCPtrMem))
     5942        {
     5943            *pGCPtrMem = GCPtrMem;
     5944            return VINF_SUCCESS;
     5945        }
     5946
     5947        Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
     5948        hmR0VmxSetPendingXcptGP(pVCpu, 0);
     5949        return VINF_HM_PENDING_XCPT;
     5950    }
     5951
     5952    /*
     5953     * This is a watered down version of iemMemApplySegment().
     5954     * Parts that are not applicable for VMX instructions like real-or-v8086 mode
     5955     * and segment CPL/DPL checks are skipped.
     5956     */
     5957    RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
     5958    RTGCPTR32 const GCPtrLast32  = GCPtrFirst32 + cbAccess - 1;
     5959    PCCPUMSELREG    pSel         = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
     5960
     5961    /* Check if the segment is present and usable. */
     5962    if (    pSel->Attr.n.u1Present
     5963        && !pSel->Attr.n.u1Unusable)
     5964    {
     5965        Assert(pSel->Attr.n.u1DescType);
     5966        if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
     5967        {
     5968            /* Check permissions for the data segment. */
     5969            if (   fIsWrite
     5970                && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
     5971            {
     5972                Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
     5973                hmR0VmxSetPendingXcptGP(pVCpu, iSegReg);
     5974                return VINF_HM_PENDING_XCPT;
     5975            }
     5976
     5977            /* Check limits if it's a normal data segment. */
     5978            if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
     5979            {
     5980                if (   GCPtrFirst32 > pSel->u32Limit
     5981                    || GCPtrLast32  > pSel->u32Limit)
     5982                {
      5983                    Log4Func(("Data segment limit exceeded. "
     5984                              "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
     5985                              GCPtrLast32, pSel->u32Limit));
     5986                    if (iSegReg == X86_SREG_SS)
     5987                        hmR0VmxSetPendingXcptSS(pVCpu, 0);
     5988                    else
     5989                        hmR0VmxSetPendingXcptGP(pVCpu, 0);
     5990                    return VINF_HM_PENDING_XCPT;
     5991                }
     5992            }
     5993            else
     5994            {
     5995               /* Check limits if it's an expand-down data segment.
     5996                  Note! The upper boundary is defined by the B bit, not the G bit! */
     5997               if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
     5998                   || GCPtrLast32  > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
     5999               {
      6000                   Log4Func(("Expand-down data segment limit exceeded. "
     6001                             "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
     6002                             GCPtrLast32, pSel->u32Limit));
     6003                   if (iSegReg == X86_SREG_SS)
     6004                       hmR0VmxSetPendingXcptSS(pVCpu, 0);
     6005                   else
     6006                       hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6007                   return VINF_HM_PENDING_XCPT;
     6008               }
     6009            }
     6010        }
     6011        else
     6012        {
     6013            /* Check permissions for the code segment. */
     6014            if (   fIsWrite
     6015                || !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ))
     6016            {
     6017                Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
     6018                Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
     6019                hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6020                return VINF_HM_PENDING_XCPT;
     6021            }
     6022
     6023            /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
     6024            if (   GCPtrFirst32 > pSel->u32Limit
     6025                || GCPtrLast32  > pSel->u32Limit)
     6026            {
     6027                Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
     6028                          GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
     6029                if (iSegReg == X86_SREG_SS)
     6030                    hmR0VmxSetPendingXcptSS(pVCpu, 0);
     6031                else
     6032                    hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6033                return VINF_HM_PENDING_XCPT;
     6034            }
     6035        }
     6036    }
     6037    else
     6038    {
     6039        Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
     6040        hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6041        return VINF_HM_PENDING_XCPT;
     6042    }
     6043
     6044    *pGCPtrMem = GCPtrMem;
     6045    return VINF_SUCCESS;
     6046}
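
The effective-address computation in hmR0VmxDecodeMemOperand() is the usual base + index * 2^scale + displacement form, truncated to the instruction's address size and offset by the segment base outside 64-bit mode (or for FS/GS). A self-contained sketch of just that arithmetic, with hypothetical inputs standing in for the decoded VM-exit instruction-information fields:

    #include <stdint.h>

    /* Sketch only; the real inputs come from VMXEXITINSTRINFO and CPUMCTX. */
    static uint64_t calcEffAddr(uint64_t uBase, uint64_t uIndex, uint8_t uScale,
                                int64_t iDisp, uint64_t uSegBase, uint8_t uAddrSize)
    {
        static uint64_t const s_auMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff),
                                              UINT64_C(0xffffffffffffffff) };
        uint64_t uAddr = (uint64_t)iDisp + uBase + (uIndex << uScale); /* base + index*2^scale + disp */
        uAddr += uSegBase;                    /* segment base, when applicable */
        return uAddr & s_auMasks[uAddrSize];  /* 0=16-bit, 1=32-bit, 2=64-bit */
    }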
     6047
     6048
     6049/**
     6050 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
     6051 * guest attempting to execute a VMX instruction.
     6052 *
     6053 * @returns Strict VBox status code (i.e. informational status codes too).
     6054 * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
     6055 * @retval  VINF_HM_PENDING_XCPT if an exception was raised.
     6056 *
     6057 * @param   pVCpu           The cross context virtual CPU structure.
     6058 * @param   pVmxTransient   Pointer to the VMX transient structure.
     6059 *
     6060 * @todo    NstVmx: Document other error codes when VM-exit is implemented.
     6061 * @remarks No-long-jump zone!!!
     6062 */
     6063static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     6064{
     6065    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
     6066                              | CPUMCTX_EXTRN_HWVIRT);
     6067
     6068    if (   CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx)
     6069        || (    CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
     6070            && !CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
     6071    {
     6072        Log4Func(("In real/v86-mode or long-mode outside 64-bit code segment -> #UD\n"));
     6073        hmR0VmxSetPendingXcptUD(pVCpu);
     6074        return VINF_HM_PENDING_XCPT;
     6075    }
     6076
     6077    if (pVmxTransient->uExitReason == VMX_EXIT_VMXON)
     6078    {
     6079        /*
     6080         * We check CR4.VMXE because it is required to be always set while in VMX operation
     6081         * by physical CPUs and our CR4 read shadow is only consulted when executing specific
     6082         * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
     6083         * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
     6084         */
     6085        if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
     6086        {
     6087            Log4Func(("CR4.VMXE is not set -> #UD\n"));
     6088            hmR0VmxSetPendingXcptUD(pVCpu);
     6089            return VINF_HM_PENDING_XCPT;
     6090        }
     6091    }
     6092    else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
     6093    {
     6094        /*
      6095         * The guest has not entered VMX operation but attempted to execute a VMX instruction
      6096         * (other than VMXON), so we need to raise a #UD.
     6097         */
     6098        Log4Func(("Not in VMX root mode -> #UD\n"));
     6099        hmR0VmxSetPendingXcptUD(pVCpu);
     6100        return VINF_HM_PENDING_XCPT;
     6101    }
     6102
     6103    if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
     6104    {
     6105        /*
      6106         * The nested-guest attempted to execute a VMX instruction; cause a VM-exit and let
     6107         * the guest hypervisor deal with it.
     6108         */
     6109        /** @todo NSTVMX: Trigger a VM-exit */
     6110    }
     6111
     6112    /*
     6113     * VMX instructions require CPL 0 except in VMX non-root mode where the VM-exit intercept
      6114     * (above) takes precedence over the CPL check.
     6115     */
     6116    if (CPUMGetGuestCPL(pVCpu) > 0)
     6117    {
     6118        Log4Func(("CPL > 0 -> #GP(0)\n"));
     6119        hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6120        return VINF_HM_PENDING_XCPT;
     6121    }
     6122
     6123    return VINF_SUCCESS;
     6124}
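
Once the nested-guest VM-exit handlers below are filled in, the expected call pattern is to run these checks first and bail out while an exception is pending; a hypothetical handler prologue (the exact return plumbing is not final):

    /* Sketch: prologue of a VMX-instruction VM-exit handler. */
    VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToVmxInstr(pVCpu, pVmxTransient);
    if (rcStrict == VINF_HM_PENDING_XCPT)
        return VINF_SUCCESS;   /* #UD/#GP queued; injected on the next VM-entry. */
    /* ...decode the memory operand via hmR0VmxDecodeMemOperand() and emulate in IEM... */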
     6125
     6126#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    58366127
    58376128
     
    58616152
    58626153    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    5863     if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
     6154    if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
    58646155    {
    58656156        uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
     
    81908481    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    81918482
     8483#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_ONLY_IN_IEM
      8484    Log2(("hmR0VmxPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
     8485    return VINF_EM_RESCHEDULE_REM;
     8486#endif
     8487
    81928488#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    81938489    PGMRZDynMapFlushAutoSet(pVCpu);
     
    93199615        case VMX_EXIT_VMXON:            SET_BOTH(VMX_VMXON); break;
    93209616        case VMX_EXIT_MOV_CRX:
    9321             hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    9322             if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
     9617            hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     9618            if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
    93239619                SET_BOTH(CRX_READ);
    93249620            else
    93259621                SET_BOTH(CRX_WRITE);
    9326             uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQualification);
     9622            uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
    93279623            break;
    93289624        case VMX_EXIT_MOV_DRX:
    9329             hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    9330             if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification)
     9625            hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     9626            if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
    93319627                == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
    93329628                SET_BOTH(DRX_READ);
    93339629            else
    93349630                SET_BOTH(DRX_WRITE);
    9335             uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification);
     9631            uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
    93369632            break;
    93379633        case VMX_EXIT_RDMSR:            SET_BOTH(RDMSR); break;
     
    94089704    if (fDtrace1 || fDtrace2)
    94099705    {
    9410         hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9706        hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    94119707        hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    94129708        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     
    95939889    else
    95949890    {
    9595         hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9891        hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    95969892        int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    95979893        AssertRC(rc);
    9598         VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
     9894        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
    95999895    }
    96009896
     
    1083311129}
    1083411130
     11131
     11132/** @name VM-exit handlers.
     11133 * @{
     11134 */
    1083511135/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    1083611136/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
    1083711137/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    10838 
    10839 /** @name VM-exit handlers.
    10840  * @{
    10841  */
    1084211138
    1084311139/**
     
    1096111257                        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    1096211258                        AssertRCReturn(rc, rc);
    10963                         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
     11259                        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
    1096411260                                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
    1096511261                                               0 /* GCPtrFaultAddress */);
     
    1127011566    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
    1127111567
    11272     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     11568    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1127311569    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1127411570    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
    1127511571    AssertRCReturn(rc, rc);
    1127611572
    11277     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQualification);
     11573    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQual);
    1127811574
    1127911575    if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
     
    1128511581    }
    1128611582    else
    11287         AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) sttus: %Rrc\n",
    11288                          pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict)));
      11583        AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
     11584                         VBOXSTRICTRC_VAL(rcStrict)));
    1128911585    return rcStrict;
    1129011586}
     
    1188812184    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
    1188912185
    11890     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12186    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1189112187    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1189212188    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     
    1189512191    VBOXSTRICTRC rcStrict;
    1189612192    PVM pVM  = pVCpu->CTX_SUFF(pVM);
    11897     RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
    11898     uint32_t const uAccessType           = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification);
     12193    RTGCUINTPTR const uExitQual = pVmxTransient->uExitQual;
     12194    uint32_t const uAccessType  = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
    1189912195    switch (uAccessType)
    1190012196    {
     
    1190212198        {
    1190312199            uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
    11904             rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
    11905                                                  VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
    11906                                                  VMX_EXIT_QUAL_CRX_GENREG(uExitQualification));
     12200            rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
     12201                                                 VMX_EXIT_QUAL_CRX_GENREG(uExitQual));
    1190712202            AssertMsg(   rcStrict == VINF_SUCCESS
    1190812203                      || rcStrict == VINF_IEM_RAISED_XCPT
    1190912204                      || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1191012205
    11911             switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
     12206            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
    1191212207            {
    1191312208                case 0:
     
    1191612211                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    1191712212                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
    11918                     Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
     12213                    Log4Func(("CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
    1191912214
    1192012215                    /*
     
    1193512230                    {
    1193612231                        /** @todo check selectors rather than returning all the time.  */
    11937                         Log4(("CRx CR0 write: back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
     12232                        Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
    1193812233                        rcStrict = VINF_EM_RESCHEDULE_REM;
    1193912234                    }
     
    1195612251                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    1195712252                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
    11958                     Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
     12253                    Log4Func(("CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
    1195912254                    break;
    1196012255                }
     
    1196512260                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    1196612261                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
    11967                     Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
    11968                           pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
     12262                    Log4Func(("CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
     12263                              pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
    1196912264                    break;
    1197012265                }
     
    1197912274                }
    1198012275                default:
    11981                     AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)));
     12276                    AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)));
    1198212277                    break;
    1198312278            }
     
    1199012285                   || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
    1199112286                   || pVCpu->hm.s.fUsingDebugLoop
    11992                    || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3);
     12287                   || VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 3);
    1199312288            /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
    11994             Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 8
     12289            Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8
    1199512290                   || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
    1199612291
    11997             rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
    11998                                                 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification),
    11999                                                 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification));
     12292            rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
     12293                                                VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
    1200012294            AssertMsg(   rcStrict == VINF_SUCCESS
    1200112295                      || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1200212296#ifdef VBOX_WITH_STATISTICS
    12003             switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
     12297            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
    1200412298            {
    1200512299                case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
     
    1201012304            }
    1201112305#endif
    12012             Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
     12306            Log4Func(("CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
    1201312307                  VBOXSTRICTRC_VAL(rcStrict)));
    12014             if (VMX_EXIT_QUAL_CRX_GENREG(uExitQualification) == X86_GREG_xSP)
     12308            if (VMX_EXIT_QUAL_CRX_GENREG(uExitQual) == X86_GREG_xSP)
    1201512309                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
    1201612310            else
     
    1202712321            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    1202812322            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
    12029             Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
     12323            Log4Func(("CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
    1203012324            break;
    1203112325        }
     
    1203412328        {
    1203512329            /* Note! LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here. */
    12036             rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
    12037                                           VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification));
     12330            rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual));
    1203812331            AssertMsg(   rcStrict == VINF_SUCCESS
    1203912332                      || rcStrict == VINF_IEM_RAISED_XCPT
     
    1204212335            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    1204312336            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
    12044             Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
     12337            Log4Func(("LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
    1204512338            break;
    1204612339        }
     
    1207512368
    1207612369    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    12077     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12370    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1207812371    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1207912372    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER);
     
    1208212375
    1208312376    /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
    12084     uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification);
    12085     uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification);
    12086     bool     fIOWrite     = (   VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification)
    12087                              == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
    12088     bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification);
     12377    uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
     12378    uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQual);
     12379    bool     fIOWrite     = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
     12380    bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
    1208912381    bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
    1209012382    bool     fDbgStepping = pVCpu->hm.s.fSingleInstruction;
     
    1212412416             * interpreting the instruction.
    1212512417             */
    12126             Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     12418            Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    1212712419            AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
    1212812420            bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
     
    1213412426                AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
    1213512427                IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
    12136                 bool const fRep           = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification);
     12428                bool const fRep           = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
    1213712429                if (fIOWrite)
    1213812430                    rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
     
    1216012452             * IN/OUT - I/O instruction.
    1216112453             */
    12162             Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     12454            Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    1216312455            uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
    12164             Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification));
     12456            Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
    1216512457            if (fIOWrite)
    1216612458            {
     
    1229612588        Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
    1229712589              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    12298               VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",
     12590              VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
    1229912591              fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
    1230012592
     
    1231912611
     1232012612    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
    12321     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12613    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1232212614    AssertRCReturn(rc, rc);
    12323     if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
     12615    if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
    1232412616    {
    1232512617        rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    1232612618        AssertRCReturn(rc, rc);
    12327         if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
     12619        if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
    1232812620        {
    1232912621            uint32_t       uErrCode;
     
    1235012642                                   0 /* cbInstr */, uErrCode, GCPtrFaultAddress);
    1235112643
    12352             Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
     12644            Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", uIntType, uVector));
    1235312645            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
    1235412646            return VINF_EM_RAW_INJECT_TRPM_EVENT;
     
    1240612698    /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
    1240712699    int rc  = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    12408     rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12700    rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1240912701    AssertRCReturn(rc, rc);
    1241012702
     1241112703    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
    12412     uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
     12704    uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
    1241312705    VBOXSTRICTRC rcStrict2;
    1241412706    switch (uAccessType)
     
    1241812710        {
    1241912711            AssertMsg(   !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    12420                       || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,
     12712                      || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
    1242112713                      ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
    1242212714
    1242312715            RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase;   /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
    1242412716            GCPhys &= PAGE_BASE_GC_MASK;
    12425             GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
     12717            GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
    1242612718            PVM pVM = pVCpu->CTX_SUFF(pVM);
    1242712719            Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
    12428                  VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
     12720                 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
    1242912721
    1243012722            PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     
    1249412786
    1249512787#ifdef VBOX_WITH_STATISTICS
    12496         rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12788        rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1249712789        AssertRCReturn(rc, rc);
    12498         if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
     12790        if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    1249912791            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    1250012792        else
     
    1251012802     */
    1251112803    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    12512     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12804    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1251312805    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
    1251412806    AssertRCReturn(rc, rc);
     
    1251612808
    1251712809    PVM pVM = pVCpu->CTX_SUFF(pVM);
    12518     if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
     12810    if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    1251912811    {
    1252012812        rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    12521                                  VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification),
    12522                                  VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification));
     12813                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
     12814                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
    1252312815        if (RT_SUCCESS(rc))
    1252412816            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
     
    1252812820    {
    1252912821        rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    12530                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification),
    12531                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification));
     12822                                VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
     12823                                VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
    1253212824        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    1253312825    }
     
    1259612888        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1259712889        rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
    12598         Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
     12890        Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
    1259912891        if (   rcStrict == VINF_SUCCESS
    1260012892            || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
     
    1265512947    RTGCPHYS GCPhys;
    1265612948    int rc  = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
    12657     rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12949    rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1265812950    rc     |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1265912951    AssertRCReturn(rc, rc);
    1266012952
    1266112953    /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
    12662     AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
     12954    AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQual));
    1266312955
    1266412956    RTGCUINT uErrorCode = 0;
    12665     if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
     12957    if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
    1266612958        uErrorCode |= X86_TRAP_PF_ID;
    12667     if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_DATA_WRITE)
     12959    if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE)
    1266812960        uErrorCode |= X86_TRAP_PF_RW;
    12669     if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
     12961    if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
    1267012962        uErrorCode |= X86_TRAP_PF_P;
    1267112963
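
This translation maps EPT-violation exit-qualification bits onto ordinary x86 page-fault error-code bits so PGM can handle the nested fault like a regular #PF. A worked example (sketch): a data write that hits a present EPT entry sets VMX_EXIT_QUAL_EPT_DATA_WRITE and VMX_EXIT_QUAL_EPT_ENTRY_PRESENT, so the checks above yield

    uErrorCode == (X86_TRAP_PF_RW | X86_TRAP_PF_P);   /* write access to a present page */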
     
    1267712969    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1267812970
    12679     Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
    12680               uErrorCode, pCtx->cs.Sel, pCtx->rip));
     12971    Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
     12972              pCtx->cs.Sel, pCtx->rip));
    1268112973
    1268212974    VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
     
    1270012992/** @} */
    1270112993
     12994/** @name VM-exit exception handlers.
     12995 * @{
     12996 */
    1270212997/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    1270312998/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit exception handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
    1270412999/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    12705 
    12706 /** @name VM-exit exception handlers.
    12707  * @{
    12708  */
    1270913000
    1271013001/**
     
    1273213023    }
    1273313024
    12734     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    12735                            pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     13025    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     13026                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    1273613027    return rc;
    1273713028}
     
    1275813049        AssertRCReturn(rc, rc);
    1275913050
    12760         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    12761                                pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     13051        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     13052                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    1276213053    }
    1276313054
     
    1278213073    Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
    1278313074
    12784     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    12785                            pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     13075    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     13076                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    1278613077    return VINF_SUCCESS;
    1278713078}
     
    1280013091     * for processing.
    1280113092     */
    12802     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     13093    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1280313094
    1280413095    /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
    1280513096    uint64_t uDR6 = X86_DR6_INIT_VAL;
    12806     uDR6         |= (  pVmxTransient->uExitQualification
    12807                      & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
     13097    uDR6         |= (pVmxTransient->uExitQual & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
    1280813098
    1280913099    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     
    1285613146        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    1285713147        AssertRCReturn(rc, rc);
    12858         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    12859                                pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     13148        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     13149                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    1286013150        return VINF_SUCCESS;
    1286113151    }
     
    1289913189        Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,
    1290013190                  pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
    12901         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    12902                                pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     13191        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     13192                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    1290313193        return rc;
    1290413194    }
     
    1316613456                    && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
    1316713457                {
    13168                     Log4(("hmR0VmxExitXcptGP: mode changed -> VINF_EM_RESCHEDULE\n"));
     13458                    Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
    1316913459                    /** @todo Exit fRealOnV86Active here w/o dropping back to ring-3. */
    1317013460                    rc = VINF_EM_RESCHEDULE;
     
    1321713507#endif
    1321813508
    13219     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    13220                            pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     13509    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     13510                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    1322113511    return VINF_SUCCESS;
    1322213512}
     
    1323013520    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1323113521    PVM pVM = pVCpu->CTX_SUFF(pVM);
    13232     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     13522    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1323313523    rc    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    1323413524    rc    |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     
    1324513535        if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
    1324613536        {
    13247             hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    13248                                    0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
     13537            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
     13538                                   pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
    1324913539        }
    1325013540        else
     
    1327013560    AssertRCReturn(rc, rc);
    1327113561
    13272     Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
    13273               pCtx->cs.Sel, pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
    13274 
    13275     TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
    13276     rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx),
    13277                           (RTGCPTR)pVmxTransient->uExitQualification);
     13562    Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel,
     13563              pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
     13564
     13565    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
     13566    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
    1327813567
    1327913568    Log4Func(("#PF: rc=%Rrc\n", rc));
     
    1329813587            TRPMResetTrap(pVCpu);
    1329913588            pVCpu->hm.s.Event.fPending = false;                 /* In case it's a contributory #PF. */
    13300             hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    13301                                    0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
     13589            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
     13590                                   uGstErrorCode, pVmxTransient->uExitQual);
    1330213591        }
    1330313592        else
     
    1332113610/** @} */
    1332213611
     13612#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     13613
     13614/** @name Nested-guest VM-exit handlers.
     13615 * @{
     13616 */
     13617/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
     13618/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Nested-guest VM-exit handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
     13619/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
     13620
     13621/**
     13622 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
     13623 */
     13624HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13625{
     13626    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     13627
     13628    /** @todo NSTVMX: Vmclear. */
     13629    hmR0VmxSetPendingXcptUD(pVCpu);
     13630    return VINF_SUCCESS;
     13631}
     13632
     13633
     13634/**
     13635 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
     13636 */
     13637HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13638{
     13639    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     13640
     13641    /** @todo NSTVMX: Vmlaunch. */
     13642    hmR0VmxSetPendingXcptUD(pVCpu);
     13643    return VINF_SUCCESS;
     13644}
     13645
     13646
     13647/**
     13648 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
     13649 */
     13650HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13651{
     13652    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     13653
     13654    /** @todo NSTVMX: Vmptrld. */
     13655    hmR0VmxSetPendingXcptUD(pVCpu);
     13656    return VINF_SUCCESS;
     13657}
     13658
     13659
     13660/**
     13661 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
     13662 */
     13663HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13664{
     13665    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     13666
     13667    /** @todo NSTVMX: Vmptrst. */
     13668    hmR0VmxSetPendingXcptUD(pVCpu);
     13669    return VINF_SUCCESS;
     13670}
     13671
     13672
     13673/**
     13674 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Unconditional VM-exit.
     13675 */
     13676HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13677{
     13678    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     13679
     13680    /** @todo NSTVMX: Vmread. */
     13681    hmR0VmxSetPendingXcptUD(pVCpu);
     13682    return VINF_SUCCESS;
     13683}
     13684
     13685
     13686/**
     13687 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
     13688 */
     13689HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13690{
     13691    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     13692
     13693    /** @todo NSTVMX: Vmresume. */
     13694    hmR0VmxSetPendingXcptUD(pVCpu);
     13695    return VINF_SUCCESS;
     13696}
     13697
     13698
     13699/**
     13700 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Unconditional VM-exit.
     13701 */
     13702HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13703{
     13704    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     13705
     13706    /** @todo NSTVMX: Vmwrite. */
     13707    hmR0VmxSetPendingXcptUD(pVCpu);
     13708    return VINF_SUCCESS;
     13709}
     13710
     13711
     13712/**
     13713 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
     13714 */
     13715HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13716{
     13717    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     13718
     13719    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     13720    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     13721    AssertRCReturn(rc, rc);
     13722
     13723    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
     13724    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     13725    {
     13726        /* VMXOFF on success changes the internal hwvirt state but not anything that's visible to the guest. */
     13727        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
     13728    }
     13729    else if (rcStrict == VINF_IEM_RAISED_XCPT)
     13730    {
     13731        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13732        rcStrict = VINF_SUCCESS;
     13733    }
     13734    return rcStrict;
     13735}
     13736
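The VMCLEAR through VMWRITE handlers above are still stubs that raise #UD; hmR0VmxExitVmxoff shows the shape they would presumably take once IEM grows the matching workers. A minimal sketch for VMCLEAR, where IEMExecDecodedVmclear and its parameters are purely hypothetical at this changeset:

    /* Sketch, not shipped code: IEMExecDecodedVmclear is a hypothetical worker. */
    HMVMX_EXIT_DECL hmR0VmxExitVmclearSketch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    {
        HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);

        int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
        rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
        AssertRCReturn(rc, rc);

        /* VMCLEAR takes a memory operand, so the real handler would also decode
           it via hmR0VmxDecodeMemOperand, as hmR0VmxExitVmxon does below. */
        VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, pVmxTransient->cbInstr);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
        else if (rcStrict == VINF_IEM_RAISED_XCPT)
        {
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
            rcStrict = VINF_SUCCESS;
        }
        return rcStrict;
    }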
     13737
     13738/**
     13739 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
     13740 */
     13741HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13742{
     13743    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     13744
     13745    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     13746    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
     13747    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     13748    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     13749    AssertRCReturn(rc, rc);
     13750
     13751    VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToVmxInstr(pVCpu, pVmxTransient);
     13752    if (rcStrict == VINF_SUCCESS)
     13753    { /* likely */ }
     13754    else if (rcStrict == VINF_HM_PENDING_XCPT)
     13755    {
     13756        Log4Func(("Privilege checks failed, raising xcpt %#x!\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
     13757        return VINF_SUCCESS;
     13758    }
     13759    else
     13760    {
     13761        Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     13762        return rcStrict;
     13763    }
     13764
     13765    RTGCPTR            GCPtrVmxon;
     13766    PCVMXEXITINSTRINFO pExitInstrInfo = &pVmxTransient->ExitInstrInfo;
      13767    RTGCPTR const      GCPtrDisp      = pVmxTransient->uExitQual;
      13768    rcStrict = hmR0VmxDecodeMemOperand(pVCpu, pExitInstrInfo, GCPtrDisp, false /*fIsWrite*/, &GCPtrVmxon);
     13769    if (rcStrict == VINF_SUCCESS)
     13770    { /* likely */ }
     13771    else if (rcStrict == VINF_HM_PENDING_XCPT)
     13772    {
     13773        Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
     13774        return VINF_SUCCESS;
     13775    }
     13776    else
     13777    {
      13778        Log4Func(("hmR0VmxDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     13779        return rcStrict;
     13780    }
     13781
     13782    rcStrict = IEMExecDecodedVmxon(pVCpu, pVmxTransient->cbInstr, GCPtrVmxon, pExitInstrInfo->u, GCPtrDisp);
     13783    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     13784        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
     13785    else if (rcStrict == VINF_IEM_RAISED_XCPT)
     13786    {
     13787        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13788        rcStrict = VINF_SUCCESS;
     13789    }
     13790    return rcStrict;
     13791}
     13792
     13793/** @} */
     13794#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
     13795
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r73389 r73606  
    27752775    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
    27762776    static const char *const s_aHwvirtModes[] = { "No/inactive", "SVM", "VMX", "Common" };
    2777     uint8_t const idxHwvirtState = CPUMIsGuestInSvmNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_SVM
    2778                                  : CPUMIsGuestInVmxNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE;
     2777    bool const fSvm = pVM->cpum.ro.GuestFeatures.fSvm;
     2778    bool const fVmx = pVM->cpum.ro.GuestFeatures.fVmx;
     2779    uint8_t const idxHwvirtState = fSvm ? CPUMHWVIRTDUMP_SVM : (fVmx ? CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE);
    27792780    AssertCompile(CPUMHWVIRTDUMP_LAST <= RT_ELEMENTS(s_aHwvirtModes));
    27802781    Assert(idxHwvirtState < RT_ELEMENTS(s_aHwvirtModes));
     
    27882789
    27892790    if (fDumpState & CPUMHWVIRTDUMP_COMMON)
    2790     {
    2791         pHlp->pfnPrintf(pHlp, "fGif                           = %RTbool\n", pCtx->hwvirt.fGif);
    2792         pHlp->pfnPrintf(pHlp, "fLocalForcedActions            = %#RX32\n",  pCtx->hwvirt.fLocalForcedActions);
    2793     }
     2791        pHlp->pfnPrintf(pHlp, "fLocalForcedActions          = %#RX32\n",  pCtx->hwvirt.fLocalForcedActions);
     2792
    27942793    pHlp->pfnPrintf(pHlp, "%s hwvirt state%s\n", pcszHwvirtMode, (fDumpState & (CPUMHWVIRTDUMP_SVM | CPUMHWVIRTDUMP_VMX)) ?
    27952794                                                                 ":" : "");
    27962795    if (fDumpState & CPUMHWVIRTDUMP_SVM)
    27972796    {
     2797        pHlp->pfnPrintf(pHlp, "  fGif                       = %RTbool\n", pCtx->hwvirt.fGif);
     2798
    27982799        char szEFlags[80];
    27992800        cpumR3InfoFormatFlags(&szEFlags[0], pCtx->hwvirt.svm.HostState.rflags.u);
    2800 
    28012801        pHlp->pfnPrintf(pHlp, "  uMsrHSavePa                = %#RX64\n",    pCtx->hwvirt.svm.uMsrHSavePa);
    28022802        pHlp->pfnPrintf(pHlp, "  GCPhysVmcb                 = %#RGp\n",     pCtx->hwvirt.svm.GCPhysVmcb);
     
    28392839    }
    28402840
    2841     /** @todo Intel.  */
    2842 #if 0
    28432841    if (fDumpState & CPUMHWVIRTDUMP_VMX)
    28442842    {
     2843        pHlp->pfnPrintf(pHlp, "  fInVmxRootMode             = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxRootMode);
     2844        pHlp->pfnPrintf(pHlp, "  fInVmxNonRootMode          = %RTbool\n",   pCtx->hwvirt.vmx.fInVmxNonRootMode);
     2845        pHlp->pfnPrintf(pHlp, "  GCPhysVmxon                = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmxon);
     2846        pHlp->pfnPrintf(pHlp, "  GCPhysVmcs                 = %#RGp\n",     pCtx->hwvirt.vmx.GCPhysVmcs);
     2847        pHlp->pfnPrintf(pHlp, "  enmInstrDiag               = %u (%s)\n",   pCtx->hwvirt.vmx.enmInstrDiag,
     2848                        HMVmxGetInstrDiagDesc(pCtx->hwvirt.vmx.enmInstrDiag));
     2849        /** @todo NSTVMX: Dump remaining/new fields. */
    28452850    }
    2846 #endif
    28472851
    28482852#undef CPUMHWVIRTDUMP_NONE
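
With the #if 0 dropped, the guest hwvirt info dump now covers the VMX side as well. Going purely by the format strings above, a guest that has executed VMXON would dump something like the following; all values are illustrative, and the diag description "(None)" is an assumption about what HMVmxGetInstrDiagDesc returns for the zero value:

    VMX hwvirt state:
      fInVmxRootMode             = true
      fInVmxNonRootMode          = false
      GCPhysVmxon                = 0x00000000fe001000
      GCPhysVmcs                 = 0x0000000000000000
      enmInstrDiag               = 0 (None)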
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r73389 r73606  
    39373937    AssertLogRelRCReturn(rc, rc);
    39383938
    3939 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    3940     /** @cfgm{/CPUM/NestedHWVirt, bool, false}
    3941      * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
    3942      * The default is false, and when enabled requires nested paging and AMD-V or
    3943      * unrestricted guest mode.
    3944      */
    3945     rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
    3946     AssertLogRelRCReturn(rc, rc);
    3947     if (   pConfig->fNestedHWVirt
    3948         && !fNestedPagingAndFullGuestExec)
    3949         return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
    3950                           "Cannot enable nested VT-x/AMD-V without nested-paging and unrestricted guest execution!\n");
    3951 
    3952     /** @todo Think about enabling this later with NEM/KVM. */
    3953     if (   pConfig->fNestedHWVirt
    3954         && VM_IS_NEM_ENABLED(pVM))
    3955     {
    3956         LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n"));
    3957         pConfig->fNestedHWVirt = false;
    3958     }
     3939    bool fQueryNestedHwvirt = false;
     3940#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     3941    fQueryNestedHwvirt |= RT_BOOL(pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD);
    39593942#endif
     3943#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     3944    fQueryNestedHwvirt |= RT_BOOL(   pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
     3945                                  || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA);
     3946#endif
     3947    if (fQueryNestedHwvirt)
     3948    {
     3949        /** @cfgm{/CPUM/NestedHWVirt, bool, false}
     3950         * Whether to expose the hardware virtualization (VMX/SVM) feature to the guest.
     3951         * The default is false, and when enabled requires nested paging and AMD-V or
     3952         * unrestricted guest mode.
     3953         */
     3954        rc = CFGMR3QueryBoolDef(pCpumCfg, "NestedHWVirt", &pConfig->fNestedHWVirt, false);
     3955        AssertLogRelRCReturn(rc, rc);
     3956        if (   pConfig->fNestedHWVirt
     3957            && !fNestedPagingAndFullGuestExec)
     3958            return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
      3959                              "Cannot enable nested VT-x/AMD-V without nested-paging and unrestricted guest execution!\n");
     3960
     3961        /** @todo Think about enabling this later with NEM/KVM. */
     3962        if (   pConfig->fNestedHWVirt
     3963            && VM_IS_NEM_ENABLED(pVM))
     3964        {
     3965            LogRel(("CPUM: WARNING! Can't turn on nested VT-x/AMD-V when NEM is used!\n"));
     3966            pConfig->fNestedHWVirt = false;
     3967        }
     3968    }
    39603969
    39613970    /*
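
Since /CPUM/NestedHWVirt is an ordinary CFGM key, no code change is needed to try it. Assuming the usual VBoxInternal extradata mapping into the CFGM tree, something like

    VBoxManage setextradata "MyVM" "VBoxInternal/CPUM/NestedHWVirt" 1

should expose the feature, subject to the nested-paging/unrestricted-guest check above; with this changeset the key is only even queried when the host CPU vendor matches the built-in nested SVM/VMX support.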
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r73097 r73606  
    18151815    }
    18161816
    1817     if (CPUMIsGuestInVmxNestedHwVirtMode(&pVCpu->cpum.GstCtx))
     1817    if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    18181818    { /** @todo Nested VMX. */ }
    18191819
     
    21472147                Assert(!HMR3IsEventPending(pVCpu));
    21482148#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    2149                 if (CPUMIsGuestInNestedHwVirtMode(&pVCpu->cpum.GstCtx))
     2149                if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
    21502150                {
    21512151                    bool fResched, fInject;
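
The EM hunks tighten both predicates: what matters to the recompiler and to event injection is whether nested-guest code is actually executing, not merely whether the guest has entered VMX operation. Presumably the new predicates reduce to the CPUMCTX flags this changeset adds (see the tstVMStruct.h hunk below); a sketch under that assumption, with the Sketch suffix marking these as illustrative stand-ins:

    /* Sketch, assuming the predicates wrap the new CPUMCTX.hwvirt.vmx flags. */
    DECLINLINE(bool) CPUMIsGuestInVmxRootModeSketch(PCCPUMCTX pCtx)
    {
        return pCtx->hwvirt.vmx.fInVmxRootMode;     /* VMXON executed; guest acts as hypervisor. */
    }

    DECLINLINE(bool) CPUMIsGuestInVmxNonRootModeSketch(PCCPUMCTX pCtx)
    {
        return pCtx->hwvirt.vmx.fInVmxNonRootMode;  /* Nested-guest code is actually running. */
    }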
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r73389 r73606  
    4242#include <VBox/vmm/stam.h>
    4343#include <VBox/vmm/mm.h>
     44#include <VBox/vmm/em.h>
    4445#include <VBox/vmm/pdmapi.h>
    4546#include <VBox/vmm/pgm.h>
     
    7778#define EXIT_REASON(def, val, str) #def " - " #val " - " str
    7879#define EXIT_REASON_NIL() NULL
    79 /** Exit reason descriptions for VT-x, used to describe statistics. */
    80 static const char * const g_apszVTxExitReasons[MAX_EXITREASON_STAT] =
     80/** Exit reason descriptions for VT-x, used to describe statistics and exit
     81 *  history. */
     82static const char * const g_apszVmxExitReasons[MAX_EXITREASON_STAT] =
    8183{
    8284    EXIT_REASON(VMX_EXIT_XCPT_OR_NMI            ,   0, "Exception or non-maskable interrupt (NMI)."),
     
    149151#define MAX_EXITREASON_VTX                         64
    150152
    151 /** A partial list of Exit reason descriptions for AMD-V, used to describe
    152  *  statistics.
     153/** A partial list of \#EXIT reason descriptions for AMD-V, used to describe
     154 *  statistics and exit history.
    153155 *
    154156 *  @note AMD-V has annoyingly large gaps (e.g. \#NPF VMEXIT comes at 1024),
    155157 *        so this array doesn't contain the entire set of exit reasons; we
    156158 *        handle them via hmSvmGetSpecialExitReasonDesc(). */
    157 static const char * const g_apszAmdVExitReasons[MAX_EXITREASON_STAT] =
     159static const char * const g_apszSvmExitReasons[MAX_EXITREASON_STAT] =
    158160{
    159161    EXIT_REASON(SVM_EXIT_READ_CR0     ,    0, "Read CR0."),
     
    310312/**
    311313 * Gets the SVM exit reason if it's one of the reasons not present in the @c
    312  * g_apszAmdVExitReasons array.
     314 * g_apszSvmExitReasons array.
    313315 *
    314316 * @returns The exit reason or NULL if unknown.
     
    10611063#undef HM_REG_COUNTER
    10621064
    1063         const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? &g_apszVTxExitReasons[0]
    1064                                                                                : &g_apszAmdVExitReasons[0];
     1065        const char *const *papszDesc = ASMIsIntelCpu() || ASMIsViaCentaurCpu() ? &g_apszVmxExitReasons[0]
     1066                                                                               : &g_apszSvmExitReasons[0];
    10651067
    10661068        /*
     
    19381940    uint32_t u32Model;
    19391941    uint32_t u32Stepping;
    1940     if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
     1942    if (HMSvmIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping))
    19411943        LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
    19421944    LogRel(("HM: Max resume loops                  = %u\n",     pVM->hm.s.cMaxResumeLoops));
     
    29482950
    29492951#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
    2950     if (CPUMIsGuestInNestedHwVirtMode(pCtx))
     2952    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
     2953        || CPUMIsGuestVmxEnabled(pCtx))
    29512954    {
    29522955        Log(("HMR3CanExecuteGuest: In nested-guest mode - returning false"));
     
    31513154        &&  CPUMIsGuestInRealModeEx(pCtx)
    31523155        && !PDMVmmDevHeapIsEnabled(pVM))
    3153     {
    31543156        return true;
    3155     }
    31563157
    31573158    return false;
     
    34293430                LogRel(("HM: CPU[%u] Exit reason          %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));
    34303431
    3431                 if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS
    3432                     || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS)
     3432                if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
     3433                    || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
    34333434                {
    34343435                    LogRel(("HM: CPU[%u] Entered Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
    34353436                    LogRel(("HM: CPU[%u] Current Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
    34363437                }
    3437                 else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
     3438                else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTL)
    34383439                {
    34393440                    LogRel(("HM: CPU[%u] PinCtls          %#RX32\n", i, pVCpu->hm.s.vmx.u32PinCtls));
     
    37563757VMMR3DECL(const char *) HMR3GetVmxExitName(uint32_t uExit)
    37573758{
    3758     if (uExit < RT_ELEMENTS(g_apszVTxExitReasons))
    3759         return g_apszVTxExitReasons[uExit];
     3759    if (uExit < RT_ELEMENTS(g_apszVmxExitReasons))
     3760        return g_apszVmxExitReasons[uExit];
    37603761    return NULL;
    37613762}
     
    37703771VMMR3DECL(const char *) HMR3GetSvmExitName(uint32_t uExit)
    37713772{
    3772     if (uExit < RT_ELEMENTS(g_apszAmdVExitReasons))
    3773         return g_apszAmdVExitReasons[uExit];
     3773    if (uExit < RT_ELEMENTS(g_apszSvmExitReasons))
     3774        return g_apszSvmExitReasons[uExit];
    37743775    return hmSvmGetSpecialExitReasonDesc(uExit);
    37753776}
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r73097 r73606  
    45494549    if (pVCpu->pgm.s.fA20Enabled != fEnable)
    45504550    {
     4551#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     4552        PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
     4553        if (   CPUMIsGuestInVmxRootMode(pCtx)
     4554            && !fEnable)
     4555        {
     4556            Log(("Cannot enter A20M mode while in VMX root mode\n"));
     4557            return;
     4558        }
     4559#endif
    45514560        pVCpu->pgm.s.fA20Enabled = fEnable;
    45524561        pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
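
The unchanged line after the new VMX-root-mode guard is where A20 emulation actually happens, and the expression is worth a quick sanity check. A standalone rendering of ~((RTGCPHYS)!fEnable << 20), with uint64_t standing in for RTGCPHYS:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        for (int fEnable = 0; fEnable <= 1; fEnable++)
        {
            /* Same expression as PGMPhys, with uint64_t for RTGCPHYS. */
            uint64_t GCPhysA20Mask = ~((uint64_t)!fEnable << 20);
            printf("fEnable=%d mask=%#018" PRIx64 "\n", fEnable, GCPhysA20Mask);
        }
        return 0;
        /* fEnable=0 -> 0xffffffffffefffff (bit 20 forced clear: 1MB wraparound)
           fEnable=1 -> 0xffffffffffffffff (all address bits pass through)     */
    }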
  • trunk/src/VBox/VMM/include/HMInternal.h

    r73389 r73606  
    2121#include <VBox/cdefs.h>
    2222#include <VBox/types.h>
    23 #include <VBox/vmm/em.h>
    2423#include <VBox/vmm/stam.h>
    2524#include <VBox/dis.h>
  • trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp

    r73555 r73606  
    127127#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV()        do { } while (0)
    128128#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES()                                    do { } while (0)
     129#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     130# define IEMOP_HLP_VMX_INSTR()                              do { } while (0)
     131#endif
    129132
    130133
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r73250 r73606  
    146146    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvIoBitmapR3);
    147147    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HCPhysVmcb);
     148    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.GCPhysVmxon);
     149    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.GCPhysVmcs);
     150    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.enmInstrDiag);
     151    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInVmxRootMode);
     152    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInVmxNonRootMode);
     153    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR0);
     154    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR3);
    148155    GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions);
    149156    GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);
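
These lines pull the new VMX members into the structure-layout testcases. GEN_CHECK_OFF is defined by whichever testcase includes tstVMStruct.h; a minimal offsetof-based stand-in showing the idea (the stand-in itself is illustrative, not the real expansion):

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-in: emit each member's offset so two differently
       compiled contexts (e.g. 64-bit ring-0 vs 32-bit raw-mode) can be
       compared field by field. */
    #define GEN_CHECK_OFF(type, member) \
        printf("%s.%s = %u\n", #type, #member, (unsigned)offsetof(type, member))

    struct Toy { int a; struct { int b[2]; } inner; };

    int main(void)
    {
        GEN_CHECK_OFF(struct Toy, a);        /* struct Toy.a = 0       */
        GEN_CHECK_OFF(struct Toy, inner.b);  /* struct Toy.inner.b = 4 */
        return 0;
    }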