VirtualBox

Timestamp:
Aug 10, 2018 7:38:56 AM
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:9180 Various bits:

  • IEM: Started VMXON, VMXOFF implementation, use IEM_OPCODE_GET_NEXT_RM.
  • IEM: Fixed INVPCID C impl, removed unused IEMExecDecodedInvpcid.
  • IEM: Updated iemCImpl_load_CrX to check for CR0/CR4 fixed bits in VMX.
  • IEM: Updated offModRm to reset/re-initialize where needed.
  • CPUM: Added VMX root, non-root mode and other bits and updated a few places where they're used.
  • HM: Started adding fine-grained VMX instruction failure diagnostics.
  • HM: Made VM instruction error an enum.
  • HM: Added HMVMXAll.cpp for all context VMX code.
  • Ensure building with VBOX_WITH_NESTED_HWVIRT_[SVM|VMX] does the right thing based on host CPU.
  • CPUM: Added dumping of nested-VMX CPUMCTX state.
  • HMVMXR0: Added memory operand decoding.
  • HMVMXR0: VMX instr. privilege checks (the CPU does not consult the CR0/CR4 read shadows for these, so we must perform the checks ourselves)
  • HM: Added some more bit-field representations.
  • Recompiler: Refuse to run when in nested-VMX guest code.
File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r73437 r73606  
    3232#include <VBox/vmm/selm.h>
    3333#include <VBox/vmm/tm.h>
     34#include <VBox/vmm/em.h>
    3435#include <VBox/vmm/gim.h>
    3536#include <VBox/vmm/apic.h>
     
    196197{
    197198    /** The host's rflags/eflags. */
    198     RTCCUINTREG     fEFlags;
     199    RTCCUINTREG         fEFlags;
    199200#if HC_ARCH_BITS == 32
    200     uint32_t        u32Alignment0;
     201    uint32_t            u32Alignment0;
    201202#endif
    202203    /** The guest's TPR value used for TPR shadowing. */
    203     uint8_t         u8GuestTpr;
     204    uint8_t             u8GuestTpr;
    204205    /** Alignment. */
    205     uint8_t         abAlignment0[7];
     206    uint8_t             abAlignment0[7];
    206207
    207208    /** The basic VM-exit reason. */
    208     uint16_t        uExitReason;
     209    uint16_t            uExitReason;
    209210    /** Alignment. */
    210     uint16_t        u16Alignment0;
     211    uint16_t            u16Alignment0;
    211212    /** The VM-exit interruption error code. */
    212     uint32_t        uExitIntErrorCode;
     213    uint32_t            uExitIntErrorCode;
    213214    /** The VM-exit exit code qualification. */
    214     uint64_t        uExitQualification;
     215    uint64_t            uExitQual;
    215216
    216217    /** The VM-exit interruption-information field. */
    217     uint32_t        uExitIntInfo;
     218    uint32_t            uExitIntInfo;
    218219    /** The VM-exit instruction-length field. */
    219     uint32_t        cbInstr;
     220    uint32_t            cbInstr;
    220221    /** The VM-exit instruction-information field. */
    221     union
    222     {
    223         /** Plain unsigned int representation. */
    224         uint32_t    u;
    225         /** INS and OUTS information. */
    226         struct
    227         {
    228             uint32_t    u7Reserved0 : 7;
    229             /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
    230             uint32_t    u3AddrSize  : 3;
    231             uint32_t    u5Reserved1 : 5;
    232             /** The segment register (X86_SREG_XXX). */
    233             uint32_t    iSegReg     : 3;
    234             uint32_t    uReserved2  : 14;
    235         } StrIo;
    236         /** INVEPT, INVVPID, INVPCID information.  */
    237         struct
    238         {
    239             /** Scaling; 0=no scaling, 1=scale-by-2, 2=scale-by-4, 3=scale-by-8. */
    240             uint32_t    u2Scaling     : 2;
    241             uint32_t    u5Reserved0   : 5;
    242             /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
    243             uint32_t    u3AddrSize    : 3;
    244             uint32_t    u1Reserved0   : 1;
    245             uint32_t    u4Reserved0   : 4;
    246             /** The segment register (X86_SREG_XXX). */
    247             uint32_t    iSegReg       : 3;
    248             /** The index register (X86_GREG_XXX). */
    249             uint32_t    iIdxReg       : 4;
    250             /** Set if index register is invalid. */
    251             uint32_t    fIdxRegValid  : 1;
    252             /** The base register (X86_GREG_XXX). */
    253             uint32_t    iBaseReg      : 4;
    254             /** Set if base register is invalid. */
    255             uint32_t    fBaseRegValid : 1;
    256             /** Register 2 (X86_GREG_XXX). */
    257             uint32_t    iReg2         : 4;
    258         } Inv;
    259     }               ExitInstrInfo;
     222    VMXEXITINSTRINFO    ExitInstrInfo;
    260223    /** Whether the VM-entry failed or not. */
    261     bool            fVMEntryFailed;
     224    bool                fVMEntryFailed;
    262225    /** Alignment. */
    263     uint8_t         abAlignment1[3];
     226    uint8_t             abAlignment1[3];
    264227
    265228    /** The VM-entry interruption-information field. */
    266     uint32_t        uEntryIntInfo;
     229    uint32_t            uEntryIntInfo;
    267230    /** The VM-entry exception error code field. */
    268     uint32_t        uEntryXcptErrorCode;
     231    uint32_t            uEntryXcptErrorCode;
    269232    /** The VM-entry instruction length field. */
    270     uint32_t        cbEntryInstr;
     233    uint32_t            cbEntryInstr;
    271234
    272235    /** IDT-vectoring information field. */
    273     uint32_t        uIdtVectoringInfo;
     236    uint32_t            uIdtVectoringInfo;
    274237    /** IDT-vectoring error code. */
    275     uint32_t        uIdtVectoringErrorCode;
     238    uint32_t            uIdtVectoringErrorCode;
    276239
    277240    /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
    278     uint32_t        fVmcsFieldsRead;
     241    uint32_t            fVmcsFieldsRead;
    279242
    280243    /** Whether the guest debug state was active at the time of VM-exit. */
    281     bool            fWasGuestDebugStateActive;
     244    bool                fWasGuestDebugStateActive;
    282245    /** Whether the hyper debug state was active at the time of VM-exit. */
    283     bool            fWasHyperDebugStateActive;
     246    bool                fWasHyperDebugStateActive;
    284247    /** Whether TSC-offsetting should be setup before VM-entry. */
    285     bool            fUpdateTscOffsettingAndPreemptTimer;
     248    bool                fUpdateTscOffsettingAndPreemptTimer;
    286249    /** Whether the VM-exit was caused by a page-fault during delivery of a
    287250     *  contributory exception or a page-fault. */
    288     bool            fVectoringDoublePF;
     251    bool                fVectoringDoublePF;
    289252    /** Whether the VM-exit was caused by a page-fault during delivery of an
    290253     *  external interrupt or NMI. */
    291     bool            fVectoringPF;
     254    bool                fVectoringPF;
    292255} VMXTRANSIENT;
    293256AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason,               sizeof(uint64_t));
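
The hunk above folds the ad-hoc ExitInstrInfo union into the shared VMXEXITINSTRINFO type. For reference, here is a minimal self-contained model of how such a union decodes the raw 32-bit VM-exit instruction-information field; the DEMO names are illustrative stand-ins (not VirtualBox types) and the bit layout follows the removed Inv struct above:

    #include <stdint.h>
    #include <stdio.h>

    typedef union DEMOEXITINSTRINFO
    {
        uint32_t u;                           /* raw VMCS instruction-information value */
        struct
        {
            uint32_t u2Scaling       : 2;     /* 0=*1, 1=*2, 2=*4, 3=*8 */
            uint32_t u5Reserved0     : 5;
            uint32_t u3AddrSize      : 3;     /* 0=16-bit, 1=32-bit, 2=64-bit */
            uint32_t u1Reserved1     : 1;
            uint32_t u4Reserved2     : 4;
            uint32_t iSegReg         : 3;     /* X86_SREG_XXX */
            uint32_t iIdxReg         : 4;     /* X86_GREG_XXX */
            uint32_t fIdxRegInvalid  : 1;     /* set if no index register */
            uint32_t iBaseReg        : 4;     /* X86_GREG_XXX */
            uint32_t fBaseRegInvalid : 1;     /* set if no base register */
            uint32_t iReg2           : 4;     /* register 2 */
        } Inv;
    } DEMOEXITINSTRINFO;

    int main(void)
    {
        DEMOEXITINSTRINFO Info;
        Info.u = UINT32_C(0x00008d00);        /* an arbitrary raw value */
        printf("addr size=%u seg=%u base valid=%u\n", (unsigned)Info.Inv.u3AddrSize,
               (unsigned)Info.Inv.iSegReg, (unsigned)!Info.Inv.fBaseRegInvalid);
        return 0;
    }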
     
    404367static FNVMXEXITHANDLER     hmR0VmxExitRdpmc;
    405368static FNVMXEXITHANDLER     hmR0VmxExitVmcall;
     369#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     370static FNVMXEXITHANDLER     hmR0VmxExitVmclear;
     371static FNVMXEXITHANDLER     hmR0VmxExitVmlaunch;
     372static FNVMXEXITHANDLER     hmR0VmxExitVmptrld;
     373static FNVMXEXITHANDLER     hmR0VmxExitVmptrst;
     374static FNVMXEXITHANDLER     hmR0VmxExitVmread;
     375static FNVMXEXITHANDLER     hmR0VmxExitVmresume;
     376static FNVMXEXITHANDLER     hmR0VmxExitVmwrite;
     377static FNVMXEXITHANDLER     hmR0VmxExitVmxoff;
     378static FNVMXEXITHANDLER     hmR0VmxExitVmxon;
     379#endif
    406380static FNVMXEXITHANDLER     hmR0VmxExitRdtsc;
    407381static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
     
    473447 /* 17  VMX_EXIT_RSM                     */  hmR0VmxExitRsm,
    474448 /* 18  VMX_EXIT_VMCALL                  */  hmR0VmxExitVmcall,
     449#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     450 /* 19  VMX_EXIT_VMCLEAR                 */  hmR0VmxExitVmclear,
     451 /* 20  VMX_EXIT_VMLAUNCH                */  hmR0VmxExitVmlaunch,
     452 /* 21  VMX_EXIT_VMPTRLD                 */  hmR0VmxExitVmptrld,
     453 /* 22  VMX_EXIT_VMPTRST                 */  hmR0VmxExitVmptrst,
     454 /* 23  VMX_EXIT_VMREAD                  */  hmR0VmxExitVmread,
     455 /* 24  VMX_EXIT_VMRESUME                */  hmR0VmxExitVmresume,
     456 /* 25  VMX_EXIT_VMWRITE                 */  hmR0VmxExitVmwrite,
     457 /* 26  VMX_EXIT_VMXOFF                  */  hmR0VmxExitVmxoff,
     458 /* 27  VMX_EXIT_VMXON                   */  hmR0VmxExitVmxon,
     459#else
    475460 /* 19  VMX_EXIT_VMCLEAR                 */  hmR0VmxExitSetPendingXcptUD,
    476461 /* 20  VMX_EXIT_VMLAUNCH                */  hmR0VmxExitSetPendingXcptUD,
     
    482467 /* 26  VMX_EXIT_VMXOFF                  */  hmR0VmxExitSetPendingXcptUD,
    483468 /* 27  VMX_EXIT_VMXON                   */  hmR0VmxExitSetPendingXcptUD,
     469#endif
    484470 /* 28  VMX_EXIT_MOV_CRX                 */  hmR0VmxExitMovCRx,
    485471 /* 29  VMX_EXIT_MOV_DRX                 */  hmR0VmxExitMovDRx,
     
    719705 * @param   pVmxTransient   Pointer to the VMX transient structure.
    720706 */
    721 DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     707DECLINLINE(int) hmR0VmxReadExitQualVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    722708{
    723709    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
    724710    {
    725         int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
     711        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual); NOREF(pVCpu);
    726712        AssertRCReturn(rc, rc);
    727713        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
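
The renamed helper also illustrates the caching idiom used by all the hmR0VmxRead*Vmcs routines: each VMCS field is VMREAD at most once per VM-exit, with a bit in fVmcsFieldsRead (the HMVMX_READ_XXX mask noted in the struct above) marking it as fetched. A self-contained sketch of the pattern, with all names as illustrative stand-ins rather than the real VMM types:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_READ_EXIT_QUAL  UINT32_C(0x00000001)  /* stand-in for HMVMX_READ_EXIT_QUALIFICATION */

    typedef struct DEMOTRANSIENT
    {
        uint32_t fVmcsFieldsRead;  /* bitmask of fields already read this exit */
        uint64_t uExitQual;        /* cached exit qualification */
    } DEMOTRANSIENT;

    static uint64_t demoVmreadExitQual(void)
    {
        puts("VMREAD issued");     /* stands in for the actual VMREAD instruction */
        return UINT64_C(0x1234);
    }

    static uint64_t demoReadExitQual(DEMOTRANSIENT *pTransient)
    {
        if (!(pTransient->fVmcsFieldsRead & DEMO_READ_EXIT_QUAL))
        {
            pTransient->uExitQual        = demoVmreadExitQual();
            pTransient->fVmcsFieldsRead |= DEMO_READ_EXIT_QUAL;
        }
        return pTransient->uExitQual;
    }

    int main(void)
    {
        DEMOTRANSIENT Transient = { 0, 0 };
        demoReadExitQual(&Transient);  /* prints "VMREAD issued" */
        demoReadExitQual(&Transient);  /* served from the cache, no VMREAD */
        return 0;
    }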
     
    49994985            int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
    50004986            rc    |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
    5001             rc    |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     4987            rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    50024988            AssertRC(rc);
    50034989
     
    50094995                Log4(("uExitReason        %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
    50104996                     pVmxTransient->uExitReason));
    5011                 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
     4997                Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));
    50124998                Log4(("InstrError         %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
    50134999                if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
     
    57885774DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu)
    57895775{
    5790     uint32_t const u32IntInfo = X86_XCPT_DF | VMX_EXIT_INT_INFO_VALID
    5791                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT)
    5792                               | VMX_EXIT_INT_INFO_ERROR_CODE_VALID;
     5776    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DF)
     5777                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5778                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
     5779                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    57935780    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo,  0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    57945781}
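
The rewritten setters compose the VM-entry interruption-information field with IPRT's RT_BF_MAKE instead of hand-coded shifts, which keeps the vector, type and validity bits visually aligned. Roughly, RT_BF_MAKE(Field, Value) shifts the value into position using the Field_SHIFT/Field_MASK pair. A self-contained model; the DEMO_* definitions follow the architectural field layout but are stand-ins for the real VMX_BF_ENTRY_INT_INFO_* macros:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for IPRT's RT_BF_MAKE. */
    #define DEMO_BF_MAKE(a_Field, a_uValue) (((uint32_t)(a_uValue) << a_Field##_SHIFT) & a_Field##_MASK)

    /* VM-entry interruption information (Intel SDM): vector in bits 7:0,
       type in bits 10:8, error-code-valid in bit 11, valid in bit 31. */
    #define DEMO_INT_INFO_VECTOR_SHIFT          0
    #define DEMO_INT_INFO_VECTOR_MASK           UINT32_C(0x000000ff)
    #define DEMO_INT_INFO_TYPE_SHIFT            8
    #define DEMO_INT_INFO_TYPE_MASK             UINT32_C(0x00000700)
    #define DEMO_INT_INFO_ERR_CODE_VALID_SHIFT  11
    #define DEMO_INT_INFO_ERR_CODE_VALID_MASK   UINT32_C(0x00000800)
    #define DEMO_INT_INFO_VALID_SHIFT           31
    #define DEMO_INT_INFO_VALID_MASK            UINT32_C(0x80000000)

    int main(void)
    {
        /* Compose the #DF event (vector 8, hardware exception type 3,
           error code valid) the same way hmR0VmxSetPendingXcptDF does above. */
        uint32_t const u32IntInfo = DEMO_BF_MAKE(DEMO_INT_INFO_VECTOR,         8)
                                  | DEMO_BF_MAKE(DEMO_INT_INFO_TYPE,           3)
                                  | DEMO_BF_MAKE(DEMO_INT_INFO_ERR_CODE_VALID, 1)
                                  | DEMO_BF_MAKE(DEMO_INT_INFO_VALID,          1);
        printf("u32IntInfo=%#x\n", u32IntInfo);  /* prints 0x80000b08 */
        return 0;
    }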
     
    58025789DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu)
    58035790{
    5804     uint32_t const u32IntInfo  = X86_XCPT_UD | VMX_EXIT_INT_INFO_VALID
    5805                                | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
     5791    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_UD)
     5792                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5793                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
     5794                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    58065795    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    58075796}
     
    58155804DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu)
    58165805{
    5817     uint32_t const u32IntInfo = X86_XCPT_DB | VMX_EXIT_INT_INFO_VALID
    5818                               | (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
     5806    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_DB)
     5807                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5808                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
     5809                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    58195810    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    58205811}
     
    58305821DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, uint32_t cbInstr)
    58315822{
    5832     uint32_t const u32IntInfo  = X86_XCPT_OF | VMX_EXIT_INT_INFO_VALID
    5833                                | (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
     5823    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_OF)
     5824                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_SW_INT)
     5825                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
     5826                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    58345827    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
    58355828}
     5829
     5830
     5831/**
     5832 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
     5833 *
     5834 * @param   pVCpu           The cross context virtual CPU structure.
     5835 * @param   u32ErrCode      The error code for the general-protection exception.
     5836 */
     5837DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, uint32_t u32ErrCode)
     5838{
     5839    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
     5840                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5841                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
     5842                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     5843    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
     5844}
     5845
     5846
     5847/**
     5848 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
     5849 *
     5850 * @param   pVCpu           The cross context virtual CPU structure.
     5851 * @param   u32ErrCode      The error code for the stack exception.
     5852 */
     5853DECLINLINE(void) hmR0VmxSetPendingXcptSS(PVMCPU pVCpu, uint32_t u32ErrCode)
     5854{
     5855    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_SS)
     5856                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
     5857                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
     5858                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
     5859    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
     5860}
     5861
     5862
     5863#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     5864
     5865/**
     5866 * Decodes the memory operand of a VM-exit due to instruction execution.
     5867 *
     5868 * For instructions with two operands, the second operand is usually found in the
     5869 * VM-exit qualification field.
     5870 *
     5871 * @returns Strict VBox status code (i.e. informational status codes too).
     5872 * @retval  VINF_SUCCESS if the operand was successfully decoded.
     5873 * @retval  VINF_HM_PENDING_XCPT if an exception was raised while decoding the
     5874 *          operand.
     5875 * @param   pVCpu           The cross context virtual CPU structure.
     5876 * @param   pExitInstrInfo  Pointer to the VM-exit instruction information.
      5877 * @param   GCPtrDisp       The instruction displacement field, if any. For
      5878 *                          RIP-relative addressing pass RIP + displacement here.
      5879 * @param   fIsWrite        Whether the operand is a destination memory operand
      5880 *                          (i.e. writeable memory location) or not.
     5881 * @param   pGCPtrMem       Where to store the destination memory operand.
     5882 */
     5883static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, PCVMXEXITINSTRINFO pExitInstrInfo, RTGCPTR GCPtrDisp, bool fIsWrite,
     5884                                            PRTGCPTR pGCPtrMem)
     5885{
     5886    Assert(pExitInstrInfo);
     5887    Assert(pGCPtrMem);
     5888    Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
     5889
     5890    static uint64_t const s_auAddrSizeMasks[]   = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
     5891    static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
     5892    AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
     5893
     5894    uint8_t const   uAddrSize     =  pExitInstrInfo->InvVmxXsaves.u3AddrSize;
     5895    uint8_t const   iSegReg       =  pExitInstrInfo->InvVmxXsaves.iSegReg;
     5896    bool const      fIdxRegValid  = !pExitInstrInfo->InvVmxXsaves.fIdxRegInvalid;
     5897    uint8_t const   iIdxReg       =  pExitInstrInfo->InvVmxXsaves.iIdxReg;
     5898    uint8_t const   uScale        =  pExitInstrInfo->InvVmxXsaves.u2Scaling;
     5899    bool const      fBaseRegValid = !pExitInstrInfo->InvVmxXsaves.fBaseRegInvalid;
     5900    uint8_t const   iBaseReg      =  pExitInstrInfo->InvVmxXsaves.iBaseReg;
     5901    bool const      fIsMemOperand = !pExitInstrInfo->InvVmxXsaves.fIsRegOperand;
     5902    bool const      fIsLongMode   =  CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
     5903
     5904    /*
     5905     * Validate instruction information.
      5906     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
     5907     */
     5908    AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
     5909                          ("Invalid address size. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_1);
     5910    AssertLogRelMsgReturn(iSegReg  < X86_SREG_COUNT,
     5911                          ("Invalid segment register. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_2);
     5912    AssertLogRelMsgReturn(fIsMemOperand,
     5913                          ("Expected memory operand. ExitInstrInfo=%#RX32\n", pExitInstrInfo->u), VERR_VMX_IPE_3);
     5914
     5915    /*
     5916     * Compute the complete effective address.
     5917     *
     5918     * See AMD instruction spec. 1.4.2 "SIB Byte Format"
     5919     * See AMD spec. 4.5.2 "Segment Registers".
     5920     */
     5921    RTGCPTR GCPtrMem  = GCPtrDisp;
     5922    if (fBaseRegValid)
     5923        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
     5924    if (fIdxRegValid)
     5925        GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
     5926
     5927    RTGCPTR const GCPtrOff = GCPtrMem;
     5928    if (   !fIsLongMode
     5929        || iSegReg >= X86_SREG_FS)
     5930        GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
     5931    GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
     5932
     5933    /*
     5934     * Validate effective address.
     5935     * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
     5936     */
     5937    uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
     5938    Assert(cbAccess > 0);
     5939    if (fIsLongMode)
     5940    {
     5941        if (X86_IS_CANONICAL(GCPtrMem))
     5942        {
     5943            *pGCPtrMem = GCPtrMem;
     5944            return VINF_SUCCESS;
     5945        }
     5946
     5947        Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
     5948        hmR0VmxSetPendingXcptGP(pVCpu, 0);
     5949        return VINF_HM_PENDING_XCPT;
     5950    }
     5951
     5952    /*
     5953     * This is a watered down version of iemMemApplySegment().
     5954     * Parts that are not applicable for VMX instructions like real-or-v8086 mode
     5955     * and segment CPL/DPL checks are skipped.
     5956     */
     5957    RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
     5958    RTGCPTR32 const GCPtrLast32  = GCPtrFirst32 + cbAccess - 1;
     5959    PCCPUMSELREG    pSel         = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
     5960
     5961    /* Check if the segment is present and usable. */
     5962    if (    pSel->Attr.n.u1Present
     5963        && !pSel->Attr.n.u1Unusable)
     5964    {
     5965        Assert(pSel->Attr.n.u1DescType);
     5966        if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
     5967        {
     5968            /* Check permissions for the data segment. */
     5969            if (   fIsWrite
     5970                && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
     5971            {
     5972                Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
     5973                hmR0VmxSetPendingXcptGP(pVCpu, iSegReg);
     5974                return VINF_HM_PENDING_XCPT;
     5975            }
     5976
     5977            /* Check limits if it's a normal data segment. */
     5978            if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
     5979            {
     5980                if (   GCPtrFirst32 > pSel->u32Limit
     5981                    || GCPtrLast32  > pSel->u32Limit)
     5982                {
      5983                    Log4Func(("Data segment limit exceeded. "
     5984                              "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
     5985                              GCPtrLast32, pSel->u32Limit));
     5986                    if (iSegReg == X86_SREG_SS)
     5987                        hmR0VmxSetPendingXcptSS(pVCpu, 0);
     5988                    else
     5989                        hmR0VmxSetPendingXcptGP(pVCpu, 0);
     5990                    return VINF_HM_PENDING_XCPT;
     5991                }
     5992            }
     5993            else
     5994            {
     5995               /* Check limits if it's an expand-down data segment.
     5996                  Note! The upper boundary is defined by the B bit, not the G bit! */
     5997               if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
     5998                   || GCPtrLast32  > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
     5999               {
      6000                   Log4Func(("Expand-down data segment limit exceeded. "
     6001                             "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
     6002                             GCPtrLast32, pSel->u32Limit));
     6003                   if (iSegReg == X86_SREG_SS)
     6004                       hmR0VmxSetPendingXcptSS(pVCpu, 0);
     6005                   else
     6006                       hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6007                   return VINF_HM_PENDING_XCPT;
     6008               }
     6009            }
     6010        }
     6011        else
     6012        {
     6013            /* Check permissions for the code segment. */
     6014            if (   fIsWrite
     6015                || !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ))
     6016            {
     6017                Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
     6018                Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
     6019                hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6020                return VINF_HM_PENDING_XCPT;
     6021            }
     6022
     6023            /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
     6024            if (   GCPtrFirst32 > pSel->u32Limit
     6025                || GCPtrLast32  > pSel->u32Limit)
     6026            {
     6027                Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
     6028                          GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
     6029                if (iSegReg == X86_SREG_SS)
     6030                    hmR0VmxSetPendingXcptSS(pVCpu, 0);
     6031                else
     6032                    hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6033                return VINF_HM_PENDING_XCPT;
     6034            }
     6035        }
     6036    }
     6037    else
     6038    {
     6039        Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
     6040        hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6041        return VINF_HM_PENDING_XCPT;
     6042    }
     6043
     6044    *pGCPtrMem = GCPtrMem;
     6045    return VINF_SUCCESS;
     6046}
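
Stripped of the segmentation checks, the address computation above is the classic displacement + base + (index << scale) sum, to which the segment base is added (in 64-bit mode only for FS/GS), truncated to the instruction's address size and, in long mode, checked for canonical form. A self-contained arithmetic sketch with illustrative values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool demoIsCanonical(uint64_t uAddr)
    {
        /* 48-bit implementations: bits 63:47 must be the sign-extension of bit 47. */
        int64_t const iSext = (int64_t)(uAddr << 16) >> 16;
        return (uint64_t)iSext == uAddr;
    }

    int main(void)
    {
        static uint64_t const s_auAddrMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };

        uint64_t const uDisp     = 0x10;    /* instruction displacement */
        uint64_t const uBase     = 0x7000;  /* base register value */
        uint64_t const uIndex    = 0x8;     /* index register value */
        unsigned const uScale    = 2;       /* scale-by-4 */
        uint64_t const uSegBase  = 0;       /* flat segment in long mode */
        unsigned const uAddrSize = 2;       /* 0=16-bit, 1=32-bit, 2=64-bit */

        uint64_t GCPtrMem = uDisp + uBase + (uIndex << uScale) + uSegBase;
        GCPtrMem &= s_auAddrMasks[uAddrSize];

        printf("GCPtrMem=%#llx canonical=%d\n", (unsigned long long)GCPtrMem,
               demoIsCanonical(GCPtrMem));  /* 0x7030, canonical */
        return 0;
    }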
     6047
     6048
     6049/**
     6050 * Perform the relevant VMX instruction checks for VM-exits that occurred due to the
     6051 * guest attempting to execute a VMX instruction.
     6052 *
     6053 * @returns Strict VBox status code (i.e. informational status codes too).
     6054 * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
     6055 * @retval  VINF_HM_PENDING_XCPT if an exception was raised.
     6056 *
     6057 * @param   pVCpu           The cross context virtual CPU structure.
     6058 * @param   pVmxTransient   Pointer to the VMX transient structure.
     6059 *
     6060 * @todo    NstVmx: Document other error codes when VM-exit is implemented.
     6061 * @remarks No-long-jump zone!!!
     6062 */
     6063static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     6064{
     6065    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
     6066                              | CPUMCTX_EXTRN_HWVIRT);
     6067
     6068    if (   CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx)
     6069        || (    CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
     6070            && !CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
     6071    {
     6072        Log4Func(("In real/v86-mode or long-mode outside 64-bit code segment -> #UD\n"));
     6073        hmR0VmxSetPendingXcptUD(pVCpu);
     6074        return VINF_HM_PENDING_XCPT;
     6075    }
     6076
     6077    if (pVmxTransient->uExitReason == VMX_EXIT_VMXON)
     6078    {
     6079        /*
     6080         * We check CR4.VMXE because it is required to be always set while in VMX operation
     6081         * by physical CPUs and our CR4 read shadow is only consulted when executing specific
     6082         * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
     6083         * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
     6084         */
     6085        if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
     6086        {
     6087            Log4Func(("CR4.VMXE is not set -> #UD\n"));
     6088            hmR0VmxSetPendingXcptUD(pVCpu);
     6089            return VINF_HM_PENDING_XCPT;
     6090        }
     6091    }
     6092    else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
     6093    {
     6094        /*
     6095         * The guest has not entered VMX operation but attempted to execute a VMX instruction
      6096         * (other than VMXON), so we need to raise a #UD.
     6097         */
     6098        Log4Func(("Not in VMX root mode -> #UD\n"));
     6099        hmR0VmxSetPendingXcptUD(pVCpu);
     6100        return VINF_HM_PENDING_XCPT;
     6101    }
     6102
     6103    if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
     6104    {
     6105        /*
      6106         * The nested-guest attempted to execute a VMX instruction; cause a VM-exit and let
     6107         * the guest hypervisor deal with it.
     6108         */
     6109        /** @todo NSTVMX: Trigger a VM-exit */
     6110    }
     6111
     6112    /*
     6113     * VMX instructions require CPL 0 except in VMX non-root mode where the VM-exit intercept
      6115     * (above) takes precedence over the CPL check.
     6115     */
     6116    if (CPUMGetGuestCPL(pVCpu) > 0)
     6117    {
     6118        Log4Func(("CPL > 0 -> #GP(0)\n"));
     6119        hmR0VmxSetPendingXcptGP(pVCpu, 0);
     6120        return VINF_HM_PENDING_XCPT;
     6121    }
     6122
     6123    return VINF_SUCCESS;
     6124}
     6125
     6126#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
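
The check above orders its tests as follows: real/v86 or compatibility mode -> #UD; VMXON without CR4.VMXE -> #UD; any other VMX instruction outside VMX operation -> #UD; nested (non-root) execution -> VM-exit to the guest hypervisor; finally CPL > 0 -> #GP(0). Note that the non-root intercept is still a todo in this revision, so the code currently falls through to the CPL check there; the self-contained decision-table model below returns the intercept result the comment describes. All inputs and result names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum DEMORESULT
    {
        DEMO_OK = 0,
        DEMO_XCPT_UD,
        DEMO_XCPT_GP0,
        DEMO_VMEXIT_TO_GUEST_HV
    } DEMORESULT;

    static DEMORESULT demoCheckVmxInstr(bool fRealOrV86, bool fLongMode, bool f64BitCode,
                                        bool fIsVmxon, bool fCr4Vmxe, bool fVmxOperation,
                                        bool fVmxNonRootMode, unsigned uCpl)
    {
        if (fRealOrV86 || (fLongMode && !f64BitCode))
            return DEMO_XCPT_UD;             /* real/v86 or compatibility mode */
        if (fIsVmxon)
        {
            if (!fCr4Vmxe)
                return DEMO_XCPT_UD;         /* VMXON requires CR4.VMXE */
        }
        else if (!fVmxOperation)
            return DEMO_XCPT_UD;             /* other VMX instrs require VMX operation */
        if (fVmxNonRootMode)
            return DEMO_VMEXIT_TO_GUEST_HV;  /* intercept takes precedence over CPL */
        if (uCpl > 0)
            return DEMO_XCPT_GP0;            /* VMX instructions require CPL 0 */
        return DEMO_OK;
    }

    int main(void)
    {
        /* VMXON in 64-bit mode with CR4.VMXE set, but at CPL 3: #GP(0). */
        printf("result=%d\n", demoCheckVmxInstr(false, true, true, true, true, false, false, 3));
        return 0;
    }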
    58366127
    58376128
     
    58616152
    58626153    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    5863     if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
     6154    if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
    58646155    {
    58656156        uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
     
    81908481    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    81918482
     8483#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_ONLY_IN_IEM
      8484    Log2(("hmR0VmxPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
     8485    return VINF_EM_RESCHEDULE_REM;
     8486#endif
     8487
    81928488#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    81938489    PGMRZDynMapFlushAutoSet(pVCpu);
     
    93199615        case VMX_EXIT_VMXON:            SET_BOTH(VMX_VMXON); break;
    93209616        case VMX_EXIT_MOV_CRX:
    9321             hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    9322             if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
     9617            hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     9618            if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
    93239619                SET_BOTH(CRX_READ);
    93249620            else
    93259621                SET_BOTH(CRX_WRITE);
    9326             uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQualification);
     9622            uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
    93279623            break;
    93289624        case VMX_EXIT_MOV_DRX:
    9329             hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    9330             if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification)
     9625            hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     9626            if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
    93319627                == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
    93329628                SET_BOTH(DRX_READ);
    93339629            else
    93349630                SET_BOTH(DRX_WRITE);
    9335             uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification);
     9631            uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
    93369632            break;
    93379633        case VMX_EXIT_RDMSR:            SET_BOTH(RDMSR); break;
     
    94089704    if (fDtrace1 || fDtrace2)
    94099705    {
    9410         hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9706        hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    94119707        hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    94129708        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     
    95939889    else
    95949890    {
    9595         hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9891        hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    95969892        int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    95979893        AssertRC(rc);
    9598         VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
     9894        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
    95999895    }
    96009896
     
    1083311129}
    1083411130
     11131
     11132/** @name VM-exit handlers.
     11133 * @{
     11134 */
    1083511135/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    1083611136/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
    1083711137/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    10838 
    10839 /** @name VM-exit handlers.
    10840  * @{
    10841  */
    1084211138
    1084311139/**
     
    1096111257                        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    1096211258                        AssertRCReturn(rc, rc);
    10963                         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
     11259                        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
    1096411260                                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
    1096511261                                               0 /* GCPtrFaultAddress */);
     
    1127011566    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
    1127111567
    11272     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     11568    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1127311569    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1127411570    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
    1127511571    AssertRCReturn(rc, rc);
    1127611572
    11277     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQualification);
     11573    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQual);
    1127811574
    1127911575    if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
     
    1128511581    }
    1128611582    else
     11287         AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n",
     11288                          pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict)));
      11583        AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
      11584                         VBOXSTRICTRC_VAL(rcStrict)));
    1128911585    return rcStrict;
    1129011586}
     
    1188812184    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
    1188912185
    11890     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12186    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1189112187    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1189212188    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     
    1189512191    VBOXSTRICTRC rcStrict;
    1189612192    PVM pVM  = pVCpu->CTX_SUFF(pVM);
    11897     RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
    11898     uint32_t const uAccessType           = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification);
     12193    RTGCUINTPTR const uExitQual = pVmxTransient->uExitQual;
     12194    uint32_t const uAccessType  = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
    1189912195    switch (uAccessType)
    1190012196    {
     
    1190212198        {
    1190312199            uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
    11904             rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
    11905                                                  VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
    11906                                                  VMX_EXIT_QUAL_CRX_GENREG(uExitQualification));
     12200            rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
     12201                                                 VMX_EXIT_QUAL_CRX_GENREG(uExitQual));
    1190712202            AssertMsg(   rcStrict == VINF_SUCCESS
    1190812203                      || rcStrict == VINF_IEM_RAISED_XCPT
    1190912204                      || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1191012205
    11911             switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
     12206            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
    1191212207            {
    1191312208                case 0:
     
    1191612211                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    1191712212                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
    11918                     Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
     12213                    Log4Func(("CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
    1191912214
    1192012215                    /*
     
    1193512230                    {
    1193612231                        /** @todo check selectors rather than returning all the time.  */
    11937                         Log4(("CRx CR0 write: back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
     12232                        Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
    1193812233                        rcStrict = VINF_EM_RESCHEDULE_REM;
    1193912234                    }
     
    1195612251                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    1195712252                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
    11958                     Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
     12253                    Log4Func(("CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
    1195912254                    break;
    1196012255                }
     
    1196512260                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    1196612261                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
    11967                     Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
    11968                           pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
     12262                    Log4Func(("CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
     12263                              pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
    1196912264                    break;
    1197012265                }
     
    1197912274                }
    1198012275                default:
    11981                     AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)));
     12276                    AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)));
    1198212277                    break;
    1198312278            }
     
    1199012285                   || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
    1199112286                   || pVCpu->hm.s.fUsingDebugLoop
    11992                    || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3);
     12287                   || VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 3);
    1199312288            /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
    11994             Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 8
     12289            Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8
    1199512290                   || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
    1199612291
    11997             rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
    11998                                                 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification),
    11999                                                 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification));
     12292            rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
     12293                                                VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
    1200012294            AssertMsg(   rcStrict == VINF_SUCCESS
    1200112295                      || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1200212296#ifdef VBOX_WITH_STATISTICS
    12003             switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
     12297            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
    1200412298            {
    1200512299                case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
     
    1201012304            }
    1201112305#endif
    12012             Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
     12306            Log4Func(("CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
    1201312307                  VBOXSTRICTRC_VAL(rcStrict)));
    12014             if (VMX_EXIT_QUAL_CRX_GENREG(uExitQualification) == X86_GREG_xSP)
     12308            if (VMX_EXIT_QUAL_CRX_GENREG(uExitQual) == X86_GREG_xSP)
    1201512309                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
    1201612310            else
     
    1202712321            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    1202812322            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
    12029             Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
     12323            Log4Func(("CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
    1203012324            break;
    1203112325        }
     
    1203412328        {
    1203512329            /* Note! LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here. */
    12036             rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
    12037                                           VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification));
     12330            rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual));
    1203812331            AssertMsg(   rcStrict == VINF_SUCCESS
    1203912332                      || rcStrict == VINF_IEM_RAISED_XCPT
     
    1204212335            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    1204312336            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
    12044             Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
     12337            Log4Func(("LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
    1204512338            break;
    1204612339        }
     
    1207512368
    1207612369    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    12077     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12370    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1207812371    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1207912372    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER);
     
    1208212375
    1208312376    /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
    12084     uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification);
    12085     uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification);
    12086     bool     fIOWrite     = (   VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification)
    12087                              == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
    12088     bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification);
     12377    uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
     12378    uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQual);
     12379    bool     fIOWrite     = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
     12380    bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
    1208912381    bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
    1209012382    bool     fDbgStepping = pVCpu->hm.s.fSingleInstruction;
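
Per Intel SDM Table 27-5, the I/O exit qualification packs the access-size encoding into bits 2:0 (0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes), the direction into bit 3 (0 = OUT, 1 = IN), the string and REP flags into bits 4 and 5, and the port into bits 31:16; that is exactly what the VMX_EXIT_QUAL_IO_* accessors above extract. A self-contained decode sketch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Example: REP OUTSW to port 0x3f8 (2-byte, OUT, string, REP). */
        uint64_t const uExitQual = ((uint64_t)0x3f8 << 16)  /* port, bits 31:16       */
                                 | (1u << 5)                /* REP prefixed           */
                                 | (1u << 4)                /* string (INS/OUTS)      */
                                 | 1u;                      /* size encoding: 2 bytes */

        uint32_t const uIOPort   = (uint32_t)((uExitQual >> 16) & 0xffff);
        unsigned const uIOWidth  = (unsigned)(uExitQual & 7);
        bool     const fIOWrite  = !((uExitQual >> 3) & 1);  /* bit 3: 0=OUT, 1=IN */
        bool     const fIOString = (uExitQual >> 4) & 1;
        bool     const fRep      = (uExitQual >> 5) & 1;

        printf("port=%#x width=%u write=%d string=%d rep=%d\n",
               uIOPort, uIOWidth, fIOWrite, fIOString, fRep);
        return 0;
    }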
     
    1212412416             * interpreting the instruction.
    1212512417             */
    12126             Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     12418            Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    1212712419            AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
    1212812420            bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
     
    1213412426                AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
    1213512427                IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
    12136                 bool const fRep           = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification);
     12428                bool const fRep           = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
    1213712429                if (fIOWrite)
    1213812430                    rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
     
    1216012452             * IN/OUT - I/O instruction.
    1216112453             */
    12162             Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     12454            Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    1216312455            uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
    12164             Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification));
     12456            Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
    1216512457            if (fIOWrite)
    1216612458            {
     
    1229612588        Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
    1229712589              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    12298               VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",
     12590              VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
    1229912591              fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
    1230012592
     
    1231912611
     1232012612    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
    12321     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12613    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1232212614    AssertRCReturn(rc, rc);
    12323     if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
     12615    if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
    1232412616    {
    1232512617        rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
    1232612618        AssertRCReturn(rc, rc);
    12327         if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
     12619        if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
    1232812620        {
    1232912621            uint32_t       uErrCode;
     
    1235012642                                   0 /* cbInstr */, uErrCode, GCPtrFaultAddress);
    1235112643
    12352             Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
     12644            Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", uIntType, uVector));
    1235312645            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
    1235412646            return VINF_EM_RAW_INJECT_TRPM_EVENT;
     
    1240612698    /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
    1240712699    int rc  = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    12408     rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12700    rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1240912701    AssertRCReturn(rc, rc);
    1241012702
     1241112703    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
    12412     uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
     12704    uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
    1241312705    VBOXSTRICTRC rcStrict2;
    1241412706    switch (uAccessType)
     
    1241812710        {
    1241912711            AssertMsg(   !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
    12420                       || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,
     12712                      || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
    1242112713                      ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
    1242212714
    1242312715            RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase;   /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
    1242412716            GCPhys &= PAGE_BASE_GC_MASK;
    12425             GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
     12717            GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
    1242612718            PVM pVM = pVCpu->CTX_SUFF(pVM);
    1242712719            Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
    12428                  VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
     12720                 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
    1242912721
    1243012722            PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     
    1249412786
    1249512787#ifdef VBOX_WITH_STATISTICS
    12496         rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12788        rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1249712789        AssertRCReturn(rc, rc);
    12498         if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
     12790        if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    1249912791            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    1250012792        else
     
    1251012802     */
    1251112803    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    12512     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12804    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1251312805    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
    1251412806    AssertRCReturn(rc, rc);
     
    1251612808
    1251712809    PVM pVM = pVCpu->CTX_SUFF(pVM);
    12518     if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
     12810    if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    1251912811    {
    1252012812        rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    12521                                  VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification),
    12522                                  VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification));
     12813                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
     12814                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
    1252312815        if (RT_SUCCESS(rc))
    1252412816            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
     
    1252812820    {
    1252912821        rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    12530                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification),
    12531                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification));
     12822                                VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
     12823                                VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
    1253212824        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    1253312825    }
     
    1259612888        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1259712889        rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
    12598         Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
     12890        Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
    1259912891        if (   rcStrict == VINF_SUCCESS
    1260012892            || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
     
    1265512947    RTGCPHYS GCPhys;
    1265612948    int rc  = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
    12657     rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12949    rc     |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    1265812950    rc     |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1265912951    AssertRCReturn(rc, rc);
    1266012952
    1266112953    /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
    12662     AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
     12954    AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQual));
    1266312955
    1266412956    RTGCUINT uErrorCode = 0;
    12665     if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
     12957    if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
    1266612958        uErrorCode |= X86_TRAP_PF_ID;
    12667     if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_DATA_WRITE)
     12959    if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE)
    1266812960        uErrorCode |= X86_TRAP_PF_RW;
    12669     if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
     12961    if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
    1267012962        uErrorCode |= X86_TRAP_PF_P;
    1267112963
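
The lines above translate EPT-violation exit-qualification bits into architectural #PF error-code flags before handing the fault to PGM. A self-contained model of the mapping; the DEMO_QUAL_* positions follow Intel SDM Table 27-7, with an "entry readable" bit standing in for VBox's VMX_EXIT_QUAL_EPT_ENTRY_PRESENT:

    #include <stdint.h>
    #include <stdio.h>

    /* Exit-qualification bits for EPT violations (Intel SDM Table 27-7). */
    #define DEMO_QUAL_EPT_DATA_WRITE      UINT64_C(0x0002)  /* bit 1: data write */
    #define DEMO_QUAL_EPT_INSTR_FETCH     UINT64_C(0x0004)  /* bit 2: instruction fetch */
    #define DEMO_QUAL_EPT_ENTRY_READABLE  UINT64_C(0x0008)  /* bit 3: EPT entry was readable */

    /* Architectural #PF error-code bits. */
    #define DEMO_TRAP_PF_P   UINT32_C(0x01)  /* page present */
    #define DEMO_TRAP_PF_RW  UINT32_C(0x02)  /* write access */
    #define DEMO_TRAP_PF_ID  UINT32_C(0x10)  /* instruction fetch */

    static uint32_t demoEptQualToPfErrCode(uint64_t uExitQual)
    {
        uint32_t uErrCode = 0;
        if (uExitQual & DEMO_QUAL_EPT_INSTR_FETCH)
            uErrCode |= DEMO_TRAP_PF_ID;
        if (uExitQual & DEMO_QUAL_EPT_DATA_WRITE)
            uErrCode |= DEMO_TRAP_PF_RW;
        if (uExitQual & DEMO_QUAL_EPT_ENTRY_READABLE)
            uErrCode |= DEMO_TRAP_PF_P;
        return uErrCode;
    }

    int main(void)
    {
        /* Write to a mapped (readable) page: expect RW|P = 0x3. */
        printf("errcode=%#x\n",
               demoEptQualToPfErrCode(DEMO_QUAL_EPT_DATA_WRITE | DEMO_QUAL_EPT_ENTRY_READABLE));
        return 0;
    }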
     
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;

-    Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
-              uErrorCode, pCtx->cs.Sel, pCtx->rip));
+    Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
+              pCtx->cs.Sel, pCtx->rip));

     VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
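The translation above is what lets an EPT violation reuse the regular page-fault path: exit-qualification bits become an x86 #PF error code before PGMR0Trap0eHandlerNestedPaging is invoked. A minimal standalone sketch of that mapping, assuming the Intel SDM Table 27-7 bit positions (illustrative names, not VirtualBox code):

    #include <stdint.h>

    #define EPT_QUAL_DATA_WRITE   (UINT64_C(1) << 1)  /* violation was a data write */
    #define EPT_QUAL_INSTR_FETCH  (UINT64_C(1) << 2)  /* violation was an instruction fetch */
    #define EPT_QUAL_ENTRY_READ   (UINT64_C(1) << 3)  /* EPT entry permitted reads (present) */

    #define PF_ERR_P   UINT32_C(0x01)  /* cf. X86_TRAP_PF_P:  protection violation, page present */
    #define PF_ERR_RW  UINT32_C(0x02)  /* cf. X86_TRAP_PF_RW: write access */
    #define PF_ERR_ID  UINT32_C(0x10)  /* cf. X86_TRAP_PF_ID: instruction fetch */

    static uint32_t eptQualToPfErrCode(uint64_t uExitQual)
    {
        uint32_t uErr = 0;
        if (uExitQual & EPT_QUAL_INSTR_FETCH)
            uErr |= PF_ERR_ID;
        if (uExitQual & EPT_QUAL_DATA_WRITE)
            uErr |= PF_ERR_RW;
        if (uExitQual & EPT_QUAL_ENTRY_READ)
            uErr |= PF_ERR_P;
        return uErr;
    }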
     
 /** @} */

+/** @name VM-exit exception handlers.
+ * @{
+ */
 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit exception handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
-
-/** @name VM-exit exception handlers.
- * @{
- */

 /**
     
     }

-    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                           pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     return rc;
 }
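The renamed VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO macro converts VM-exit interruption information into a value that can be re-injected at the next VM-entry. As a sketch of what such a conversion amounts to (an assumption drawn from the Intel SDM field formats, not the macro's actual definition): vector (bits 7:0), type (10:8), error-code flag (11) and valid (31) are common to both formats, while bit 12, NMI unblocking due to IRET, exists only on the exit side and must not survive:

    #include <stdint.h>

    #define EXIT_INT_INFO_NMI_UNBLOCK_IRET  (UINT32_C(1) << 12)  /* exit-format-only bit */

    static uint32_t entryIntInfoFromExitIntInfo(uint32_t uExitIntInfo)
    {
        /* Keep vector, type, error-code-valid and valid; drop the exit-only bit. */
        return uExitIntInfo & ~EXIT_INT_INFO_NMI_UNBLOCK_IRET;
    }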
     
         AssertRCReturn(rc, rc);

-        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     }

     
     Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);

-    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                           pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     return VINF_SUCCESS;
 }
     
      * for processing.
      */
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);

     /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
     uint64_t uDR6 = X86_DR6_INIT_VAL;
-    uDR6         |= (  pVmxTransient->uExitQualification
-                     & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
+    uDR6         |= (pVmxTransient->uExitQual & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));

     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
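The one-liner keeps the original semantics: DR6 starts from its architectural init value and the condition bits reported in the exit qualification are OR'ed in. A standalone sketch with illustrative constants matching the X86_DR6_* values:

    #include <stdint.h>

    #define DR6_INIT_VAL  UINT64_C(0xffff0ff0)  /* cf. X86_DR6_INIT_VAL: reserved bits read as 1 */
    #define DR6_B0_B3     UINT64_C(0x0000000f)  /* B0..B3: breakpoint condition detected */
    #define DR6_BD        UINT64_C(0x00002000)  /* BD: debug-register access detected */
    #define DR6_BS        UINT64_C(0x00004000)  /* BS: single step */

    static uint64_t composeDr6(uint64_t uExitQual)
    {
        return DR6_INIT_VAL | (uExitQual & (DR6_B0_B3 | DR6_BD | DR6_BS));
    }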
     
         rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
         AssertRCReturn(rc, rc);
-        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
         return VINF_SUCCESS;
     }
     
         Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,
                   pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
-        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
         return rc;
     }
     
                     && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
                 {
-                    Log4(("hmR0VmxExitXcptGP: mode changed -> VINF_EM_RESCHEDULE\n"));
+                    Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
                     /** @todo Exit fRealOnV86Active here w/o dropping back to ring-3. */
                     rc = VINF_EM_RESCHEDULE;
     
 #endif

-    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                           pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
+                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     return VINF_SUCCESS;
 }
     
     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     rc    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     rc    |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     
         if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
         {
-            hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                                   0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
+            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
+                                   pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
         }
         else
     
     AssertRCReturn(rc, rc);

-    Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
-              pCtx->cs.Sel, pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
-
-    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
-    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx),
-                          (RTGCPTR)pVmxTransient->uExitQualification);
+    Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel,
+              pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
+
+    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
+    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);

     Log4Func(("#PF: rc=%Rrc\n", rc));
     
             TRPMResetTrap(pVCpu);
             pVCpu->hm.s.Event.fPending = false;                 /* In case it's a contributory #PF. */
-            hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
-                                   0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
+            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
+                                   uGstErrorCode, pVmxTransient->uExitQual);
         }
         else
     
 /** @} */

+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+
+/** @name Nested-guest VM-exit handlers.
+ * @{
+ */
+/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
+/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Nested-guest VM-exit handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
+/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
+
+/**
+ * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmclear. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmlaunch. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmptrld. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmptrst. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmread. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmresume. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    /** @todo NSTVMX: Vmwrite. */
+    hmR0VmxSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+    AssertRCReturn(rc, rc);
+
+    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+    {
+        /* VMXOFF on success changes the internal hwvirt state but not anything that's visible to the guest. */
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT);
+    }
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+        rcStrict = VINF_SUCCESS;
+    }
+    return rcStrict;
+}
+
+
+/**
+ * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
+ */
+HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
+
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
+    rc    |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
+    rc    |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
+    AssertRCReturn(rc, rc);
+
+    VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToVmxInstr(pVCpu, pVmxTransient);
+    if (rcStrict == VINF_SUCCESS)
+    { /* likely */ }
+    else if (rcStrict == VINF_HM_PENDING_XCPT)
+    {
+        Log4Func(("Privilege checks failed, raising xcpt %#x!\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+        return VINF_SUCCESS;
+    }
+    else
+    {
+        Log4Func(("hmR0VmxCheckExitDueToVmxInstr failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        return rcStrict;
+    }
+
+    RTGCPTR            GCPtrVmxon;
+    PCVMXEXITINSTRINFO pExitInstrInfo = &pVmxTransient->ExitInstrInfo;
+    RTGCPTR const      GCPtrDisp      = pVmxTransient->uExitQual;
+    rcStrict = hmR0VmxDecodeMemOperand(pVCpu, pExitInstrInfo, GCPtrDisp, false /*fIsWrite*/, &GCPtrVmxon);
+    if (rcStrict == VINF_SUCCESS)
+    { /* likely */ }
+    else if (rcStrict == VINF_HM_PENDING_XCPT)
+    {
+        Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", VMX_ENTRY_INT_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo)));
+        return VINF_SUCCESS;
+    }
+    else
+    {
+        Log4Func(("hmR0VmxDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        return rcStrict;
+    }
+
+    rcStrict = IEMExecDecodedVmxon(pVCpu, pVmxTransient->cbInstr, GCPtrVmxon, pExitInstrInfo->u, GCPtrDisp);
+    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+        rcStrict = VINF_SUCCESS;
+    }
+    return rcStrict;
+}
+
+/** @} */
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
+
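The VMXON handler is the first consumer of the memory-operand decoding this changeset adds: the displacement arrives in the exit qualification, and the register, segment and address-size encoding in the VM-exit instruction-information field. A simplified standalone sketch of the effective-address computation (field layout per Intel SDM Table 27-9; illustrative names, segment-base addition and access checks omitted):

    #include <stdint.h>

    typedef struct VMXINSTRINFOSKETCH
    {
        uint32_t u2Scale         : 2;  /* bits 1:0   - index scale (log2) */
        uint32_t u5Undef0        : 5;  /* bits 6:2   - undefined */
        uint32_t u3AddrSize      : 3;  /* bits 9:7   - 0=16-bit, 1=32-bit, 2=64-bit */
        uint32_t u5Undef1        : 5;  /* bits 14:10 - undefined/cleared */
        uint32_t iSegReg         : 3;  /* bits 17:15 - segment register */
        uint32_t iIdxReg         : 4;  /* bits 21:18 - index register */
        uint32_t fIdxRegInvalid  : 1;  /* bit 22     - index register invalid */
        uint32_t iBaseReg        : 4;  /* bits 26:23 - base register */
        uint32_t fBaseRegInvalid : 1;  /* bit 27     - base register invalid */
        uint32_t u4Undef2        : 4;  /* bits 31:28 - undefined */
    } VMXINSTRINFOSKETCH;

    /* pauGprs: guest GPRs indexed by the SDM register encoding (0=RAX .. 15=R15). */
    static uint64_t decodeMemOperand(VMXINSTRINFOSKETCH Info, uint64_t uDisp, const uint64_t *pauGprs)
    {
        uint64_t uGCPtr = uDisp;                  /* displacement from the exit qualification */
        if (!Info.fBaseRegInvalid)
            uGCPtr += pauGprs[Info.iBaseReg];
        if (!Info.fIdxRegInvalid)
            uGCPtr += pauGprs[Info.iIdxReg] << Info.u2Scale;
        if (Info.u3AddrSize == 0)                 /* 16-bit address size */
            uGCPtr &= UINT16_MAX;
        else if (Info.u3AddrSize == 1)            /* 32-bit address size */
            uGCPtr &= UINT32_MAX;
        return uGCPtr;                            /* caller adds the segment base where applicable */
    }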