VirtualBox

Timestamp: Dec 4, 2015 5:09:04 PM
Author:    vboxsync
Message:   HM: Implemented 69 new dtrace probes and DBGF events in the VT-x code.

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r58938 r58998  
    316316 * VMX VM-exit handler.
    317317 *
    318  * @returns Strict VBox status code.
     318 * @returns Strict VBox status code (i.e. informational status codes too).
    319319 * @param   pVCpu           The cross context virtual CPU structure.
    320320 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     
    375375#endif
    376376
    377 DECLINLINE(VBOXSTRICTRC)  hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
    378                                                 uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart);
    379377
    380378/** @name VM-exit handlers.
     
    435433static int          hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
    436434static int          hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
    437 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    438435static int          hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
    439 #endif
    440436static uint32_t     hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    441437
     
    507503 /* 54  VMX_EXIT_WBINVD                  */  hmR0VmxExitWbinvd,
    508504 /* 55  VMX_EXIT_XSETBV                  */  hmR0VmxExitXsetbv,
    509  /* 56  UNDEFINED                        */  hmR0VmxExitErrUndefined,
     505 /* 56  VMX_EXIT_APIC_WRITE              */  hmR0VmxExitErrUndefined,
    510506 /* 57  VMX_EXIT_RDRAND                  */  hmR0VmxExitRdrand,
    511507 /* 58  VMX_EXIT_INVPCID                 */  hmR0VmxExitInvpcid,
     
    11711167    if (uMsr <= 0x00001FFF)
    11721168        iBit = uMsr;
    1173     else if (   uMsr >= 0xC0000000
    1174              && uMsr <= 0xC0001FFF)
    1175     {
    1176         iBit = (uMsr - 0xC0000000);
     1169    else if (uMsr - UINT32_C(0xC0000000) <= UINT32_C(0x00001FFF))
     1170    {
     1171        iBit = uMsr - UINT32_C(0xC0000000);
    11771172        pbMsrBitmap += 0x400;
    11781173    }
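
Editor's note: the rewritten MSR range check above folds the two comparisons (uMsr >= 0xC0000000 && uMsr <= 0xC0001FFF) into a single unsigned subtract-and-compare; values below 0xC0000000 wrap around to something larger than 0x1FFF and fail the test. A minimal standalone sketch of the idiom, with a hypothetical helper name that is not part of this patch:

    #include <stdint.h>
    #include <stdbool.h>

    /* For unsigned x: (x - lo) <= (hi - lo) is equivalent to (lo <= x && x <= hi),
       because any x below lo wraps around to a value larger than (hi - lo). */
    static bool IsHighMsr(uint32_t uMsr)   /* hypothetical helper, not in the patch */
    {
        return uMsr - UINT32_C(0xC0000000) <= UINT32_C(0x00001FFF);
    }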
     
    34633458        if (pVCpu->hm.s.fGIMTrapXcptUD)
    34643459            pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD);
     3460#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    34653461        else
    3466         {
    3467 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    34683462            pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_UD);
    34693463#endif
    3470         }
    34713464
    34723465        Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));
     
    56125605 * IDT.
    56135606 *
    5614  * @returns Strict VBox status code (informational error codes included).
     5607 * @returns Strict VBox status code (i.e. informational status codes too).
    56155608 * @retval  VINF_SUCCESS if we should continue handling the VM-exit.
    56165609 * @retval  VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
     
    66036596
    66046597    AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
    6605               ("Missed guest state bits while saving state; residue %RX32\n", HMVMXCPU_GST_VALUE(pVCpu)));
     6598              ("Missed guest state bits while saving state; missing %RX32 (got %RX32, want %RX32) - check log for any previous errors!\n",
     6599               HMVMX_UPDATED_GUEST_ALL ^ HMVMXCPU_GST_VALUE(pVCpu), HMVMXCPU_GST_VALUE(pVCpu), HMVMX_UPDATED_GUEST_ALL));
    66066600
    66076601    if (VMMRZCallRing3IsEnabled(pVCpu))
     
    66816675 * ring-3 for one reason or another.
    66826676 *
    6683  * @returns Strict VBox status code (information status code included).
     6677 * @returns Strict VBox status code (i.e. informational status codes too)
    66846678 * @retval VINF_SUCCESS if we don't have any actions that require going back to
    66856679 *         ring-3.
     
    74527446 * receive them.
    74537447 *
    7454  * @returns Strict VBox status code (informational status codes included).
     7448 * @returns Strict VBox status code (i.e. informational status codes too).
    74557449 * @param   pVCpu           The cross context virtual CPU structure.
    74567450 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     
    75817575 * Injects a double-fault (\#DF) exception into the VM.
    75827576 *
    7583  * @returns Strict VBox status code (informational status code included).
     7577 * @returns Strict VBox status code (i.e. informational status codes too).
    75847578 * @param   pVCpu           The cross context virtual CPU structure.
    75857579 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     
    76437637 * Injects a general-protection (\#GP) fault into the VM.
    76447638 *
    7645  * @returns Strict VBox status code (informational status code included).
     7639 * @returns Strict VBox status code (i.e. informational status codes too).
    76467640 * @param   pVCpu               The cross context virtual CPU structure.
    76477641 * @param   pMixedCtx           Pointer to the guest-CPU context. The data may be
     
    77207714 * stack.
    77217715 *
    7722  * @returns Strict VBox status code (information status code included).
     7716 * @returns Strict VBox status code (i.e. informational status codes too).
    77237717 * @retval  VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
    77247718 * @param   pVM         The cross context VM structure.
     
    77467740 * in the VM-entry area in the VMCS.
    77477741 *
    7748  * @returns Strict VBox status code (informational error codes included).
     7742 * @returns Strict VBox status code (i.e. informational status codes too).
    77497743 * @retval  VINF_SUCCESS if the event is successfully injected into the VMCS.
    77507744 * @retval  VINF_EM_RESET if event injection resulted in a triple-fault.
     
    83688362 * Otherwise such operations must be done when we can no longer exit to ring-3.
    83698363 *
    8370  * @returns Strict VBox status code.
     8364 * @returns Strict VBox status code (i.e. informational status codes too).
    83718365 * @retval  VINF_SUCCESS if we can proceed with running the guest, interrupts
    83728366 *          have been disabled.
     
    88148808
    88158809        VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
    8816         if (RT_LIKELY(!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED()))
    8817         { /* likely */ }
    8818         else
    8819         {
    8820             hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
    8821             hmR0VmxSaveGuestState(pVCpu, pCtx);
    8822             VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
    8823         }
    88248810
    88258811        /* Handle the VM-exit. */
     
    88458831
    88468832
    8847 /**
    8848  * Single steps guest code using VT-x.
    8849  *
    8850  * @returns Strict VBox status code.
     8833
     8834/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
     8835 *
     8836 * The following few functions and associated structure contain the bloat
     8837 * necessary for providing detailed debug events and dtrace probes as well as
     8838 * reliable host side single stepping.  This works on the principle of
     8839 * "subclassing" the normal execution loop and workers.  We replace the loop
     8840 * method completely and override selected helpers to add necessary adjustments
     8841 * to their core operation.
     8842 *
     8843 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
     8844 * any performance for debug and analysis features.
     8845 *
     8846 * @{
     8847 */
     8848
     8849typedef struct VMXRUNDBGSTATE
     8850{
     8851    /** The RIP we started executing at.  This is for detecting that we stepped.  */
     8852    uint64_t    uRipStart;
     8853    /** The CS we started executing with.  */
     8854    uint16_t    uCsStart;
     8855
     8856    /** Whether we've actually modified the 1st execution control field. */
     8857    bool        fModifiedProcCtls : 1;
     8858    /** Whether we've actually modified the 2nd execution control field. */
     8859    bool        fModifiedProcCtls2 : 1;
     8860    /** Whether we've actually modified the exception bitmap. */
     8861    bool        fModifiedXcptBitmap : 1;
     8862
     8863    /** We desire the CR0 mask to be cleared. */
     8864    bool        fClearCr0Mask : 1;
     8865    /** We desire the CR4 mask to be cleared. */
     8866    bool        fClearCr4Mask : 1;
     8867    /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
     8868    uint32_t    fCpe1Extra;
     8869    /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
     8870    uint32_t    fCpe1Unwanted;
     8871    /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
     8872    uint32_t    fCpe2Extra;
     8873    /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
     8874    uint32_t    bmXcptExtra;
     8875    /** The sequence number of the Dtrace provider settings the state was
     8876     *  configured against. */
     8877    uint32_t    uDtraceSettingsSeqNo;
     8878    /** Exits to check (one bit per exit). */
     8879    uint32_t    bmExitsToCheck[2];
     8880
     8881    /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
     8882    uint32_t    fProcCtlsInitial;
     8883    /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
     8884    uint32_t    fProcCtls2Initial;
     8885    /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
     8886    uint32_t    bmXcptInitial;
     8887
     8888} VMXRUNDBGSTATE;
     8889AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 31) / 32 * 4);
     8890typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
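
Editor's note: the AssertCompileMemberSize line above checks at compile time that bmExitsToCheck provides one bit per VM-exit reason; (VMX_EXIT_MAX + 31) / 32 rounds up to whole 32-bit words and the * 4 converts words to bytes. A rough standalone sketch of the bit set/test pattern the new code drives through ASMBitSet/ASMBitTest, using illustrative names rather than the real fields:

    #include <stdint.h>
    #include <stdbool.h>

    #define SKETCH_EXIT_MAX  64                                /* illustrative value only */
    static uint32_t g_bmExits[(SKETCH_EXIT_MAX + 31) / 32];    /* one bit per exit reason */

    static void sketchMarkExit(uint32_t uExit)                 /* cf. ASMBitSet() in the patch */
    {
        g_bmExits[uExit / 32] |= UINT32_C(1) << (uExit % 32);
    }

    static bool sketchIsExitMarked(uint32_t uExit)             /* cf. ASMBitTest() in the patch */
    {
        return (g_bmExits[uExit / 32] >> (uExit % 32)) & 1;
    }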
     8891
     8892
     8893/**
     8894 * Initializes the VMXRUNDBGSTATE structure.
     8895 *
     8896 * @param   pVCpu           The cross context virtual CPU structure of the
     8897 *                          calling EMT.
     8898 * @param   pCtx            The CPU register context to go with @a pVCpu.
     8899 * @param   pDbgState       The structure to initialize.
     8900 */
     8901DECLINLINE(void) hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)
     8902{
     8903    pDbgState->uRipStart            = pCtx->rip;
     8904    pDbgState->uCsStart             = pCtx->cs.Sel;
     8905
     8906    pDbgState->fModifiedProcCtls    = false;
     8907    pDbgState->fModifiedProcCtls2   = false;
     8908    pDbgState->fModifiedXcptBitmap  = false;
     8909    pDbgState->fClearCr0Mask        = false;
     8910    pDbgState->fClearCr4Mask        = false;
     8911    pDbgState->fCpe1Extra           = 0;
     8912    pDbgState->fCpe1Unwanted        = 0;
     8913    pDbgState->fCpe2Extra           = 0;
     8914    pDbgState->bmXcptExtra          = 0;
     8915    pDbgState->fProcCtlsInitial     = pVCpu->hm.s.vmx.u32ProcCtls;
     8916    pDbgState->fProcCtls2Initial    = pVCpu->hm.s.vmx.u32ProcCtls2;
     8917    pDbgState->bmXcptInitial        = pVCpu->hm.s.vmx.u32XcptBitmap;
     8918}
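
Editor's note: taken together, the new helpers wrap the normal inner execution loop: the debug state is initialized once on entry, refreshed from the current DBGF and DTrace settings before each run, applied to the VMCS once interrupts are disabled, consulted when filtering each VM-exit, and reverted on the way out. The sketch below only illustrates that ordering; the enclosing run loop is not part of this hunk, so the surrounding function name is a placeholder and forward declarations plus the actual guest-run step are omitted.

    /* Ordering sketch only; sketchDebugRun() is a placeholder, not VirtualBox code. */
    static VBOXSTRICTRC sketchDebugRun(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx,
                                       PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
    {
        VMXRUNDBGSTATE DbgState;
        hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState);                /* once, on entry */

        /* Each iteration of the real loop: */
        hmR0VmxPreRunGuestDebugStateUpdate(pVM, pVCpu, pCtx, &DbgState); /* pick up DBGF/DTrace settings */
        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState);             /* push them into the VMCS */
        /* ... guest runs, a VM-exit occurs ... */
        VBOXSTRICTRC rcStrict = hmR0VmxRunDebugHandleExit(pVM, pVCpu, pCtx, pVmxTransient,
                                                          uExitReason, &DbgState);

        /* When leaving the loop, undo the VMCS tweaks: */
        return hmR0VmxRunDebugStateRevert(pVCpu, &DbgState, rcStrict);
    }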
     8919
     8920
     8921/**
     8922 * Updates the VMCS fields with changes requested by @a pDbgState.
     8923 *
     8924 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
     8925 * immediately before executing guest code, i.e. when interrupts are disabled.
     8926 * We don't check status codes here as we cannot easily assert or return in the
     8927 * latter case.
     8928 *
     8929 * @param   pVCpu       The cross context virtual CPU structure.
     8930 * @param   pDbgState   The debug state.
     8931 */
     8932DECLINLINE(void) hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
     8933{
     8934    /*
     8935     * Ensure desired flags in VMCS control fields are set.
     8936     * (Ignoring write failure here, as we're committed and it's just debug extras.)
     8937     *
     8938     * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
     8939     *       there should be no stale data in pCtx at this point.
     8940     */
     8941    if (   (pVCpu->hm.s.vmx.u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
     8942        || (pVCpu->hm.s.vmx.u32ProcCtls & pDbgState->fCpe1Unwanted))
     8943    {
     8944        pVCpu->hm.s.vmx.u32ProcCtls   |= pDbgState->fCpe1Extra;
     8945        pVCpu->hm.s.vmx.u32ProcCtls   &= ~pDbgState->fCpe1Unwanted;
     8946        VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
     8947        pDbgState->fModifiedProcCtls   = true;
     8948    }
     8949
     8950    if ((pVCpu->hm.s.vmx.u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
     8951    {
     8952        pVCpu->hm.s.vmx.u32ProcCtls2  |= pDbgState->fCpe2Extra;
     8953        VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVCpu->hm.s.vmx.u32ProcCtls2);
     8954        pDbgState->fModifiedProcCtls2  = true;
     8955    }
     8956
     8957    if ((pVCpu->hm.s.vmx.u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
     8958    {
     8959        pVCpu->hm.s.vmx.u32XcptBitmap |= pDbgState->bmXcptExtra;
     8960        VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
     8961        pDbgState->fModifiedXcptBitmap = true;
     8962    }
     8963
     8964    if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.u32CR0Mask != 0)
     8965    {
     8966        pVCpu->hm.s.vmx.u32CR0Mask = 0;
     8967        VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, 0);
     8968    }
     8969
     8970    if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.u32CR4Mask != 0)
     8971    {
     8972        pVCpu->hm.s.vmx.u32CR4Mask = 0;
     8973        VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, 0);
     8974    }
     8975}
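
Editor's note: each block in the function above follows the same pattern: compare the cached copy of a control field with the bits the debug state wants, write the VMCS only when something actually changes, and record the change so the revert path knows which fields to restore. A generic sketch of that pattern, with illustrative names that are not from the patch:

    /* Write-if-changed sketch: pu32Cache mirrors a VMCS control field and
       *pfModified tells the revert path whether a restore is needed. */
    static void sketchSetCtlBits(uint32_t idVmcsField, uint32_t *pu32Cache,
                                 uint32_t fWantedBits, bool *pfModified)
    {
        if ((*pu32Cache & fWantedBits) != fWantedBits)
        {
            *pu32Cache |= fWantedBits;
            VMXWriteVmcs32(idVmcsField, *pu32Cache);   /* write failure ignored, as above */
            *pfModified = true;
        }
    }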
     8976
     8977
     8978DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)
     8979{
     8980    /*
     8981     * Restore exit control settings as we may not reenter this function the
     8982     * next time around.
     8983     */
     8984    /* We reload the initial value, trigger what we can of recalculations the
     8985       next time around.  From the looks of things, that's all that's required atm. */
     8986    if (pDbgState->fModifiedProcCtls)
     8987    {
     8988        if (!(pDbgState->fProcCtlsInitial & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
     8989            pDbgState->fProcCtlsInitial |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
     8990        int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
     8991        AssertRCReturn(rc2, rc2);
     8992        pVCpu->hm.s.vmx.u32ProcCtls = pDbgState->fProcCtlsInitial;
     8993        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_DEBUG);
     8994    }
     8995
     8996    /* We're currently the only ones messing with this one, so just restore the
     8997       cached value and reload the field. */
     8998    if (   pDbgState->fModifiedProcCtls2
     8999        && pVCpu->hm.s.vmx.u32ProcCtls2 != pDbgState->fProcCtls2Initial)
     9000    {
     9001        int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
     9002        AssertRCReturn(rc2, rc2);
     9003        pVCpu->hm.s.vmx.u32ProcCtls2 = pDbgState->fProcCtls2Initial;
     9004    }
     9005
     9006    /* If we've modified the exception bitmap, we restore it and trigger
     9007       reloading and partial recalculation the next time around. */
     9008    if (pDbgState->fModifiedXcptBitmap)
     9009    {
     9010        pVCpu->hm.s.vmx.u32XcptBitmap = pDbgState->bmXcptInitial;
     9011        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS | HM_CHANGED_GUEST_CR0);
     9012    }
     9013
     9014    /* We assume hmR0VmxLoadSharedCR0 will recalculate and load the CR0 mask. */
     9015    if (pDbgState->fClearCr0Mask)
     9016        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     9017
     9018    /* We assume hmR0VmxLoadGuestCR3AndCR4 will recalculate and load the CR4 mask. */
     9019    if (pDbgState->fClearCr4Mask)
     9020        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
     9021
     9022    return rcStrict;
     9023}
     9024
     9025
     9026/**
     9027 * Configures exit controls for current DBGF and DTrace settings.
     9028 *
     9029 * This updates @a pDbgState and the VMCS execution control fields to reflect
     9030 * the necessary exits demanded by DBGF and DTrace.
     9031 *
    88519032 * @param   pVM         The cross context VM structure.
    88529033 * @param   pVCpu       The cross context virtual CPU structure.
    88539034 * @param   pCtx        Pointer to the guest-CPU context.
     9035 * @param   pDbgState   The debug state.
     9036 */
     9037static void hmR0VmxPreRunGuestDebugStateUpdate(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)
     9038{
     9039    /*
     9040     * Take down the dtrace serial number so we can spot changes.
     9041     */
     9042    pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
     9043    ASMCompilerBarrier();
     9044
     9045    /*
     9046     * We'll rebuild most of the middle block of data members (holding the
     9047     * current settings) as we go along here, so start by clearing it all.
     9048     */
     9049    pDbgState->bmXcptExtra      = 0;
     9050    pDbgState->fCpe1Extra       = 0;
     9051    pDbgState->fCpe1Unwanted    = 0;
     9052    pDbgState->fCpe2Extra       = 0;
     9053    for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
     9054        pDbgState->bmExitsToCheck[i] = 0;
     9055
     9056    /*
     9057     * Software interrupts (INT XXh) - no idea how to trigger these...
     9058     */
     9059    if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
     9060        || VBOXVMM_INT_SOFTWARE_ENABLED())
     9061    {
     9062        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
     9063    }
     9064
     9065    /*
     9066     * Exception bitmap and XCPT events+probes.
     9067     */
     9068    for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
     9069        if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
     9070            pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
     9071
     9072    if (VBOXVMM_XCPT_DE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
     9073    if (VBOXVMM_XCPT_DB_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
     9074    if (VBOXVMM_XCPT_BP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
     9075    if (VBOXVMM_XCPT_OF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
     9076    if (VBOXVMM_XCPT_BR_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
     9077    if (VBOXVMM_XCPT_UD_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
     9078    if (VBOXVMM_XCPT_NM_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
     9079    if (VBOXVMM_XCPT_DF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
     9080    if (VBOXVMM_XCPT_TS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
     9081    if (VBOXVMM_XCPT_NP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
     9082    if (VBOXVMM_XCPT_SS_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
     9083    if (VBOXVMM_XCPT_GP_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
     9084    if (VBOXVMM_XCPT_PF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
     9085    if (VBOXVMM_XCPT_MF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
     9086    if (VBOXVMM_XCPT_AC_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
     9087    if (VBOXVMM_XCPT_XF_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
     9088    if (VBOXVMM_XCPT_VE_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
     9089    if (VBOXVMM_XCPT_SX_ENABLED())  pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
     9090
     9091    if (pDbgState->bmXcptExtra)
     9092        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
     9093
     9094    /*
     9095     * Process events and probes for VM exits, making sure we get the wanted exits.
     9096     *
     9097     * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
     9098     *       So, when adding/changing/removing please don't forget to update it.
     9099     *
     9100     * Some of the macros are picking up local variables to save horizontal space,
     9101     * (being able to see it in a table is the lesser evil here).
     9102     */
     9103#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
     9104        (    DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
     9105         ||  RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
     9106#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
     9107        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); else do { } while (0)
     9108#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
     9109        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
     9110        { \
     9111            (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
     9112            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
     9113        } else do { } while (0)
     9114#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
     9115        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
     9116        { \
     9117            (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
     9118            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
     9119        } else do { } while (0)
     9120#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
     9121        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
     9122        { \
     9123            (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
     9124            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
     9125        } else do { } while (0)
     9126
     9127    SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH,         VMX_EXIT_TASK_SWITCH);      /* unconditional */
     9128    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION,   VMX_EXIT_EPT_VIOLATION);    /* unconditional */
     9129    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG,   VMX_EXIT_EPT_MISCONFIG);    /* unconditional (unless #VE) */
     9130    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS,    VMX_EXIT_APIC_ACCESS);      /* feature dependent, nothing to enable here */
     9131    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE,     VMX_EXIT_APIC_WRITE);       /* feature dependent, nothing to enable here */
     9132
     9133    SET_ONLY_XBM_IF_EITHER_EN(EXIT_CPUID,               VMX_EXIT_CPUID);            /* unconditional */
     9134    SET_ONLY_XBM_IF_EITHER_EN(EXIT_GETSEC,              VMX_EXIT_GETSEC);           /* unconditional */
     9135    SET_CPE1_XBM_IF_EITHER_EN(EXIT_HALT,                VMX_EXIT_HLT,      VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT); /* paranoia */
     9136    SET_ONLY_XBM_IF_EITHER_EN(EXIT_INVD,                VMX_EXIT_INVD);             /* unconditional */
     9137    SET_CPE1_XBM_IF_EITHER_EN(EXIT_INVLPG,              VMX_EXIT_INVLPG,   VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
     9138#if 0
     9139    SET_CPE1_XBM_IF_EITHER_EN(EXIT_RDPMC,               VMX_EXIT_RDPMC,    VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT);
     9140    SET_CPE1_XBM_IF_EITHER_EN(EXIT_RDTSC,               VMX_EXIT_RDTSC,    VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
     9141    SET_ONLY_XBM_IF_EITHER_EN(EXIT_RSM,                 VMX_EXIT_RSM);              /* unconditional */
     9142    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMM_CALL,            VMX_EXIT_VMCALL);           /* unconditional */
     9143    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMCLEAR,         VMX_EXIT_VMCLEAR);          /* unconditional */
     9144    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMLAUNCH,        VMX_EXIT_VMLAUNCH);         /* unconditional */
     9145    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMPTRLD,         VMX_EXIT_VMPTRLD);          /* unconditional */
     9146    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMPTRST,         VMX_EXIT_VMPTRST);          /* unconditional */
     9147    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMREAD,          VMX_EXIT_VMREAD);           /* unconditional */
     9148    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMRESUME,        VMX_EXIT_VMRESUME);         /* unconditional */
     9149    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMWRITE,         VMX_EXIT_VMWRITE);          /* unconditional */
     9150    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMXOFF,          VMX_EXIT_VMXOFF);           /* unconditional */
     9151    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMXON,           VMX_EXIT_VMXON);            /* unconditional */
     9152
     9153    if (   IS_EITHER_ENABLED(pVM, EXIT_CRX_READ)
     9154        || IS_EITHER_ENABLED(pVM, EXIT_CRX_WRITE))
     9155    {
     9156        int rc2 = hmR0VmxSaveGuestCR0(pVCpu, pCtx);
     9157        rc2    |= hmR0VmxSaveGuestCR4(pVCpu, pCtx);
     9158        rc2    |= hmR0VmxSaveGuestApicState(pVCpu, pCtx);
     9159        AssertRC(rc2);
     9160
     9161        pDbgState->fClearCr0Mask = true;
     9162        pDbgState->fClearCr4Mask = true;
     9163        if (IS_EITHER_ENABLED(pVM, EXIT_CRX_READ))
     9164            pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT;
     9165        if (IS_EITHER_ENABLED(pVM, EXIT_CRX_WRITE))
     9166            pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;
     9167        pDbgState->fCpe1Unwanted |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* risky? */
     9168        /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT.  It would
     9169                 require clearing here and in the loop if we start using it. */
     9170        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
     9171    }
     9172    else
     9173    {
     9174        if (pDbgState->fClearCr0Mask)
     9175        {
     9176            pDbgState->fClearCr0Mask = false;
     9177            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     9178        }
     9179        if (pDbgState->fClearCr4Mask)
     9180        {
     9181            pDbgState->fClearCr4Mask = false;
     9182            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
     9183        }
     9184    }
     9185
     9186    if (   IS_EITHER_ENABLED(pVM, EXIT_DRX_READ)
     9187        || IS_EITHER_ENABLED(pVM, EXIT_DRX_WRITE))
     9188    {
     9189        /** @todo later, need to fix handler as it assumes this won't usually happen. */
     9190        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
     9191    }
     9192
     9193    SET_CPEU_XBM_IF_EITHER_EN(EXIT_RDMSR,               VMX_EXIT_RDMSR,     VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS); /* risky clearing this? */
     9194    SET_CPEU_XBM_IF_EITHER_EN(EXIT_WRMSR,               VMX_EXIT_WRMSR,     VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS);
     9195    SET_CPE1_XBM_IF_EITHER_EN(EXIT_MWAIT,               VMX_EXIT_MWAIT,     VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT);   /* paranoia */
     9196    SET_CPE1_XBM_IF_EITHER_EN(EXIT_MONITOR,             VMX_EXIT_MONITOR,   VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT); /* paranoia */
     9197    SET_CPE1_XBM_IF_EITHER_EN(EXIT_PAUSE,               VMX_EXIT_PAUSE,     VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
     9198
     9199    if (   IS_EITHER_ENABLED(pVM, EXIT_SGDT)
     9200        || IS_EITHER_ENABLED(pVM, EXIT_SIDT)
     9201        || IS_EITHER_ENABLED(pVM, EXIT_LGDT)
     9202        || IS_EITHER_ENABLED(pVM, EXIT_LIDT))
     9203    {
     9204        pDbgState->fCpe2Extra |= VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT;
     9205        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XDTR_ACCESS);
     9206    }
     9207
     9208    if (   IS_EITHER_ENABLED(pVM, EXIT_SLDT)
     9209        || IS_EITHER_ENABLED(pVM, EXIT_STR)
     9210        || IS_EITHER_ENABLED(pVM, EXIT_LLDT)
     9211        || IS_EITHER_ENABLED(pVM, EXIT_LTR))
     9212    {
     9213        pDbgState->fCpe2Extra |= VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT;
     9214        ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_TR_ACCESS);
     9215    }
     9216
     9217    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_INVEPT,          VMX_EXIT_INVEPT);           /* unconditional */
     9218    SET_CPE1_XBM_IF_EITHER_EN(EXIT_RDTSCP,              VMX_EXIT_RDTSCP,    VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
     9219    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_INVVPID,         VMX_EXIT_INVVPID);          /* unconditional */
     9220    SET_CPE2_XBM_IF_EITHER_EN(EXIT_WBINVD,              VMX_EXIT_WBINVD,    VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT);
     9221    SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSETBV,              VMX_EXIT_XSETBV);           /* unconditional */
     9222    SET_CPE2_XBM_IF_EITHER_EN(EXIT_RDRAND,              VMX_EXIT_RDRAND,    VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT);
     9223    SET_CPE1_XBM_IF_EITHER_EN(EXIT_VMX_INVPCID,         VMX_EXIT_INVPCID,   VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
     9224    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VMFUNC,          VMX_EXIT_VMFUNC);           /* unconditional for the current setup */
     9225    SET_CPE2_XBM_IF_EITHER_EN(EXIT_RDSEED,              VMX_EXIT_RDSEED,    VMX_VMCS_CTRL_PROC_EXEC2_RDSEED_EXIT);
     9226    SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES,              VMX_EXIT_XSAVES);           /* unconditional (enabled by host, guest cfg) */
     9227    SET_ONLY_XBM_IF_EITHER_EN(EXIT_XRSTORS,             VMX_EXIT_XRSTORS);          /* unconditional (enabled by host, guest cfg) */
     9228#endif
     9229#undef IS_EITHER_ENABLED
     9230#undef SET_ONLY_XBM_IF_EITHER_EN
     9231#undef SET_CPE1_XBM_IF_EITHER_EN
     9232#undef SET_CPEU_XBM_IF_EITHER_EN
     9233#undef SET_CPE2_XBM_IF_EITHER_EN
     9234
     9235    /*
     9236     * Sanitize the control stuff.
     9237     */
     9238    pDbgState->fCpe1Extra       &= pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
     9239    pDbgState->fCpe1Unwanted    &= ~pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
     9240    pDbgState->fCpe2Extra       &= pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
     9241    Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
     9242          pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
     9243          pDbgState->fClearCr0Mask ? " clr-cr0" : "",
     9244          pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
     9245}
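
Editor's note: the "sanitize" step at the end of the function above clamps the requested execution controls to what the hardware reports in the VMX capability MSRs: bits outside allowed1 may never be set, and bits in disallowed0 may never be cleared. A rough sketch of that masking, with illustrative names rather than the real pVM->hm.s.vmx.Msrs fields:

    /* Clamp requested controls to hardware capabilities (sketch only). */
    typedef struct SKETCHVMXCAPS
    {
        uint32_t allowed1;      /* bits the CPU permits to be 1 */
        uint32_t disallowed0;   /* bits the CPU forces to stay 1 */
    } SKETCHVMXCAPS;

    static void sketchSanitizeCtls(uint32_t *pfExtra, uint32_t *pfUnwanted, SKETCHVMXCAPS const *pCaps)
    {
        *pfExtra    &= pCaps->allowed1;       /* don't request unsupported exits */
        *pfUnwanted &= ~pCaps->disallowed0;   /* don't try to clear mandatory bits */
    }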
     9246
     9247
     9248/**
     9249 * Fires off DBGF events and dtrace probes for an exit, when it's appropriate.
     9250 *
     9251 * The caller has checked exit against the VMXRUNDBGSTATE::bmExitsToCheck
     9252 * bitmap. The caller has checked for NMIs already, so we don't have to do that
     9253 * either.
     9254 *
     9255 * @returns Strict VBox status code (i.e. informational status codes too).
     9256 * @param   pVM         The cross context VM structure.
     9257 * @param   pVCpu       The cross context virtual CPU structure.
     9258 * @param   pMixedCtx   Pointer to the guest-CPU context.
     9259 * @param   pDbgState   The debug state.
     9260 *
     9261 * @remarks The name of this function is displayed by dtrace, so keep it short
     9262 *          and to the point. No longer than 33 chars long, please.
     9263 */
     9264static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx,
     9265                                                  PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
     9266{
     9267    /*
     9268     * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
     9269     * same time check whether any corresponding Dtrace event is enabled (fDtrace).
     9270     *
     9271     * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
     9272     *       does.  Must add/change/remove both places.  Same ordering, please.
     9273     *
     9274     *       Added/removed events must also be reflected in the next section
     9275     *       where we dispatch dtrace events.
     9276     */
     9277    bool            fDtrace   = false;
     9278    DBGFEVENTTYPE   enmEvent  = DBGFEVENT_END;
     9279    uint32_t        uEventArg = 0;
     9280#define SET_BOTH(a_EventSubName) \
     9281        do { enmEvent = RT_CONCAT(DBGFEVENT_, a_EventSubName); \
     9282            fDtrace = RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)(); \
     9283        } while (0)
     9284    switch (uExitReason)
     9285    {
     9286        case VMX_EXIT_MTF:
     9287            return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
     9288
     9289        case VMX_EXIT_XCPT_OR_NMI:
     9290        {
     9291            uint8_t const idxVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
     9292            switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo))
     9293            {
     9294                case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
     9295                case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT:
     9296                case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT:
     9297                    if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
     9298                    {
     9299                        if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uExitIntInfo))
     9300                        {
     9301                            hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     9302                            uEventArg = pVmxTransient->uExitIntErrorCode;
     9303                        }
     9304                        enmEvent = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
     9305                        switch (enmEvent)
     9306                        {
     9307                            case DBGFEVENT_XCPT_DE: fDtrace = VBOXVMM_XCPT_DE_ENABLED(); break;
     9308                            case DBGFEVENT_XCPT_DB: fDtrace = VBOXVMM_XCPT_DB_ENABLED(); break;
     9309                            case DBGFEVENT_XCPT_BP: fDtrace = VBOXVMM_XCPT_BP_ENABLED(); break;
     9310                            case DBGFEVENT_XCPT_OF: fDtrace = VBOXVMM_XCPT_OF_ENABLED(); break;
     9311                            case DBGFEVENT_XCPT_BR: fDtrace = VBOXVMM_XCPT_BR_ENABLED(); break;
     9312                            case DBGFEVENT_XCPT_UD: fDtrace = VBOXVMM_XCPT_UD_ENABLED(); break;
     9313                            case DBGFEVENT_XCPT_NM: fDtrace = VBOXVMM_XCPT_NM_ENABLED(); break;
     9314                            case DBGFEVENT_XCPT_DF: fDtrace = VBOXVMM_XCPT_DF_ENABLED(); break;
     9315                            case DBGFEVENT_XCPT_TS: fDtrace = VBOXVMM_XCPT_TS_ENABLED(); break;
     9316                            case DBGFEVENT_XCPT_NP: fDtrace = VBOXVMM_XCPT_NP_ENABLED(); break;
     9317                            case DBGFEVENT_XCPT_SS: fDtrace = VBOXVMM_XCPT_SS_ENABLED(); break;
     9318                            case DBGFEVENT_XCPT_GP: fDtrace = VBOXVMM_XCPT_GP_ENABLED(); break;
     9319                            case DBGFEVENT_XCPT_PF: fDtrace = VBOXVMM_XCPT_PF_ENABLED(); break;
     9320                            case DBGFEVENT_XCPT_MF: fDtrace = VBOXVMM_XCPT_MF_ENABLED(); break;
     9321                            case DBGFEVENT_XCPT_AC: fDtrace = VBOXVMM_XCPT_AC_ENABLED(); break;
     9322                            case DBGFEVENT_XCPT_XF: fDtrace = VBOXVMM_XCPT_XF_ENABLED(); break;
     9323                            case DBGFEVENT_XCPT_VE: fDtrace = VBOXVMM_XCPT_VE_ENABLED(); break;
     9324                            case DBGFEVENT_XCPT_SX: fDtrace = VBOXVMM_XCPT_SX_ENABLED(); break;
     9325                        }
     9326                    }
     9327                    else
     9328                        AssertFailed();
     9329                    break;
     9330
     9331                case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT:
     9332                    uEventArg = idxVector;
     9333                    enmEvent  = DBGFEVENT_INTERRUPT_SOFTWARE;
     9334                    fDtrace   = VBOXVMM_INT_SOFTWARE_ENABLED();
     9335                    break;
     9336            }
     9337            break;
     9338        }
     9339
     9340        case VMX_EXIT_TRIPLE_FAULT:
     9341            enmEvent = DBGFEVENT_TRIPLE_FAULT;
     9342            //fDtrace  = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
     9343            break;
     9344        case VMX_EXIT_TASK_SWITCH:      SET_BOTH(EXIT_TASK_SWITCH); break;
     9345        case VMX_EXIT_EPT_VIOLATION:    SET_BOTH(EXIT_VMX_EPT_VIOLATION); break;
     9346        case VMX_EXIT_EPT_MISCONFIG:    SET_BOTH(EXIT_VMX_EPT_MISCONFIG); break;
     9347        case VMX_EXIT_APIC_ACCESS:      SET_BOTH(EXIT_VMX_VAPIC_ACCESS); break;
     9348        case VMX_EXIT_APIC_WRITE:       SET_BOTH(EXIT_VMX_VAPIC_WRITE); break;
     9349
     9350        /* Instruction specific VM-exits: */
     9351        case VMX_EXIT_CPUID:            SET_BOTH(EXIT_CPUID); break;
     9352        case VMX_EXIT_GETSEC:           SET_BOTH(EXIT_GETSEC); break;
     9353        case VMX_EXIT_HLT:              SET_BOTH(EXIT_HALT); break;
     9354        case VMX_EXIT_INVD:             SET_BOTH(EXIT_INVD); break;
     9355        case VMX_EXIT_INVLPG:           SET_BOTH(EXIT_INVLPG); break;
     9356        case VMX_EXIT_RDPMC:            SET_BOTH(EXIT_RDPMC); break;
     9357        case VMX_EXIT_RDTSC:            SET_BOTH(EXIT_RDTSC); break;
     9358        case VMX_EXIT_RSM:              SET_BOTH(EXIT_RSM); break;
     9359        case VMX_EXIT_VMCALL:           SET_BOTH(EXIT_VMM_CALL); break;
     9360        case VMX_EXIT_VMCLEAR:          SET_BOTH(EXIT_VMX_VMCLEAR); break;
     9361        case VMX_EXIT_VMLAUNCH:         SET_BOTH(EXIT_VMX_VMLAUNCH); break;
     9362        case VMX_EXIT_VMPTRLD:          SET_BOTH(EXIT_VMX_VMPTRLD); break;
     9363        case VMX_EXIT_VMPTRST:          SET_BOTH(EXIT_VMX_VMPTRST); break;
     9364        case VMX_EXIT_VMREAD:           SET_BOTH(EXIT_VMX_VMREAD); break;
     9365        case VMX_EXIT_VMRESUME:         SET_BOTH(EXIT_VMX_VMRESUME); break;
     9366        case VMX_EXIT_VMWRITE:          SET_BOTH(EXIT_VMX_VMWRITE); break;
     9367        case VMX_EXIT_VMXOFF:           SET_BOTH(EXIT_VMX_VMXOFF); break;
     9368        case VMX_EXIT_VMXON:            SET_BOTH(EXIT_VMX_VMXON); break;
     9369        case VMX_EXIT_MOV_CRX:
     9370            hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9371/** @todo r=bird: I feel these macros aren't very descriptive and need to be at least 30 chars longer! ;-)
     9372* Sensible abbreviations strongly recommended here because even with 130 columns this stuff gets too wide! */
     9373            if (   VMX_EXIT_QUALIFICATION_CRX_ACCESS(pVmxTransient->uExitQualification)
     9374                == VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ)
     9375                SET_BOTH(EXIT_CRX_READ);
     9376            else
     9377                SET_BOTH(EXIT_CRX_WRITE);
     9378            uEventArg = VMX_EXIT_QUALIFICATION_CRX_REGISTER(pVmxTransient->uExitQualification);
     9379            break;
     9380        case VMX_EXIT_MOV_DRX:
     9381            hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9382            if (   VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification)
     9383                == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_READ)
     9384                SET_BOTH(EXIT_DRX_READ);
     9385            else
     9386                SET_BOTH(EXIT_DRX_WRITE);
     9387            uEventArg = VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification);
     9388            break;
     9389        case VMX_EXIT_RDMSR:            SET_BOTH(EXIT_RDMSR); break;
     9390        case VMX_EXIT_WRMSR:            SET_BOTH(EXIT_WRMSR); break;
     9391        case VMX_EXIT_MWAIT:            SET_BOTH(EXIT_MWAIT); break;
     9392        case VMX_EXIT_MONITOR:          SET_BOTH(EXIT_MONITOR); break;
     9393        case VMX_EXIT_PAUSE:            SET_BOTH(EXIT_PAUSE); break;
     9394        case VMX_EXIT_XDTR_ACCESS:
     9395            hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     9396            switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_XDTR_INSINFO_INSTR_ID))
     9397            {
     9398                case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(EXIT_SGDT); break;
     9399                case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(EXIT_SIDT); break;
     9400                case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(EXIT_LGDT); break;
     9401                case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(EXIT_LIDT); break;
     9402            }
     9403            break;
     9404
     9405        case VMX_EXIT_TR_ACCESS:
     9406            hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
     9407            switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_YYTR_INSINFO_INSTR_ID))
     9408            {
     9409                case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(EXIT_SLDT); break;
     9410                case VMX_YYTR_INSINFO_II_STR:  SET_BOTH(EXIT_STR); break;
     9411                case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(EXIT_LLDT); break;
     9412                case VMX_YYTR_INSINFO_II_LTR:  SET_BOTH(EXIT_LTR); break;
     9413            }
     9414            break;
     9415
     9416        case VMX_EXIT_INVEPT:           SET_BOTH(EXIT_VMX_INVEPT); break;
     9417        case VMX_EXIT_RDTSCP:           SET_BOTH(EXIT_RDTSCP); break;
     9418        case VMX_EXIT_INVVPID:          SET_BOTH(EXIT_VMX_INVVPID); break;
     9419        case VMX_EXIT_WBINVD:           SET_BOTH(EXIT_WBINVD); break;
     9420        case VMX_EXIT_XSETBV:           SET_BOTH(EXIT_XSETBV); break;
     9421        case VMX_EXIT_RDRAND:           SET_BOTH(EXIT_RDRAND); break;
     9422        case VMX_EXIT_INVPCID:          SET_BOTH(EXIT_VMX_INVPCID); break;
     9423        case VMX_EXIT_VMFUNC:           SET_BOTH(EXIT_VMX_VMFUNC); break;
     9424        case VMX_EXIT_RDSEED:           SET_BOTH(EXIT_RDSEED); break;
     9425        case VMX_EXIT_XSAVES:           SET_BOTH(EXIT_XSAVES); break;
     9426        case VMX_EXIT_XRSTORS:          SET_BOTH(EXIT_XRSTORS); break;
     9427
     9428        /* Events that aren't relevant at this point. */
     9429        case VMX_EXIT_EXT_INT:
     9430        case VMX_EXIT_INT_WINDOW:
     9431        case VMX_EXIT_NMI_WINDOW:
     9432        case VMX_EXIT_TPR_BELOW_THRESHOLD:
     9433        case VMX_EXIT_PREEMPT_TIMER:
     9434        case VMX_EXIT_IO_INSTR:
     9435            break;
     9436
     9437        /* Errors and unexpected events. */
     9438        case VMX_EXIT_INIT_SIGNAL:
     9439        case VMX_EXIT_SIPI:
     9440        case VMX_EXIT_IO_SMI:
     9441        case VMX_EXIT_SMI:
     9442        case VMX_EXIT_ERR_INVALID_GUEST_STATE:
     9443        case VMX_EXIT_ERR_MSR_LOAD:
     9444        case VMX_EXIT_ERR_MACHINE_CHECK:
     9445            break;
     9446
     9447        default:
     9448            AssertMsgFailed(("Unexpected exit=%#x\n", uExitReason));
     9449            break;
     9450    }
     9451#undef SET_BOTH
     9452
     9453    /*
     9454     * Dtrace tracepoints go first.   We do them here at once so we don't
     9455     * have to copy the guest state saving and stuff a few dozen times.
     9456     * Down side is that we've got to repeat the switch, though this time
     9457     * we use enmEvent since the probes are a subset of what DBGF does.
     9458     */
     9459    if (fDtrace)
     9460    {
     9461        hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9462        hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     9463        switch (enmEvent)
     9464        {
     9465            /** @todo consider which extra parameters would be helpful for each probe.   */
     9466            case DBGFEVENT_XCPT_DE:                 VBOXVMM_XCPT_DE(pVCpu, pMixedCtx); break;
     9467            case DBGFEVENT_XCPT_DB:                 VBOXVMM_XCPT_DB(pVCpu, pMixedCtx, pMixedCtx->dr[6]); break;
     9468            case DBGFEVENT_XCPT_BP:                 VBOXVMM_XCPT_BP(pVCpu, pMixedCtx); break;
     9469            case DBGFEVENT_XCPT_OF:                 VBOXVMM_XCPT_OF(pVCpu, pMixedCtx); break;
     9470            case DBGFEVENT_XCPT_BR:                 VBOXVMM_XCPT_BR(pVCpu, pMixedCtx); break;
     9471            case DBGFEVENT_XCPT_UD:                 VBOXVMM_XCPT_UD(pVCpu, pMixedCtx); break;
     9472            case DBGFEVENT_XCPT_NM:                 VBOXVMM_XCPT_NM(pVCpu, pMixedCtx); break;
     9473            case DBGFEVENT_XCPT_DF:                 VBOXVMM_XCPT_DF(pVCpu, pMixedCtx); break;
     9474            case DBGFEVENT_XCPT_TS:                 VBOXVMM_XCPT_TS(pVCpu, pMixedCtx, uEventArg); break;
     9475            case DBGFEVENT_XCPT_NP:                 VBOXVMM_XCPT_NP(pVCpu, pMixedCtx, uEventArg); break;
     9476            case DBGFEVENT_XCPT_SS:                 VBOXVMM_XCPT_SS(pVCpu, pMixedCtx, uEventArg); break;
     9477            case DBGFEVENT_XCPT_GP:                 VBOXVMM_XCPT_GP(pVCpu, pMixedCtx, uEventArg); break;
     9478            case DBGFEVENT_XCPT_PF:                 VBOXVMM_XCPT_PF(pVCpu, pMixedCtx, uEventArg, pMixedCtx->cr2); break;
     9479            case DBGFEVENT_XCPT_MF:                 VBOXVMM_XCPT_MF(pVCpu, pMixedCtx); break;
     9480            case DBGFEVENT_XCPT_AC:                 VBOXVMM_XCPT_AC(pVCpu, pMixedCtx); break;
     9481            case DBGFEVENT_XCPT_XF:                 VBOXVMM_XCPT_XF(pVCpu, pMixedCtx); break;
     9482            case DBGFEVENT_XCPT_VE:                 VBOXVMM_XCPT_VE(pVCpu, pMixedCtx); break;
     9483            case DBGFEVENT_XCPT_SX:                 VBOXVMM_XCPT_SX(pVCpu, pMixedCtx, uEventArg); break;
     9484            case DBGFEVENT_INTERRUPT_SOFTWARE:      VBOXVMM_INT_SOFTWARE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
     9485            case DBGFEVENT_EXIT_TASK_SWITCH:        VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pMixedCtx); break;
     9486            case DBGFEVENT_EXIT_CPUID:              VBOXVMM_EXIT_CPUID(pVCpu, pMixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;
     9487            case DBGFEVENT_EXIT_GETSEC:             VBOXVMM_EXIT_GETSEC(pVCpu, pMixedCtx); break;
     9488            case DBGFEVENT_EXIT_HALT:               VBOXVMM_EXIT_HALT(pVCpu, pMixedCtx); break;
     9489            case DBGFEVENT_EXIT_INVD:               VBOXVMM_EXIT_INVD(pVCpu, pMixedCtx); break;
     9490            case DBGFEVENT_EXIT_INVLPG:             VBOXVMM_EXIT_INVLPG(pVCpu, pMixedCtx); break;
     9491            case DBGFEVENT_EXIT_RDPMC:              VBOXVMM_EXIT_RDPMC(pVCpu, pMixedCtx); break;
     9492            case DBGFEVENT_EXIT_RDTSC:              VBOXVMM_EXIT_RDTSC(pVCpu, pMixedCtx); break;
     9493            case DBGFEVENT_EXIT_RSM:                VBOXVMM_EXIT_RSM(pVCpu, pMixedCtx); break;
     9494            case DBGFEVENT_EXIT_CRX_READ:           VBOXVMM_EXIT_CRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
     9495            case DBGFEVENT_EXIT_CRX_WRITE:          VBOXVMM_EXIT_CRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
     9496            case DBGFEVENT_EXIT_DRX_READ:           VBOXVMM_EXIT_DRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
     9497            case DBGFEVENT_EXIT_DRX_WRITE:          VBOXVMM_EXIT_DRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
     9498            case DBGFEVENT_EXIT_RDMSR:              VBOXVMM_EXIT_RDMSR(pVCpu, pMixedCtx, pMixedCtx->ecx); break;
     9499            case DBGFEVENT_EXIT_WRMSR:              VBOXVMM_EXIT_WRMSR(pVCpu, pMixedCtx, pMixedCtx->ecx,
     9500                                                                       RT_MAKE_U64(pMixedCtx->eax, pMixedCtx->edx)); break;
     9501            case DBGFEVENT_EXIT_MWAIT:              VBOXVMM_EXIT_MWAIT(pVCpu, pMixedCtx); break;
     9502            case DBGFEVENT_EXIT_MONITOR:            VBOXVMM_EXIT_MONITOR(pVCpu, pMixedCtx); break;
     9503            case DBGFEVENT_EXIT_PAUSE:              VBOXVMM_EXIT_PAUSE(pVCpu, pMixedCtx); break;
     9504            case DBGFEVENT_EXIT_SGDT:               VBOXVMM_EXIT_SGDT(pVCpu, pMixedCtx); break;
     9505            case DBGFEVENT_EXIT_SIDT:               VBOXVMM_EXIT_SIDT(pVCpu, pMixedCtx); break;
     9506            case DBGFEVENT_EXIT_LGDT:               VBOXVMM_EXIT_LGDT(pVCpu, pMixedCtx); break;
     9507            case DBGFEVENT_EXIT_LIDT:               VBOXVMM_EXIT_LIDT(pVCpu, pMixedCtx); break;
     9508            case DBGFEVENT_EXIT_SLDT:               VBOXVMM_EXIT_SLDT(pVCpu, pMixedCtx); break;
     9509            case DBGFEVENT_EXIT_STR:                VBOXVMM_EXIT_STR(pVCpu, pMixedCtx); break;
     9510            case DBGFEVENT_EXIT_LLDT:               VBOXVMM_EXIT_LLDT(pVCpu, pMixedCtx); break;
     9511            case DBGFEVENT_EXIT_LTR:                VBOXVMM_EXIT_LTR(pVCpu, pMixedCtx); break;
     9512            case DBGFEVENT_EXIT_RDTSCP:             VBOXVMM_EXIT_RDTSCP(pVCpu, pMixedCtx); break;
     9513            case DBGFEVENT_EXIT_WBINVD:             VBOXVMM_EXIT_WBINVD(pVCpu, pMixedCtx); break;
     9514            case DBGFEVENT_EXIT_XSETBV:             VBOXVMM_EXIT_XSETBV(pVCpu, pMixedCtx); break;
     9515            case DBGFEVENT_EXIT_RDRAND:             VBOXVMM_EXIT_RDRAND(pVCpu, pMixedCtx); break;
     9516            case DBGFEVENT_EXIT_RDSEED:             VBOXVMM_EXIT_RDSEED(pVCpu, pMixedCtx); break;
     9517            case DBGFEVENT_EXIT_XSAVES:             VBOXVMM_EXIT_XSAVES(pVCpu, pMixedCtx); break;
     9518            case DBGFEVENT_EXIT_XRSTORS:            VBOXVMM_EXIT_XRSTORS(pVCpu, pMixedCtx); break;
     9519            case DBGFEVENT_EXIT_VMM_CALL:           VBOXVMM_EXIT_VMM_CALL(pVCpu, pMixedCtx); break;
     9520            case DBGFEVENT_EXIT_VMX_VMCLEAR:        VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pMixedCtx); break;
     9521            case DBGFEVENT_EXIT_VMX_VMLAUNCH:       VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pMixedCtx); break;
     9522            case DBGFEVENT_EXIT_VMX_VMPTRLD:        VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pMixedCtx); break;
     9523            case DBGFEVENT_EXIT_VMX_VMPTRST:        VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pMixedCtx); break;
     9524            case DBGFEVENT_EXIT_VMX_VMREAD:         VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pMixedCtx); break;
     9525            case DBGFEVENT_EXIT_VMX_VMRESUME:       VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pMixedCtx); break;
     9526            case DBGFEVENT_EXIT_VMX_VMWRITE:        VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pMixedCtx); break;
     9527            case DBGFEVENT_EXIT_VMX_VMXOFF:         VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pMixedCtx); break;
     9528            case DBGFEVENT_EXIT_VMX_VMXON:          VBOXVMM_EXIT_VMX_VMXON(pVCpu, pMixedCtx); break;
     9529            case DBGFEVENT_EXIT_VMX_INVEPT:         VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pMixedCtx); break;
     9530            case DBGFEVENT_EXIT_VMX_INVVPID:        VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pMixedCtx); break;
     9531            case DBGFEVENT_EXIT_VMX_INVPCID:        VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pMixedCtx); break;
     9532            case DBGFEVENT_EXIT_VMX_VMFUNC:         VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pMixedCtx); break;
     9533            case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG:  VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pMixedCtx); break;
     9534            case DBGFEVENT_EXIT_VMX_EPT_VIOLATION:  VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pMixedCtx); break;
     9535            case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS:   VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pMixedCtx); break;
     9536            case DBGFEVENT_EXIT_VMX_VAPIC_WRITE:    VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pMixedCtx); break;
     9537            default:                                AssertMsgFailed(("enmEvent=%d uExitReason=%d\n", enmEvent, uExitReason)); break;
     9538        }
     9539    }
     9540
     9541    /*
     9542     * Fire off the DBGF event, if enabled (our check here is just a quick one,
     9543     * the DBGF call will do a full check).
     9544     *
     9545     * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
     9546     */
     9547    if (   enmEvent != DBGFEVENT_END
     9548        && DBGF_IS_EVENT_ENABLED(pVM, enmEvent))
     9549    {
     9550        VBOXSTRICTRC rcStrict = DBGFEventGenericWithArg(pVM, pVCpu, enmEvent, uEventArg);
     9551        if (rcStrict != VINF_SUCCESS)
     9552            return rcStrict;
     9553    }
     9554
     9555    return VINF_SUCCESS;
     9556}
     9557
     9558
     9559/**
     9560 * Single-stepping VM-exit filtering.
     9561 *
     9562 * This is preprocessing the exits and deciding whether we've gotten far enough
     9563 * to return VINF_EM_DBG_STEPPED already.  If not, normal VM-exit handling is
     9564 * performed.
     9565 *
     9566 * @returns Strict VBox status code (i.e. informational status codes too).
     9567 * @param   pVM             The cross context VM structure.
     9568 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
     9569 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     9570 *                          out-of-sync. Make sure to update the required
     9571 *                          fields before using them.
     9572 * @param   pVmxTransient   Pointer to the VMX-transient structure.
     9573 * @param   uExitReason     The VM-exit reason.
     9574 * @param   pDbgState       The debug state.
     9575 */
     9576DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
     9577                                                   uint32_t uExitReason, PVMXRUNDBGSTATE pDbgState)
     9578{
     9579    /*
     9580     * Expensive (saves context) generic dtrace exit probe.
     9581     */
     9582    if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
     9583    { /* more likely */ }
     9584    else
     9585    {
     9586        hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9587        hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     9588        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pMixedCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
     9589    }
     9590
     9591    /*
     9592     * Check for host NMI, just to get that out of the way.
     9593     */
     9594    if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
     9595    { /* normally likely */ }
     9596    else
     9597    {
     9598        int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     9599        AssertRCReturn(rc2, rc2);
     9600        uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
     9601        if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
     9602            return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
     9603    }
     9604
     9605    /*
     9606     * Check for single stepping event if we're stepping.
     9607     */
     9608    if (pVCpu->hm.s.fSingleInstruction)
     9609    {
     9610        switch (uExitReason)
     9611        {
     9612            case VMX_EXIT_MTF:
     9613                return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
     9614
     9615            /* Various events: */
     9616            case VMX_EXIT_XCPT_OR_NMI:
     9617            case VMX_EXIT_EXT_INT:
     9618            case VMX_EXIT_TRIPLE_FAULT:
     9619            case VMX_EXIT_INT_WINDOW:
     9620            case VMX_EXIT_NMI_WINDOW:
     9621            case VMX_EXIT_TASK_SWITCH:
     9622            case VMX_EXIT_TPR_BELOW_THRESHOLD:
     9623            case VMX_EXIT_APIC_ACCESS:
     9624            case VMX_EXIT_EPT_VIOLATION:
     9625            case VMX_EXIT_EPT_MISCONFIG:
     9626            case VMX_EXIT_PREEMPT_TIMER:
     9627
     9628            /* Instruction specific VM-exits: */
     9629            case VMX_EXIT_CPUID:
     9630            case VMX_EXIT_GETSEC:
     9631            case VMX_EXIT_HLT:
     9632            case VMX_EXIT_INVD:
     9633            case VMX_EXIT_INVLPG:
     9634            case VMX_EXIT_RDPMC:
     9635            case VMX_EXIT_RDTSC:
     9636            case VMX_EXIT_RSM:
     9637            case VMX_EXIT_VMCALL:
     9638            case VMX_EXIT_VMCLEAR:
     9639            case VMX_EXIT_VMLAUNCH:
     9640            case VMX_EXIT_VMPTRLD:
     9641            case VMX_EXIT_VMPTRST:
     9642            case VMX_EXIT_VMREAD:
     9643            case VMX_EXIT_VMRESUME:
     9644            case VMX_EXIT_VMWRITE:
     9645            case VMX_EXIT_VMXOFF:
     9646            case VMX_EXIT_VMXON:
     9647            case VMX_EXIT_MOV_CRX:
     9648            case VMX_EXIT_MOV_DRX:
     9649            case VMX_EXIT_IO_INSTR:
     9650            case VMX_EXIT_RDMSR:
     9651            case VMX_EXIT_WRMSR:
     9652            case VMX_EXIT_MWAIT:
     9653            case VMX_EXIT_MONITOR:
     9654            case VMX_EXIT_PAUSE:
     9655            case VMX_EXIT_XDTR_ACCESS:
     9656            case VMX_EXIT_TR_ACCESS:
     9657            case VMX_EXIT_INVEPT:
     9658            case VMX_EXIT_RDTSCP:
     9659            case VMX_EXIT_INVVPID:
     9660            case VMX_EXIT_WBINVD:
     9661            case VMX_EXIT_XSETBV:
     9662            case VMX_EXIT_RDRAND:
     9663            case VMX_EXIT_INVPCID:
     9664            case VMX_EXIT_VMFUNC:
     9665            case VMX_EXIT_RDSEED:
     9666            case VMX_EXIT_XSAVES:
     9667            case VMX_EXIT_XRSTORS:
     9668            {
     9669                int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
     9670                rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     9671                AssertRCReturn(rc2, rc2);
     9672                if (   pMixedCtx->rip    != pDbgState->uRipStart
     9673                    || pMixedCtx->cs.Sel != pDbgState->uCsStart)
     9674                    return VINF_EM_DBG_STEPPED;
     9675                break;
     9676            }
     9677
     9678            /* Errors and unexpected events: */
     9679            case VMX_EXIT_INIT_SIGNAL:
     9680            case VMX_EXIT_SIPI:
     9681            case VMX_EXIT_IO_SMI:
     9682            case VMX_EXIT_SMI:
     9683            case VMX_EXIT_ERR_INVALID_GUEST_STATE:
     9684            case VMX_EXIT_ERR_MSR_LOAD:
     9685            case VMX_EXIT_ERR_MACHINE_CHECK:
     9686            case VMX_EXIT_APIC_WRITE:  /* Some talk about this being fault like, so I guess we must process it? */
     9687                break;
     9688
     9689            default:
     9690                AssertMsgFailed(("Unexpected exit=%#x\n", uExitReason));
     9691                break;
     9692        }
     9693    }
     9694
     9695    /*
     9696     * Check for debugger event breakpoints and dtrace probes.
     9697     */
     9698    if (   uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
     9699        && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
     9700    {
     9701        VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVM, pVCpu, pMixedCtx, pVmxTransient, uExitReason);
     9702        if (rcStrict != VINF_SUCCESS)
     9703            return rcStrict;
     9704    }
     9705
     9706    /*
     9707     * Normal processing.
     9708     */
     9709#ifdef HMVMX_USE_FUNCTION_TABLE
     9710    return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
     9711#else
     9712    return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
     9713#endif
     9714}
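The instruction-style cases in hmR0VmxRunDebugHandleExit() decide that a single step has completed as soon as CS:RIP no longer matches the values recorded when the step began; exits that fire before the instruction retires leave CS:RIP unchanged and therefore keep stepping. The following self-contained sketch shows just that test; the types and names are hypothetical, not the VMM ones.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t uRipStart; uint16_t uCsStart; } STEPSTATE;

    static int stepCompleted(const STEPSTATE *pState, uint64_t uRipNow, uint16_t uCsNow)
    {
        /* Same CS:RIP => the exit happened before the instruction retired (e.g. a
           fault or an external event); keep stepping.  Anything else => stepped. */
        return uRipNow != pState->uRipStart || uCsNow != pState->uCsStart;
    }

    int main(void)
    {
        STEPSTATE State = { 0x1000, 0x08 };
        printf("%d\n", stepCompleted(&State, 0x1000, 0x08)); /* 0: not yet      */
        printf("%d\n", stepCompleted(&State, 0x1003, 0x08)); /* 1: RIP advanced */
        return 0;
    }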
     9715
     9716
     9717/**
     9718 * Single steps guest code using VT-x.
     9719 *
     9720 * @returns Strict VBox status code (i.e. informational status codes too).
     9721 * @param   pVM         The cross context VM structure.
     9722 * @param   pVCpu       The cross context virtual CPU structure.
     9723 * @param   pCtx        Pointer to the guest-CPU context.
    88549724 *
    88559725 * @note    Mostly the same as hmR0VmxRunGuestCodeNormal().
     
    88599729    VMXTRANSIENT VmxTransient;
    88609730    VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
     9731
     9732    /* Set HMCPU indicators.  */
     9733    bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
     9734    pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
     9735    pVCpu->hm.s.fUsingDebugLoop = true;
     9736
     9737    /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps.  */
     9738    VMXRUNDBGSTATE DbgState;
     9739    hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState);
     9740    hmR0VmxPreRunGuestDebugStateUpdate(pVM, pVCpu, pCtx, &DbgState);
     9741
     9742    /*
     9743     * The loop.
     9744     */
    88619745    VBOXSTRICTRC rcStrict  = VERR_INTERNAL_ERROR_5;
    8862     uint32_t     cLoops    = 0;
    8863     uint16_t     uCsStart  = pCtx->cs.Sel;
    8864     uint64_t     uRipStart = pCtx->rip;
    8865 
    8866     for (;; cLoops++)
     9746    for (uint32_t cLoops = 0; ; cLoops++)
    88679747    {
    88689748        Assert(!HMR0SuspendPending());
    88699749        HMVMX_ASSERT_CPU_SAFE();
    8870 
    8871         /* Preparatory work for running guest code, this may force us to return
    8872            to ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
     9750        bool fStepping = pVCpu->hm.s.fSingleInstruction;
     9751
     9752        /*
     9753         * Preparatory work for running guest code, this may force us to return
     9754         * to ring-3.  This bugger disables interrupts on VINF_SUCCESS!
     9755         */
    88739756        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    8874         rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, true /* fStepping */);
      9757        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Set up execute controls the next two can respond to. */
     9758        rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, fStepping);
    88759759        if (rcStrict != VINF_SUCCESS)
    88769760            break;
    88779761
    88789762        hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
     9763        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */
     9764
     9765        /*
     9766         * Now we can run the guest code.
     9767         */
    88799768        int rcRun = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
     9769
    88809770        /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
    88819771
    8882         /* Restore any residual host-state and save any bits shared between host
    8883            and guest into the guest-CPU state.  Re-enables interrupts! */
     9772        /*
     9773         * Restore any residual host-state and save any bits shared between host
     9774         * and guest into the guest-CPU state.  Re-enables interrupts!
     9775         */
    88849776        hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
    88859777
     
    89029794
    89039795        VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
    8904         if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
    8905         { /* more likely */ }
    8906         else
    8907         {
    8908             hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
    8909             hmR0VmxSaveGuestState(pVCpu, pCtx);
    8910             VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
    8911         }
    8912 
    8913         /* Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitStep(). */
    8914         rcStrict = hmR0VmxHandleExitStep(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, uCsStart, uRipStart);
     9796
     9797        /*
      9798         * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxRunDebugHandleExit().
     9799         */
     9800        rcStrict = hmR0VmxRunDebugHandleExit(pVM, pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState);
    89159801        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
    89169802        if (rcStrict != VINF_SUCCESS)
     
    89249810
    89259811        /*
    8926          * Did the RIP change, if so, consider it a single step.
      9812         * Stepping: Did the RIP change? If so, consider it a single step.
    89279813         * Otherwise, make sure one of the TFs gets set.
    89289814         */
    8929         int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
    8930         rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
    8931         AssertRCReturn(rc2, rc2);
    8932         if (   pCtx->rip    != uRipStart
    8933             || pCtx->cs.Sel != uCsStart)
    8934         {
    8935             rcStrict = VINF_EM_DBG_STEPPED;
    8936             break;
    8937         }
    8938         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
     9815        if (fStepping)
     9816        {
     9817            int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
     9818            rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
     9819            AssertRCReturn(rc2, rc2);
     9820            if (   pCtx->rip    != DbgState.uRipStart
     9821                || pCtx->cs.Sel != DbgState.uCsStart)
     9822            {
     9823                rcStrict = VINF_EM_DBG_STEPPED;
     9824                break;
     9825            }
     9826            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
     9827        }
     9828
     9829        /*
      9830         * Update when dtrace settings change (DBGF kicks us, so no need to check).
     9831         */
     9832        if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
     9833            hmR0VmxPreRunGuestDebugStateUpdate(pVM, pVCpu, pCtx, &DbgState);
    89399834    }
    89409835
     
    89539848     *        accessing APIC page in protected mode. */
    89549849
     9850    /*
     9851     * Restore exit control settings as we may not reenter this function the
     9852     * next time around.
     9853     */
     9854    rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &DbgState, rcStrict);
     9855
     9856    /* Restore HMCPU indicators. */
     9857    pVCpu->hm.s.fUsingDebugLoop = false;
     9858    pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
     9859
    89559860    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    89569861    return rcStrict;
     
    89589863
    89599864
     9865/** @} */
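hmR0VmxRunGuestCodeDebug() above follows a fixed shape: save the stepping indicator, build the debug state, then on every iteration re-apply the modified execution controls, run the guest, filter the exit, and finally revert the controls because the function may not be entered again. Here is a stripped-down sketch of that loop shape; all names are hypothetical and nothing is taken from the VMM sources.

    #include <stdio.h>

    #define RC_OK      0
    #define RC_STEPPED 1

    typedef struct { int fStepping; unsigned cExits; } DBGSTATE;

    static void applyDebugControls(DBGSTATE *p)  { (void)p; /* arm extra intercepts */ }
    static void revertDebugControls(DBGSTATE *p) { (void)p; /* restore normal setup */ }
    static int  runGuest(void)                   { return RC_OK; /* one guest entry */ }

    static int handleExit(DBGSTATE *p)
    {
        /* Pretend the third exit completes the single step. */
        return ++p->cExits >= 3 ? RC_STEPPED : RC_OK;
    }

    static int runDebugLoop(int fStepping)
    {
        DBGSTATE State = { fStepping, 0 };
        int rc = RC_OK;
        for (;;)
        {
            applyDebugControls(&State);      /* re-assert before every guest entry */
            rc = runGuest();
            if (rc != RC_OK)
                break;
            rc = handleExit(&State);         /* exit filtering / step detection    */
            if (rc != RC_OK)
                break;
        }
        revertDebugControls(&State);         /* we may not come back through here  */
        return rc;
    }

    int main(void)
    {
        printf("rc=%d after debug loop\n", runDebugLoop(1));
        return 0;
    }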
     9866
     9867
    89609868/**
    89619869 * Checks if any expensive dtrace probes are enabled and we should go to the
     
    89689876    /* It's probably faster to OR the raw 32-bit counter variables together.
    89699877       Since the variables are in an array and the probes are next to one
    8970        another (more or less), we have good locality. So, better read two three
    8971        cache lines ever time and only have one conditional, than 20+ conditionals. */
    8972     return (  VBOXVMM_XCPT_DE_ENABLED_RAW()
     9878       another (more or less), we have good locality.  So, better read
      9879       four-five cache lines every time and only have one conditional, than
     9880       70+ conditionals, right? */
     9881    return (  VBOXVMM_R0_HMVMX_VMEXIT_ENABLED_RAW() /* expensive too due to context */
     9882            | VBOXVMM_XCPT_DE_ENABLED_RAW()
    89739883            | VBOXVMM_XCPT_DB_ENABLED_RAW()
    89749884            | VBOXVMM_XCPT_BP_ENABLED_RAW()
     
    89829892            | VBOXVMM_XCPT_SS_ENABLED_RAW()
    89839893            | VBOXVMM_XCPT_GP_ENABLED_RAW()
    8984             | VBOXVMM_XCPT_PG_ENABLED_RAW()
     9894            | VBOXVMM_XCPT_PF_ENABLED_RAW()
    89859895            | VBOXVMM_XCPT_MF_ENABLED_RAW()
    89869896            | VBOXVMM_XCPT_AC_ENABLED_RAW()
     
    89909900            | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
    89919901            | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
     9902            | VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
     9903            | VBOXVMM_EXIT_HALT_ENABLED_RAW()
     9904            | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
     9905            | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
     9906            | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
     9907            | VBOXVMM_EXIT_INVD_ENABLED_RAW()
     9908            | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
     9909            | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
     9910            | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
     9911            | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
     9912            | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
     9913            | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
     9914            | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
     9915            | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
     9916            | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
     9917            | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
     9918            | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
     9919            | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
     9920            | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
     9921            | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
     9922            | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
     9923            | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
     9924            | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
     9925            | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
     9926            | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
     9927            | VBOXVMM_EXIT_STR_ENABLED_RAW()
     9928            | VBOXVMM_EXIT_LTR_ENABLED_RAW()
     9929            | VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
     9930            | VBOXVMM_EXIT_RSM_ENABLED_RAW()
     9931            | VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
     9932            | VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
     9933            | VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
     9934            | VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
     9935            | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
     9936            | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED_RAW()
     9937            | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED_RAW()
     9938            | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED_RAW()
     9939            | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED_RAW()
     9940            | VBOXVMM_EXIT_VMX_VMREAD_ENABLED_RAW()
     9941            | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED_RAW()
     9942            | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED_RAW()
     9943            | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED_RAW()
     9944            | VBOXVMM_EXIT_VMX_VMXON_ENABLED_RAW()
     9945            | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED_RAW()
     9946            | VBOXVMM_EXIT_VMX_INVEPT_ENABLED_RAW()
     9947            | VBOXVMM_EXIT_VMX_INVVPID_ENABLED_RAW()
     9948            | VBOXVMM_EXIT_VMX_INVPCID_ENABLED_RAW()
     9949            | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED_RAW()
     9950            | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED_RAW()
     9951            | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED_RAW()
     9952            | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED_RAW()
    89929953           ) != 0;
    89939954}
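The comment in hmR0VmxAnyExpensiveProbesEnabled() explains the trick: OR all the raw probe-enable counters together so the common "nothing armed" case reads a handful of contiguous cache lines and ends in a single test, instead of branching once per probe. The plain-C sketch below shows the same idea with an invented counter array standing in for the generated VBOXVMM_*_ENABLED_RAW() values.

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_PROBES 70
    static uint32_t g_acProbeEnabled[NUM_PROBES];  /* non-zero while a probe is armed */

    static int anyProbeEnabled(void)
    {
        uint32_t u = 0;
        for (unsigned i = 0; i < NUM_PROBES; i++)  /* contiguous array: good locality */
            u |= g_acProbeEnabled[i];
        return u != 0;                             /* single test for the caller      */
    }

    int main(void)
    {
        printf("%d\n", anyProbeEnabled());   /* 0 */
        g_acProbeEnabled[42] = 1;
        printf("%d\n", anyProbeEnabled());   /* 1 */
        return 0;
    }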
     
    89979958 * Runs the guest code using VT-x.
    89989959 *
    8999  * @returns Strict VBox status code.
     9960 * @returns Strict VBox status code (i.e. informational status codes too).
    90009961 * @param   pVM         The cross context VM structure.
    90019962 * @param   pVCpu       The cross context virtual CPU structure.
     
    90129973    VBOXSTRICTRC rcStrict;
    90139974    if (   !pVCpu->hm.s.fUseDebugLoop
    9014         && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled()) )
     9975        && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled())
     9976        && !DBGFIsStepping(pVCpu) )
    90159977        rcStrict = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
    90169978    else
     
    911710079}
    911810080#endif /* !HMVMX_USE_FUNCTION_TABLE */
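VMXR0RunGuestCode() above only takes the cheap loop when nothing needs the instrumented path: the debug loop has not been requested, no expensive probes are armed, and nobody is single stepping. A tiny sketch of that dispatch rule, with invented names:

    #include <stdio.h>

    static int runNormalLoop(void) { return 0; }  /* minimal-intercept run loop   */
    static int runDebugLoop(void)  { return 0; }  /* extra intercepts + filtering */

    static int runGuestCode(int fUseDebugLoop, int fAnyProbes, int fStepping)
    {
        if (!fUseDebugLoop && !fAnyProbes && !fStepping)
            return runNormalLoop();
        return runDebugLoop();
    }

    int main(void)
    {
        printf("%d\n", runGuestCode(0, 0, 0));  /* fast path                  */
        printf("%d\n", runGuestCode(0, 1, 0));  /* probes armed -> debug loop */
        return 0;
    }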
    9119 
    9120 
    9121 /**
    9122  * Single-stepping VM-exit filtering.
    9123  *
    9124  * This is preprocessing the exits and deciding whether we've gotten far enough
    9125  * to return VINF_EM_DBG_STEPPED already.  If not, normal VM-exit handling is
    9126  * performed.
    9127  *
    9128  * @returns Strict VBox status code.
    9129  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    9130  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    9131  *                          out-of-sync. Make sure to update the required
    9132  *                          fields before using them.
    9133  * @param   pVmxTransient   Pointer to the VMX-transient structure.
    9134  * @param   uExitReason     The VM-exit reason.
    9135  * @param   uCsStart        The CS we started executing (stepping) on.
    9136  * @param   uRipStart       The RIP we started executing (stepping) on.
    9137  */
    9138 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
    9139                                                uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart)
    9140 {
    9141     switch (uExitReason)
    9142     {
    9143         case VMX_EXIT_XCPT_OR_NMI:
    9144         {
    9145             /* Check for host NMI. */
    9146             int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    9147             AssertRCReturn(rc2, rc2);
    9148             uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
    9149             if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
    9150                 return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
    9151             /* fall thru */
    9152         }
    9153 
    9154         case VMX_EXIT_EPT_MISCONFIG:
    9155         case VMX_EXIT_TRIPLE_FAULT:
    9156         case VMX_EXIT_APIC_ACCESS:
    9157         case VMX_EXIT_TPR_BELOW_THRESHOLD:
    9158         case VMX_EXIT_TASK_SWITCH:
    9159 
    9160         /* Instruction specific VM-exits: */
    9161         case VMX_EXIT_IO_INSTR:
    9162         case VMX_EXIT_CPUID:
    9163         case VMX_EXIT_RDTSC:
    9164         case VMX_EXIT_RDTSCP:
    9165         case VMX_EXIT_MOV_CRX:
    9166         case VMX_EXIT_MWAIT:
    9167         case VMX_EXIT_MONITOR:
    9168         case VMX_EXIT_RDMSR:
    9169         case VMX_EXIT_WRMSR:
    9170         case VMX_EXIT_MOV_DRX:
    9171         case VMX_EXIT_HLT:
    9172         case VMX_EXIT_INVD:
    9173         case VMX_EXIT_INVLPG:
    9174         case VMX_EXIT_RSM:
    9175         case VMX_EXIT_PAUSE:
    9176         case VMX_EXIT_XDTR_ACCESS:
    9177         case VMX_EXIT_TR_ACCESS:
    9178         case VMX_EXIT_WBINVD:
    9179         case VMX_EXIT_XSETBV:
    9180         case VMX_EXIT_RDRAND:
    9181         case VMX_EXIT_INVPCID:
    9182         case VMX_EXIT_GETSEC:
    9183         case VMX_EXIT_RDPMC:
    9184         case VMX_EXIT_VMCALL:
    9185         case VMX_EXIT_VMCLEAR:
    9186         case VMX_EXIT_VMLAUNCH:
    9187         case VMX_EXIT_VMPTRLD:
    9188         case VMX_EXIT_VMPTRST:
    9189         case VMX_EXIT_VMREAD:
    9190         case VMX_EXIT_VMRESUME:
    9191         case VMX_EXIT_VMWRITE:
    9192         case VMX_EXIT_VMXOFF:
    9193         case VMX_EXIT_VMXON:
    9194         case VMX_EXIT_INVEPT:
    9195         case VMX_EXIT_INVVPID:
    9196         case VMX_EXIT_VMFUNC:
    9197         {
    9198             int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    9199             rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    9200             AssertRCReturn(rc2, rc2);
    9201             if (   pMixedCtx->rip    != uRipStart
    9202                 || pMixedCtx->cs.Sel != uCsStart)
    9203                 return VINF_EM_DBG_STEPPED;
    9204             break;
    9205         }
    9206     }
    9207 
    9208     /*
    9209      * Normal processing.
    9210      */
    9211 #ifdef HMVMX_USE_FUNCTION_TABLE
    9212     return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
    9213 #else
    9214     return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
    9215 #endif
    9216 }
    921710081
    921810082
     
    998010844                case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient);      break;
    998110845                case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pMixedCtx, pVmxTransient);      break;
    9982 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     10846
    998310847                case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
    998410848                                  rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
     
    999310857                case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
    999410858                                  rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
    9995 #endif
    999610859                default:
    999710860                {
     
    1028411147    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    1028511148    PVM pVM = pVCpu->CTX_SUFF(pVM);
    10286     Assert(!pVM->hm.s.fNestedPaging);
     11149    Assert(!pVM->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
    1028711150
    1028811151    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     
    1091911782                    break;
    1092011783                case 3: /* CR3 */
    10921                     Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
     11784                    Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx) || pVCpu->hm.s.fUsingDebugLoop);
    1092211785                    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
    1092311786                    Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3));
     
    1094911812            Assert(   !pVM->hm.s.fNestedPaging
    1095011813                   || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
     11814                   || pVCpu->hm.s.fUsingDebugLoop
    1095111815                   || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
    1095211816
     
    1179912663    {
    1180012664#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    11801         Assert(!pVmxTransient->fWasGuestFPUStateActive);
     12665        Assert(!pVmxTransient->fWasGuestFPUStateActive || pVCpu->hm.s.fUsingDebugLoop);
    1180212666#endif
    1180312667        rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
     
    1184012704    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
    1184112705
    11842     int rc = VERR_INTERNAL_ERROR_5;
    11843     if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    11844     {
    11845 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     12706    int rc;
     12707    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     12708    { /* likely */ }
     12709    else
     12710    {
     12711#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     12712        Assert(pVCpu->hm.s.fUsingDebugLoop);
     12713#endif
    1184612714        /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
    1184712715        rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     
    1185512723                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    1185612724        return rc;
    11857 #else
    11858         /* We don't intercept #GP. */
    11859         AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
    11860         NOREF(pVmxTransient);
    11861         return VERR_VMX_UNEXPECTED_EXCEPTION;
    11862 #endif
    1186312725    }
    1186412726
     
    1212212984
    1212312985
    12124 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    1212512986/**
    1212612987 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
     
    1213312994{
    1213412995    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
     12996#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     12997    Assert(pVCpu->hm.s.fUsingDebugLoop);
     12998#endif
    1213512999
    1213613000    /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
     
    1215113015    return VINF_SUCCESS;
    1215213016}
    12153 #endif
    1215413017
    1215513018
     
    1216613029    AssertRCReturn(rc, rc);
    1216713030
    12168 #if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
    12169     if (pVM->hm.s.fNestedPaging)
    12170     {
     13031    if (!pVM->hm.s.fNestedPaging)
     13032    { /* likely */ }
     13033    else
     13034    {
     13035#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
     13036        Assert(pVCpu->hm.s.fUsingDebugLoop);
     13037#endif
    1217113038        pVCpu->hm.s.Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */
    1217213039        if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
     
    1218513052        return rc;
    1218613053    }
    12187 #else
    12188     Assert(!pVM->hm.s.fNestedPaging);
    12189     NOREF(pVM);
    12190 #endif
    1219113054
    1219213055    /* If it's a vectoring #PF, emulate injecting the original event injection as PGMTrap0eHandler() is incapable