VirtualBox

Ignore:
Timestamp:
Dec 29, 2021 2:59:38 AM (3 years ago)
Author:
vboxsync
Message:

VMM/HMSVMR0: Implemented debug events - need more testing. bugref:8139

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r92745 r93091  
    4141#ifdef DEBUG_ramshankar
    4242# define HMSVM_SYNC_FULL_GUEST_STATE
     43#endif
    4344# define HMSVM_ALWAYS_TRAP_ALL_XCPTS
    4445# define HMSVM_ALWAYS_TRAP_PF
    4546# define HMSVM_ALWAYS_TRAP_TASK_SWITCH
    46 #endif
    4747
    4848
     
    410410static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
    411411static FNSVMEXITHANDLER hmR0SvmExitXcptGP;
    412 #if defined(HMSVM_ALWAYS_TRAP_ALL_XCPTS) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
    413412static FNSVMEXITHANDLER hmR0SvmExitXcptGeneric;
    414 #endif
     413static FNSVMEXITHANDLER hmR0SvmExitSwInt;
    415414#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    416415static FNSVMEXITHANDLER hmR0SvmExitClgi;
     
    429428static VBOXSTRICTRC hmR0SvmHandleExitNested(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient);
    430429#endif
     430static VBOXSTRICTRC hmR0SvmRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops);
    431431
    432432
     
    45214521
    45224522
/**
 * Runs the guest code using AMD-V in single step mode.
 *
 * Loops VMRUN until the guest RIP/CS changes (a step), a ring-3 condition is
 * signalled, or the resume-loop limit is hit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pcLoops     Pointer to the number of executed loops.
 */
static VBOXSTRICTRC hmR0SvmRunGuestCodeStep(PVMCPUCC pVCpu, uint32_t *pcLoops)
{
    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
    Assert(pcLoops);
    Assert(*pcLoops <= cMaxResumeLoops);

    SVMTRANSIENT SvmTransient;
    RT_ZERO(SvmTransient);
    SvmTransient.fUpdateTscOffsetting = true;
    SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;

    /* Remember where the guest started so we can tell whether a #VMEXIT
       actually advanced execution (i.e. counts as a completed step). */
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    uint16_t const uCsStart  = pCtx->cs.Sel;
    uint64_t const uRipStart = pCtx->rip;

    VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
    for (;;)
    {
        Assert(!HMR0SuspendPending());
        AssertMsg(pVCpu->hmr0.s.idEnteredCpu == RTMpCpuId(),
                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hmr0.s.idEnteredCpu,
                  (unsigned)RTMpCpuId(), *pcLoops));

        /* Preparatory work for running guest code, this may force us to return to
           ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
        rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * No longjmps to ring-3 from this point on!!!
         *
         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
         * better than a kernel panic. This also disables flushing of the R0-logger instance.
         */
        hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);

        rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);

        /* Restore any residual host-state and save any bits shared between host and guest
           into the guest-CPU state.  Re-enables interrupts! */
        hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);

        if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
                        || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
        {
            if (rc == VINF_SUCCESS)
                rc = VERR_SVM_INVALID_GUEST_STATE;
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
            hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
            return rc;
        }

        /* Handle the #VMEXIT. */
        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
        rc = hmR0SvmHandleExit(pVCpu, &SvmTransient);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
        if (rc != VINF_SUCCESS)
            break;
        if (++(*pcLoops) >= cMaxResumeLoops)
        {
            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
            rc = VINF_EM_RAW_INTERRUPT;
            break;
        }

        /*
         * Did the RIP change, if so, consider it a single step.
         * Otherwise, make sure one of the TFs gets set.
         */
        if (   pCtx->rip    != uRipStart
            || pCtx->cs.Sel != uCsStart)
        {
            rc = VINF_EM_DBG_STEPPED;
            break;
        }
        /* RIP didn't move; force the debug-register state to be re-evaluated so
           a trap flag gets (re)armed for the next iteration. */
        pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR_MASK;
    }

    /*
     * Clear the X86_EFL_TF if necessary.
     */
    if (pVCpu->hmr0.s.fClearTrapFlag)
    {
        pVCpu->hmr0.s.fClearTrapFlag = false;
        pCtx->eflags.Bits.u1TF = 0;
    }

    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    return rc;
}
    4624 
    46254523#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    46264524/**
     
    47264624
/**
 * Checks if any expensive dtrace probes are enabled and we should go to the
 * debug loop.
 *
 * @returns true if we should use debug loop, false if not.
 */
static bool hmR0SvmAnyExpensiveProbesEnabled(void)
{
    /* It's probably faster to OR the raw 32-bit counter variables together.
       Since the variables are in an array and the probes are next to one
       another (more or less), we have good locality.  So, better read
       eight-nine cache lines every time and only have one conditional, than
       128+ conditionals, right? */
    return (  VBOXVMM_R0_HMSVM_VMEXIT_ENABLED_RAW() /* expensive too due to context */
            | VBOXVMM_XCPT_DE_ENABLED_RAW()
            | VBOXVMM_XCPT_DB_ENABLED_RAW()
            | VBOXVMM_XCPT_BP_ENABLED_RAW()
            | VBOXVMM_XCPT_OF_ENABLED_RAW()
            | VBOXVMM_XCPT_BR_ENABLED_RAW()
            | VBOXVMM_XCPT_UD_ENABLED_RAW()
            | VBOXVMM_XCPT_NM_ENABLED_RAW()
            | VBOXVMM_XCPT_DF_ENABLED_RAW()
            | VBOXVMM_XCPT_TS_ENABLED_RAW()
            | VBOXVMM_XCPT_NP_ENABLED_RAW()
            | VBOXVMM_XCPT_SS_ENABLED_RAW()
            | VBOXVMM_XCPT_GP_ENABLED_RAW()
            | VBOXVMM_XCPT_PF_ENABLED_RAW()
            | VBOXVMM_XCPT_MF_ENABLED_RAW()
            | VBOXVMM_XCPT_AC_ENABLED_RAW()
            | VBOXVMM_XCPT_XF_ENABLED_RAW()
            | VBOXVMM_XCPT_VE_ENABLED_RAW()
            | VBOXVMM_XCPT_SX_ENABLED_RAW()
            | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
            | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
           ) != 0
        || (  VBOXVMM_INSTR_HALT_ENABLED_RAW()
            | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
            | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
            | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
            | VBOXVMM_INSTR_INVD_ENABLED_RAW()
            | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
            | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
            | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
            | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
            | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
            | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
            | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
            | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
            | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
            | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
            | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
            | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
            | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
            | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
            | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
            | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
            | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
            | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
            | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
            | VBOXVMM_INSTR_STR_ENABLED_RAW()
            | VBOXVMM_INSTR_LTR_ENABLED_RAW()
            //| VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
            | VBOXVMM_INSTR_RSM_ENABLED_RAW()
            //| VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
            //| VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
            //| VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
            //| VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
            | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
            | VBOXVMM_INSTR_SVM_VMRUN_ENABLED_RAW()
            | VBOXVMM_INSTR_SVM_VMLOAD_ENABLED_RAW()
            | VBOXVMM_INSTR_SVM_VMSAVE_ENABLED_RAW()
            | VBOXVMM_INSTR_SVM_STGI_ENABLED_RAW()
            | VBOXVMM_INSTR_SVM_CLGI_ENABLED_RAW()
           ) != 0
        || (  VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
            | VBOXVMM_EXIT_HALT_ENABLED_RAW()
            | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
            | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
            | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
            | VBOXVMM_EXIT_INVD_ENABLED_RAW()
            | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
            | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
            | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
            | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
            | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
            | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
            | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
            | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
            | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
            | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
            | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
            | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
            | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
            | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
            | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
            | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
            | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
            | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
            | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
            | VBOXVMM_EXIT_STR_ENABLED_RAW()
            | VBOXVMM_EXIT_LTR_ENABLED_RAW()
            //| VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
            | VBOXVMM_EXIT_RSM_ENABLED_RAW()
            //| VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
            //| VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
            //| VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
            //| VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
            | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
            | VBOXVMM_EXIT_SVM_VMRUN_ENABLED_RAW()
            | VBOXVMM_EXIT_SVM_VMLOAD_ENABLED_RAW()
            | VBOXVMM_EXIT_SVM_VMSAVE_ENABLED_RAW()
            | VBOXVMM_EXIT_SVM_STGI_ENABLED_RAW()
            | VBOXVMM_EXIT_SVM_CLGI_ENABLED_RAW()
           ) != 0;
}
     4740
     4741
     4742/**
    47284743 * Runs the guest code using AMD-V.
    47294744 *
     
    47514766        if (!fInNestedGuestMode)
    47524767        {
    4753             if (!pVCpu->hm.s.fSingleInstruction)
     4768            if (   !pVCpu->hm.s.fUseDebugLoop
     4769                && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0SvmAnyExpensiveProbesEnabled())
     4770                && !DBGFIsStepping(pVCpu)
     4771                && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
    47544772                rc = hmR0SvmRunGuestCodeNormal(pVCpu, &cLoops);
    47554773            else
    4756                 rc = hmR0SvmRunGuestCodeStep(pVCpu, &cLoops);
     4774                rc = hmR0SvmRunGuestCodeDebug(pVCpu, &cLoops);
    47574775        }
    47584776#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     
    47874805}
    47884806
    4789 
    47904807#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     4808
    47914809/**
    47924810 * Determines whether the given I/O access should cause a nested-guest \#VMEXIT.
     
    52635281    /* not reached */
    52645282
    5265 #undef NST_GST_VMEXIT_CALL_RET
    5266 }
    5267 #endif
    5268 
    5269 
    5270 /**
    5271  * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
    5272  *
    5273  * @returns Strict VBox status code (informational status codes included).
    5274  * @param   pVCpu           The cross context virtual CPU structure.
    5275  * @param   pSvmTransient   Pointer to the SVM transient structure.
    5276  */
    5277 static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
    5278 {
    5279     Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
    5280     Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
    5281 
     5283# undef NST_GST_VMEXIT_CALL_RET
     5284}
     5285
     5286#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
     5287
     5288/** @def VMEXIT_CALL_RET
     5289 * Used by hmR0SvmHandleExit and hmR0SvmDebugHandleExit
     5290 */
    52825291#ifdef DEBUG_ramshankar
    52835292# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) \
     
    52935302# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) return a_CallExpr
    52945303#endif
     5304
     5305/**
     5306 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID).
     5307 *
     5308 * @returns Strict VBox status code (informational status codes included).
     5309 * @param   pVCpu           The cross context virtual CPU structure.
     5310 * @param   pSvmTransient   Pointer to the SVM transient structure.
     5311 */
     5312static VBOXSTRICTRC hmR0SvmHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
     5313{
     5314    Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
     5315    Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
    52955316
    52965317    /*
     
    53945415                }
    53955416
    5396 #ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    53975417                case SVM_EXIT_XCPT_DE:
    53985418                /*   SVM_EXIT_XCPT_DB: */       /* Handled above. */
     
    54195439                case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
    54205440                    VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pSvmTransient));
    5421 #endif  /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
     5441
     5442                case SVM_EXIT_SWINT:
     5443                    VMEXIT_CALL_RET(0, hmR0SvmExitSwInt(pVCpu, pSvmTransient));
    54225444
    54235445                default:
     
    54315453    }
    54325454    /* not reached */
     5455}
     5456
     5457
     5458/** @name Execution loop for single stepping, DBGF events and expensive Dtrace probes.
     5459 *
     5460 * The following few functions and associated structure contains the bloat
     5461 * necessary for providing detailed debug events and dtrace probes as well as
     5462 * reliable host side single stepping.  This works on the principle of
     5463 * "subclassing" the normal execution loop and workers.  We replace the loop
     5464 * method completely and override selected helpers to add necessary adjustments
     5465 * to their core operation.
     5466 *
     5467 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
     5468 * any performance for debug and analysis features.
     5469 *
     5470 * @{
     5471 */
     5472
/**
 * Transient per-VCPU debug state of the VMCB and related info we save/restore
 * in the debug run loop.
 */
typedef struct SVMRUNDBGSTATE
{
    /** The initial SVMVMCBCTRL::u64InterceptCtrl value (helps with restore). */
    uint64_t    bmInterceptInitial;
    /** The initial SVMVMCBCTRL::u32InterceptXcpt value (helps with restore). */
    uint32_t    bmXcptInitial;
    /** The initial SVMVMCBCTRL::u16InterceptRdCRx value (helps with restore). */
    uint16_t    bmInterceptRdCRxInitial;
    /** The initial SVMVMCBCTRL::u16InterceptWrCRx value (helps with restore). */
    uint16_t    bmInterceptWrCRxInitial;
    /** The initial SVMVMCBCTRL::u16InterceptRdDRx value (helps with restore). */
    uint16_t    bmInterceptRdDRxInitial;
    /** The initial SVMVMCBCTRL::u16InterceptWrDRx value (helps with restore). */
    uint16_t    bmInterceptWrDRxInitial;

    /** Whether we've actually modified the intercept control qword. */
    bool        fModifiedInterceptCtrl : 1;
    /** Whether we've actually modified the exception bitmap. */
    bool        fModifiedXcptBitmap : 1;
    /** Whether we've actually modified SVMVMCBCTRL::u16InterceptRdCRx. */
    bool        fModifiedInterceptRdCRx : 1;
    /** Whether we've actually modified SVMVMCBCTRL::u16InterceptWrCRx. */
    bool        fModifiedInterceptWrCRx : 1;
    /** Whether we've actually modified SVMVMCBCTRL::u16InterceptRdDRx. */
    bool        fModifiedInterceptRdDRx : 1;
    /** Whether we've actually modified SVMVMCBCTRL::u16InterceptWrDRx. */
    bool        fModifiedInterceptWrDRx : 1;

    /** The CS we started executing with.  */
    uint16_t    uCsStart;
    /** The RIP we started executing at.  This is for detecting that we stepped.  */
    uint64_t    uRipStart;

    /** The sequence number of the Dtrace provider settings the state was
     *  configured against. */
    uint32_t    uDtraceSettingsSeqNo;
    /** Extra stuff we need in SVMVMCBCTRL::u32InterceptXcpt. */
    uint32_t    bmXcptExtra;
    /** Extra stuff we need in SVMVMCBCTRL::u64InterceptCtrl. */
    uint64_t    bmInterceptExtra;
    /** Extra stuff we need in SVMVMCBCTRL::u16InterceptRdCRx. */
    uint16_t    bmInterceptRdCRxExtra;
    /** Extra stuff we need in SVMVMCBCTRL::u16InterceptWrCRx. */
    uint16_t    bmInterceptWrCRxExtra;
    /** Extra stuff we need in SVMVMCBCTRL::u16InterceptRdDRx. */
    uint16_t    bmInterceptRdDRxExtra;
    /** Extra stuff we need in SVMVMCBCTRL::u16InterceptWrDRx. */
    uint16_t    bmInterceptWrDRxExtra;
    /** VM-exits to check (one bit per VM-exit). */
    uint32_t    bmExitsToCheck[33];
} SVMRUNDBGSTATE;
AssertCompileMemberSize(SVMRUNDBGSTATE, bmExitsToCheck, (SVM_EXIT_MAX + 1 + 31) / 32 * 4);
/** Pointer to a SVMRUNDBGSTATE. */
typedef SVMRUNDBGSTATE *PSVMRUNDBGSTATE;
     5530
     5531
     5532/**
     5533 * Initializes the SVMRUNDBGSTATE structure.
     5534 *
     5535 * @param   pVCpu           The cross context virtual CPU structure of the
     5536 *                          calling EMT.
     5537 * @param   pSvmTransient   The SVM-transient structure.
     5538 * @param   pDbgState       The debug state to initialize.
     5539 */
     5540static void hmR0SvmRunDebugStateInit(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
     5541{
     5542    PSVMVMCB pVmcb = pSvmTransient->pVmcb;
     5543    pDbgState->bmInterceptInitial       = pVmcb->ctrl.u64InterceptCtrl;
     5544    pDbgState->bmXcptInitial            = pVmcb->ctrl.u32InterceptXcpt;
     5545    pDbgState->bmInterceptRdCRxInitial  = pVmcb->ctrl.u16InterceptRdCRx;
     5546    pDbgState->bmInterceptWrCRxInitial  = pVmcb->ctrl.u16InterceptWrCRx;
     5547    pDbgState->bmInterceptRdDRxInitial  = pVmcb->ctrl.u16InterceptRdDRx;
     5548    pDbgState->bmInterceptWrDRxInitial  = pVmcb->ctrl.u16InterceptWrDRx;
     5549
     5550    pDbgState->fModifiedInterceptCtrl   = false;
     5551    pDbgState->fModifiedXcptBitmap      = false;
     5552    pDbgState->fModifiedInterceptRdCRx  = false;
     5553    pDbgState->fModifiedInterceptWrCRx  = false;
     5554    pDbgState->fModifiedInterceptRdDRx  = false;
     5555    pDbgState->fModifiedInterceptWrDRx  = false;
     5556
     5557    pDbgState->uCsStart                 = pVCpu->cpum.GstCtx.cs.Sel;
     5558    pDbgState->uRipStart                = pVCpu->cpum.GstCtx.rip;
     5559
     5560    /* We don't really need to zero these. */
     5561    pDbgState->bmInterceptExtra         = 0;
     5562    pDbgState->bmXcptExtra              = 0;
     5563    pDbgState->bmInterceptRdCRxExtra    = 0;
     5564    pDbgState->bmInterceptWrCRxExtra    = 0;
     5565    pDbgState->bmInterceptRdDRxExtra    = 0;
     5566    pDbgState->bmInterceptWrDRxExtra    = 0;
     5567}
     5568
     5569
/**
 * Updates the VMCB fields with changes requested by @a pDbgState.
 *
 * This is performed after hmR0SvmPreRunGuestDebugStateUpdate as well
 * immediately before executing guest code, i.e. when interrupts are disabled.
 * We don't check status codes here as we cannot easily assert or return in the
 * latter case.
 *
 * @param   pSvmTransient   The SVM-transient structure.
 * @param   pDbgState       The debug state.
 */
static void hmR0SvmPreRunGuestDebugStateApply(PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
{
    /*
     * Ensure desired flags in VMCB control fields are set.
     */
    PSVMVMCB const pVmcb = pSvmTransient->pVmcb;
    /* ORs the extra bits into the given VMCB control field if any are missing,
       invalidating the intercept clean bit and recording the modification so
       hmR0SvmRunDebugStateRevert knows to restore the field.
       Note: evaluates a_bmExtra more than once - keep arguments side-effect free. */
#define ADD_EXTRA_INTERCEPTS(a_VmcbCtrlField, a_bmExtra, a_fModified) do { \
        if ((pVmcb->ctrl. a_VmcbCtrlField & (a_bmExtra)) != (a_bmExtra)) \
        { \
            pVmcb->ctrl. a_VmcbCtrlField |= (a_bmExtra); \
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; \
            Log6Func((#a_VmcbCtrlField ": %#RX64\n", pVmcb->ctrl. a_VmcbCtrlField)); \
            (a_fModified) = true; \
        } \
    } while (0)
    ADD_EXTRA_INTERCEPTS(u64InterceptCtrl,  pDbgState->bmInterceptExtra,        pDbgState->fModifiedInterceptCtrl);
    ADD_EXTRA_INTERCEPTS(u32InterceptXcpt,  pDbgState->bmXcptExtra,             pDbgState->fModifiedXcptBitmap);
    ADD_EXTRA_INTERCEPTS(u16InterceptRdCRx, pDbgState->bmInterceptRdCRxExtra,   pDbgState->fModifiedInterceptRdCRx);
    ADD_EXTRA_INTERCEPTS(u16InterceptWrCRx, pDbgState->bmInterceptWrCRxExtra,   pDbgState->fModifiedInterceptWrCRx);
    ADD_EXTRA_INTERCEPTS(u16InterceptRdDRx, pDbgState->bmInterceptRdDRxExtra,   pDbgState->fModifiedInterceptRdDRx);
    ADD_EXTRA_INTERCEPTS(u16InterceptWrDRx, pDbgState->bmInterceptWrDRxExtra,   pDbgState->fModifiedInterceptWrDRx);
#undef ADD_EXTRA_INTERCEPTS
}
     5604
     5605
/**
 * Restores VMCB fields that were changed by hmR0SvmPreRunGuestDebugStateApply
 * for re-entry next time around.
 *
 * @param   pSvmTransient   The SVM-transient structure.
 * @param   pDbgState       The debug state.
 */
static void hmR0SvmRunDebugStateRevert(PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
{
    /*
     * Restore VM-exit control settings as we may not reenter this function the
     * next time around.
     */
    PSVMVMCB const pVmcb = pSvmTransient->pVmcb;

    /* Puts back the initial value of the given VMCB control field, but only if
       hmR0SvmPreRunGuestDebugStateApply actually changed it; the clean bit is
       invalidated so the CPU reloads the intercepts. */
#define RESTORE_INTERCEPTS(a_VmcbCtrlField, a_bmInitial, a_fModified) do { \
        if ((a_fModified)) \
        { \
            pVmcb->ctrl. a_VmcbCtrlField = (a_bmInitial); \
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; \
        } \
    } while (0)
    RESTORE_INTERCEPTS(u64InterceptCtrl,  pDbgState->bmInterceptInitial,      pDbgState->fModifiedInterceptCtrl);
    RESTORE_INTERCEPTS(u32InterceptXcpt,  pDbgState->bmXcptInitial,           pDbgState->fModifiedXcptBitmap);
    RESTORE_INTERCEPTS(u16InterceptRdCRx, pDbgState->bmInterceptRdCRxInitial, pDbgState->fModifiedInterceptRdCRx);
    RESTORE_INTERCEPTS(u16InterceptWrCRx, pDbgState->bmInterceptWrCRxInitial, pDbgState->fModifiedInterceptWrCRx);
    RESTORE_INTERCEPTS(u16InterceptRdDRx, pDbgState->bmInterceptRdDRxInitial, pDbgState->fModifiedInterceptRdDRx);
    RESTORE_INTERCEPTS(u16InterceptWrDRx, pDbgState->bmInterceptWrDRxInitial, pDbgState->fModifiedInterceptWrDRx);
#undef RESTORE_INTERCEPTS
}
     5637
     5638
     5639/**
     5640 * Configures VM-exit controls for current DBGF and DTrace settings.
     5641 *
     5642 * This updates @a pDbgState and the VMCB execution control fields (in the debug
     5643 * state) to reflect the necessary VM-exits demanded by DBGF and DTrace.
     5644 *
     5645 * @param   pVCpu           The cross context virtual CPU structure.
     5646 * @param   pSvmTransient   The SVM-transient structure. May update
     5647 *                          fUpdatedTscOffsettingAndPreemptTimer.
     5648 * @param   pDbgState       The debug state.
     5649 */
     5650static void hmR0SvmPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
     5651{
     5652    /*
     5653     * Take down the dtrace serial number so we can spot changes.
     5654     */
     5655    pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
     5656    ASMCompilerBarrier();
     5657
     5658    /*
     5659     * Clear data members that we'll be rebuilding here.
     5660     */
     5661    pDbgState->bmXcptExtra              = 0;
     5662    pDbgState->bmInterceptExtra         = 0;
     5663    pDbgState->bmInterceptRdCRxExtra    = 0;
     5664    pDbgState->bmInterceptWrCRxExtra    = 0;
     5665    pDbgState->bmInterceptRdDRxExtra    = 0;
     5666    pDbgState->bmInterceptWrDRxExtra    = 0;
     5667    for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
     5668        pDbgState->bmExitsToCheck[i]    = 0;
     5669
     5670    /*
     5671     * Software interrupts (INT XXh)
     5672     */
     5673    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     5674    if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
     5675        || VBOXVMM_INT_SOFTWARE_ENABLED())
     5676    {
     5677        pDbgState->bmInterceptExtra |= SVM_CTRL_INTERCEPT_INTN;
     5678        ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_SWINT);
     5679    }
     5680
     5681    /*
     5682     * INT3 breakpoints - triggered by #BP exceptions.
     5683     */
     5684    if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
     5685        pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
     5686
     5687    /*
     5688     * Exception bitmap and XCPT events+probes.
     5689     */
     5690#define SET_XCPT(a_iXcpt) do { \
     5691        pDbgState->bmXcptExtra |= RT_BIT_32(a_iXcpt); \
     5692        ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_XCPT_0 + (a_iXcpt)); \
     5693    } while (0)
     5694
     5695    for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
     5696        if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
     5697            SET_XCPT(iXcpt);
     5698
     5699    if (VBOXVMM_XCPT_DE_ENABLED())  SET_XCPT(X86_XCPT_DE);
     5700    if (VBOXVMM_XCPT_DB_ENABLED())  SET_XCPT(X86_XCPT_DB);
     5701    if (VBOXVMM_XCPT_BP_ENABLED())  SET_XCPT(X86_XCPT_BP);
     5702    if (VBOXVMM_XCPT_OF_ENABLED())  SET_XCPT(X86_XCPT_OF);
     5703    if (VBOXVMM_XCPT_BR_ENABLED())  SET_XCPT(X86_XCPT_BR);
     5704    if (VBOXVMM_XCPT_UD_ENABLED())  SET_XCPT(X86_XCPT_UD);
     5705    if (VBOXVMM_XCPT_NM_ENABLED())  SET_XCPT(X86_XCPT_NM);
     5706    if (VBOXVMM_XCPT_DF_ENABLED())  SET_XCPT(X86_XCPT_DF);
     5707    if (VBOXVMM_XCPT_TS_ENABLED())  SET_XCPT(X86_XCPT_TS);
     5708    if (VBOXVMM_XCPT_NP_ENABLED())  SET_XCPT(X86_XCPT_NP);
     5709    if (VBOXVMM_XCPT_SS_ENABLED())  SET_XCPT(X86_XCPT_SS);
     5710    if (VBOXVMM_XCPT_GP_ENABLED())  SET_XCPT(X86_XCPT_GP);
     5711    if (VBOXVMM_XCPT_PF_ENABLED())  SET_XCPT(X86_XCPT_PF);
     5712    if (VBOXVMM_XCPT_MF_ENABLED())  SET_XCPT(X86_XCPT_MF);
     5713    if (VBOXVMM_XCPT_AC_ENABLED())  SET_XCPT(X86_XCPT_AC);
     5714    if (VBOXVMM_XCPT_XF_ENABLED())  SET_XCPT(X86_XCPT_XF);
     5715    if (VBOXVMM_XCPT_VE_ENABLED())  SET_XCPT(X86_XCPT_VE);
     5716    if (VBOXVMM_XCPT_SX_ENABLED())  SET_XCPT(X86_XCPT_SX);
     5717
     5718#undef SET_XCPT
     5719
     5720    /*
     5721     * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
     5722     *
     5723     * Note! This is the reverse of what hmR0SvmHandleExitDtraceEvents does.
     5724     *       So, when adding/changing/removing please don't forget to update it.
     5725     *
     5726     * Some of the macros are picking up local variables to save horizontal space,
     5727     * (being able to see it in a table is the lesser evil here).
     5728     */
     5729#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
     5730        (    DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
     5731         ||  RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
     5732#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
     5733        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
     5734        {   AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
     5735            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
     5736        } else do { } while (0)
     5737#define SET_INCP_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fInterceptCtrl) \
     5738        if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
     5739        { \
     5740            (pDbgState)->bmInterceptExtra |= (a_fInterceptCtrl); \
     5741            AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
     5742            ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
     5743        } else do { } while (0)
     5744
     5745    /** @todo double check these */
     5746    /** @todo Check what more AMD-V specific we can intercept.   */
     5747    //SET_INCP_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH,         SVM_EXIT_TASK_SWITCH,       SVM_CTRL_INTERCEPT_TASK_SWITCH);
     5748    SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH,         SVM_EXIT_TASK_SWITCH);
     5749    SET_INCP_XBM_IF_EITHER_EN(INSTR_VMM_CALL,           SVM_EXIT_VMMCALL,           SVM_CTRL_INTERCEPT_VMMCALL);
     5750    SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL,           SVM_EXIT_VMMCALL);
     5751    SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMRUN,          SVM_EXIT_VMRUN,             SVM_CTRL_INTERCEPT_VMRUN);
     5752    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMRUN,          SVM_EXIT_VMRUN);
     5753    SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMLOAD,         SVM_EXIT_VMLOAD,            SVM_CTRL_INTERCEPT_VMLOAD);
     5754    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMLOAD,         SVM_EXIT_VMLOAD);
     5755    SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_VMSAVE,         SVM_EXIT_VMSAVE,            SVM_CTRL_INTERCEPT_VMSAVE);
     5756    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_VMSAVE,         SVM_EXIT_VMSAVE);
     5757    SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_STGI,           SVM_EXIT_STGI,              SVM_CTRL_INTERCEPT_STGI);
     5758    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_STGI,           SVM_EXIT_STGI);
     5759    SET_INCP_XBM_IF_EITHER_EN(INSTR_SVM_CLGI,           SVM_EXIT_CLGI,              SVM_CTRL_INTERCEPT_CLGI);
     5760    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SVM_CLGI,           SVM_EXIT_CLGI);
     5761
     5762    SET_INCP_XBM_IF_EITHER_EN(INSTR_CPUID,              SVM_EXIT_CPUID,             SVM_CTRL_INTERCEPT_CPUID);
     5763    SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID,              SVM_EXIT_CPUID);
     5764    SET_INCP_XBM_IF_EITHER_EN(INSTR_HALT,               SVM_EXIT_HLT,               SVM_CTRL_INTERCEPT_HLT);
     5765    SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT,               SVM_EXIT_HLT);
     5766    SET_INCP_XBM_IF_EITHER_EN(INSTR_INVD,               SVM_EXIT_INVD,              SVM_CTRL_INTERCEPT_INVD);
     5767    SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD,               SVM_EXIT_INVD);
     5768    SET_INCP_XBM_IF_EITHER_EN(INSTR_INVLPG,             SVM_EXIT_INVLPG,            SVM_CTRL_INTERCEPT_INVLPG);
     5769    SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG,             SVM_EXIT_INVLPG);
     5770    SET_INCP_XBM_IF_EITHER_EN(INSTR_RDPMC,              SVM_EXIT_RDPMC,             SVM_CTRL_INTERCEPT_RDPMC);
     5771    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC,              SVM_EXIT_RDPMC);
     5772    SET_INCP_XBM_IF_EITHER_EN(INSTR_RDTSC,              SVM_EXIT_RDTSC,             SVM_CTRL_INTERCEPT_RDTSC);
     5773    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC,              SVM_EXIT_RDTSC);
     5774    SET_INCP_XBM_IF_EITHER_EN(INSTR_RDTSCP,             SVM_EXIT_RDTSCP,            SVM_CTRL_INTERCEPT_RDTSCP);
     5775    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP,             SVM_EXIT_RDTSCP);
     5776    SET_INCP_XBM_IF_EITHER_EN(INSTR_RSM,                SVM_EXIT_RSM,               SVM_CTRL_INTERCEPT_RSM);
     5777    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM,                SVM_EXIT_RSM);
     5778
     5779    if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
     5780        pDbgState->bmInterceptRdCRxExtra = 0xffff;
     5781    if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ) || IS_EITHER_ENABLED(pVM, EXIT_CRX_READ))
     5782        ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_READ_CR0, SVM_EXIT_READ_CR15 + 1);
     5783
     5784    if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
     5785        pDbgState->bmInterceptWrCRxExtra = 0xffff;
     5786    if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE) || IS_EITHER_ENABLED(pVM, EXIT_CRX_WRITE))
     5787    {
     5788        ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_WRITE_CR0, SVM_EXIT_WRITE_CR15 + 1);
     5789        ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_CR0_SEL_WRITE);
     5790    }
     5791
     5792    if (IS_EITHER_ENABLED(pVM, INSTR_DRX_READ))
     5793        pDbgState->bmInterceptRdDRxExtra = 0xffff;
     5794    if (IS_EITHER_ENABLED(pVM, INSTR_DRX_READ) || IS_EITHER_ENABLED(pVM, EXIT_DRX_READ))
     5795        ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_READ_DR0, SVM_EXIT_READ_DR15 + 1);
     5796
     5797    if (IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
     5798        pDbgState->bmInterceptWrDRxExtra = 0xffff;
     5799    if (IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE) || IS_EITHER_ENABLED(pVM, EXIT_DRX_WRITE))
     5800        ASMBitSetRange(pDbgState->bmExitsToCheck, SVM_EXIT_WRITE_DR0, SVM_EXIT_WRITE_DR15 + 1);
     5801
     5802    SET_ONLY_XBM_IF_EITHER_EN(INSTR_RDMSR,              SVM_EXIT_MSR); /** @todo modify bitmap to intercept almost everything? (Clearing MSR_PROT just means no intercepts.) */
     5803    SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR,              SVM_EXIT_MSR);
     5804    SET_ONLY_XBM_IF_EITHER_EN(INSTR_WRMSR,              SVM_EXIT_MSR); /** @todo ditto */
     5805    SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR,              SVM_EXIT_MSR);
     5806    SET_INCP_XBM_IF_EITHER_EN(INSTR_MWAIT,              SVM_EXIT_MWAIT,         SVM_CTRL_INTERCEPT_MWAIT);
     5807    SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT,              SVM_EXIT_MWAIT);
     5808    if (ASMBitTest(pDbgState->bmExitsToCheck, SVM_EXIT_MWAIT))
     5809        ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_MWAIT_ARMED);
     5810    SET_INCP_XBM_IF_EITHER_EN(INSTR_MONITOR,            SVM_EXIT_MONITOR,       SVM_CTRL_INTERCEPT_MONITOR);
     5811    SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR,            SVM_EXIT_MONITOR);
     5812    SET_INCP_XBM_IF_EITHER_EN(INSTR_PAUSE,              SVM_EXIT_PAUSE,         SVM_CTRL_INTERCEPT_PAUSE);
     5813    SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE,              SVM_EXIT_PAUSE);
     5814    SET_INCP_XBM_IF_EITHER_EN(INSTR_SIDT,               SVM_EXIT_IDTR_READ,     SVM_CTRL_INTERCEPT_IDTR_READS);
     5815    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT,               SVM_EXIT_IDTR_READ);
     5816    SET_INCP_XBM_IF_EITHER_EN(INSTR_LIDT,               SVM_EXIT_IDTR_WRITE,    SVM_CTRL_INTERCEPT_IDTR_WRITES);
     5817    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT,               SVM_EXIT_IDTR_WRITE);
     5818    SET_INCP_XBM_IF_EITHER_EN(INSTR_SGDT,               SVM_EXIT_GDTR_READ,     SVM_CTRL_INTERCEPT_GDTR_READS);
     5819    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT,               SVM_EXIT_GDTR_READ);
     5820    SET_INCP_XBM_IF_EITHER_EN(INSTR_LGDT,               SVM_EXIT_GDTR_WRITE,    SVM_CTRL_INTERCEPT_GDTR_WRITES);
     5821    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT,               SVM_EXIT_GDTR_WRITE);
     5822    SET_INCP_XBM_IF_EITHER_EN(INSTR_SLDT,               SVM_EXIT_LDTR_READ,     SVM_CTRL_INTERCEPT_LDTR_READS);
     5823    SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT,               SVM_EXIT_LDTR_READ);
     5824    SET_INCP_XBM_IF_EITHER_EN(INSTR_LLDT,               SVM_EXIT_LDTR_WRITE,    SVM_CTRL_INTERCEPT_LDTR_WRITES);
     5825    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT,               SVM_EXIT_LDTR_WRITE);
     5826    SET_INCP_XBM_IF_EITHER_EN(INSTR_STR,                SVM_EXIT_TR_READ,       SVM_CTRL_INTERCEPT_TR_READS);
     5827    SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR,                SVM_EXIT_TR_READ);
     5828    SET_INCP_XBM_IF_EITHER_EN(INSTR_LTR,                SVM_EXIT_TR_WRITE,      SVM_CTRL_INTERCEPT_TR_WRITES);
     5829    SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR,                SVM_EXIT_TR_WRITE);
     5830    SET_INCP_XBM_IF_EITHER_EN(INSTR_WBINVD,             SVM_EXIT_WBINVD,        SVM_CTRL_INTERCEPT_WBINVD);
     5831    SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD,             SVM_EXIT_WBINVD);
     5832    SET_INCP_XBM_IF_EITHER_EN(INSTR_XSETBV,             SVM_EXIT_XSETBV,        SVM_CTRL_INTERCEPT_XSETBV);
     5833    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV,             SVM_EXIT_XSETBV);
     5834
     5835    if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_TRIPLE_FAULT))
     5836        ASMBitSet(pDbgState->bmExitsToCheck, SVM_EXIT_SHUTDOWN);
     5837
     5838#undef IS_EITHER_ENABLED
     5839#undef SET_ONLY_XBM_IF_EITHER_EN
     5840#undef SET_INCP_XBM_IF_EITHER_EN
     5841
     5842    /*
     5843     * Sanitize the control stuff.
     5844     */
     5845    /** @todo filter out unsupported stuff? */
     5846    if (   pVCpu->hmr0.s.fDebugWantRdTscExit
     5847        != RT_BOOL(pDbgState->bmInterceptExtra & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
     5848    {
     5849        pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
     5850        /// @todo pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
     5851        RT_NOREF(pSvmTransient);
     5852    }
     5853
     5854    Log6(("HM: debug state: bmInterceptExtra=%#RX64 bmXcptExtra=%#RX32%s%s%s%s bmExitsToCheck=%08RX32'%08RX32'%08RX32'%08RX32'%08RX32\n",
     5855          pDbgState->bmInterceptExtra, pDbgState->bmXcptExtra,
     5856          pDbgState->bmInterceptRdCRxExtra ? " rd-cr" : "",
     5857          pDbgState->bmInterceptWrCRxExtra ? " wr-cr" : "",
     5858          pDbgState->bmInterceptRdDRxExtra ? " rd-dr" : "",
     5859          pDbgState->bmInterceptWrDRxExtra ? " wr-dr" : "",
     5860          pDbgState->bmExitsToCheck[0],
     5861          pDbgState->bmExitsToCheck[1],
     5862          pDbgState->bmExitsToCheck[2],
     5863          pDbgState->bmExitsToCheck[3],
     5864          pDbgState->bmExitsToCheck[4]));
     5865}
     5866
     5867
     5868/**
     5869 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
     5870 * appropriate.
     5871 *
     5872 * The caller has checked the VM-exit against the SVMRUNDBGSTATE::bmExitsToCheck
     5873 * bitmap.
     5874 *
     5875 * @returns Strict VBox status code (i.e. informational status codes too).
     5876 * @param   pVCpu           The cross context virtual CPU structure.
     5877 * @param   pSvmTransient   The SVM-transient structure.
     5878 * @param   uExitCode       The VM-exit code.
     5879 *
     5880 * @remarks The name of this function is displayed by dtrace, so keep it short
     5881 *          and to the point. No longer than 33 chars long, please.
     5882 */
     5883static VBOXSTRICTRC hmR0SvmHandleExitDtraceEvents(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, uint64_t uExitCode)
     5884{
     5885    /*
     5886     * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
     5887     * same time check whether any corresponding Dtrace event is enabled (fDtrace).
     5888     *
     5889     * Note! This is the reverse operation of what hmR0SvmPreRunGuestDebugStateUpdate
     5890     *       does.  Must add/change/remove both places.  Same ordering, please.
     5891     *
     5892     *       Added/removed events must also be reflected in the next section
     5893     *       where we dispatch dtrace events.
     5894     */
     5895    bool            fDtrace1   = false;
     5896    bool            fDtrace2   = false;
     5897    DBGFEVENTTYPE   enmEvent1  = DBGFEVENT_END;
     5898    DBGFEVENTTYPE   enmEvent2  = DBGFEVENT_END;
     5899    uint64_t        uEventArg  = 0;
     5900#define SET_XCPT(a_XcptName) \
     5901        do { \
     5902            enmEvent2 = RT_CONCAT(DBGFEVENT_XCPT_, a_XcptName); \
     5903            fDtrace2  = RT_CONCAT3(VBOXVMM_XCPT_, a_XcptName, _ENABLED)(); \
     5904        } while (0)
     5905#define SET_EXIT(a_EventSubName) \
     5906        do { \
     5907            enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_,  a_EventSubName); \
     5908            fDtrace2  = RT_CONCAT3(VBOXVMM_EXIT_,   a_EventSubName, _ENABLED)(); \
     5909        } while (0)
     5910#define SET_BOTH(a_EventSubName) \
     5911        do { \
     5912            enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
     5913            enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_,  a_EventSubName); \
     5914            fDtrace1  = RT_CONCAT3(VBOXVMM_INSTR_,  a_EventSubName, _ENABLED)(); \
     5915            fDtrace2  = RT_CONCAT3(VBOXVMM_EXIT_,   a_EventSubName, _ENABLED)(); \
     5916        } while (0)
     5917    switch (uExitCode)
     5918    {
     5919        case SVM_EXIT_SWINT:
     5920            enmEvent2 = DBGFEVENT_INTERRUPT_SOFTWARE;
     5921            fDtrace2  = VBOXVMM_INT_SOFTWARE_ENABLED();
     5922            uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1;
     5923            break;
     5924
     5925        case SVM_EXIT_XCPT_DE:      SET_XCPT(DE); break;
     5926        case SVM_EXIT_XCPT_DB:      SET_XCPT(DB); break;
     5927        case SVM_EXIT_XCPT_BP:      SET_XCPT(BP); break;
     5928        case SVM_EXIT_XCPT_OF:      SET_XCPT(OF); break;
     5929        case SVM_EXIT_XCPT_BR:      SET_XCPT(BR); break;
     5930        case SVM_EXIT_XCPT_UD:      SET_XCPT(UD); break;
     5931        case SVM_EXIT_XCPT_NM:      SET_XCPT(NM); break;
     5932        case SVM_EXIT_XCPT_DF:      SET_XCPT(DF); break;
     5933        case SVM_EXIT_XCPT_TS:      SET_XCPT(TS); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
     5934        case SVM_EXIT_XCPT_NP:      SET_XCPT(NP); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
     5935        case SVM_EXIT_XCPT_SS:      SET_XCPT(SS); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
     5936        case SVM_EXIT_XCPT_GP:      SET_XCPT(GP); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
     5937        case SVM_EXIT_XCPT_PF:      SET_XCPT(PF); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
     5938        case SVM_EXIT_XCPT_MF:      SET_XCPT(MF); break;
     5939        case SVM_EXIT_XCPT_AC:      SET_XCPT(AC); break;
     5940        case SVM_EXIT_XCPT_XF:      SET_XCPT(XF); break;
     5941        case SVM_EXIT_XCPT_VE:      SET_XCPT(VE); break;
     5942        case SVM_EXIT_XCPT_SX:      SET_XCPT(SX); uEventArg = pSvmTransient->pVmcb->ctrl.u64ExitInfo1; break;
     5943
     5944        case SVM_EXIT_XCPT_2:       enmEvent2 = DBGFEVENT_XCPT_02;
     5945        case SVM_EXIT_XCPT_9:       enmEvent2 = DBGFEVENT_XCPT_09;
     5946        case SVM_EXIT_XCPT_15:      enmEvent2 = DBGFEVENT_XCPT_0f;
     5947        case SVM_EXIT_XCPT_18:      enmEvent2 = DBGFEVENT_XCPT_MC;
     5948        case SVM_EXIT_XCPT_21:      enmEvent2 = DBGFEVENT_XCPT_15;
     5949        case SVM_EXIT_XCPT_22:      enmEvent2 = DBGFEVENT_XCPT_16;
     5950        case SVM_EXIT_XCPT_23:      enmEvent2 = DBGFEVENT_XCPT_17;
     5951        case SVM_EXIT_XCPT_24:      enmEvent2 = DBGFEVENT_XCPT_18;
     5952        case SVM_EXIT_XCPT_25:      enmEvent2 = DBGFEVENT_XCPT_19;
     5953        case SVM_EXIT_XCPT_26:      enmEvent2 = DBGFEVENT_XCPT_1a;
     5954        case SVM_EXIT_XCPT_27:      enmEvent2 = DBGFEVENT_XCPT_1b;
     5955        case SVM_EXIT_XCPT_28:      enmEvent2 = DBGFEVENT_XCPT_1c;
     5956        case SVM_EXIT_XCPT_29:      enmEvent2 = DBGFEVENT_XCPT_1d;
     5957        case SVM_EXIT_XCPT_31:      enmEvent2 = DBGFEVENT_XCPT_1f;
     5958
     5959        case SVM_EXIT_TASK_SWITCH:  SET_EXIT(TASK_SWITCH); break;
     5960        case SVM_EXIT_VMMCALL:      SET_BOTH(VMM_CALL); break;
     5961        case SVM_EXIT_VMRUN:        SET_BOTH(SVM_VMRUN); break;
     5962        case SVM_EXIT_VMLOAD:       SET_BOTH(SVM_VMLOAD); break;
     5963        case SVM_EXIT_VMSAVE:       SET_BOTH(SVM_VMSAVE); break;
     5964        case SVM_EXIT_STGI:         SET_BOTH(SVM_STGI); break;
     5965        case SVM_EXIT_CLGI:         SET_BOTH(SVM_CLGI); break;
     5966        case SVM_EXIT_CPUID:        SET_BOTH(CPUID); break;
     5967        case SVM_EXIT_HLT:          SET_BOTH(HALT); break;
     5968        case SVM_EXIT_INVD:         SET_BOTH(INVD); break;
     5969        case SVM_EXIT_INVLPG:       SET_BOTH(INVLPG); break;
     5970        case SVM_EXIT_RDPMC:        SET_BOTH(RDPMC); break;
     5971        case SVM_EXIT_RDTSC:        SET_BOTH(RDTSC); break;
     5972        case SVM_EXIT_RDTSCP:       SET_BOTH(RDTSCP); break;
     5973        case SVM_EXIT_RSM:          SET_BOTH(RSM); break;
     5974
     5975        case SVM_EXIT_READ_CR0:   case SVM_EXIT_READ_CR1:   case SVM_EXIT_READ_CR2:   case SVM_EXIT_READ_CR3:
     5976        case SVM_EXIT_READ_CR4:   case SVM_EXIT_READ_CR5:   case SVM_EXIT_READ_CR6:   case SVM_EXIT_READ_CR7:
     5977        case SVM_EXIT_READ_CR8:   case SVM_EXIT_READ_CR9:   case SVM_EXIT_READ_CR10:  case SVM_EXIT_READ_CR11:
     5978        case SVM_EXIT_READ_CR12:  case SVM_EXIT_READ_CR13:  case SVM_EXIT_READ_CR14:  case SVM_EXIT_READ_CR15:
     5979            SET_BOTH(CRX_READ);
     5980            uEventArg = uExitCode - SVM_EXIT_READ_CR0;
     5981            break;
     5982        case SVM_EXIT_WRITE_CR0:  case SVM_EXIT_WRITE_CR1:  case SVM_EXIT_WRITE_CR2:  case SVM_EXIT_WRITE_CR3:
     5983        case SVM_EXIT_WRITE_CR4:  case SVM_EXIT_WRITE_CR5:  case SVM_EXIT_WRITE_CR6:  case SVM_EXIT_WRITE_CR7:
     5984        case SVM_EXIT_WRITE_CR8:  case SVM_EXIT_WRITE_CR9:  case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
     5985        case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
     5986        case SVM_EXIT_CR0_SEL_WRITE:
     5987            SET_BOTH(CRX_WRITE);
     5988            uEventArg = uExitCode - SVM_EXIT_WRITE_CR0;
     5989            break;
     5990        case SVM_EXIT_READ_DR0:   case SVM_EXIT_READ_DR1:   case SVM_EXIT_READ_DR2:   case SVM_EXIT_READ_DR3:
     5991        case SVM_EXIT_READ_DR4:   case SVM_EXIT_READ_DR5:   case SVM_EXIT_READ_DR6:   case SVM_EXIT_READ_DR7:
     5992        case SVM_EXIT_READ_DR8:   case SVM_EXIT_READ_DR9:   case SVM_EXIT_READ_DR10:  case SVM_EXIT_READ_DR11:
     5993        case SVM_EXIT_READ_DR12:  case SVM_EXIT_READ_DR13:  case SVM_EXIT_READ_DR14:  case SVM_EXIT_READ_DR15:
     5994            SET_BOTH(DRX_READ);
     5995            uEventArg = uExitCode - SVM_EXIT_READ_DR0;
     5996            break;
     5997        case SVM_EXIT_WRITE_DR0:  case SVM_EXIT_WRITE_DR1:  case SVM_EXIT_WRITE_DR2:  case SVM_EXIT_WRITE_DR3:
     5998        case SVM_EXIT_WRITE_DR4:  case SVM_EXIT_WRITE_DR5:  case SVM_EXIT_WRITE_DR6:  case SVM_EXIT_WRITE_DR7:
     5999        case SVM_EXIT_WRITE_DR8:  case SVM_EXIT_WRITE_DR9:  case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
     6000        case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
     6001            SET_BOTH(DRX_WRITE);
     6002            uEventArg = uExitCode - SVM_EXIT_WRITE_DR0;
     6003            break;
     6004        case SVM_EXIT_MSR:
     6005            if (pSvmTransient->pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE)
     6006                SET_BOTH(WRMSR);
     6007            else
     6008                SET_BOTH(RDMSR);
     6009            break;
     6010        case SVM_EXIT_MWAIT_ARMED:
     6011        case SVM_EXIT_MWAIT:          SET_BOTH(MWAIT); break;
     6012        case SVM_EXIT_MONITOR:        SET_BOTH(MONITOR); break;
     6013        case SVM_EXIT_PAUSE:          SET_BOTH(PAUSE); break;
     6014        case SVM_EXIT_IDTR_READ:      SET_BOTH(SIDT); break;
     6015        case SVM_EXIT_IDTR_WRITE:     SET_BOTH(LIDT); break;
     6016        case SVM_EXIT_GDTR_READ:      SET_BOTH(SGDT); break;
     6017        case SVM_EXIT_GDTR_WRITE:     SET_BOTH(LGDT); break;
     6018        case SVM_EXIT_LDTR_READ:      SET_BOTH(SLDT); break;
     6019        case SVM_EXIT_LDTR_WRITE:     SET_BOTH(LLDT); break;
     6020        case SVM_EXIT_TR_READ:        SET_BOTH(STR); break;
     6021        case SVM_EXIT_TR_WRITE:       SET_BOTH(LTR); break;
     6022        case SVM_EXIT_WBINVD:         SET_BOTH(WBINVD); break;
     6023        case SVM_EXIT_XSETBV:         SET_BOTH(XSETBV); break;
     6024
     6025        case SVM_EXIT_SHUTDOWN:
     6026            enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
     6027            //fDtrace1  = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
     6028            break;
     6029
     6030        default:
     6031            AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitCode));
     6032            break;
     6033    }
     6034#undef SET_BOTH
     6035#undef SET_EXIT
     6036
     6037    /*
     6038     * Dtrace tracepoints go first.   We do them here at once so we don't
     6039     * have to copy the guest state saving and stuff a few dozen times.
     6040     * Down side is that we've got to repeat the switch, though this time
     6041     * we use enmEvent since the probes are a subset of what DBGF does.
     6042     */
     6043    if (fDtrace1 || fDtrace2)
     6044    {
     6045        hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
     6046        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     6047        switch (enmEvent1)
     6048        {
     6049            /** @todo consider which extra parameters would be helpful for each probe.   */
     6050            case DBGFEVENT_END: break;
     6051            case DBGFEVENT_INTERRUPT_SOFTWARE:      VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     6052            case DBGFEVENT_XCPT_DE:                 VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
     6053            case DBGFEVENT_XCPT_DB:                 VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
     6054            case DBGFEVENT_XCPT_BP:                 VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
     6055            case DBGFEVENT_XCPT_OF:                 VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
     6056            case DBGFEVENT_XCPT_BR:                 VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
     6057            case DBGFEVENT_XCPT_UD:                 VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
     6058            case DBGFEVENT_XCPT_NM:                 VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
     6059            case DBGFEVENT_XCPT_DF:                 VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
     6060            case DBGFEVENT_XCPT_TS:                 VBOXVMM_XCPT_TS(pVCpu, pCtx, (uint32_t)uEventArg); break;
     6061            case DBGFEVENT_XCPT_NP:                 VBOXVMM_XCPT_NP(pVCpu, pCtx, (uint32_t)uEventArg); break;
     6062            case DBGFEVENT_XCPT_SS:                 VBOXVMM_XCPT_SS(pVCpu, pCtx, (uint32_t)uEventArg); break;
     6063            case DBGFEVENT_XCPT_GP:                 VBOXVMM_XCPT_GP(pVCpu, pCtx, (uint32_t)uEventArg); break;
     6064            case DBGFEVENT_XCPT_PF:                 VBOXVMM_XCPT_PF(pVCpu, pCtx, (uint32_t)uEventArg, pCtx->cr2); break;
     6065            case DBGFEVENT_XCPT_MF:                 VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
     6066            case DBGFEVENT_XCPT_AC:                 VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
     6067            case DBGFEVENT_XCPT_XF:                 VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
     6068            case DBGFEVENT_XCPT_VE:                 VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
     6069            case DBGFEVENT_XCPT_SX:                 VBOXVMM_XCPT_SX(pVCpu, pCtx, (uint32_t)uEventArg); break;
     6070            case DBGFEVENT_INSTR_CPUID:             VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
     6071            case DBGFEVENT_INSTR_HALT:              VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
     6072            case DBGFEVENT_INSTR_INVD:              VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
     6073            case DBGFEVENT_INSTR_INVLPG:            VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
     6074            case DBGFEVENT_INSTR_RDPMC:             VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
     6075            case DBGFEVENT_INSTR_RDTSC:             VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
     6076            case DBGFEVENT_INSTR_RSM:               VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
     6077            case DBGFEVENT_INSTR_CRX_READ:          VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
     6078            case DBGFEVENT_INSTR_CRX_WRITE:         VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     6079            case DBGFEVENT_INSTR_DRX_READ:          VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
     6080            case DBGFEVENT_INSTR_DRX_WRITE:         VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     6081            case DBGFEVENT_INSTR_RDMSR:             VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
     6082            case DBGFEVENT_INSTR_WRMSR:             VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
     6083                                                                        RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
     6084            case DBGFEVENT_INSTR_MWAIT:             VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
     6085            case DBGFEVENT_INSTR_MONITOR:           VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
     6086            case DBGFEVENT_INSTR_PAUSE:             VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
     6087            case DBGFEVENT_INSTR_SGDT:              VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
     6088            case DBGFEVENT_INSTR_SIDT:              VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
     6089            case DBGFEVENT_INSTR_LGDT:              VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
     6090            case DBGFEVENT_INSTR_LIDT:              VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
     6091            case DBGFEVENT_INSTR_SLDT:              VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
     6092            case DBGFEVENT_INSTR_STR:               VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
     6093            case DBGFEVENT_INSTR_LLDT:              VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
     6094            case DBGFEVENT_INSTR_LTR:               VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
     6095            case DBGFEVENT_INSTR_RDTSCP:            VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
     6096            case DBGFEVENT_INSTR_WBINVD:            VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
     6097            case DBGFEVENT_INSTR_XSETBV:            VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
     6098            case DBGFEVENT_INSTR_VMM_CALL:          VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
     6099            case DBGFEVENT_INSTR_SVM_VMRUN:         VBOXVMM_INSTR_SVM_VMRUN(pVCpu, pCtx); break;
     6100            case DBGFEVENT_INSTR_SVM_VMLOAD:        VBOXVMM_INSTR_SVM_VMLOAD(pVCpu, pCtx); break;
     6101            case DBGFEVENT_INSTR_SVM_VMSAVE:        VBOXVMM_INSTR_SVM_VMSAVE(pVCpu, pCtx); break;
     6102            case DBGFEVENT_INSTR_SVM_STGI:          VBOXVMM_INSTR_SVM_STGI(pVCpu, pCtx); break;
     6103            case DBGFEVENT_INSTR_SVM_CLGI:          VBOXVMM_INSTR_SVM_CLGI(pVCpu, pCtx); break;
     6104            default: AssertMsgFailed(("enmEvent1=%d uExitCode=%d\n", enmEvent1, uExitCode)); break;
     6105        }
     6106        switch (enmEvent2)
     6107        {
     6108            /** @todo consider which extra parameters would be helpful for each probe. */
     6109            case DBGFEVENT_END: break;
     6110            case DBGFEVENT_EXIT_TASK_SWITCH:        VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
     6111            case DBGFEVENT_EXIT_CPUID:              VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
     6112            case DBGFEVENT_EXIT_HALT:               VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
     6113            case DBGFEVENT_EXIT_INVD:               VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
     6114            case DBGFEVENT_EXIT_INVLPG:             VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
     6115            case DBGFEVENT_EXIT_RDPMC:              VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
     6116            case DBGFEVENT_EXIT_RDTSC:              VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
     6117            case DBGFEVENT_EXIT_RSM:                VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
     6118            case DBGFEVENT_EXIT_CRX_READ:           VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
     6119            case DBGFEVENT_EXIT_CRX_WRITE:          VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     6120            case DBGFEVENT_EXIT_DRX_READ:           VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
     6121            case DBGFEVENT_EXIT_DRX_WRITE:          VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     6122            case DBGFEVENT_EXIT_RDMSR:              VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
     6123            case DBGFEVENT_EXIT_WRMSR:              VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
     6124                                                                       RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
     6125            case DBGFEVENT_EXIT_MWAIT:              VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
     6126            case DBGFEVENT_EXIT_MONITOR:            VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
     6127            case DBGFEVENT_EXIT_PAUSE:              VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
     6128            case DBGFEVENT_EXIT_SGDT:               VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
     6129            case DBGFEVENT_EXIT_SIDT:               VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
     6130            case DBGFEVENT_EXIT_LGDT:               VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
     6131            case DBGFEVENT_EXIT_LIDT:               VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
     6132            case DBGFEVENT_EXIT_SLDT:               VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
     6133            case DBGFEVENT_EXIT_STR:                VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
     6134            case DBGFEVENT_EXIT_LLDT:               VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
     6135            case DBGFEVENT_EXIT_LTR:                VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
     6136            case DBGFEVENT_EXIT_RDTSCP:             VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
     6137            case DBGFEVENT_EXIT_WBINVD:             VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
     6138            case DBGFEVENT_EXIT_XSETBV:             VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
     6139            case DBGFEVENT_EXIT_VMM_CALL:           VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
     6140            case DBGFEVENT_EXIT_SVM_VMRUN:          VBOXVMM_EXIT_SVM_VMRUN(pVCpu, pCtx); break;
     6141            case DBGFEVENT_EXIT_SVM_VMLOAD:         VBOXVMM_EXIT_SVM_VMLOAD(pVCpu, pCtx); break;
     6142            case DBGFEVENT_EXIT_SVM_VMSAVE:         VBOXVMM_EXIT_SVM_VMSAVE(pVCpu, pCtx); break;
     6143            case DBGFEVENT_EXIT_SVM_STGI:           VBOXVMM_EXIT_SVM_STGI(pVCpu, pCtx); break;
     6144            case DBGFEVENT_EXIT_SVM_CLGI:           VBOXVMM_EXIT_SVM_CLGI(pVCpu, pCtx); break;
     6145            default: AssertMsgFailed(("enmEvent2=%d uExitCode=%d\n", enmEvent2, uExitCode)); break;
     6146        }
     6147    }
     6148
     6149    /*
     6150     * Fire of the DBGF event, if enabled (our check here is just a quick one,
     6151     * the DBGF call will do a full check).
     6152     *
     6153     * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
     6154     * Note! If we have to events, we prioritize the first, i.e. the instruction
     6155     *       one, in order to avoid event nesting.
     6156     */
     6157    PVMCC        pVM = pVCpu->CTX_SUFF(pVM);
     6158    VBOXSTRICTRC rcStrict;
     6159    if (   enmEvent1 != DBGFEVENT_END
     6160        && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
     6161    {
     6162        hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     6163        rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
     6164    }
     6165    else if (   enmEvent2 != DBGFEVENT_END
     6166             && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
     6167    {
     6168        hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     6169        rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
     6170    }
     6171    else
     6172        rcStrict = VINF_SUCCESS;
     6173    return rcStrict;
     6174}
     6175
     6176
     6177/**
     6178 * Handles a guest \#VMEXIT (for all EXITCODE values except SVM_EXIT_INVALID),
     6179 * debug variant.
     6180 *
     6181 * @returns Strict VBox status code (informational status codes included).
     6182 * @param   pVCpu           The cross context virtual CPU structure.
     6183 * @param   pSvmTransient   Pointer to the SVM transient structure.
     6184 */
     6185static VBOXSTRICTRC hmR0SvmDebugHandleExit(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient, PSVMRUNDBGSTATE pDbgState)
     6186{
     6187    Assert(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID);
     6188    Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
     6189
     6190    /*
     6191     * Expensive (saves context) generic dtrace VM-exit probe.
     6192     */
     6193    uint64_t const uExitCode = pSvmTransient->u64ExitCode;
     6194    if (!VBOXVMM_R0_HMSVM_VMEXIT_ENABLED())
     6195    { /* more likely */ }
     6196    else
     6197    {
     6198        hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
     6199        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, uExitCode, pSvmTransient->pVmcb);
     6200    }
     6201
     6202    /*
     6203     * Check for single stepping event if we're stepping.
     6204     */
     6205    if (pVCpu->hm.s.fSingleInstruction)
     6206    {
     6207        switch (uExitCode)
     6208        {
     6209            /* Various events: */
     6210            case SVM_EXIT_XCPT_0:  case SVM_EXIT_XCPT_1:  case SVM_EXIT_XCPT_2:  case SVM_EXIT_XCPT_3:
     6211            case SVM_EXIT_XCPT_4:  case SVM_EXIT_XCPT_5:  case SVM_EXIT_XCPT_6:  case SVM_EXIT_XCPT_7:
     6212            case SVM_EXIT_XCPT_8:  case SVM_EXIT_XCPT_9:  case SVM_EXIT_XCPT_10: case SVM_EXIT_XCPT_11:
     6213            case SVM_EXIT_XCPT_12: case SVM_EXIT_XCPT_13: case SVM_EXIT_XCPT_14: case SVM_EXIT_XCPT_15:
     6214            case SVM_EXIT_XCPT_16: case SVM_EXIT_XCPT_17: case SVM_EXIT_XCPT_18: case SVM_EXIT_XCPT_19:
     6215            case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
     6216            case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
     6217            case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
     6218            case SVM_EXIT_INTR:
     6219            case SVM_EXIT_NMI:
     6220            case SVM_EXIT_VINTR:
     6221            case SVM_EXIT_NPF:
     6222            case SVM_EXIT_AVIC_NOACCEL:
     6223
     6224            /* Instruction specific VM-exits: */
     6225            case SVM_EXIT_READ_CR0:   case SVM_EXIT_READ_CR1:   case SVM_EXIT_READ_CR2:   case SVM_EXIT_READ_CR3:
     6226            case SVM_EXIT_READ_CR4:   case SVM_EXIT_READ_CR5:   case SVM_EXIT_READ_CR6:   case SVM_EXIT_READ_CR7:
     6227            case SVM_EXIT_READ_CR8:   case SVM_EXIT_READ_CR9:   case SVM_EXIT_READ_CR10:  case SVM_EXIT_READ_CR11:
     6228            case SVM_EXIT_READ_CR12:  case SVM_EXIT_READ_CR13:  case SVM_EXIT_READ_CR14:  case SVM_EXIT_READ_CR15:
     6229            case SVM_EXIT_WRITE_CR0:  case SVM_EXIT_WRITE_CR1:  case SVM_EXIT_WRITE_CR2:  case SVM_EXIT_WRITE_CR3:
     6230            case SVM_EXIT_WRITE_CR4:  case SVM_EXIT_WRITE_CR5:  case SVM_EXIT_WRITE_CR6:  case SVM_EXIT_WRITE_CR7:
     6231            case SVM_EXIT_WRITE_CR8:  case SVM_EXIT_WRITE_CR9:  case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:
     6232            case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:
     6233            case SVM_EXIT_READ_DR0:   case SVM_EXIT_READ_DR1:   case SVM_EXIT_READ_DR2:   case SVM_EXIT_READ_DR3:
     6234            case SVM_EXIT_READ_DR4:   case SVM_EXIT_READ_DR5:   case SVM_EXIT_READ_DR6:   case SVM_EXIT_READ_DR7:
     6235            case SVM_EXIT_READ_DR8:   case SVM_EXIT_READ_DR9:   case SVM_EXIT_READ_DR10:  case SVM_EXIT_READ_DR11:
     6236            case SVM_EXIT_READ_DR12:  case SVM_EXIT_READ_DR13:  case SVM_EXIT_READ_DR14:  case SVM_EXIT_READ_DR15:
     6237            case SVM_EXIT_WRITE_DR0:  case SVM_EXIT_WRITE_DR1:  case SVM_EXIT_WRITE_DR2:  case SVM_EXIT_WRITE_DR3:
     6238            case SVM_EXIT_WRITE_DR4:  case SVM_EXIT_WRITE_DR5:  case SVM_EXIT_WRITE_DR6:  case SVM_EXIT_WRITE_DR7:
     6239            case SVM_EXIT_WRITE_DR8:  case SVM_EXIT_WRITE_DR9:  case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:
     6240            case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:
     6241            case SVM_EXIT_CR0_SEL_WRITE:
     6242            case SVM_EXIT_IDTR_READ:
     6243            case SVM_EXIT_GDTR_READ:
     6244            case SVM_EXIT_LDTR_READ:
     6245            case SVM_EXIT_TR_READ:
     6246            case SVM_EXIT_IDTR_WRITE:
     6247            case SVM_EXIT_GDTR_WRITE:
     6248            case SVM_EXIT_LDTR_WRITE:
     6249            case SVM_EXIT_TR_WRITE:
     6250            case SVM_EXIT_RDTSC:
     6251            case SVM_EXIT_RDPMC:
     6252            case SVM_EXIT_PUSHF:
     6253            case SVM_EXIT_POPF:
     6254            case SVM_EXIT_CPUID:
     6255            case SVM_EXIT_RSM:
     6256            case SVM_EXIT_IRET:
     6257            case SVM_EXIT_SWINT:
     6258            case SVM_EXIT_INVD:
     6259            case SVM_EXIT_PAUSE:
     6260            case SVM_EXIT_HLT:
     6261            case SVM_EXIT_INVLPG:
     6262            case SVM_EXIT_INVLPGA:
     6263            case SVM_EXIT_IOIO:
     6264            case SVM_EXIT_MSR:
     6265            case SVM_EXIT_TASK_SWITCH:
     6266            case SVM_EXIT_VMRUN:
     6267            case SVM_EXIT_VMMCALL:
     6268            case SVM_EXIT_VMLOAD:
     6269            case SVM_EXIT_VMSAVE:
     6270            case SVM_EXIT_STGI:
     6271            case SVM_EXIT_CLGI:
     6272            case SVM_EXIT_SKINIT:
     6273            case SVM_EXIT_RDTSCP:
     6274            case SVM_EXIT_ICEBP:
     6275            case SVM_EXIT_WBINVD:
     6276            case SVM_EXIT_MONITOR:
     6277            case SVM_EXIT_MWAIT:
     6278            case SVM_EXIT_MWAIT_ARMED:
     6279            case SVM_EXIT_XSETBV:
     6280            case SVM_EXIT_RDPRU:
     6281            case SVM_EXIT_WRITE_EFER_TRAP:
     6282            case SVM_EXIT_WRITE_CR0_TRAP:  case SVM_EXIT_WRITE_CR1_TRAP:  case SVM_EXIT_WRITE_CR2_TRAP:  case SVM_EXIT_WRITE_CR3_TRAP:
     6283            case SVM_EXIT_WRITE_CR4_TRAP:  case SVM_EXIT_WRITE_CR5_TRAP:  case SVM_EXIT_WRITE_CR6_TRAP:  case SVM_EXIT_WRITE_CR7_TRAP:
     6284            case SVM_EXIT_WRITE_CR8_TRAP:  case SVM_EXIT_WRITE_CR9_TRAP:  case SVM_EXIT_WRITE_CR10_TRAP: case SVM_EXIT_WRITE_CR11_TRAP:
     6285            case SVM_EXIT_WRITE_CR12_TRAP: case SVM_EXIT_WRITE_CR13_TRAP: case SVM_EXIT_WRITE_CR14_TRAP: case SVM_EXIT_WRITE_CR15_TRAP:
     6286            case SVM_EXIT_MCOMMIT:
     6287            {
     6288                hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     6289                if (   pVCpu->cpum.GstCtx.rip    != pDbgState->uRipStart
     6290                    || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
     6291                {
     6292                    Log6Func(("VINF_EM_DBG_STEPPED: %04x:%08RX64 (exit %u)\n",
     6293                              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode));
     6294                    return VINF_EM_DBG_STEPPED;
     6295                }
     6296                break;
     6297            }
     6298
     6299            /* Errors and unexpected events: */
     6300            case SVM_EXIT_FERR_FREEZE:
     6301            case SVM_EXIT_SHUTDOWN:
     6302            case SVM_EXIT_AVIC_INCOMPLETE_IPI:
     6303                break;
     6304
     6305            case SVM_EXIT_SMI:
     6306            case SVM_EXIT_INIT:
     6307            default:
     6308                AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitCode));
     6309                break;
     6310        }
     6311    }
     6312
     6313    /*
     6314     * Check for debugger event breakpoints and dtrace probes.
     6315     */
     6316    if (   uExitCode < sizeof(pDbgState->bmExitsToCheck) * 8U
     6317        && ASMBitTest(pDbgState->bmExitsToCheck, uExitCode) )
     6318    {
     6319        VBOXSTRICTRC rcStrict = hmR0SvmHandleExitDtraceEvents(pVCpu, pSvmTransient, uExitCode);
     6320        if (rcStrict != VINF_SUCCESS)
     6321        {
     6322            Log6Func(("%04x:%08RX64 (exit %u) -> %Rrc\n",
     6323                      pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, VBOXSTRICTRC_VAL(rcStrict) ));
     6324            return rcStrict;
     6325        }
     6326    }
     6327
     6328    /*
     6329     * Normal processing.
     6330     */
     6331    return hmR0SvmHandleExit(pVCpu, pSvmTransient);
     6332}
     6333
     6334
     6335/**
     6336 * Runs the guest code using AMD-V in single step mode.
     6337 *
     6338 * @returns Strict VBox status code.
     6339 * @param   pVCpu       The cross context virtual CPU structure.
     6340 * @param   pcLoops     Pointer to the number of executed loops.
     6341 */
     6342static VBOXSTRICTRC hmR0SvmRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops)
     6343{
     6344    uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
     6345    Assert(pcLoops);
     6346    Assert(*pcLoops <= cMaxResumeLoops);
     6347
     6348    SVMTRANSIENT SvmTransient;
     6349    RT_ZERO(SvmTransient);
     6350    SvmTransient.fUpdateTscOffsetting = true;
     6351    SvmTransient.pVmcb = pVCpu->hmr0.s.svm.pVmcb;
     6352
     6353    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     6354
     6355    /* Set HMCPU indicators.  */
     6356    bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
     6357    pVCpu->hm.s.fSingleInstruction     = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
     6358    pVCpu->hmr0.s.fDebugWantRdTscExit  = false;
     6359    pVCpu->hmr0.s.fUsingDebugLoop      = true;
     6360
     6361    /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps.  */
     6362    SVMRUNDBGSTATE DbgState;
     6363    hmR0SvmRunDebugStateInit(pVCpu, &SvmTransient, &DbgState);
     6364    hmR0SvmPreRunGuestDebugStateUpdate(pVCpu, &SvmTransient, &DbgState);
     6365
     6366    /*
     6367     * The loop.
     6368     */
     6369    VBOXSTRICTRC rc = VERR_INTERNAL_ERROR_5;
     6370    for (;;)
     6371    {
     6372        Assert(!HMR0SuspendPending());
     6373        AssertMsg(pVCpu->hmr0.s.idEnteredCpu == RTMpCpuId(),
     6374                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hmr0.s.idEnteredCpu,
     6375                  (unsigned)RTMpCpuId(), *pcLoops));
     6376        bool fStepping = pVCpu->hm.s.fSingleInstruction;
     6377
     6378        /* Set up VM-execution controls the next two can respond to. */
     6379        hmR0SvmPreRunGuestDebugStateApply(&SvmTransient, &DbgState);
     6380
     6381        /* Preparatory work for running nested-guest code, this may force us to return to
     6382           ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
     6383        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     6384        rc = hmR0SvmPreRunGuest(pVCpu, &SvmTransient);
     6385        if (rc != VINF_SUCCESS)
     6386            break;
     6387
     6388        /*
     6389         * No longjmps to ring-3 from this point on!!!
     6390         *
     6391         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional,
     6392         * better than a kernel panic. This also disables flushing of the R0-logger instance.
     6393         */
     6394        hmR0SvmPreRunGuestCommitted(pVCpu, &SvmTransient);
     6395
     6396        /* Override any obnoxious code in the above two calls. */
     6397        hmR0SvmPreRunGuestDebugStateApply(&SvmTransient, &DbgState);
     6398
     6399        /*
     6400         * Finally execute guest code.
     6401         */
     6402        rc = hmR0SvmRunGuest(pVCpu, pVCpu->hmr0.s.svm.HCPhysVmcb);
     6403
     6404        /* Restore any residual host-state and save any bits shared between host and guest
     6405           into the guest-CPU state.  Re-enables interrupts! */
     6406        hmR0SvmPostRunGuest(pVCpu, &SvmTransient, rc);
     6407
     6408        if (RT_LIKELY(   rc == VINF_SUCCESS                               /* Check for VMRUN errors. */
     6409                      && SvmTransient.u64ExitCode != SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
     6410        { /* very likely*/ }
     6411        else
     6412        {
     6413            if (rc == VINF_SUCCESS)
     6414                rc = VERR_SVM_INVALID_GUEST_STATE;
     6415            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
     6416            hmR0SvmReportWorldSwitchError(pVCpu, VBOXSTRICTRC_VAL(rc));
     6417            return rc;
     6418        }
     6419
     6420        /* Handle the #VMEXIT. */
     6421        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
     6422        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
     6423        VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, SvmTransient.u64ExitCode, pVCpu->hmr0.s.svm.pVmcb);
     6424        rc = hmR0SvmDebugHandleExit(pVCpu, &SvmTransient, &DbgState);
     6425        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
     6426        if (rc != VINF_SUCCESS)
     6427            break;
     6428        if (++(*pcLoops) >= cMaxResumeLoops)
     6429        {
     6430            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     6431            rc = VINF_EM_RAW_INTERRUPT;
     6432            break;
     6433        }
     6434
     6435        /*
     6436         * Stepping: Did the RIP change, if so, consider it a single step.
     6437         * Otherwise, make sure one of the TFs gets set.
     6438         */
     6439        if (fStepping)
     6440        {
     6441            hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     6442            if (   pVCpu->cpum.GstCtx.rip    != DbgState.uRipStart
     6443                || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
     6444            {
     6445                Log6Func(("VINF_EM_DBG_STEPPED: %04x:%08RX64 (exit %u)\n",
     6446                          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, SvmTransient.u64ExitCode));
     6447                rc = VINF_EM_DBG_STEPPED;
     6448                break;
     6449            }
     6450            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
     6451        }
     6452
     6453        /*
     6454         * Update when dtrace settings changes (DBGF kicks us, so no need to check).
     6455         * Revert the state changes afterware so we can drop intercepts no longer needed.
     6456         */
     6457        if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
     6458        {
     6459            hmR0SvmPreRunGuestDebugStateUpdate(pVCpu, &SvmTransient, &DbgState);
     6460            hmR0SvmRunDebugStateRevert(&SvmTransient, &DbgState);
     6461        }
     6462    }
     6463
     6464    /*
     6465     * Clear the X86_EFL_TF if necessary.
     6466     */
     6467    if (pVCpu->hmr0.s.fClearTrapFlag)
     6468    {
     6469        pVCpu->hmr0.s.fClearTrapFlag = false;
     6470        pCtx->eflags.Bits.u1TF = 0;
     6471    }
     6472
     6473    /* Restore HMCPU indicators. */
     6474    pVCpu->hmr0.s.fUsingDebugLoop     = false;
     6475    pVCpu->hmr0.s.fDebugWantRdTscExit = false;
     6476    pVCpu->hm.s.fSingleInstruction    = fSavedSingleInstruction;
     6477
     6478    /* Restore all controls applied by hmR0SvmPreRunGuestDebugStateApply above. */
     6479    hmR0SvmRunDebugStateRevert(&SvmTransient, &DbgState);
     6480
     6481    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     6482    return rc;
     6483}
     6484
     6485/** @}  */
     6486
    54336487#undef VMEXIT_CALL_RET
    5434 }
    54356488
    54366489
     
    76998752#endif
    77008753
     8754/**
     8755 * \#VMEXIT handler for software interrupt (INTn). Conditional \#VMEXIT (debug).
     8756 */
     8757HMSVM_EXIT_DECL hmR0SvmExitSwInt(PVMCPUCC pVCpu, PSVMTRANSIENT pSvmTransient)
     8758{
     8759    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pSvmTransient);
     8760    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(pVCpu, pSvmTransient);
     8761
     8762    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
     8763    SVMEVENT  Event;
     8764    Event.u          = 0;
     8765    Event.n.u1Valid  = 1;
     8766    Event.n.u3Type   = SVM_EVENT_SOFTWARE_INT;
     8767    Event.n.u8Vector = pVmcb->ctrl.u64ExitInfo1 & 0xff;
     8768    Log4Func(("uVector=%#x\n", Event.n.u8Vector));
     8769    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
     8770    return VINF_SUCCESS;
     8771}
     8772
     8773
    77018774#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    77028775/**
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette