VirtualBox

Changeset 72983 in vbox


Timestamp: Jul 8, 2018 4:15:47 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 123535
Message: VMM/HM, HMVMX: bugref:9193 Stop passing pCtx around and use pVCpu->cpum.GstCtx instead where possible.
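The pattern is mechanical: instead of threading a PCPUMCTX parameter through every ring-0 call chain, each function derives the guest-CPU context from the cross context virtual CPU structure itself. A minimal before/after sketch of the shape of the change (hmR0ExampleHelper is a hypothetical name, for illustration only):

    /* Before: every caller must pass the guest-CPU context alongside pVCpu. */
    static void hmR0ExampleHelper(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        Log4(("RIP=%#RX64\n", pCtx->rip));
    }

    /* After: the context is fetched from pVCpu itself, shrinking every call site. */
    static void hmR0ExampleHelper(PVMCPU pVCpu)
    {
        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; /* guest context embedded in the VCPU */
        Log4(("RIP=%#RX64\n", pCtx->rip));
    }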

Location: trunk
Files: 7 edited

  • trunk/include/VBox/vmm/hm.h (r72967 → r72983)
@@ -261,5 +261,5 @@
 VMMR3_INT_DECL(int)             HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
 VMMR3_INT_DECL(int)             HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
-VMMR3_INT_DECL(int)             HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+VMMR3_INT_DECL(int)             HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu);
 VMMR3_INT_DECL(bool)            HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx);
 VMMR3_INT_DECL(bool)            HMR3IsVmxPreemptionTimerUsed(PVM pVM);
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r72967 → r72983)
@@ -165,5 +165,5 @@
      *  actions when the host is being suspended to speed up the suspending and
      *  avoid trouble. */
-    volatile bool                   fSuspended;
+    bool volatile                   fSuspended;
 
     /** Whether we've already initialized all CPUs.
     
@@ -1961,7 +1961,6 @@
  *
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the CPU context.
- */
-VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+ */
+VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu)
 {
     /*
     
@@ -1991,4 +1990,5 @@
     char szEFlags[80];
     char *psz = szEFlags;
+    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     uint32_t uEFlags = pCtx->eflags.u32;
     for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r72970 → r72983)
@@ -4059,5 +4059,5 @@
     {
 #ifdef VBOX_STRICT
-        hmR0DumpRegs(pVCpu, &pVCpu->cpum.GstCtx);
+        hmR0DumpRegs(pVCpu);
         PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
         Log4(("ctrl.u32VmcbCleanBits                 %#RX32\n",   pVmcb->ctrl.u32VmcbCleanBits));
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72967 → r72983)
@@ -332,13 +332,10 @@
  * @returns Strict VBox status code (i.e. informational status codes too).
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
- *                          out-of-sync. Make sure to update the required
- *                          fields before using them.
  * @param   pVmxTransient   Pointer to the VMX-transient structure.
  */
 #ifndef HMVMX_USE_FUNCTION_TABLE
-typedef VBOXSTRICTRC                FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
+typedef VBOXSTRICTRC                FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
 #else
-typedef DECLCALLBACK(VBOXSTRICTRC)  FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
+typedef DECLCALLBACK(VBOXSTRICTRC)  FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
 /** Pointer to VM-exit handler. */
 typedef FNVMXEXITHANDLER           *PFNVMXEXITHANDLER;
     
@@ -352,7 +349,4 @@
  * @returns VBox status code, no informational status code returned.
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
- *                          out-of-sync. Make sure to update the required
- *                          fields before using them.
  * @param   pVmxTransient   Pointer to the VMX-transient structure.
  *
     
@@ -362,5 +356,5 @@
  */
 #ifndef HMVMX_USE_FUNCTION_TABLE
-typedef int                         FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
+typedef int                         FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
 #else
 typedef FNVMXEXITHANDLER            FNVMXEXITHANDLERNSRC;
     
@@ -381,5 +375,5 @@
 #endif
 #ifndef HMVMX_USE_FUNCTION_TABLE
-DECLINLINE(VBOXSTRICTRC)  hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
+DECLINLINE(VBOXSTRICTRC)  hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
 # define HMVMX_EXIT_DECL  DECLINLINE(VBOXSTRICTRC)
 # define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
     
@@ -440,12 +434,12 @@
 /** @} */
 
-static int          hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
-static int          hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
-static int          hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
-static int          hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
-static int          hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
-static int          hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
-static int          hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
-static uint32_t     hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx);
+static int          hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
+static int          hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
+static int          hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
+static int          hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
+static int          hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
+static int          hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
+static int          hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
+static uint32_t     hmR0VmxCheckGuestState(PVMCPU pVCpu);
 
 
     
@@ -1535,11 +1529,8 @@
  *
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
@@ -1560,27 +1551,26 @@
          * CPU, see @bugref{8728}.
          */
+        PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
         if (   !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
-            && pMixedCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr
-            && pMixedCtx->msrLSTAR        == pVCpu->hm.s.vmx.u64HostLStarMsr
-            && pMixedCtx->msrSTAR         == pVCpu->hm.s.vmx.u64HostStarMsr
-            && pMixedCtx->msrSFMASK       == pVCpu->hm.s.vmx.u64HostSFMaskMsr)
+            && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr
+            && pCtx->msrLSTAR        == pVCpu->hm.s.vmx.u64HostLStarMsr
+            && pCtx->msrSTAR         == pVCpu->hm.s.vmx.u64HostStarMsr
+            && pCtx->msrSFMASK       == pVCpu->hm.s.vmx.u64HostSFMaskMsr)
         {
 #ifdef VBOX_STRICT
-            Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pMixedCtx->msrKERNELGSBASE);
-            Assert(ASMRdMsr(MSR_K8_LSTAR)          == pMixedCtx->msrLSTAR);
-            Assert(ASMRdMsr(MSR_K6_STAR)           == pMixedCtx->msrSTAR);
-            Assert(ASMRdMsr(MSR_K8_SF_MASK)        == pMixedCtx->msrSFMASK);
+            Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pCtx->msrKERNELGSBASE);
+            Assert(ASMRdMsr(MSR_K8_LSTAR)          == pCtx->msrLSTAR);
+            Assert(ASMRdMsr(MSR_K6_STAR)           == pCtx->msrSTAR);
+            Assert(ASMRdMsr(MSR_K8_SF_MASK)        == pCtx->msrSFMASK);
 #endif
         }
         else
         {
-            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
-            ASMWrMsr(MSR_K8_LSTAR,          pMixedCtx->msrLSTAR);
-            ASMWrMsr(MSR_K6_STAR,           pMixedCtx->msrSTAR);
-            ASMWrMsr(MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK);
-        }
-    }
-#else
-    RT_NOREF(pMixedCtx);
+            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
+            ASMWrMsr(MSR_K8_LSTAR,          pCtx->msrLSTAR);
+            ASMWrMsr(MSR_K6_STAR,           pCtx->msrSTAR);
+            ASMWrMsr(MSR_K8_SF_MASK,        pCtx->msrSFMASK);
+        }
+    }
 #endif
     pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
     
@@ -3175,12 +3165,9 @@
  * @returns true if we need to load guest EFER, false otherwise.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks Requires EFER, CR4.
  * @remarks No-long-jump zone!!!
  */
-static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
+static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu)
 {
 #ifdef HMVMX_ALWAYS_SWAP_EFER
     
@@ -3188,7 +3175,8 @@
 #endif
 
+    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
     /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
-    if (CPUMIsGuestInLongModeEx(pMixedCtx))
+    if (CPUMIsGuestInLongModeEx(pCtx))
         return false;
 #endif
     
@@ -3196,5 +3184,5 @@
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     uint64_t const u64HostEfer  = pVM->hm.s.vmx.u64HostEfer;
-    uint64_t const u64GuestEfer = pMixedCtx->msrEFER;
+    uint64_t const u64GuestEfer = pCtx->msrEFER;
 
     /*
     
@@ -3202,5 +3190,5 @@
      * guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
      */
-    if (   CPUMIsGuestInLongModeEx(pMixedCtx)
+    if (   CPUMIsGuestInLongModeEx(pCtx)
         && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
     {
     
@@ -3213,6 +3201,6 @@
      * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
      */
-    if (   (pMixedCtx->cr4 & X86_CR4_PAE)
-        && (pMixedCtx->cr0 & X86_CR0_PG)
+    if (   (pCtx->cr4 & X86_CR4_PAE)
+        && (pCtx->cr0 & X86_CR0_PG)
         && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
     {
     
@@ -3234,12 +3222,9 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks Requires EFER.
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
+static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS)
     
@@ -3253,5 +3238,5 @@
 
         /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
-        if (CPUMIsGuestInLongModeEx(pMixedCtx))
+        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
         {
             fVal |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
     
@@ -3263,5 +3248,5 @@
         /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
         if (   pVM->hm.s.vmx.fSupportsVmcsEfer
-            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+            && hmR0VmxShouldSwapEferMsr(pVCpu))
         {
             fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
     
@@ -3305,11 +3290,8 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks Requires EFER.
  */
-static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
+static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS)
     
@@ -3346,5 +3328,5 @@
         /* If the newer VMCS fields for managing EFER exists, use it. */
         if (   pVM->hm.s.vmx.fSupportsVmcsEfer
-            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+            && hmR0VmxShouldSwapEferMsr(pVCpu))
         {
             fVal |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
     
@@ -3465,11 +3447,8 @@
  * @returns Guest's interruptibility-state.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu)
 {
     /*
     
@@ -3481,11 +3460,12 @@
         /* If inhibition is active, RIP & RFLAGS should've been accessed
            (i.e. read previously from the VMCS or from ring-3). */
+        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
 #ifdef VBOX_STRICT
-        uint64_t const fExtrn = ASMAtomicUoReadU64(&pMixedCtx->fExtrn);
+        uint64_t const fExtrn = ASMAtomicUoReadU64(&pCtx->fExtrn);
         AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn));
 #endif
-        if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
-        {
-            if (pMixedCtx->eflags.Bits.u1IF)
+        if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
+        {
+            if (pCtx->eflags.Bits.u1IF)
                 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
             else
     
@@ -3581,11 +3561,8 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestRip(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
+static int hmR0VmxExportGuestRip(PVMCPU pVCpu)
 {
     int rc = VINF_SUCCESS;
     
@@ -3594,9 +3571,9 @@
         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
 
-        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
+        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
         AssertRCReturn(rc, rc);
 
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
-        Log4Func(("RIP=%#RX64\n", pMixedCtx->rip));
+        Log4Func(("RIP=%#RX64\n", pVCpu->cpum.GstCtx.rip));
     }
     return rc;
     
@@ -3609,11 +3586,8 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestRsp(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
+static int hmR0VmxExportGuestRsp(PVMCPU pVCpu)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
     
@@ -3621,5 +3595,5 @@
         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
 
-        int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
+        int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pVCpu->cpum.GstCtx.rsp);
         AssertRCReturn(rc, rc);
 
     
@@ -3635,11 +3609,8 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
+static int hmR0VmxExportGuestRflags(PVMCPU pVCpu)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
     
@@ -3649,6 +3620,6 @@
         /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
            Let us assert it as such and use 32-bit VMWRITE. */
-        Assert(!RT_HI_U32(pMixedCtx->rflags.u64));
-        X86EFLAGS fEFlags = pMixedCtx->eflags;
+        Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
+        X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
         Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
         Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
     
@@ -3686,11 +3657,8 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
+static int hmR0VmxExportGuestCR0(PVMCPU pVCpu)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
     
@@ -3698,8 +3666,8 @@
         PVM pVM = pVCpu->CTX_SUFF(pVM);
         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
-        Assert(!RT_HI_U32(pMixedCtx->cr0));
-
-        uint32_t const u32ShadowCr0 = pMixedCtx->cr0;
-        uint32_t       u32GuestCr0  = pMixedCtx->cr0;
+        Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.cr0));
+
+        uint32_t const u32ShadowCr0 = pVCpu->cpum.GstCtx.cr0;
+        uint32_t       u32GuestCr0  = pVCpu->cpum.GstCtx.cr0;
 
         /*
     
@@ -3856,11 +3824,8 @@
  *
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
+static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu)
 {
     int rc  = VINF_SUCCESS;
     
@@ -3904,9 +3869,10 @@
             AssertRCReturn(rc, rc);
 
+            PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
             if (   pVM->hm.s.vmx.fUnrestrictedGuest
-                || CPUMIsGuestPagingEnabledEx(pMixedCtx))
+                || CPUMIsGuestPagingEnabledEx(pCtx))
             {
                 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
-                if (CPUMIsGuestInPAEModeEx(pMixedCtx))
+                if (CPUMIsGuestInPAEModeEx(pCtx))
                 {
                     rc  = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
     
@@ -3924,5 +3890,5 @@
                  * the guest when it's not using paging.
                  */
-                GCPhysGuestCR3 = pMixedCtx->cr3;
+                GCPhysGuestCR3 = pCtx->cr3;
             }
             else
     
@@ -3975,9 +3941,10 @@
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
     {
+        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
-        Assert(!RT_HI_U32(pMixedCtx->cr4));
-
-        uint32_t       u32GuestCr4  = pMixedCtx->cr4;
-        uint32_t const u32ShadowCr4 = pMixedCtx->cr4;
+        Assert(!RT_HI_U32(pCtx->cr4));
+
+        uint32_t       u32GuestCr4  = pCtx->cr4;
+        uint32_t const u32ShadowCr4 = pCtx->cr4;
 
         /*
     
@@ -3999,5 +3966,5 @@
         if (pVM->hm.s.fNestedPaging)
         {
-            if (   !CPUMIsGuestPagingEnabledEx(pMixedCtx)
+            if (   !CPUMIsGuestPagingEnabledEx(pCtx)
                 && !pVM->hm.s.vmx.fUnrestrictedGuest)
             {
     
@@ -4071,5 +4038,5 @@
 
         /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
-        pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
+        pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
 
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
     
@@ -4090,11 +4057,8 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
@@ -4105,6 +4069,6 @@
     {
         /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
-        Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);  /* Bits 63:32, 15, 14, 12, 11 are reserved. */
-        Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);        /* Bit 10 is reserved (RA1). */
+        Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
+        Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
     }
 #endif
     
@@ -4124,5 +4088,5 @@
         else
         {
-            pMixedCtx->eflags.u32 |= X86_EFL_TF;
+            pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
             pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
             pVCpu->hm.s.fClearTrapFlag = true;
     
@@ -4143,5 +4107,5 @@
          */
 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
-        if (    CPUMIsGuestInLongModeEx(pMixedCtx)
+        if (    CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
             && !CPUMIsHyperDebugStateActivePending(pVCpu))
         {
     
@@ -4170,8 +4134,8 @@
          * executing guest code so they'll trigger at the right time.
          */
-        if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
+        if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
         {
 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
-            if (    CPUMIsGuestInLongModeEx(pMixedCtx)
+            if (    CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
                 && !CPUMIsGuestDebugStateActivePending(pVCpu))
             {
     
@@ -4209,5 +4173,5 @@
 
         /* Update DR7 with the actual guest value. */
-        u32GuestDr7 = pMixedCtx->dr[7];
+        u32GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
         pVCpu->hm.s.fUsingHyperDR7 = false;
     }
     
@@ -4249,5 +4213,5 @@
  *          segments.
  */
-static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pCtx)
+static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu)
 {
     /*
     
@@ -4258,5 +4222,6 @@
      * and doesn't change the guest-context value.
      */
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    PVM       pVM  = pVCpu->CTX_SUFF(pVM);
+    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
     if (   !pVM->hm.s.vmx.fUnrestrictedGuest
     
@@ -4477,7 +4442,4 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks Will import guest CR0 on strict builds during validation of
     
@@ -4485,8 +4447,9 @@
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
-{
-    int rc  = VERR_INTERNAL_ERROR_5;
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
+static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu)
+{
+    int       rc   = VERR_INTERNAL_ERROR_5;
+    PVM       pVM  = pVCpu->CTX_SUFF(pVM);
+    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
 
     /*
     
@@ -4515,6 +4478,6 @@
             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-                pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
-            rc = HMVMX_EXPORT_SREG(CS, &pMixedCtx->cs);
+                pVCpu->hm.s.vmx.RealMode.AttrCS.u = pCtx->cs.Attr.u;
+            rc = HMVMX_EXPORT_SREG(CS, &pCtx->cs);
             AssertRCReturn(rc, rc);
             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
     
@@ -4525,6 +4488,6 @@
             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-                pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
-            rc = HMVMX_EXPORT_SREG(SS, &pMixedCtx->ss);
+                pVCpu->hm.s.vmx.RealMode.AttrSS.u = pCtx->ss.Attr.u;
+            rc = HMVMX_EXPORT_SREG(SS, &pCtx->ss);
             AssertRCReturn(rc, rc);
             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
     
@@ -4535,6 +4498,6 @@
             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-                pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
-            rc = HMVMX_EXPORT_SREG(DS, &pMixedCtx->ds);
+                pVCpu->hm.s.vmx.RealMode.AttrDS.u = pCtx->ds.Attr.u;
+            rc = HMVMX_EXPORT_SREG(DS, &pCtx->ds);
             AssertRCReturn(rc, rc);
             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
     
@@ -4545,6 +4508,6 @@
             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-                pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
-            rc = HMVMX_EXPORT_SREG(ES, &pMixedCtx->es);
+                pVCpu->hm.s.vmx.RealMode.AttrES.u = pCtx->es.Attr.u;
+            rc = HMVMX_EXPORT_SREG(ES, &pCtx->es);
             AssertRCReturn(rc, rc);
             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
     
@@ -4555,6 +4518,6 @@
             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-                pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
-            rc = HMVMX_EXPORT_SREG(FS, &pMixedCtx->fs);
+                pVCpu->hm.s.vmx.RealMode.AttrFS.u = pCtx->fs.Attr.u;
+            rc = HMVMX_EXPORT_SREG(FS, &pCtx->fs);
             AssertRCReturn(rc, rc);
             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
     
@@ -4565,6 +4528,6 @@
             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-                pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
-            rc = HMVMX_EXPORT_SREG(GS, &pMixedCtx->gs);
+                pVCpu->hm.s.vmx.RealMode.AttrGS.u = pCtx->gs.Attr.u;
+            rc = HMVMX_EXPORT_SREG(GS, &pCtx->gs);
             AssertRCReturn(rc, rc);
             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
     
@@ -4572,13 +4535,13 @@
 
 #ifdef VBOX_STRICT
-        hmR0VmxValidateSegmentRegs(pVCpu, pMixedCtx);
+        hmR0VmxValidateSegmentRegs(pVCpu);
 #endif
 
         /* Update the exit history entry with the correct CS.BASE + RIP. */
         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
-            EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true);
-
-        Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
-                  pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
+            EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true);
+
+        Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pCtx->cs.Sel, pCtx->cs.u64Base,
+                  pCtx->cs.u32Limit, pCtx->cs.Attr.u));
     }
 
     
@@ -4602,8 +4565,8 @@
         if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
         {
-            u16Sel          = pMixedCtx->tr.Sel;
-            u32Limit        = pMixedCtx->tr.u32Limit;
-            u64Base         = pMixedCtx->tr.u64Base;
-            u32AccessRights = pMixedCtx->tr.Attr.u;
+            u16Sel          = pCtx->tr.Sel;
+            u32Limit        = pCtx->tr.u32Limit;
+            u64Base         = pCtx->tr.u64Base;
+            u32AccessRights = pCtx->tr.Attr.u;
         }
         else
     
@@ -4639,5 +4602,5 @@
         Assert(   (u32Limit & 0xfff) == 0xfff
                || !(u32AccessRights & RT_BIT(15)));             /* Granularity MBZ. */
-        Assert(   !(pMixedCtx->tr.u32Limit & 0xfff00000)
+        Assert(   !(pCtx->tr.u32Limit & 0xfff00000)
               || (u32AccessRights & RT_BIT(15)));              /* Granularity MB1. */
 
     
@@ -4649,5 +4612,5 @@
 
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
-        Log4Func(("TR base=%#RX64\n", pMixedCtx->tr.u64Base));
+        Log4Func(("TR base=%#RX64\n", pCtx->tr.u64Base));
     }
 
     
@@ -4659,13 +4622,13 @@
         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
 
-        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  pMixedCtx->gdtr.pGdt);
+        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  pCtx->gdtr.pGdt);
         AssertRCReturn(rc, rc);
 
         /* Validate. */
-        Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000));          /* Bits 31:16 MBZ. */
+        Assert(!(pCtx->gdtr.cbGdt & 0xffff0000));          /* Bits 31:16 MBZ. */
 
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
-        Log4Func(("GDTR base=%#RX64\n", pMixedCtx->gdtr.pGdt));
+        Log4Func(("GDTR base=%#RX64\n", pCtx->gdtr.pGdt));
     }
 
     
@@ -4679,13 +4642,13 @@
         /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
         uint32_t u32Access = 0;
-        if (!pMixedCtx->ldtr.Attr.u)
+        if (!pCtx->ldtr.Attr.u)
             u32Access = X86DESCATTR_UNUSABLE;
         else
-            u32Access = pMixedCtx->ldtr.Attr.u;
-
-        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL,           pMixedCtx->ldtr.Sel);
-        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT,         pMixedCtx->ldtr.u32Limit);
+            u32Access = pCtx->ldtr.Attr.u;
+
+        rc  = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL,           pCtx->ldtr.Sel);
+        rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT,         pCtx->ldtr.u32Limit);
         rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE,          pMixedCtx->ldtr.u64Base);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE,          pCtx->ldtr.u64Base);
         AssertRCReturn(rc, rc);
 
     
@@ -4693,18 +4656,18 @@
         if (!(u32Access & X86DESCATTR_UNUSABLE))
         {
-            Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2)));              /* TI MBZ. */
-            Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2);              /* Type MB2 (LDT). */
-            Assert(!pMixedCtx->ldtr.Attr.n.u1DescType);              /* System MBZ. */
-            Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1);           /* Present MB1. */
-            Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh);             /* 11:8 MBZ. */
-            Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000));          /* 31:17 MBZ. */
-            Assert(   (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
-                   || !pMixedCtx->ldtr.Attr.n.u1Granularity);        /* Granularity MBZ. */
-            Assert(   !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
-                   || pMixedCtx->ldtr.Attr.n.u1Granularity);         /* Granularity MB1. */
+            Assert(!(pCtx->ldtr.Sel & RT_BIT(2)));              /* TI MBZ. */
+            Assert(pCtx->ldtr.Attr.n.u4Type == 2);              /* Type MB2 (LDT). */
+            Assert(!pCtx->ldtr.Attr.n.u1DescType);              /* System MBZ. */
+            Assert(pCtx->ldtr.Attr.n.u1Present == 1);           /* Present MB1. */
+            Assert(!pCtx->ldtr.Attr.n.u4LimitHigh);             /* 11:8 MBZ. */
+            Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000));          /* 31:17 MBZ. */
+            Assert(   (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
+                   || !pCtx->ldtr.Attr.n.u1Granularity);        /* Granularity MBZ. */
+            Assert(   !(pCtx->ldtr.u32Limit & 0xfff00000)
+                   || pCtx->ldtr.Attr.n.u1Granularity);         /* Granularity MB1. */
         }
 
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
-        Log4Func(("LDTR base=%#RX64\n", pMixedCtx->ldtr.u64Base));
+        Log4Func(("LDTR base=%#RX64\n", pCtx->ldtr.u64Base));
     }
 
     
@@ -4716,13 +4679,13 @@
         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
 
-        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
-        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  pMixedCtx->idtr.pIdt);
+        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
+        rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  pCtx->idtr.pIdt);
         AssertRCReturn(rc, rc);
 
         /* Validate. */
-        Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000));          /* Bits 31:16 MBZ. */
+        Assert(!(pCtx->idtr.cbIdt & 0xffff0000));          /* Bits 31:16 MBZ. */
 
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
-        Log4Func(("IDTR base=%#RX64\n", pMixedCtx->idtr.pIdt));
+        Log4Func(("IDTR base=%#RX64\n", pCtx->idtr.pIdt));
     }
 
     
@@ -4744,11 +4707,8 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
+static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu)
 {
     AssertPtr(pVCpu);
     
@@ -4759,5 +4719,6 @@
      * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs().
      */
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    PVM       pVM  = pVCpu->CTX_SUFF(pVM);
+    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
     {
     
@@ -4767,8 +4728,8 @@
             HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE);
 
-            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false, NULL);
-            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false, NULL);
-            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK,       false, NULL);
-            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
+            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pCtx->msrLSTAR,        false, NULL);
+            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pCtx->msrSTAR,         false, NULL);
+            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pCtx->msrSFMASK,       false, NULL);
+            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE, false, NULL);
             AssertRCReturn(rc, rc);
 # ifdef LOG_ENABLED
     
@@ -4793,5 +4754,5 @@
         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
         {
-            int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
+            int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
             AssertRCReturn(rc, rc);
             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
     
@@ -4800,5 +4761,5 @@
         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
         {
-            int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
+            int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
             AssertRCReturn(rc, rc);
             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
     
@@ -4807,5 +4768,5 @@
         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
         {
-            int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
+            int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
             AssertRCReturn(rc, rc);
             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
     
@@ -4817,5 +4778,5 @@
         HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
 
-        if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+        if (hmR0VmxShouldSwapEferMsr(pVCpu))
         {
             /*
     
@@ -4825,11 +4786,11 @@
             if (pVM->hm.s.vmx.fSupportsVmcsEfer)
             {
-                int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
+                int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER);
                 AssertRCReturn(rc,rc);
-                Log4Func(("EFER=%#RX64\n", pMixedCtx->msrEFER));
+                Log4Func(("EFER=%#RX64\n", pCtx->msrEFER));
             }
             else
             {
-                int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */,
+                int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pCtx->msrEFER, false /* fUpdateHostMsr */,
                                                     NULL /* pfAddedAndUpdated */);
                 AssertRCReturn(rc, rc);
     
@@ -4838,5 +4799,5 @@
                 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
                     hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
-                Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
+                Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pCtx->msrEFER,
                           pVCpu->hm.s.vmx.cMsrs));
             }
     
@@ -4860,22 +4821,20 @@
  *
  * @returns true if safe, false if must continue to use the 64-bit switcher.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
+ * @param   pCtx   Pointer to the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
  */
-static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pMixedCtx)
-{
-    if (pMixedCtx->gdtr.pGdt    & UINT64_C(0xffffffff00000000))     return false;
-    if (pMixedCtx->idtr.pIdt    & UINT64_C(0xffffffff00000000))     return false;
-    if (pMixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000))     return false;
-    if (pMixedCtx->tr.u64Base   & UINT64_C(0xffffffff00000000))     return false;
-    if (pMixedCtx->es.u64Base   & UINT64_C(0xffffffff00000000))     return false;
-    if (pMixedCtx->cs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
-    if (pMixedCtx->ss.u64Base   & UINT64_C(0xffffffff00000000))     return false;
-    if (pMixedCtx->ds.u64Base   & UINT64_C(0xffffffff00000000))     return false;
-    if (pMixedCtx->fs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
-    if (pMixedCtx->gs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
+static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pCtx)
+{
+    if (pCtx->gdtr.pGdt    & UINT64_C(0xffffffff00000000))     return false;
+    if (pCtx->idtr.pIdt    & UINT64_C(0xffffffff00000000))     return false;
+    if (pCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000))     return false;
+    if (pCtx->tr.u64Base   & UINT64_C(0xffffffff00000000))     return false;
+    if (pCtx->es.u64Base   & UINT64_C(0xffffffff00000000))     return false;
+    if (pCtx->cs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
+    if (pCtx->ss.u64Base   & UINT64_C(0xffffffff00000000))     return false;
+    if (pCtx->ds.u64Base   & UINT64_C(0xffffffff00000000))     return false;
+    if (pCtx->fs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
+    if (pCtx->gs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
 
     /* All good, bases are 32-bit. */
     
@@ -4890,13 +4849,11 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
 * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
-{
-    if (CPUMIsGuestInLongModeEx(pMixedCtx))
+static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu)
+{
+    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+    if (CPUMIsGuestInLongModeEx(pCtx))
     {
 #ifndef VBOX_ENABLE_64_BITS_GUESTS
     
@@ -4966,5 +4923,5 @@
             Assert(pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64);
             if (   pVCpu->hm.s.vmx.RealMode.fRealOnV86Active
-                || hmR0VmxIs32BitSwitcherSafe(pMixedCtx))
+                || hmR0VmxIs32BitSwitcherSafe(pCtx))
             {
                 pVCpu->hm.s.vmx.fSwitchedTo64on32 = false;
     
@@ -4994,11 +4951,11 @@
 * @returns VBox status code, no informational status codes.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu)
 {
     /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
+    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
 
     
@@ -5028,12 +4985,10 @@
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   rcVMRun         The return code from VMLAUNCH/VMRESUME.
- * @param   pCtx            Pointer to the guest-CPU context.
 * @param   pVmxTransient   Pointer to the VMX transient structure (only
 *                          exitReason updated).
 */
-static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
+static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PVMXTRANSIENT pVmxTransient)
 {
     Assert(pVCpu);
-    Assert(pCtx);
     Assert(pVmxTransient);
     HMVMX_ASSERT_PREEMPT_SAFE();
     
@@ -5124,9 +5079,9 @@
                 /* Guest bits. */
                 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);          AssertRC(rc);
-                Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
+                Log4(("Old Guest Rip %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rip, u64Val));
                 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);          AssertRC(rc);
-                Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
+                Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rsp, u64Val));
                 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);         AssertRC(rc);
-                Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
+                Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pVCpu->cpum.GstCtx.eflags.u32, u32Val));
                 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid)
                 {
     
@@ -5235,5 +5190,4 @@
             break;
     }
-    NOREF(pCtx);
 }
 
     
@@ -5839,11 +5793,7 @@
  *
 * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
- *                          out-of-sync. Make sure to update the required fields
- *                          before using them.
- */
-DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    NOREF(pMixedCtx);
+ */
+DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu)
+{
     uint32_t u32IntInfo  = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
     u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
@@ -5865,12 +5815,9 @@
  *
 * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
- *                          out-of-sync. Make sure to update the required fields
- *                          before using them.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
-static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
+static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
 {
     uint32_t const uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
     
@@ -5979,5 +5926,5 @@
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
                 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
-                                       0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
+                                       0 /* cbInstr */, u32ErrCode, pVCpu->cpum.GstCtx.cr2);
 
                 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
     
@@ -6001,5 +5948,5 @@
                     pVmxTransient->fVectoringDoublePF = true;
                     Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
-                          pMixedCtx->cr2));
+                          pVCpu->cpum.GstCtx.cr2));
                     rcStrict = VINF_SUCCESS;
                 }
     
@@ -6007,5 +5954,5 @@
                 {
                     STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
-                    hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
+                    hmR0VmxSetPendingXcptDF(pVCpu);
                     Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
                               uIdtVector, uExitVector));
     
@@ -6625,10 +6572,7 @@
  *
 * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
- *                          out-of-sync. Make sure to update the required
- *                          fields before using them.
 * @param   fStepping       Running in hmR0VmxRunGuestCodeStep().
 */
-static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
+static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, bool fStepping)
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
     
@@ -6648,6 +6592,7 @@
     if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     {
-        Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
-        VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
+        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+        Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
+        VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
                                             VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
         if (rcStrict2 != VINF_SUCCESS)
     
@@ -6943,11 +6888,8 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jmp zone!!!
  */
-static int hmR0VmxLeaveSession(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+static int hmR0VmxLeaveSession(PVMCPU pVCpu)
 {
     HM_DISABLE_PREEMPT();
     
@@ -6964,5 +6906,5 @@
         pVCpu->hm.s.fLeaveDone = true;
     }
-    Assert(!pMixedCtx->fExtrn); NOREF(pMixedCtx);
+    Assert(!pVCpu->cpum.GstCtx.fExtrn);
 
     /*
     
@@ -6990,13 +6932,10 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  *
  * @remarks No-long-jmp zone!!!
  */
-DECLINLINE(int) hmR0VmxLongJmpToRing3(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    return hmR0VmxLeaveSession(pVCpu, pMixedCtx);
+DECLINLINE(int) hmR0VmxLongJmpToRing3(PVMCPU pVCpu)
+{
+    return hmR0VmxLeaveSession(pVCpu);
 }
 
     
@@ -7012,14 +6951,10 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
  * @param   rcExit      The reason for exiting to ring-3. Can be
  *                      VINF_VMM_UNKNOWN_RING3_CALL.
  */
-static int hmR0VmxExitToRing3(PVMCPU pVCpu, PCPUMCTX pMixedCtx, VBOXSTRICTRC rcExit)
+static int hmR0VmxExitToRing3(PVMCPU pVCpu, VBOXSTRICTRC rcExit)
 {
     Assert(pVCpu);
-    Assert(pMixedCtx);
     HMVMX_ASSERT_PREEMPT_SAFE();
 
     
    70546989
    70556990    /* Save guest state and restore host state bits. */
    7056     int rc = hmR0VmxLeaveSession(pVCpu, pMixedCtx);
     6991    int rc = hmR0VmxLeaveSession(pVCpu);
    70576992    AssertRCReturn(rc, rc);
    70586993    STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
     
    70687003                             | CPUM_CHANGED_HIDDEN_SEL_REGS);
    70697004    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
    7070         && CPUMIsGuestPagingEnabledEx(pMixedCtx))
     7005        && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
    70717006    {
    70727007        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
     
    71597094    Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation));
    71607095
    7161     int rc = hmR0VmxLongJmpToRing3(pVCpu, (PCPUMCTX)pvUser);
     7096    int rc = hmR0VmxLongJmpToRing3(pVCpu);
    71627097    AssertRCReturn(rc, rc);
    71637098
     
    72457180 * @returns The VT-x guest-interruptibility state.
    72467181 * @param   pVCpu           The cross context virtual CPU structure.
    7247  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    7248  *                          out-of-sync. Make sure to update the required fields
    7249  *                          before using them.
    7250  */
    7251 static uint32_t hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     7182 */
     7183static uint32_t hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu)
    72527184{
    72537185    /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
    7254     uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
     7186    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     7187    uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu);
    72557188    bool const fBlockMovSS    = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
    72567189    bool const fBlockSti      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
    72577190    bool const fBlockNmi      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
    72587191
    7259     Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
     7192    Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
    72607193    Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet.*/
    7261     Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);     /* Cannot set block-by-STI when interrupts are disabled. */
     7194    Assert(!fBlockSti || pCtx->eflags.Bits.u1IF);     /* Cannot set block-by-STI when interrupts are disabled. */
    72627195    Assert(!TRPMHasTrap(pVCpu));
    72637196
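
The asserts above decode the VT-x guest-interruptibility word into per-cause blocking flags before deciding what can be injected. A hedged sketch of that decode (mask names and values here are illustrative; per the Intel SDM, bits 0-3 are blocking by STI, MOV SS, SMI and NMI respectively):

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_BLOCK_STI    UINT32_C(0x1)  /* bit 0 */
    #define SKETCH_BLOCK_MOVSS  UINT32_C(0x2)  /* bit 1 */
    #define SKETCH_BLOCK_SMI    UINT32_C(0x4)  /* bit 2 */
    #define SKETCH_BLOCK_NMI    UINT32_C(0x8)  /* bit 3 */

    static void decodeIntrState(uint32_t fIntrState,
                                bool *pfBlockSti, bool *pfBlockMovSS, bool *pfBlockNmi)
    {
        *pfBlockSti   = (fIntrState & SKETCH_BLOCK_STI)   != 0;
        *pfBlockMovSS = (fIntrState & SKETCH_BLOCK_MOVSS) != 0;
        *pfBlockNmi   = (fIntrState & SKETCH_BLOCK_NMI)   != 0;
    }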
     
    72987231        int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    72997232        AssertRCReturn(rc, 0);
    7300         bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
     7233        bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
    73017234        if (   !pVCpu->hm.s.Event.fPending
    73027235            && !fBlockInt
     
    73437276 *
    73447277 * @param   pVCpu           The cross context virtual CPU structure.
    7345  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    7346  *                          out-of-sync. Make sure to update the required fields
    7347  *                          before using them.
    7348  */
    7349 DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    7350 {
     7278 */
     7279DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu)
     7280{
     7281    Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
    73517282    RT_NOREF(pVCpu);
    7352     Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); NOREF(pMixedCtx);
    73537283    return VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
    73547284}
     
    73617291 * @returns Strict VBox status code (i.e. informational status codes too).
    73627292 * @param   pVCpu           The cross context virtual CPU structure.
    7363  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    7364  *                          out-of-sync. Make sure to update the required fields
    7365  *                          before using them.
    73667293 * @param   fIntrState      The VT-x guest-interruptibility state.
    73677294 * @param   fStepping       Running in hmR0VmxRunGuestCodeStep() and we should
     
    73697296 *                          dispatched directly.
    73707297 */
    7371 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t fIntrState, bool fStepping)
     7298static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, uint32_t fIntrState, bool fStepping)
    73727299{
    73737300    HMVMX_ASSERT_PREEMPT_SAFE();
     
    73777304    bool fBlockSti      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
    73787305
    7379     Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
    7380     Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));     /* We don't support block-by-SMI yet.*/
    7381     Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);       /* Cannot set block-by-STI when interrupts are disabled. */
     7306    Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
     7307    Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));         /* We don't support block-by-SMI yet.*/
     7308    Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF);   /* Cannot set block-by-STI when interrupts are disabled. */
    73827309    Assert(!TRPMHasTrap(pVCpu));
    73837310
     7311    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    73847312    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    73857313    if (pVCpu->hm.s.Event.fPending)
     
    73967324        if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
    73977325        {
    7398             bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
     7326            bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
    73997327            Assert(!fBlockInt);
    74007328            Assert(!fBlockSti);
     
    74417369            int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    74427370            AssertRCReturn(rc, rc);
    7443             if (pMixedCtx->eflags.Bits.u1TF)
     7371            if (pCtx->eflags.Bits.u1TF)
    74447372            {
    7445                 int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     7373                int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
    74467374                AssertRCReturn(rc2, rc2);
    74477375            }
    74487376        }
    7449         else if (pMixedCtx->eflags.Bits.u1TF)
     7377        else if (pCtx->eflags.Bits.u1TF)
    74507378        {
    74517379            /*
     
    74757403 *
    74767404 * @param   pVCpu           The cross context virtual CPU structure.
    7477  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    7478  *                          out-of-sync. Make sure to update the required fields
    7479  *                          before using them.
    7480  */
    7481 DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    7482 {
    7483     NOREF(pMixedCtx);
     7405 */
     7406DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu)
     7407{
    74847408    uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
    74857409    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
     
    74927416 * @returns Strict VBox status code (i.e. informational status codes too).
    74937417 * @param   pVCpu           The cross context virtual CPU structure.
    7494  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    7495  *                          out-of-sync. Make sure to update the required fields
    7496  *                          before using them.
    74977418 * @param   fStepping       Whether we're running in hmR0VmxRunGuestCodeStep()
    74987419 *                          and should return VINF_EM_DBG_STEPPED if the event
     
    75037424 *                          necessary. This cannot be NULL.
    75047425 */
    7505 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fStepping, uint32_t *pfIntrState)
    7506 {
    7507     NOREF(pMixedCtx);
     7426DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, bool fStepping, uint32_t *pfIntrState)
     7427{
    75087428    uint32_t u32IntInfo  = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
    75097429    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    75187438 *
    75197439 * @param   pVCpu           The cross context virtual CPU structure.
    7520  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    7521  *                          out-of-sync. Make sure to update the required fields
    7522  *                          before using them.
    7523  */
    7524 DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    7525 {
    7526     NOREF(pMixedCtx);
     7440 */
     7441DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu)
     7442{
    75277443    uint32_t u32IntInfo  = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
    75287444    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    75357451 *
    75367452 * @param   pVCpu           The cross context virtual CPU structure.
    7537  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    7538  *                          out-of-sync. Make sure to update the required fields
    7539  *                          before using them.
    75407453 * @param   cbInstr         The value of RIP that is to be pushed on the guest
    75417454 *                          stack.
    75427455 */
    7543 DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
    7544 {
    7545     NOREF(pMixedCtx);
     7456DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, uint32_t cbInstr)
     7457{
    75467458    uint32_t u32IntInfo  = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
    75477459    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    75557467 * @returns Strict VBox status code (i.e. informational status codes too).
    75567468 * @param   pVCpu               The cross context virtual CPU structure.
    7557  * @param   pMixedCtx           Pointer to the guest-CPU context. The data may be
    7558  *                              out-of-sync. Make sure to update the required fields
    7559  *                              before using them.
    75607469 * @param   fErrorCodeValid     Whether the error code is valid (depends on the CPU
    75617470 *                              mode, i.e. in real-mode it's not valid).
     
    75707479 *                              necessary. This cannot be NULL.
    75717480 */
    7572 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
    7573                                              bool fStepping, uint32_t *pfIntrState)
    7574 {
    7575     NOREF(pMixedCtx);
     7481DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, bool fErrorCodeValid, uint32_t u32ErrorCode, bool fStepping,
     7482                                             uint32_t *pfIntrState)
     7483{
    75767484    uint32_t u32IntInfo  = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
    75777485    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    75837491
    75847492
    7585 #if 0 /* unused */
    7586 /**
    7587  * Sets a general-protection (\#GP) exception as pending-for-injection into the
    7588  * VM.
     7493/**
     7494 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
    75897495 *
    75907496 * @param   pVCpu           The cross context virtual CPU structure.
    7591  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    7592  *                          out-of-sync. Make sure to update the required fields
    7593  *                          before using them.
    7594  * @param   u32ErrorCode    The error code associated with the \#GP.
    7595  */
    7596 DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
    7597 {
    7598     NOREF(pMixedCtx);
    7599     uint32_t u32IntInfo  = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
    7600     u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    7601     u32IntInfo          |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    7602     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
    7603 }
    7604 #endif /* unused */
    7605 
    7606 
    7607 /**
    7608  * Sets a software interrupt (INTn) as pending-for-injection into the VM.
    7609  *
    7610  * @param   pVCpu           The cross context virtual CPU structure.
    7611  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    7612  *                          out-of-sync. Make sure to update the required fields
    7613  *                          before using them.
    76147497 * @param   uVector         The software interrupt vector number.
    76157498 * @param   cbInstr         The value of RIP that is to be pushed on the guest
    76167499 *                          stack.
    76177500 */
    7618 DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
    7619 {
    7620     NOREF(pMixedCtx);
     7501DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, uint16_t uVector, uint32_t cbInstr)
     7502{
    76217503    uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
    76227504    if (   uVector == X86_XCPT_BP
     
    76357517 * @returns Strict VBox status code (i.e. informational status codes too).
    76367518 * @retval  VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
    7637  * @param   pVM         The cross context VM structure.
    7638  * @param   pMixedCtx   Pointer to the guest-CPU context.
     7519 * @param   pVCpu       The cross context virtual CPU structure.
    76397520 * @param   uValue      The value to push to the guest stack.
    76407521 */
    7641 DECLINLINE(VBOXSTRICTRC) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
     7522static VBOXSTRICTRC hmR0VmxRealModeGuestStackPush(PVMCPU pVCpu, uint16_t uValue)
    76427523{
    76437524    /*
     
    76467527     * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
    76477528     */
    7648     if (pMixedCtx->sp == 1)
     7529    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     7530    if (pCtx->sp == 1)
    76497531        return VINF_EM_RESET;
    7650     pMixedCtx->sp -= sizeof(uint16_t);       /* May wrap around which is expected behaviour. */
    7651     int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
     7532    pCtx->sp -= sizeof(uint16_t);       /* May wrap around which is expected behaviour. */
     7533    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
    76527534    AssertRC(rc);
    76537535    return rc;
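
The push helper above implements 16-bit real-mode stack semantics: SP wraps modulo 64 KiB, and a push at SP == 1 would straddle the wrap point, which the VMM treats as a triple fault (VINF_EM_RESET). A stand-alone model of just that rule:

    #include <stdint.h>
    #include <string.h>

    /* pbStackSeg models one 64 KiB stack segment; returns -1 where the VMM
       would return VINF_EM_RESET. */
    static int pushWord(uint8_t pbStackSeg[65536], uint16_t *puSp, uint16_t uValue)
    {
        if (*puSp == 1)                                /* word would straddle the wrap */
            return -1;
        *puSp = (uint16_t)(*puSp - sizeof(uint16_t));  /* 0 wraps to 0xFFFE by design  */
        memcpy(&pbStackSeg[*puSp], &uValue, sizeof(uint16_t));
        return 0;
    }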
     
    76867568    Assert(pfIntrState);
    76877569
    7688     PCPUMCTX       pMixedCtx  = &pVCpu->cpum.GstCtx;
     7570    PCPUMCTX       pCtx       = &pVCpu->cpum.GstCtx;
    76897571    uint32_t       u32IntInfo = (uint32_t)u64IntInfo;
    76907572    uint32_t const uVector    = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
     
    76997581     */
    77007582    if (   uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
    7701         && !CPUMIsGuestInRealModeEx(pMixedCtx))
     7583        && !CPUMIsGuestInRealModeEx(pCtx))
    77027584    {
    77037585        switch (uVector)
     
    77337615     * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
    77347616     */
    7735     if (CPUMIsGuestInRealModeEx(pMixedCtx))     /* CR0.PE bit changes are always intercepted, so it's up to date. */
     7617    if (CPUMIsGuestInRealModeEx(pCtx))     /* CR0.PE bit changes are always intercepted, so it's up to date. */
    77367618    {
    77377619        if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest)
     
    77587640            /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
    77597641            size_t const cbIdtEntry = sizeof(X86IDTR16);
    7760             if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
     7642            if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
    77617643            {
    77627644                /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
     
    77667648                /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
    77677649                if (uVector == X86_XCPT_GP)
    7768                     return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, pfIntrState);
     7650                    return hmR0VmxInjectXcptDF(pVCpu, fStepping, pfIntrState);
    77697651
    77707652                /*
     
    77747656                 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
    77757657                 */
    7776                 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping,
    7777                                            pfIntrState);
     7658                return hmR0VmxInjectXcptGP(pVCpu, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping, pfIntrState);
    77787659            }
    77797660
    77807661            /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
    7781             uint16_t uGuestIp = pMixedCtx->ip;
     7662            uint16_t uGuestIp = pCtx->ip;
    77827663            if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
    77837664            {
    77847665                Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
    77857666                /* #BP and #OF are both benign traps, we need to resume the next instruction. */
    7786                 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
     7667                uGuestIp = pCtx->ip + (uint16_t)cbInstr;
    77877668            }
    77887669            else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
    7789                 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
     7670                uGuestIp = pCtx->ip + (uint16_t)cbInstr;
    77907671
    77917672            /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
    77927673            X86IDTR16 IdtEntry;
    7793             RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
     7674            RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
    77947675            rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
    77957676            AssertRCReturn(rc2, rc2);
     
    77977678            /* Construct the stack frame for the interrupt/exception handler. */
    77987679            VBOXSTRICTRC rcStrict;
    7799             rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
     7680            rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
    78007681            if (rcStrict == VINF_SUCCESS)
    7801                 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
     7682                rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
    78027683            if (rcStrict == VINF_SUCCESS)
    7803                 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
     7684                rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
    78047685
    78057686            /* Clear the required eflag bits and jump to the interrupt/exception handler. */
    78067687            if (rcStrict == VINF_SUCCESS)
    78077688            {
    7808                 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
    7809                 pMixedCtx->rip         = IdtEntry.offSel;
    7810                 pMixedCtx->cs.Sel      = IdtEntry.uSel;
    7811                 pMixedCtx->cs.ValidSel = IdtEntry.uSel;
    7812                 pMixedCtx->cs.u64Base  = IdtEntry.uSel << cbIdtEntry;
     7689                pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
     7690                pCtx->rip         = IdtEntry.offSel;
     7691                pCtx->cs.Sel      = IdtEntry.uSel;
     7692                pCtx->cs.ValidSel = IdtEntry.uSel;
     7693                pCtx->cs.u64Base  = IdtEntry.uSel << cbIdtEntry;
    78137694                if (   uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
    78147695                    && uVector  == X86_XCPT_PF)
    7815                     pMixedCtx->cr2 = GCPtrFaultAddress;
     7696                    pCtx->cr2 = GCPtrFaultAddress;
    78167697
    78177698                /* If any other guest-state bits are changed here, make sure to update
     
    78307711                }
    78317712                Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
    7832                       u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));
     7713                      u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
    78337714
    78347715                /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
     
    78617742    if (   VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
    78627743        && uVector == X86_XCPT_PF)
    7863         pMixedCtx->cr2 = GCPtrFaultAddress;
    7864 
    7865     Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
     7744        pCtx->cr2 = GCPtrFaultAddress;
     7745
     7746    Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
    78667747
    78677748    return VINF_SUCCESS;
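
The real-mode branch above performs the whole software dispatch by hand: bounds-check the vector against the IDTR limit (entry N occupies bytes 4N..4N+3 of the IVT), fetch the 4-byte entry, push FLAGS, CS and IP, clear IF/TF (the real code also clears RF and AC in EFLAGS), and jump through CS:IP from the entry. A compact sketch under simplified assumptions (flat physical memory array, toy types):

    #include <stdint.h>
    #include <string.h>

    typedef struct { uint16_t offSel; uint16_t uSel; } IVTENTRYSKETCH;  /* 4 bytes */

    static int dispatchRealModeInt(uint8_t *pbRam, uint16_t *puFlags, uint16_t *puCs,
                                   uint16_t *puIp, uint16_t *puSp, uint16_t uSsBase,
                                   uint16_t cbIdtLimit, uint8_t uVector)
    {
        unsigned const cbEntry = sizeof(IVTENTRYSKETCH);
        if (uVector * cbEntry + (cbEntry - 1) > cbIdtLimit)   /* same limit check as above */
            return -1;                                        /* escalate: #GP, #DF, reset */

        IVTENTRYSKETCH Entry;                                 /* IVT base is physical 0    */
        memcpy(&Entry, &pbRam[uVector * cbEntry], cbEntry);

        uint32_t const uSsFlat   = (uint32_t)uSsBase << 4;    /* real-mode segment base    */
        uint16_t const aFrame[3] = { *puFlags, *puCs, *puIp };/* INT pushes in this order  */
        for (unsigned i = 0; i < 3; i++)
        {
            *puSp = (uint16_t)(*puSp - 2);                    /* may wrap, see push helper */
            memcpy(&pbRam[uSsFlat + *puSp], &aFrame[i], 2);
        }

        *puFlags &= (uint16_t)~((1u << 9) | (1u << 8));       /* clear IF (bit 9), TF (8)  */
        *puCs = Entry.uSel;
        *puIp = Entry.offSel;
        return 0;
    }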
     
    80977978 *
    80987979 * @param   pVCpu       The cross context virtual CPU structure.
    8099  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    8100  *                      out-of-sync. Make sure to update the required fields
    8101  *                      before using them.
    81027980 *
    81037981 * @remarks No-long-jump zone!!!
    81047982 */
    8105 static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
     7983static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu)
    81067984{
    81077985    AssertPtr(pVCpu);
    8108     AssertPtr(pMixedCtx);
    81097986    HMVMX_ASSERT_PREEMPT_SAFE();
    81107987
     
    81167993    pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
    81177994    if (   !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
    8118         &&  CPUMIsGuestInRealModeEx(pMixedCtx))
     7995        &&  CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
    81197996    {
    81207997        pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
     
    81258002     * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
    81268003     */
    8127     int rc = hmR0VmxSelectVMRunHandler(pVCpu, pMixedCtx);
     8004    int rc = hmR0VmxSelectVMRunHandler(pVCpu);
    81288005    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    81298006
    81308007    /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
    8131     rc = hmR0VmxExportGuestEntryCtls(pVCpu, pMixedCtx);
     8008    rc = hmR0VmxExportGuestEntryCtls(pVCpu);
    81328009    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    81338010
    81348011    /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
    8135     rc = hmR0VmxExportGuestExitCtls(pVCpu, pMixedCtx);
     8012    rc = hmR0VmxExportGuestExitCtls(pVCpu);
    81368013    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    81378014
    8138     rc = hmR0VmxExportGuestCR0(pVCpu, pMixedCtx);
     8015    rc = hmR0VmxExportGuestCR0(pVCpu);
    81398016    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    81408017
    8141     VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pMixedCtx);
     8018    VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu);
    81428019    if (rcStrict == VINF_SUCCESS)
    81438020    { /* likely */ }
     
    81488025    }
    81498026
    8150     rc = hmR0VmxExportGuestSegmentRegs(pVCpu, pMixedCtx);
     8027    rc = hmR0VmxExportGuestSegmentRegs(pVCpu);
    81518028    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    81528029
    81538030    /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it
    81548031       may alter controls if we determine we don't have to swap EFER after all. */
    8155     rc = hmR0VmxExportGuestMsrs(pVCpu, pMixedCtx);
     8032    rc = hmR0VmxExportGuestMsrs(pVCpu);
    81568033    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    81578034
     
    81648041    /* Exporting RFLAGS here is fine, even though RFLAGS.TF might depend on guest debug state which is
    81658042       not exported here. It is re-evaluated and updated if necessary in hmR0VmxExportSharedState(). */
    8166     rc  = hmR0VmxExportGuestRip(pVCpu, pMixedCtx);
    8167     rc |= hmR0VmxExportGuestRsp(pVCpu, pMixedCtx);
    8168     rc |= hmR0VmxExportGuestRflags(pVCpu, pMixedCtx);
     8043    rc  = hmR0VmxExportGuestRip(pVCpu);
     8044    rc |= hmR0VmxExportGuestRsp(pVCpu);
     8045    rc |= hmR0VmxExportGuestRflags(pVCpu);
    81698046    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
    81708047
     
    81938070 *
    81948071 * @param   pVCpu       The cross context virtual CPU structure.
    8195  * @param   pCtx        Pointer to the guest-CPU context.
    81968072 *
    81978073 * @remarks No-long-jump zone!!!
    81988074 */
    8199 static void hmR0VmxExportSharedState(PVMCPU pVCpu, PCPUMCTX pCtx)
     8075static void hmR0VmxExportSharedState(PVMCPU pVCpu)
    82008076{
    82018077    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    82048080    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
    82058081    {
    8206         int rc = hmR0VmxExportSharedDebugState(pVCpu, pCtx);
     8082        int rc = hmR0VmxExportSharedDebugState(pVCpu);
    82078083        AssertRC(rc);
    82088084        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
     
    82118087        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
    82128088        {
    8213             rc = hmR0VmxExportGuestRflags(pVCpu, pCtx);
     8089            rc = hmR0VmxExportGuestRflags(pVCpu);
    82148090            AssertRC(rc);
    82158091        }
     
    82188094    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
    82198095    {
    8220         hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
     8096        hmR0VmxLazyLoadGuestMsrs(pVCpu);
    82218097        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
    82228098    }
     
    82368112 *
    82378113 * @param   pVCpu           The cross context virtual CPU structure.
    8238  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    8239  *                          out-of-sync. Make sure to update the required fields
    8240  *                          before using them.
    82418114 *
    82428115 * @remarks No-long-jump zone!!!
    82438116 */
    8244 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
     8117static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu)
    82458118{
    82468119    HMVMX_ASSERT_PREEMPT_SAFE();
     
    82618134    if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP)
    82628135    {
    8263         rcStrict = hmR0VmxExportGuestRip(pVCpu, pMixedCtx);
     8136        rcStrict = hmR0VmxExportGuestRip(pVCpu);
    82648137        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    82658138        { /* likely */}
     
    82708143    else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
    82718144    {
    8272         rcStrict = hmR0VmxExportGuestState(pVCpu, pMixedCtx);
     8145        rcStrict = hmR0VmxExportGuestState(pVCpu);
    82738146        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    82748147        { /* likely */}
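
The fast path above is a dirty-mask optimization: when the only state marked changed since the last VM-entry is RIP (the common case after an exit that merely advanced the instruction pointer), only that one VMCS field is rewritten; anything else falls back to the full export. A sketch with illustrative flag values (not the real HM_CHANGED_* bits):

    #include <stdint.h>

    #define SKETCH_DIRTY_RIP  UINT32_C(0x1)
    #define SKETCH_DIRTY_ANY  UINT32_C(0xff)

    static void writeVmcsRipOnly(void) { /* write just the guest-RIP field */ }
    static void exportFullState(void)  { /* full guest-state export        */ }

    static void exportOptimal(uint32_t *pfDirty)
    {
        if (*pfDirty == SKETCH_DIRTY_RIP)      /* RIP is the sole dirty bit   */
            writeVmcsRipOnly();
        else if (*pfDirty & SKETCH_DIRTY_ANY)
            exportFullState();
        *pfDirty = 0;                          /* exported state is clean now */
    }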
     
    83198192 *
    83208193 * @param   pVCpu           The cross context virtual CPU structure.
    8321  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    8322  *                          out-of-sync. Make sure to update the required fields
    8323  *                          before using them.
    83248194 * @param   pVmxTransient   Pointer to the VMX transient structure.
    83258195 * @param   fStepping       Set if called from hmR0VmxRunGuestCodeStep().  Makes
     
    83288198 *                          dispatching took place.
    83298199 */
    8330 static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
     8200static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping)
    83318201{
    83328202    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     
    83378207
    83388208    /* Check force flag actions that might require us to go back to ring-3. */
    8339     VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, pMixedCtx, fStepping);
     8209    VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, fStepping);
    83408210    if (rcStrict == VINF_SUCCESS)
    83418211    { /* FFs doesn't get set all the time. */ }
     
    83778247    if (TRPMHasTrap(pVCpu))
    83788248        hmR0VmxTrpmTrapToPendingEvent(pVCpu);
    8379     uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
     8249    uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu);
    83808250
    83818251    /*
     
    83848254     * also result in triple-faulting the VM.
    83858255     */
    8386     rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fIntrState, fStepping);
     8256    rcStrict = hmR0VmxInjectPendingEvent(pVCpu, fIntrState, fStepping);
    83878257    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    83888258    { /* likely */ }
     
    84028272    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    84038273    {
    8404         Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
     8274        Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
    84058275        int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
    84068276        AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
     
    84328302     * Hence, loading of the guest state needs to be done -after- injection of events.
    84338303     */
    8434     rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pMixedCtx);
     8304    rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu);
    84358305    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    84368306    { /* likely */ }
     
    84978367 *
    84988368 * @param   pVCpu           The cross context virtual CPU structure.
    8499  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    8500  *                          out-of-sync. Make sure to update the required fields
    8501  *                          before using them.
    85028369 * @param   pVmxTransient   Pointer to the VMX transient structure.
    85038370 *
     
    85058372 * @remarks No-long-jump zone!!!
    85068373 */
    8507 static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     8374static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    85088375{
    85098376    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     
    85538420     */
    85548421    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
    8555         hmR0VmxExportSharedState(pVCpu, pMixedCtx);
     8422        hmR0VmxExportSharedState(pVCpu);
    85568423    AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
    85578424
    85588425    /* Store status of the shared guest-host state at the time of VM-entry. */
    85598426#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    8560     if (CPUMIsGuestInLongModeEx(pMixedCtx))
     8427    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
    85618428    {
    85628429        pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
     
    86388505    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
    86398506    {
    8640         uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
     8507        uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu);
    86418508        if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
    86428509            Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
     
    87758642 * @returns VBox status code.
    87768643 * @param   pVCpu       The cross context virtual CPU structure.
    8777  * @param   pCtx        Pointer to the guest-CPU context.
    87788644 *
    87798645 * @note    Mostly the same as hmR0VmxRunGuestCodeStep().
    87808646 */
    8781 static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx)
     8647static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu)
    87828648{
    87838649    VMXTRANSIENT VmxTransient;
     
    87948660           to ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
    87958661        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    8796         rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, false /* fStepping */);
     8662        rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
    87978663        if (rcStrict != VINF_SUCCESS)
    87988664            break;
    87998665
    8800         hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient);
    8801         int rcRun = hmR0VmxRunGuest(pVCpu, pCtx);
    8802         /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
     8666        hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
     8667        int rcRun = hmR0VmxRunGuest(pVCpu);
    88038668
    88048669        /* Restore any residual host-state and save any bits shared between host
     
    88128677        {
    88138678            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
    8814             hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient);
     8679            hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    88158680            return rcRun;
    88168681        }
     
    88238688        HMVMX_START_EXIT_DISPATCH_PROF();
    88248689
    8825         VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
     8690        VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
    88268691
    88278692        /* Handle the VM-exit. */
    88288693#ifdef HMVMX_USE_FUNCTION_TABLE
    8829         rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
     8694        rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, &VmxTransient);
    88308695#else
    8831         rcStrict = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
     8696        rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient, VmxTransient.uExitReason);
    88328697#endif
    88338698        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
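
Both dispatch flavours keep the exit reason as the selector: with HMVMX_USE_FUNCTION_TABLE it indexes an array of handler pointers, otherwise a switch does the same job. A sketch of the table flavour with the one-parameter-fewer handler shape this changeset arrives at (all names here are illustrative):

    #include <stdint.h>

    typedef struct VMCPUSKETCH VMCPUSKETCH;               /* opaque here            */
    typedef struct { uint32_t uExitReason; } XIENTSKETCH; /* stand-in for transient */

    typedef int FNEXITHANDLERSKETCH(VMCPUSKETCH *pVCpu, XIENTSKETCH *pVmxTransient);

    static int exitHlt(VMCPUSKETCH *pVCpu, XIENTSKETCH *pT)   { (void)pVCpu; (void)pT; return 1; }
    static int exitCpuid(VMCPUSKETCH *pVCpu, XIENTSKETCH *pT) { (void)pVCpu; (void)pT; return 2; }

    /* Exit reason indexes straight into the table; the real table has one
       entry per VMX exit reason. */
    static FNEXITHANDLERSKETCH * const g_apfnSketch[] = { exitHlt, exitCpuid };

    static int handleExit(VMCPUSKETCH *pVCpu, XIENTSKETCH *pT)
    {
        return g_apfnSketch[pT->uExitReason](pVCpu, pT);
    }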
     
    89168781 * @param   pVCpu           The cross context virtual CPU structure of the
    89178782 *                          calling EMT.
    8918  * @param   pCtx            The CPU register context to go with @a pVCpu.
    89198783 * @param   pDbgState       The structure to initialize.
    89208784 */
    8921 static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)
    8922 {
    8923     pDbgState->uRipStart            = pCtx->rip;
    8924     pDbgState->uCsStart             = pCtx->cs.Sel;
     8785static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
     8786{
     8787    pDbgState->uRipStart            = pVCpu->cpum.GstCtx.rip;
     8788    pDbgState->uCsStart             = pVCpu->cpum.GstCtx.cs.Sel;
    89258789
    89268790    pDbgState->fModifiedProcCtls    = false;
     
    93379201 * @returns Strict VBox status code (i.e. informational status codes too).
    93389202 * @param   pVCpu           The cross context virtual CPU structure.
    9339  * @param   pMixedCtx       Pointer to the guest-CPU context.
    93409203 * @param   pVmxTransient   Pointer to the VMX-transient structure.
    93419204 * @param   uExitReason     The VM-exit reason.
     
    93449207 *          and to the point. No longer than 33 chars long, please.
    93459208 */
    9346 static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
    9347                                                   uint32_t uExitReason)
     9209static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
    93489210{
    93499211    /*
     
    93779239    {
    93789240        case VMX_EXIT_MTF:
    9379             return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
     9241            return hmR0VmxExitMtf(pVCpu, pVmxTransient);
    93809242
    93819243        case VMX_EXIT_XCPT_OR_NMI:
     
    95529414        hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    95539415        hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     9416        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    95549417        switch (enmEvent1)
    95559418        {
    95569419            /** @todo consider which extra parameters would be helpful for each probe.   */
    95579420            case DBGFEVENT_END: break;
    9558             case DBGFEVENT_XCPT_DE:                 VBOXVMM_XCPT_DE(pVCpu, pMixedCtx); break;
    9559             case DBGFEVENT_XCPT_DB:                 VBOXVMM_XCPT_DB(pVCpu, pMixedCtx, pMixedCtx->dr[6]); break;
    9560             case DBGFEVENT_XCPT_BP:                 VBOXVMM_XCPT_BP(pVCpu, pMixedCtx); break;
    9561             case DBGFEVENT_XCPT_OF:                 VBOXVMM_XCPT_OF(pVCpu, pMixedCtx); break;
    9562             case DBGFEVENT_XCPT_BR:                 VBOXVMM_XCPT_BR(pVCpu, pMixedCtx); break;
    9563             case DBGFEVENT_XCPT_UD:                 VBOXVMM_XCPT_UD(pVCpu, pMixedCtx); break;
    9564             case DBGFEVENT_XCPT_NM:                 VBOXVMM_XCPT_NM(pVCpu, pMixedCtx); break;
    9565             case DBGFEVENT_XCPT_DF:                 VBOXVMM_XCPT_DF(pVCpu, pMixedCtx); break;
    9566             case DBGFEVENT_XCPT_TS:                 VBOXVMM_XCPT_TS(pVCpu, pMixedCtx, uEventArg); break;
    9567             case DBGFEVENT_XCPT_NP:                 VBOXVMM_XCPT_NP(pVCpu, pMixedCtx, uEventArg); break;
    9568             case DBGFEVENT_XCPT_SS:                 VBOXVMM_XCPT_SS(pVCpu, pMixedCtx, uEventArg); break;
    9569             case DBGFEVENT_XCPT_GP:                 VBOXVMM_XCPT_GP(pVCpu, pMixedCtx, uEventArg); break;
    9570             case DBGFEVENT_XCPT_PF:                 VBOXVMM_XCPT_PF(pVCpu, pMixedCtx, uEventArg, pMixedCtx->cr2); break;
    9571             case DBGFEVENT_XCPT_MF:                 VBOXVMM_XCPT_MF(pVCpu, pMixedCtx); break;
    9572             case DBGFEVENT_XCPT_AC:                 VBOXVMM_XCPT_AC(pVCpu, pMixedCtx); break;
    9573             case DBGFEVENT_XCPT_XF:                 VBOXVMM_XCPT_XF(pVCpu, pMixedCtx); break;
    9574             case DBGFEVENT_XCPT_VE:                 VBOXVMM_XCPT_VE(pVCpu, pMixedCtx); break;
    9575             case DBGFEVENT_XCPT_SX:                 VBOXVMM_XCPT_SX(pVCpu, pMixedCtx, uEventArg); break;
    9576             case DBGFEVENT_INTERRUPT_SOFTWARE:      VBOXVMM_INT_SOFTWARE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
    9577             case DBGFEVENT_INSTR_CPUID:             VBOXVMM_INSTR_CPUID(pVCpu, pMixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;
    9578             case DBGFEVENT_INSTR_GETSEC:            VBOXVMM_INSTR_GETSEC(pVCpu, pMixedCtx); break;
    9579             case DBGFEVENT_INSTR_HALT:              VBOXVMM_INSTR_HALT(pVCpu, pMixedCtx); break;
    9580             case DBGFEVENT_INSTR_INVD:              VBOXVMM_INSTR_INVD(pVCpu, pMixedCtx); break;
    9581             case DBGFEVENT_INSTR_INVLPG:            VBOXVMM_INSTR_INVLPG(pVCpu, pMixedCtx); break;
    9582             case DBGFEVENT_INSTR_RDPMC:             VBOXVMM_INSTR_RDPMC(pVCpu, pMixedCtx); break;
    9583             case DBGFEVENT_INSTR_RDTSC:             VBOXVMM_INSTR_RDTSC(pVCpu, pMixedCtx); break;
    9584             case DBGFEVENT_INSTR_RSM:               VBOXVMM_INSTR_RSM(pVCpu, pMixedCtx); break;
    9585             case DBGFEVENT_INSTR_CRX_READ:          VBOXVMM_INSTR_CRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
    9586             case DBGFEVENT_INSTR_CRX_WRITE:         VBOXVMM_INSTR_CRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
    9587             case DBGFEVENT_INSTR_DRX_READ:          VBOXVMM_INSTR_DRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
    9588             case DBGFEVENT_INSTR_DRX_WRITE:         VBOXVMM_INSTR_DRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
    9589             case DBGFEVENT_INSTR_RDMSR:             VBOXVMM_INSTR_RDMSR(pVCpu, pMixedCtx, pMixedCtx->ecx); break;
    9590             case DBGFEVENT_INSTR_WRMSR:             VBOXVMM_INSTR_WRMSR(pVCpu, pMixedCtx, pMixedCtx->ecx,
    9591                                                                         RT_MAKE_U64(pMixedCtx->eax, pMixedCtx->edx)); break;
    9592             case DBGFEVENT_INSTR_MWAIT:             VBOXVMM_INSTR_MWAIT(pVCpu, pMixedCtx); break;
    9593             case DBGFEVENT_INSTR_MONITOR:           VBOXVMM_INSTR_MONITOR(pVCpu, pMixedCtx); break;
    9594             case DBGFEVENT_INSTR_PAUSE:             VBOXVMM_INSTR_PAUSE(pVCpu, pMixedCtx); break;
    9595             case DBGFEVENT_INSTR_SGDT:              VBOXVMM_INSTR_SGDT(pVCpu, pMixedCtx); break;
    9596             case DBGFEVENT_INSTR_SIDT:              VBOXVMM_INSTR_SIDT(pVCpu, pMixedCtx); break;
    9597             case DBGFEVENT_INSTR_LGDT:              VBOXVMM_INSTR_LGDT(pVCpu, pMixedCtx); break;
    9598             case DBGFEVENT_INSTR_LIDT:              VBOXVMM_INSTR_LIDT(pVCpu, pMixedCtx); break;
    9599             case DBGFEVENT_INSTR_SLDT:              VBOXVMM_INSTR_SLDT(pVCpu, pMixedCtx); break;
    9600             case DBGFEVENT_INSTR_STR:               VBOXVMM_INSTR_STR(pVCpu, pMixedCtx); break;
    9601             case DBGFEVENT_INSTR_LLDT:              VBOXVMM_INSTR_LLDT(pVCpu, pMixedCtx); break;
    9602             case DBGFEVENT_INSTR_LTR:               VBOXVMM_INSTR_LTR(pVCpu, pMixedCtx); break;
    9603             case DBGFEVENT_INSTR_RDTSCP:            VBOXVMM_INSTR_RDTSCP(pVCpu, pMixedCtx); break;
    9604             case DBGFEVENT_INSTR_WBINVD:            VBOXVMM_INSTR_WBINVD(pVCpu, pMixedCtx); break;
    9605             case DBGFEVENT_INSTR_XSETBV:            VBOXVMM_INSTR_XSETBV(pVCpu, pMixedCtx); break;
    9606             case DBGFEVENT_INSTR_RDRAND:            VBOXVMM_INSTR_RDRAND(pVCpu, pMixedCtx); break;
    9607             case DBGFEVENT_INSTR_RDSEED:            VBOXVMM_INSTR_RDSEED(pVCpu, pMixedCtx); break;
    9608             case DBGFEVENT_INSTR_XSAVES:            VBOXVMM_INSTR_XSAVES(pVCpu, pMixedCtx); break;
    9609             case DBGFEVENT_INSTR_XRSTORS:           VBOXVMM_INSTR_XRSTORS(pVCpu, pMixedCtx); break;
    9610             case DBGFEVENT_INSTR_VMM_CALL:          VBOXVMM_INSTR_VMM_CALL(pVCpu, pMixedCtx); break;
    9611             case DBGFEVENT_INSTR_VMX_VMCLEAR:       VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pMixedCtx); break;
    9612             case DBGFEVENT_INSTR_VMX_VMLAUNCH:      VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pMixedCtx); break;
    9613             case DBGFEVENT_INSTR_VMX_VMPTRLD:       VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pMixedCtx); break;
    9614             case DBGFEVENT_INSTR_VMX_VMPTRST:       VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pMixedCtx); break;
    9615             case DBGFEVENT_INSTR_VMX_VMREAD:        VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pMixedCtx); break;
    9616             case DBGFEVENT_INSTR_VMX_VMRESUME:      VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pMixedCtx); break;
    9617             case DBGFEVENT_INSTR_VMX_VMWRITE:       VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pMixedCtx); break;
    9618             case DBGFEVENT_INSTR_VMX_VMXOFF:        VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pMixedCtx); break;
    9619             case DBGFEVENT_INSTR_VMX_VMXON:         VBOXVMM_INSTR_VMX_VMXON(pVCpu, pMixedCtx); break;
    9620             case DBGFEVENT_INSTR_VMX_INVEPT:        VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pMixedCtx); break;
    9621             case DBGFEVENT_INSTR_VMX_INVVPID:       VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pMixedCtx); break;
    9622             case DBGFEVENT_INSTR_VMX_INVPCID:       VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pMixedCtx); break;
    9623             case DBGFEVENT_INSTR_VMX_VMFUNC:        VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pMixedCtx); break;
     9421            case DBGFEVENT_XCPT_DE:                 VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
     9422            case DBGFEVENT_XCPT_DB:                 VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
     9423            case DBGFEVENT_XCPT_BP:                 VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
     9424            case DBGFEVENT_XCPT_OF:                 VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
     9425            case DBGFEVENT_XCPT_BR:                 VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
     9426            case DBGFEVENT_XCPT_UD:                 VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
     9427            case DBGFEVENT_XCPT_NM:                 VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
     9428            case DBGFEVENT_XCPT_DF:                 VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
     9429            case DBGFEVENT_XCPT_TS:                 VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
     9430            case DBGFEVENT_XCPT_NP:                 VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
     9431            case DBGFEVENT_XCPT_SS:                 VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
     9432            case DBGFEVENT_XCPT_GP:                 VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
     9433            case DBGFEVENT_XCPT_PF:                 VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
     9434            case DBGFEVENT_XCPT_MF:                 VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
     9435            case DBGFEVENT_XCPT_AC:                 VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
     9436            case DBGFEVENT_XCPT_XF:                 VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
     9437            case DBGFEVENT_XCPT_VE:                 VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
     9438            case DBGFEVENT_XCPT_SX:                 VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
     9439            case DBGFEVENT_INTERRUPT_SOFTWARE:      VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     9440            case DBGFEVENT_INSTR_CPUID:             VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
     9441            case DBGFEVENT_INSTR_GETSEC:            VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
     9442            case DBGFEVENT_INSTR_HALT:              VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
     9443            case DBGFEVENT_INSTR_INVD:              VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
     9444            case DBGFEVENT_INSTR_INVLPG:            VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
     9445            case DBGFEVENT_INSTR_RDPMC:             VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
     9446            case DBGFEVENT_INSTR_RDTSC:             VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
     9447            case DBGFEVENT_INSTR_RSM:               VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
     9448            case DBGFEVENT_INSTR_CRX_READ:          VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
     9449            case DBGFEVENT_INSTR_CRX_WRITE:         VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     9450            case DBGFEVENT_INSTR_DRX_READ:          VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
     9451            case DBGFEVENT_INSTR_DRX_WRITE:         VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     9452            case DBGFEVENT_INSTR_RDMSR:             VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
     9453            case DBGFEVENT_INSTR_WRMSR:             VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
     9454                                                                        RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
     9455            case DBGFEVENT_INSTR_MWAIT:             VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
     9456            case DBGFEVENT_INSTR_MONITOR:           VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
     9457            case DBGFEVENT_INSTR_PAUSE:             VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
     9458            case DBGFEVENT_INSTR_SGDT:              VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
     9459            case DBGFEVENT_INSTR_SIDT:              VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
     9460            case DBGFEVENT_INSTR_LGDT:              VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
     9461            case DBGFEVENT_INSTR_LIDT:              VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
     9462            case DBGFEVENT_INSTR_SLDT:              VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
     9463            case DBGFEVENT_INSTR_STR:               VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
     9464            case DBGFEVENT_INSTR_LLDT:              VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
     9465            case DBGFEVENT_INSTR_LTR:               VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
     9466            case DBGFEVENT_INSTR_RDTSCP:            VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
     9467            case DBGFEVENT_INSTR_WBINVD:            VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
     9468            case DBGFEVENT_INSTR_XSETBV:            VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
     9469            case DBGFEVENT_INSTR_RDRAND:            VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
     9470            case DBGFEVENT_INSTR_RDSEED:            VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
     9471            case DBGFEVENT_INSTR_XSAVES:            VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
     9472            case DBGFEVENT_INSTR_XRSTORS:           VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
     9473            case DBGFEVENT_INSTR_VMM_CALL:          VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
     9474            case DBGFEVENT_INSTR_VMX_VMCLEAR:       VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
     9475            case DBGFEVENT_INSTR_VMX_VMLAUNCH:      VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
     9476            case DBGFEVENT_INSTR_VMX_VMPTRLD:       VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
     9477            case DBGFEVENT_INSTR_VMX_VMPTRST:       VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
     9478            case DBGFEVENT_INSTR_VMX_VMREAD:        VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
     9479            case DBGFEVENT_INSTR_VMX_VMRESUME:      VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
     9480            case DBGFEVENT_INSTR_VMX_VMWRITE:       VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
     9481            case DBGFEVENT_INSTR_VMX_VMXOFF:        VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
     9482            case DBGFEVENT_INSTR_VMX_VMXON:         VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
     9483            case DBGFEVENT_INSTR_VMX_INVEPT:        VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
     9484            case DBGFEVENT_INSTR_VMX_INVVPID:       VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
     9485            case DBGFEVENT_INSTR_VMX_INVPCID:       VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
     9486            case DBGFEVENT_INSTR_VMX_VMFUNC:        VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
    96249487            default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
    96259488        }
     
    96289491            /** @todo consider which extra parameters would be helpful for each probe. */
    96299492            case DBGFEVENT_END: break;
    9630             case DBGFEVENT_EXIT_TASK_SWITCH:        VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pMixedCtx); break;
    9631             case DBGFEVENT_EXIT_CPUID:              VBOXVMM_EXIT_CPUID(pVCpu, pMixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;
    9632             case DBGFEVENT_EXIT_GETSEC:             VBOXVMM_EXIT_GETSEC(pVCpu, pMixedCtx); break;
    9633             case DBGFEVENT_EXIT_HALT:               VBOXVMM_EXIT_HALT(pVCpu, pMixedCtx); break;
    9634             case DBGFEVENT_EXIT_INVD:               VBOXVMM_EXIT_INVD(pVCpu, pMixedCtx); break;
    9635             case DBGFEVENT_EXIT_INVLPG:             VBOXVMM_EXIT_INVLPG(pVCpu, pMixedCtx); break;
    9636             case DBGFEVENT_EXIT_RDPMC:              VBOXVMM_EXIT_RDPMC(pVCpu, pMixedCtx); break;
    9637             case DBGFEVENT_EXIT_RDTSC:              VBOXVMM_EXIT_RDTSC(pVCpu, pMixedCtx); break;
    9638             case DBGFEVENT_EXIT_RSM:                VBOXVMM_EXIT_RSM(pVCpu, pMixedCtx); break;
    9639             case DBGFEVENT_EXIT_CRX_READ:           VBOXVMM_EXIT_CRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
    9640             case DBGFEVENT_EXIT_CRX_WRITE:          VBOXVMM_EXIT_CRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
    9641             case DBGFEVENT_EXIT_DRX_READ:           VBOXVMM_EXIT_DRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
    9642             case DBGFEVENT_EXIT_DRX_WRITE:          VBOXVMM_EXIT_DRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
    9643             case DBGFEVENT_EXIT_RDMSR:              VBOXVMM_EXIT_RDMSR(pVCpu, pMixedCtx, pMixedCtx->ecx); break;
    9644             case DBGFEVENT_EXIT_WRMSR:              VBOXVMM_EXIT_WRMSR(pVCpu, pMixedCtx, pMixedCtx->ecx,
    9645                                                                        RT_MAKE_U64(pMixedCtx->eax, pMixedCtx->edx)); break;
    9646             case DBGFEVENT_EXIT_MWAIT:              VBOXVMM_EXIT_MWAIT(pVCpu, pMixedCtx); break;
    9647             case DBGFEVENT_EXIT_MONITOR:            VBOXVMM_EXIT_MONITOR(pVCpu, pMixedCtx); break;
    9648             case DBGFEVENT_EXIT_PAUSE:              VBOXVMM_EXIT_PAUSE(pVCpu, pMixedCtx); break;
    9649             case DBGFEVENT_EXIT_SGDT:               VBOXVMM_EXIT_SGDT(pVCpu, pMixedCtx); break;
    9650             case DBGFEVENT_EXIT_SIDT:               VBOXVMM_EXIT_SIDT(pVCpu, pMixedCtx); break;
    9651             case DBGFEVENT_EXIT_LGDT:               VBOXVMM_EXIT_LGDT(pVCpu, pMixedCtx); break;
    9652             case DBGFEVENT_EXIT_LIDT:               VBOXVMM_EXIT_LIDT(pVCpu, pMixedCtx); break;
    9653             case DBGFEVENT_EXIT_SLDT:               VBOXVMM_EXIT_SLDT(pVCpu, pMixedCtx); break;
    9654             case DBGFEVENT_EXIT_STR:                VBOXVMM_EXIT_STR(pVCpu, pMixedCtx); break;
    9655             case DBGFEVENT_EXIT_LLDT:               VBOXVMM_EXIT_LLDT(pVCpu, pMixedCtx); break;
    9656             case DBGFEVENT_EXIT_LTR:                VBOXVMM_EXIT_LTR(pVCpu, pMixedCtx); break;
    9657             case DBGFEVENT_EXIT_RDTSCP:             VBOXVMM_EXIT_RDTSCP(pVCpu, pMixedCtx); break;
    9658             case DBGFEVENT_EXIT_WBINVD:             VBOXVMM_EXIT_WBINVD(pVCpu, pMixedCtx); break;
    9659             case DBGFEVENT_EXIT_XSETBV:             VBOXVMM_EXIT_XSETBV(pVCpu, pMixedCtx); break;
    9660             case DBGFEVENT_EXIT_RDRAND:             VBOXVMM_EXIT_RDRAND(pVCpu, pMixedCtx); break;
    9661             case DBGFEVENT_EXIT_RDSEED:             VBOXVMM_EXIT_RDSEED(pVCpu, pMixedCtx); break;
    9662             case DBGFEVENT_EXIT_XSAVES:             VBOXVMM_EXIT_XSAVES(pVCpu, pMixedCtx); break;
    9663             case DBGFEVENT_EXIT_XRSTORS:            VBOXVMM_EXIT_XRSTORS(pVCpu, pMixedCtx); break;
    9664             case DBGFEVENT_EXIT_VMM_CALL:           VBOXVMM_EXIT_VMM_CALL(pVCpu, pMixedCtx); break;
    9665             case DBGFEVENT_EXIT_VMX_VMCLEAR:        VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pMixedCtx); break;
    9666             case DBGFEVENT_EXIT_VMX_VMLAUNCH:       VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pMixedCtx); break;
    9667             case DBGFEVENT_EXIT_VMX_VMPTRLD:        VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pMixedCtx); break;
    9668             case DBGFEVENT_EXIT_VMX_VMPTRST:        VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pMixedCtx); break;
    9669             case DBGFEVENT_EXIT_VMX_VMREAD:         VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pMixedCtx); break;
    9670             case DBGFEVENT_EXIT_VMX_VMRESUME:       VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pMixedCtx); break;
    9671             case DBGFEVENT_EXIT_VMX_VMWRITE:        VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pMixedCtx); break;
    9672             case DBGFEVENT_EXIT_VMX_VMXOFF:         VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pMixedCtx); break;
    9673             case DBGFEVENT_EXIT_VMX_VMXON:          VBOXVMM_EXIT_VMX_VMXON(pVCpu, pMixedCtx); break;
    9674             case DBGFEVENT_EXIT_VMX_INVEPT:         VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pMixedCtx); break;
    9675             case DBGFEVENT_EXIT_VMX_INVVPID:        VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pMixedCtx); break;
    9676             case DBGFEVENT_EXIT_VMX_INVPCID:        VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pMixedCtx); break;
    9677             case DBGFEVENT_EXIT_VMX_VMFUNC:         VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pMixedCtx); break;
    9678             case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG:  VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pMixedCtx); break;
    9679             case DBGFEVENT_EXIT_VMX_EPT_VIOLATION:  VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pMixedCtx); break;
    9680             case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS:   VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pMixedCtx); break;
    9681             case DBGFEVENT_EXIT_VMX_VAPIC_WRITE:    VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pMixedCtx); break;
     9493            case DBGFEVENT_EXIT_TASK_SWITCH:        VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
     9494            case DBGFEVENT_EXIT_CPUID:              VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
     9495            case DBGFEVENT_EXIT_GETSEC:             VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
     9496            case DBGFEVENT_EXIT_HALT:               VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
     9497            case DBGFEVENT_EXIT_INVD:               VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
     9498            case DBGFEVENT_EXIT_INVLPG:             VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
     9499            case DBGFEVENT_EXIT_RDPMC:              VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
     9500            case DBGFEVENT_EXIT_RDTSC:              VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
     9501            case DBGFEVENT_EXIT_RSM:                VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
     9502            case DBGFEVENT_EXIT_CRX_READ:           VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
     9503            case DBGFEVENT_EXIT_CRX_WRITE:          VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     9504            case DBGFEVENT_EXIT_DRX_READ:           VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
     9505            case DBGFEVENT_EXIT_DRX_WRITE:          VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
     9506            case DBGFEVENT_EXIT_RDMSR:              VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
     9507            case DBGFEVENT_EXIT_WRMSR:              VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
     9508                                                                       RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
     9509            case DBGFEVENT_EXIT_MWAIT:              VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
     9510            case DBGFEVENT_EXIT_MONITOR:            VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
     9511            case DBGFEVENT_EXIT_PAUSE:              VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
     9512            case DBGFEVENT_EXIT_SGDT:               VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
     9513            case DBGFEVENT_EXIT_SIDT:               VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
     9514            case DBGFEVENT_EXIT_LGDT:               VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
     9515            case DBGFEVENT_EXIT_LIDT:               VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
     9516            case DBGFEVENT_EXIT_SLDT:               VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
     9517            case DBGFEVENT_EXIT_STR:                VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
     9518            case DBGFEVENT_EXIT_LLDT:               VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
     9519            case DBGFEVENT_EXIT_LTR:                VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
     9520            case DBGFEVENT_EXIT_RDTSCP:             VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
     9521            case DBGFEVENT_EXIT_WBINVD:             VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
     9522            case DBGFEVENT_EXIT_XSETBV:             VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
     9523            case DBGFEVENT_EXIT_RDRAND:             VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
     9524            case DBGFEVENT_EXIT_RDSEED:             VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
     9525            case DBGFEVENT_EXIT_XSAVES:             VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
     9526            case DBGFEVENT_EXIT_XRSTORS:            VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
     9527            case DBGFEVENT_EXIT_VMM_CALL:           VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
     9528            case DBGFEVENT_EXIT_VMX_VMCLEAR:        VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
     9529            case DBGFEVENT_EXIT_VMX_VMLAUNCH:       VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
     9530            case DBGFEVENT_EXIT_VMX_VMPTRLD:        VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
     9531            case DBGFEVENT_EXIT_VMX_VMPTRST:        VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
     9532            case DBGFEVENT_EXIT_VMX_VMREAD:         VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
     9533            case DBGFEVENT_EXIT_VMX_VMRESUME:       VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
     9534            case DBGFEVENT_EXIT_VMX_VMWRITE:        VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
     9535            case DBGFEVENT_EXIT_VMX_VMXOFF:         VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
     9536            case DBGFEVENT_EXIT_VMX_VMXON:          VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
     9537            case DBGFEVENT_EXIT_VMX_INVEPT:         VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
     9538            case DBGFEVENT_EXIT_VMX_INVVPID:        VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
     9539            case DBGFEVENT_EXIT_VMX_INVPCID:        VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
     9540            case DBGFEVENT_EXIT_VMX_VMFUNC:         VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
     9541            case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG:  VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
     9542            case DBGFEVENT_EXIT_VMX_EPT_VIOLATION:  VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
     9543            case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS:   VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
     9544            case DBGFEVENT_EXIT_VMX_VAPIC_WRITE:    VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
    96829545            default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
    96839546        }
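
These two switches fan the selected DBGF events out to the static DTrace-style probes; after this change the guest-context argument is a local pCtx derived from pVCpu->cpum.GstCtx rather than the old pMixedCtx parameter. A minimal sketch of that dispatch shape, using stand-in types and a printf in place of a real probe macro (none of these definitions are the real VirtualBox ones):

    /* Minimal sketch only: CPUMCTX/VMCPU and the probe macro below are
     * illustrative stand-ins, not the real VirtualBox definitions. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct CPUMCTX { uint32_t eax, ecx, edx; } CPUMCTX;
    typedef struct VMCPU   { struct { CPUMCTX GstCtx; } cpum; } VMCPU;

    /* Stand-in for a VBOXVMM_EXIT_* static probe. */
    #define PROBE_EXIT_RDMSR(a_pVCpu, a_pCtx, a_idMsr) \
        printf("rdmsr probe: ecx=%#x\n", (unsigned)(a_idMsr))

    enum { EVENT_EXIT_RDMSR = 1 };

    static void dispatchExitEvent(VMCPU *pVCpu, int enmEvent)
    {
        CPUMCTX *pCtx = &pVCpu->cpum.GstCtx; /* local, replaces the pMixedCtx parameter */
        switch (enmEvent)
        {
            case EVENT_EXIT_RDMSR: PROBE_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
            default: break;
        }
    }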
     
    97219584 * @returns Strict VBox status code (i.e. informational status codes too).
    97229585 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    9723  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    9724  *                          out-of-sync. Make sure to update the required
    9725  *                          fields before using them.
    97269586 * @param   pVmxTransient   Pointer to the VMX-transient structure.
    9727  * @param   uExitReason     The VM-exit reason.
    97289587 * @param   pDbgState       The debug state.
    97299588 */
    9730 DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
    9731                                                    uint32_t uExitReason, PVMXRUNDBGSTATE pDbgState)
     9589DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
    97329590{
    97339591    /*
    97349592     * Expensive (saves context) generic dtrace VM-exit probe.
    97359593     */
     9594    uint32_t const uExitReason = pVmxTransient->uExitReason;
    97369595    if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
    97379596    { /* more likely */ }
     
    97419600        int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    97429601        AssertRC(rc);
    9743         VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pMixedCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
     9602        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
    97449603    }
    97459604
     
    97559614        uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
    97569615        if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
    9757             return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
     9616            return hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient);
    97589617    }
    97599618
     
    97669625        {
    97679626            case VMX_EXIT_MTF:
    9768                 return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
     9627                return hmR0VmxExitMtf(pVCpu, pVmxTransient);
    97699628
    97709629            /* Various events: */
     
    98249683                int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    98259684                AssertRCReturn(rc, rc);
    9826                 if (   pMixedCtx->rip    != pDbgState->uRipStart
    9827                     || pMixedCtx->cs.Sel != pDbgState->uCsStart)
     9685                if (   pVCpu->cpum.GstCtx.rip    != pDbgState->uRipStart
     9686                    || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
    98289687                    return VINF_EM_DBG_STEPPED;
    98299688                break;
     
    98539712        && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
    98549713    {
    9855         VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
     9714        VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
    98569715        if (rcStrict != VINF_SUCCESS)
    98579716            return rcStrict;
     
    98629721     */
    98639722#ifdef HMVMX_USE_FUNCTION_TABLE
    9864     return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
     9723    return g_apfnVMExitHandlers[uExitReason](pVCpu, pVmxTransient);
    98659724#else
    9866     return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
     9725    return hmR0VmxHandleExit(pVCpu, pVmxTransient, uExitReason);
    98679726#endif
    98689727}
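
The debug-loop exit dispatcher drops both pMixedCtx and the separate uExitReason parameter: the exit reason already lives in the VMX-transient structure, so it is read there once at the top. A tiny sketch of that parameter folding, with VMXTRANSIENT reduced to a stand-in holding only what the sketch needs:

    #include <stdint.h>

    typedef struct VMXTRANSIENT { uint32_t uExitReason; } VMXTRANSIENT;

    static uint32_t handleDebugExit(VMXTRANSIENT *pVmxTransient)
    {
        uint32_t const uExitReason = pVmxTransient->uExitReason; /* was a parameter */
        return uExitReason;
    }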
     
    98749733 * @returns Strict VBox status code (i.e. informational status codes too).
    98759734 * @param   pVCpu       The cross context virtual CPU structure.
    9876  * @param   pCtx        Pointer to the guest-CPU context.
    98779735 *
    98789736 * @note    Mostly the same as hmR0VmxRunGuestCodeNormal().
    98799737 */
    9880 static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu, PCPUMCTX pCtx)
     9738static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu)
    98819739{
    98829740    VMXTRANSIENT VmxTransient;
     
    98919749    /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps.  */
    98929750    VMXRUNDBGSTATE DbgState;
    9893     hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState);
     9751    hmR0VmxRunDebugStateInit(pVCpu, &DbgState);
    98949752    hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
    98959753
     
    99109768        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     99119769        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Set up execute controls the next VM-exit can respond to. */
    9912         rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, fStepping);
     9770        rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, fStepping);
    99139771        if (rcStrict != VINF_SUCCESS)
    99149772            break;
    99159773
    9916         hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient);
     9774        hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
    99179775        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */
    99189776
     
    99209778         * Now we can run the guest code.
    99219779         */
    9922         int rcRun = hmR0VmxRunGuest(pVCpu, pCtx);
    9923 
    9924         /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
     9780        int rcRun = hmR0VmxRunGuest(pVCpu);
    99259781
    99269782        /*
     
    99369792        {
    99379793            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
    9938             hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient);
     9794            hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    99399795            return rcRun;
    99409796        }
     
    99479803        HMVMX_START_EXIT_DISPATCH_PROF();
    99489804
    9949         VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
     9805        VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
    99509806
    99519807        /*
    99529808         * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitDebug().
    99539809         */
    9954         rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState);
     9810        rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
    99559811        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
    99569812        if (rcStrict != VINF_SUCCESS)
     
    99719827            int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    99729828            AssertRC(rc);
    9973             if (   pCtx->rip    != DbgState.uRipStart
    9974                 || pCtx->cs.Sel != DbgState.uCsStart)
     9829            if (   pVCpu->cpum.GstCtx.rip    != DbgState.uRipStart
     9830                || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
    99759831            {
    99769832                rcStrict = VINF_EM_DBG_STEPPED;
     
    99959851        AssertRC(rc);
    99969852        pVCpu->hm.s.fClearTrapFlag = false;
    9997         pCtx->eflags.Bits.u1TF = 0;
     9853        pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
    99989854    }
    99999855    /** @todo there seems to be issues with the resume flag when the monitor trap
     
    1017710033        && !DBGFIsStepping(pVCpu)
    1017810034        && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
    10179         rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, pCtx);
     10035        rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu);
    1018010036    else
    10181         rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, pCtx);
     10037        rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu);
    1018210038
    1018310039    if (rcStrict == VERR_EM_INTERPRETER)
     
    1018610042        rcStrict = VINF_EM_TRIPLE_FAULT;
    1018710043
    10188     int rc2 = hmR0VmxExitToRing3(pVCpu, pCtx, rcStrict);
     10044    int rc2 = hmR0VmxExitToRing3(pVCpu, rcStrict);
    1018910045    if (RT_FAILURE(rc2))
    1019010046    {
     
    1019910055
    1020010056#ifndef HMVMX_USE_FUNCTION_TABLE
    10201 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
     10057DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
    1020210058{
    1020310059#ifdef DEBUG_ramshankar
     
    1021610072    switch (rcReason)
    1021710073    {
    10218         case VMX_EXIT_EPT_MISCONFIG:           VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));
    10219         case VMX_EXIT_EPT_VIOLATION:           VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));
    10220         case VMX_EXIT_IO_INSTR:                VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));
    10221         case VMX_EXIT_CPUID:                   VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));
    10222         case VMX_EXIT_RDTSC:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));
    10223         case VMX_EXIT_RDTSCP:                  VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));
    10224         case VMX_EXIT_APIC_ACCESS:             VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));
    10225         case VMX_EXIT_XCPT_OR_NMI:             VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));
    10226         case VMX_EXIT_MOV_CRX:                 VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));
    10227         case VMX_EXIT_EXT_INT:                 VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));
    10228         case VMX_EXIT_INT_WINDOW:              VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));
    10229         case VMX_EXIT_TPR_BELOW_THRESHOLD:     VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));
    10230         case VMX_EXIT_MWAIT:                   VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient));
    10231         case VMX_EXIT_MONITOR:                 VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));
    10232         case VMX_EXIT_TASK_SWITCH:             VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));
    10233         case VMX_EXIT_PREEMPT_TIMER:           VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));
    10234         case VMX_EXIT_RDMSR:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));
    10235         case VMX_EXIT_WRMSR:                   VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));
    10236         case VMX_EXIT_VMCALL:                  VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));
    10237         case VMX_EXIT_MOV_DRX:                 VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));
    10238         case VMX_EXIT_HLT:                     VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));
    10239         case VMX_EXIT_INVD:                    VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));
    10240         case VMX_EXIT_INVLPG:                  VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));
    10241         case VMX_EXIT_RSM:                     VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));
    10242         case VMX_EXIT_MTF:                     VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));
    10243         case VMX_EXIT_PAUSE:                   VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));
    10244         case VMX_EXIT_XDTR_ACCESS:             VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
    10245         case VMX_EXIT_TR_ACCESS:               VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
    10246         case VMX_EXIT_WBINVD:                  VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));
    10247         case VMX_EXIT_XSETBV:                  VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));
    10248         case VMX_EXIT_RDRAND:                  VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));
    10249         case VMX_EXIT_INVPCID:                 VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));
    10250         case VMX_EXIT_GETSEC:                  VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));
    10251         case VMX_EXIT_RDPMC:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));
    10252 
    10253         case VMX_EXIT_TRIPLE_FAULT:            return hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient);
    10254         case VMX_EXIT_NMI_WINDOW:              return hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient);
    10255         case VMX_EXIT_INIT_SIGNAL:             return hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient);
    10256         case VMX_EXIT_SIPI:                    return hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient);
    10257         case VMX_EXIT_IO_SMI:                  return hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient);
    10258         case VMX_EXIT_SMI:                     return hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient);
    10259         case VMX_EXIT_ERR_MSR_LOAD:            return hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient);
    10260         case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient);
    10261         case VMX_EXIT_ERR_MACHINE_CHECK:       return hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient);
     10074        case VMX_EXIT_EPT_MISCONFIG:           VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient));
     10075        case VMX_EXIT_EPT_VIOLATION:           VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pVmxTransient));
     10076        case VMX_EXIT_IO_INSTR:                VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pVmxTransient));
     10077        case VMX_EXIT_CPUID:                   VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pVmxTransient));
     10078        case VMX_EXIT_RDTSC:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pVmxTransient));
     10079        case VMX_EXIT_RDTSCP:                  VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pVmxTransient));
     10080        case VMX_EXIT_APIC_ACCESS:             VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pVmxTransient));
     10081        case VMX_EXIT_XCPT_OR_NMI:             VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient));
     10082        case VMX_EXIT_MOV_CRX:                 VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pVmxTransient));
     10083        case VMX_EXIT_EXT_INT:                 VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pVmxTransient));
     10084        case VMX_EXIT_INT_WINDOW:              VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pVmxTransient));
     10085        case VMX_EXIT_TPR_BELOW_THRESHOLD:     VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient));
     10086        case VMX_EXIT_MWAIT:                   VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pVmxTransient));
     10087        case VMX_EXIT_MONITOR:                 VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pVmxTransient));
     10088        case VMX_EXIT_TASK_SWITCH:             VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pVmxTransient));
     10089        case VMX_EXIT_PREEMPT_TIMER:           VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pVmxTransient));
     10090        case VMX_EXIT_RDMSR:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pVmxTransient));
     10091        case VMX_EXIT_WRMSR:                   VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pVmxTransient));
     10092        case VMX_EXIT_VMCALL:                  VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pVmxTransient));
     10093        case VMX_EXIT_MOV_DRX:                 VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pVmxTransient));
     10094        case VMX_EXIT_HLT:                     VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pVmxTransient));
     10095        case VMX_EXIT_INVD:                    VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pVmxTransient));
     10096        case VMX_EXIT_INVLPG:                  VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pVmxTransient));
     10097        case VMX_EXIT_RSM:                     VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pVmxTransient));
     10098        case VMX_EXIT_MTF:                     VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pVmxTransient));
     10099        case VMX_EXIT_PAUSE:                   VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pVmxTransient));
     10100        case VMX_EXIT_XDTR_ACCESS:             VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient));
     10101        case VMX_EXIT_TR_ACCESS:               VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient));
     10102        case VMX_EXIT_WBINVD:                  VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pVmxTransient));
     10103        case VMX_EXIT_XSETBV:                  VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pVmxTransient));
     10104        case VMX_EXIT_RDRAND:                  VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pVmxTransient));
     10105        case VMX_EXIT_INVPCID:                 VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pVmxTransient));
     10106        case VMX_EXIT_GETSEC:                  VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pVmxTransient));
     10107        case VMX_EXIT_RDPMC:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pVmxTransient));
     10108
     10109        case VMX_EXIT_TRIPLE_FAULT:            return hmR0VmxExitTripleFault(pVCpu, pVmxTransient);
     10110        case VMX_EXIT_NMI_WINDOW:              return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient);
     10111        case VMX_EXIT_INIT_SIGNAL:             return hmR0VmxExitInitSignal(pVCpu, pVmxTransient);
     10112        case VMX_EXIT_SIPI:                    return hmR0VmxExitSipi(pVCpu, pVmxTransient);
     10113        case VMX_EXIT_IO_SMI:                  return hmR0VmxExitIoSmi(pVCpu, pVmxTransient);
     10114        case VMX_EXIT_SMI:                     return hmR0VmxExitSmi(pVCpu, pVmxTransient);
     10115        case VMX_EXIT_ERR_MSR_LOAD:            return hmR0VmxExitErrMsrLoad(pVCpu, pVmxTransient);
     10116        case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient);
     10117        case VMX_EXIT_ERR_MACHINE_CHECK:       return hmR0VmxExitErrMachineCheck(pVCpu, pVmxTransient);
    1026210118
    1026310119        case VMX_EXIT_VMCLEAR:
     
    1027510131        case VMX_EXIT_XSAVES:
    1027610132        case VMX_EXIT_XRSTORS:
    10277             return hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
     10133            return hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient);
    1027810134
    1027910135        case VMX_EXIT_ENCLS:
     
    1028110137        case VMX_EXIT_PML_FULL:
    1028210138        default:
    10283             return hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
     10139            return hmR0VmxExitErrUndefined(pVCpu, pVmxTransient);
    1028410140    }
    1028510141#undef VMEXIT_CALL_RET
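
When HMVMX_USE_FUNCTION_TABLE is defined, the same dispatch happens through g_apfnVMExitHandlers indexed by exit reason, and every FNVMXEXITHANDLER now takes only (pVCpu, pVmxTransient). A compilable sketch of that table-dispatch shape; the types, handlers, and table below are illustrative stand-ins, and a real dispatcher must first range-check uExitReason:

    #include <stdint.h>

    typedef struct VMCPU VMCPU;                           /* opaque for this sketch */
    typedef struct VMXTRANSIENT { uint32_t uExitReason; } VMXTRANSIENT;
    typedef int FNEXITHANDLER(VMCPU *pVCpu, VMXTRANSIENT *pVmxTransient);

    static int exitCpuid(VMCPU *pVCpu, VMXTRANSIENT *pVmxTransient) { (void)pVCpu; (void)pVmxTransient; return 0; }
    static int exitHlt  (VMCPU *pVCpu, VMXTRANSIENT *pVmxTransient) { (void)pVCpu; (void)pVmxTransient; return 0; }

    static FNEXITHANDLER * const g_apfnHandlers[] = { exitCpuid, exitHlt };

    static int dispatchExit(VMCPU *pVCpu, VMXTRANSIENT *pVmxTransient)
    {
        /* Assumes uExitReason was validated against the table size. */
        return g_apfnHandlers[pVmxTransient->uExitReason](pVCpu, pVmxTransient);
    }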
     
    1030210158    do { \
    1030310159        AssertPtr(pVCpu); \
    10304         AssertPtr(pMixedCtx); \
    1030510160        AssertPtr(pVmxTransient); \
    1030610161        Assert(pVmxTransient->fVMEntryFailed == false); \
     
    1032310178    do { \
    1032410179        HMVMX_STOP_EXIT_DISPATCH_PROF(); \
    10325         NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
     10180        RT_NOREF2(pVCpu, pVmxTransient); \
    1032610181    } while (0)
    1032710182# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
     
    1033310188 *
    1033410189 * @param   pVCpu           The cross context virtual CPU structure.
    10335  * @param   pMixedCtx       Pointer to the guest-CPU context. The data maybe
    10336  *                          out-of-sync. Make sure to update the required fields
    10337  *                          before using them.
    1033810190 * @param   cbInstr         Number of bytes to advance the RIP by.
    1033910191 *
    1034010192 * @remarks No-long-jump zone!!!
    1034110193 */
    10342 DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
     10194DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPU pVCpu, uint32_t cbInstr)
    1034310195{
    1034410196    /* Advance the RIP. */
    10345     pMixedCtx->rip += cbInstr;
     10197    pVCpu->cpum.GstCtx.rip += cbInstr;
    1034610198    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
    1034710199
    1034810200    /* Update interrupt inhibition. */
    1034910201    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    10350         && pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
     10202        && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
    1035110203        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    1035210204}
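
hmR0VmxAdvanceGuestRipBy bumps RIP in the cached guest context, marks RIP dirty, and clears the interrupt-inhibition force-flag once RIP no longer matches the instruction that raised it (the STI/MOV SS shadow). A sketch of that logic; all types are stand-ins and the plain flag update stands in for the atomic HM_CHANGED_GUEST_RIP update in the real code:

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct GSTCTX { uint64_t rip; } GSTCTX;
    typedef struct VCPU
    {
        GSTCTX   GstCtx;
        bool     fInhibitInts;   /* VMCPU_FF_INHIBIT_INTERRUPTS stand-in */
        uint64_t uInhibitRip;    /* PC recorded when the shadow was set */
        uint64_t fCtxChanged;
    } VCPU;

    #define CHANGED_GUEST_RIP 0x1

    static void advanceGuestRipBy(VCPU *pVCpu, uint32_t cbInstr)
    {
        pVCpu->GstCtx.rip  += cbInstr;
        pVCpu->fCtxChanged |= CHANGED_GUEST_RIP;

        /* The STI/MOV-SS shadow only covers the next instruction; once RIP has
         * moved past the recorded PC, the inhibition is stale and is cleared. */
        if (pVCpu->fInhibitInts && pVCpu->GstCtx.rip != pVCpu->uInhibitRip)
            pVCpu->fInhibitInts = false;
    }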
     
    1035810210 * @returns VBox status code, no informational status codes.
    1035910211 * @param   pVCpu           The cross context virtual CPU structure.
    10360  * @param   pMixedCtx       Pointer to the guest-CPU context. The data maybe
    10361  *                          out-of-sync. Make sure to update the required fields
    10362  *                          before using them.
    1036310212 * @param   pVmxTransient   Pointer to the VMX transient structure.
    1036410213 *
    1036510214 * @remarks No-long-jump zone!!!
    1036610215 */
    10367 static int hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     10216static int hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1036810217{
    1036910218    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     
    1037110220    AssertRCReturn(rc, rc);
    1037210221
    10373     hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, pVmxTransient->cbInstr);
     10222    hmR0VmxAdvanceGuestRipBy(pVCpu, pVmxTransient->cbInstr);
    1037410223
    1037510224    /*
     
    1038010229     */
    1038110230    if (  !pVCpu->hm.s.fSingleInstruction
    10382         && pMixedCtx->eflags.Bits.u1TF)
    10383     {
    10384         rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     10231        && pVCpu->cpum.GstCtx.eflags.Bits.u1TF)
     10232    {
     10233        rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
    1038510234        AssertRCReturn(rc, rc);
    1038610235    }
     
    1039910248 *
    1040010249 * @param   pVCpu   The cross context virtual CPU structure.
    10401  * @param   pCtx    Pointer to the guest-CPU state.
    1040210250 *
    1040310251 * @remarks This function assumes our cache of the VMCS controls
    1040410252 *          are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
    1040510253 */
    10406 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx)
     10254static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu)
    1040710255{
    1040810256#define HMVMX_ERROR_BREAK(err)              { uError = (err); break; }
     
    1041410262    int        rc;
    1041510263    PVM        pVM = pVCpu->CTX_SUFF(pVM);
     10264    PCPUMCTX   pCtx = &pVCpu->cpum.GstCtx;
    1041610265    uint32_t   uError = VMX_IGS_ERROR;
    1041710266    uint32_t   u32Val;
     
    1100110850 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
    1100210851 */
    11003 HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     10852HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1100410853{
    1100510854    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1101510864 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
    1101610865 */
    11017 HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     10866HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1101810867{
    1101910868    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1104510894
    1104610895    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
    11047     VBOXSTRICTRC rcStrictRc1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
     10896    VBOXSTRICTRC rcStrictRc1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    1104810897    if (RT_UNLIKELY(rcStrictRc1 == VINF_SUCCESS))
    1104910898    { /* likely */ }
     
    1108410933            switch (uVector)
    1108510934            {
    11086                 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient);      break;
    11087                 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient);      break;
    11088                 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient);      break;
    11089                 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient);      break;
    11090                 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient);      break;
    11091                 case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pMixedCtx, pVmxTransient);      break;
     10935                case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pVmxTransient);      break;
     10936                case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pVmxTransient);      break;
     10937                case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pVmxTransient);      break;
     10938                case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pVmxTransient);      break;
     10939                case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pVmxTransient);      break;
     10940                case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pVmxTransient);      break;
    1109210941
    1109310942                case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
    11094                                   rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
     10943                                  rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
    1109510944                case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
    11096                                   rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
     10945                                  rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
    1109710946                case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
    11098                                   rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
     10947                                  rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
    1109910948                case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
    11100                                   rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
     10949                                  rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
    1110110950                case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
    11102                                   rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
     10951                                  rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
    1110310952                case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
    11104                                   rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
     10953                                  rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
    1110510954                case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
    11106                                   rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
     10955                                  rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
    1110710956                default:
    1110810957                {
     
    1111210961                        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
    1111310962                        Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
    11114                         Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
     10963                        Assert(CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx));
    1111510964
    1111610965                        rc  = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
     
    1115010999 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
    1115111000 */
    11152 HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11001HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1115311002{
    1115411003    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1116611015 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
    1116711016 */
    11168 HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11017HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1116911018{
    1117011019    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1120311052 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
    1120411053 */
    11205 HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11054HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1120611055{
    1120711056    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    11208     return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     11057    return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    1120911058}
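
WBINVD and INVD need no emulation here, so their handlers just skip the instruction via hmR0VmxAdvanceGuestRip, which additionally honours a set EFLAGS.TF by queueing a debug exception. A sketch of that advance-and-single-step pattern, with stand-in types:

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct CTX2  { uint64_t rip; bool fTrapFlag; } CTX2;
    typedef struct VCPU2 { CTX2 GstCtx; bool fSingleInstruction; bool fPendingDbgXcpt; } VCPU2;

    static int advanceGuestRip(VCPU2 *pVCpu, uint32_t cbInstr)
    {
        pVCpu->GstCtx.rip += cbInstr;

        /* Don't queue #DB when the VMM set TF itself for its own stepping;
         * otherwise the guest expects a single-step trap after this insn. */
        if (!pVCpu->fSingleInstruction && pVCpu->GstCtx.fTrapFlag)
            pVCpu->fPendingDbgXcpt = true;
        return 0; /* VINF_SUCCESS stand-in */
    }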
    1121011059
     
    1121311062 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
    1121411063 */
    11215 HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11064HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1121611065{
    1121711066    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    11218     return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     11067    return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    1121911068}
    1122011069
     
    1122311072 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
    1122411073 */
    11225 HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11074HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1122611075{
    1122711076    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    11228     Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
    1122911077
    1123011078    /*
     
    1125911107         * Frequent exit or something needing probing.  Get state and call EMHistoryExec.
    1126011108         */
    11261         Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
    1126211109        int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1126311110        AssertRCReturn(rc2, rc2);
     
    1128011127 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
    1128111128 */
    11282 HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11129HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1128311130{
    1128411131    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1128611133    AssertRCReturn(rc, rc);
    1128711134
    11288     if (pMixedCtx->cr4 & X86_CR4_SMXE)
     11135    if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
    1128911136        return VINF_EM_RAW_EMULATE_INSTR;
    1129011137
     
    1129711144 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
    1129811145 */
    11299 HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11146HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1130011147{
    1130111148    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1132611173 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
    1132711174 */
    11328 HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11175HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1132911176{
    1133011177    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1135511202 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
    1135611203 */
    11357 HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11204HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1135811205{
    1135911206    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1136111208    AssertRCReturn(rc, rc);
    1136211209
    11363     PVM pVM = pVCpu->CTX_SUFF(pVM);
    11364     rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
     11210    PVM      pVM  = pVCpu->CTX_SUFF(pVM);
     11211    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     11212    rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    1136511213    if (RT_LIKELY(rc == VINF_SUCCESS))
    1136611214    {
    11367         rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     11215        rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    1136811216        Assert(pVmxTransient->cbInstr == 2);
    1136911217    }
     
    1138011228 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
    1138111229 */
    11382 HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11230HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1138311231{
    1138411232    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1139211240
    1139311241        /* Perform the hypercall. */
    11394         rcStrict = GIMHypercall(pVCpu, pMixedCtx);
     11242        rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
    1139511243        if (rcStrict == VINF_SUCCESS)
    1139611244        {
    11397             rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     11245            rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    1139811246            AssertRCReturn(rc, rc);
    1139911247        }
     
    1141211260    if (RT_FAILURE(rcStrict))
    1141311261    {
    11414         hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
     11262        hmR0VmxSetPendingXcptUD(pVCpu);
    1141511263        rcStrict = VINF_SUCCESS;
    1141611264    }
     
    1142311271 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
    1142411272 */
    11425 HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11273HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1142611274{
    1142711275    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1145211300 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
    1145311301 */
    11454 HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11302HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1145511303{
    1145611304    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1145811306    AssertRCReturn(rc, rc);
    1145911307
    11460     PVM pVM = pVCpu->CTX_SUFF(pVM);
    11461     rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
     11308    PVM      pVM  = pVCpu->CTX_SUFF(pVM);
     11309    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     11310    rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    1146211311    if (RT_LIKELY(rc == VINF_SUCCESS))
    11463         rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     11312        rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    1146411313    else
    1146511314    {
     
    1147511324 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
    1147611325 */
    11477 HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11326HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1147811327{
    1147911328    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1148111330    AssertRCReturn(rc, rc);
    1148211331
    11483     PVM pVM = pVCpu->CTX_SUFF(pVM);
    11484     VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
     11332    PVM      pVM  = pVCpu->CTX_SUFF(pVM);
     11333    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     11334    VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    1148511335    rc = VBOXSTRICTRC_VAL(rc2);
    1148611336    if (RT_LIKELY(   rc == VINF_SUCCESS
    1148711337                  || rc == VINF_EM_HALT))
    1148811338    {
    11489         int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     11339        int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    1149011340        AssertRCReturn(rc3, rc3);
    1149111341
    1149211342        if (   rc == VINF_EM_HALT
    11493             && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
     11343            && EMMonitorWaitShouldContinue(pVCpu, pCtx))
    1149411344            rc = VINF_SUCCESS;
    1149511345    }
     
    1150911359 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
    1151011360 */
    11511 HMVMX_EXIT_NSRC_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11361HMVMX_EXIT_NSRC_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1151211362{
    1151311363    /*
     
    1152011370     */
    1152111371    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    11522     AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     11372    AssertMsgFailed(("Unexpected RSM VM-exit\n"));
    1152311373    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1152411374}
     
    1152811378 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
    1152911379 */
    11530 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11380HMVMX_EXIT_NSRC_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1153111381{
    1153211382    /*
     
    1154011390     */
    1154111391    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    11542     AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     11392    AssertMsgFailed(("Unexpected SMI VM-exit\n"));
    1154311393    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1154411394}
     
    1154811398 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
    1154911399 */
    11550 HMVMX_EXIT_NSRC_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11400HMVMX_EXIT_NSRC_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1155111401{
    1155211402    /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
    1155311403    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    11554     AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     11404    AssertMsgFailed(("Unexpected IO SMI VM-exit\n"));
    1155511405    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1155611406}
     
    1156011410 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
    1156111411 */
    11562 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11412HMVMX_EXIT_NSRC_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1156311413{
    1156411414    /*
     
    1156811418     */
    1156911419    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    11570     AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     11420    AssertMsgFailed(("Unexpected SIPI VM-exit\n"));
    1157111421    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1157211422}
     
    1157711427 * VM-exit.
    1157811428 */
    11579 HMVMX_EXIT_NSRC_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11429HMVMX_EXIT_NSRC_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1158011430{
    1158111431    /*
     
    1159511445 * VM-exit.
    1159611446 */
    11597 HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11447HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1159811448{
    1159911449    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1160511455 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
    1160611456 */
    11607 HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11457HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1160811458{
    1160911459    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    1161011460    Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
    1161111461
    11612     int rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     11462    int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
     11463    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    1161311464    AssertRCReturn(rc, rc);
    1161411465
    11615     if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx))    /* Requires eflags. */
     11466    if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx))    /* Requires eflags. */
    1161611467        rc = VINF_SUCCESS;
    1161711468    else
     
    1162911480 * the guest.
    1163011481 */
    11631 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11482HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1163211483{
    1163311484    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    11634     hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
     11485    hmR0VmxSetPendingXcptUD(pVCpu);
    1163511486    return VINF_SUCCESS;
    1163611487}
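
hmR0VmxSetPendingXcptUD likewise takes just pVCpu now: the pending event is queued on the VCPU and injected on the next VM-entry, which is also the failure path of the VMCALL handler above. A simplified sketch of queueing #UD; the event encoding and types are stand-ins:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct VCPU3
    {
        bool     fEventPending;
        uint32_t uPendingVector;
    } VCPU3;

    enum { X86_XCPT_UD_VECTOR = 6 };

    static void setPendingXcptUD(VCPU3 *pVCpu)
    {
        pVCpu->fEventPending  = true;            /* injected on next VM-entry */
        pVCpu->uPendingVector = X86_XCPT_UD_VECTOR;
    }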
     
    1164011491 * VM-exit handler for expiry of the VMX preemption timer.
    1164111492 */
    11642 HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11493HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1164311494{
    1164411495    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1165811509 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
    1165911510 */
    11660 HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11511HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1166111512{
    1166211513    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1167011521                                                                                : HM_CHANGED_XCPT_RAISED_MASK);
    1167111522
    11672     pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
     11523    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     11524    pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
    1167311525
    1167411526    return rcStrict;
     
    1167911531 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
    1168011532 */
    11681 HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11533HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1168211534{
    1168311535    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1169111543 * Error VM-exit.
    1169211544 */
    11693 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11545HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1169411546{
    1169511547    int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     
    1169911551        return rc;
    1170011552
    11701     uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
     11553    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu);
    1170211554    NOREF(uInvalidReason);
    1170311555
     
    1173311585    Log4(("VMX_VMCS64_CTRL_EPTP_FULL                  %#RX64\n", u64Val));
    1173411586
    11735     hmR0DumpRegs(pVCpu, pMixedCtx);
     11587    hmR0DumpRegs(pVCpu);
    1173611588#else
    1173711589    NOREF(pVmxTransient);
     
    1174611598 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
    1174711599 */
    11748 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    11749 {
    11750     NOREF(pVmxTransient);
    11751     AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
     11600HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     11601{
     11602    AssertMsgFailed(("Unexpected MSR-load exit\n"));
    1175211603    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1175311604}
     
    1175811609 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
    1175911610 */
    11760 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    11761 {
    11762     NOREF(pVmxTransient);
    11763     AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
     11611HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     11612{
     11613    AssertMsgFailed(("Unexpected machine-check event exit\n"));
    1176411614    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1176511615}
     
    1177011620 * theory.
    1177111621 */
    11772 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    11773 {
    11774     AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
    11775     NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
     11622HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     11623{
     11624    RT_NOREF2(pVCpu, pVmxTransient);
     11625    AssertMsgFailed(("Huh!? Undefined VM-exit reason %d\n", pVmxTransient->uExitReason));
    1177611626    return VERR_VMX_UNDEFINED_EXIT_CODE;
    1177711627}
     
    1178311633 * Conditional VM-exit.
    1178411634 */
    11785 HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11635HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1178611636{
    1178711637    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1179111641    if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
    1179211642        return VERR_EM_INTERPRETER;
    11793     AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     11643    AssertMsgFailed(("Unexpected XDTR access\n"));
    1179411644    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1179511645}
     
    1179911649 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
    1180011650 */
    11801 HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11651HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1180211652{
    1180311653    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1180611656    if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
    1180711657        return VERR_EM_INTERPRETER;
    11808     AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     11658    AssertMsgFailed(("Unexpected RDRAND exit\n"));
    1180911659    HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1181011660}
     
    1181411664 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
    1181511665 */
    11816 HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11666HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1181711667{
    1181811668    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1182211672     * MSRs required.  That would require changes to IEM and possibly CPUM too.
    1182311673     * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
    11824     uint32_t const idMsr = pMixedCtx->ecx;  NOREF(idMsr); /* Save it. */
     11674    uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;  NOREF(idMsr); /* Save it. */
    1182511675    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1182611676    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
     
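A side note on the rc |= ...; AssertRCReturn(rc, rc); pattern visible in this and several following hunks: it relies on VINF_SUCCESS being 0, so OR-ing the statuses of several calls yields 0 only if all of them succeeded. A reduced sketch with hypothetical stand-ins for the VMCS helpers:

    #include <assert.h>

    #define VINF_SUCCESS 0 /* all success codes involved here are 0 */

    /* Hypothetical stand-ins for hmR0VmxReadExitInstrLenVmcs() etc. */
    static int stepOne(void) { return VINF_SUCCESS; }
    static int stepTwo(void) { return VINF_SUCCESS; }

    static int demo(void)
    {
        /* Error codes are negative, so the sign bit survives the OR.
         * Only safe when no call can legitimately return a positive
         * (informational) status code. */
        int rc = stepOne();
        rc    |= stepTwo();
        assert(rc == VINF_SUCCESS); /* AssertRCReturn(rc, rc) in the real code */
        return rc;
    }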
    1187311723 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
    1187411724 */
    11875 HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11725HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1187611726{
    1187711727    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1188111731     * MSRs required.  That would require changes to IEM and possibly CPUM too.
    1188211732     * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
    11883     uint32_t const idMsr = pMixedCtx->ecx; /* Save it. */
     11733    uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx; /* Save it. */
    1188411734    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1188511735    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
    1188611736    AssertRCReturn(rc, rc);
    1188711737
    11888     Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pMixedCtx->edx, pMixedCtx->eax));
     11738    Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
    1188911739
    1189011740    VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbInstr);
     
    1192411774            switch (idMsr)
    1192511775            {
    11926                 case MSR_IA32_SYSENTER_CS:  ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR);   break;
    11927                 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);  break;
    11928                 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);  break;
    11929                 case MSR_K8_FS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS);                break;
    11930                 case MSR_K8_GS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS);                break;
    11931                 case MSR_K6_EFER:           /* Nothing to do, already handled above. */ break;
     11776                case MSR_IA32_SYSENTER_CS:  ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
     11777                case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
     11778                case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
     11779                case MSR_K8_FS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS);               break;
     11780                case MSR_K8_GS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS);               break;
     11781                case MSR_K6_EFER:           /* Nothing to do, already handled above. */                                    break;
    1193211782                default:
    1193311783                {
     
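The switch above is the dirty-state protocol that makes the pMixedCtx removal safe: a handler that modifies guest state flags the affected bits in fCtxChanged, and the export path re-writes only those VMCS fields before the next VM-entry. A reduced sketch; the flag values are hypothetical, the real ones being the HM_CHANGED_* masks in HMInternal.h updated with ASMAtomicUoOrU64():

    #include <stdint.h>

    #define HM_CHANGED_GUEST_FS (UINT64_C(1) << 0) /* hypothetical values */
    #define HM_CHANGED_GUEST_GS (UINT64_C(1) << 1)

    /* After a guest WRMSR that shadows VMCS state, flag the affected
     * bits so the export path re-writes them before the next VM-entry. */
    static void markDirtyForMsr(uint64_t volatile *pfCtxChanged, uint32_t idMsr)
    {
        switch (idMsr)
        {
            case UINT32_C(0xC0000100) /* MSR_K8_FS_BASE */: *pfCtxChanged |= HM_CHANGED_GUEST_FS; break;
            case UINT32_C(0xC0000101) /* MSR_K8_GS_BASE */: *pfCtxChanged |= HM_CHANGED_GUEST_GS; break;
            default: break;
        }
    }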
    1200311853 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
    1200411854 */
    12005 HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11855HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1200611856{
    1200711857    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1201611866 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
    1201711867 */
    12018 HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11868HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1201911869{
    1202011870    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1204011890 *         interpreter.
    1204111891 */
    12042 HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     11892HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1204311893{
    1204411894    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1205111901
    1205211902    VBOXSTRICTRC rcStrict;
    12053     PVM pVM                              = pVCpu->CTX_SUFF(pVM);
     11903    PVM      pVM  = pVCpu->CTX_SUFF(pVM);
     11904    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1205411905    RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
    1205511906    uint32_t const uAccessType           = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification);
     
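For reference, the VMX_EXIT_QUAL_CRX_* macros used below decode a fixed bit layout from the Intel SDM (Vol. 3C, "Exit Qualification for Control-Register Accesses"). A sketch with hypothetical macro names:

    #define CRX_QUAL_CR_NUM(a_uQual)      ((unsigned)( (a_uQual)       & 0xf)) /* bits 3:0  */
    #define CRX_QUAL_ACCESS_TYPE(a_uQual) ((unsigned)(((a_uQual) >> 4) & 0x3)) /* bits 5:4  */
    #define CRX_QUAL_GENREG(a_uQual)      ((unsigned)(((a_uQual) >> 8) & 0xf)) /* bits 11:8 */

    enum { CRX_ACCESS_MOV_TO_CR = 0, CRX_ACCESS_MOV_FROM_CR = 1, CRX_ACCESS_CLTS = 2, CRX_ACCESS_LMSW = 3 };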
    1207311924                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    1207411925                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
    12075                     Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0));
     11926                    Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pCtx->cr0));
    1207611927                    break;
    1207711928                }
     
    1208611937                case 3:
    1208711938                {
    12088                     Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx) || pVCpu->hm.s.fUsingDebugLoop);
     11939                    Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pCtx) || pVCpu->hm.s.fUsingDebugLoop);
    1208911940                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
    1209011941                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    1209111942                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
    12092                     Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3));
     11943                    Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pCtx->cr3));
    1209311944                    break;
    1209411945                }
     
    1209911950                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    1210011951                                     HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
    12101                     Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
    12102                           pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
     11952                    Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict), pCtx->cr4,
     11953                          pVCpu->hm.s.fLoadSaveGuestXcr0));
    1210311954                    break;
    1210411955                }
     
    1212211973        {
    1212311974            Assert(   !pVM->hm.s.fNestedPaging
    12124                    || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
     11975                   || !CPUMIsGuestPagingEnabledEx(pCtx)
    1212511976                   || pVCpu->hm.s.fUsingDebugLoop
    1212611977                   || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3);
     
    1220312054 * VM-exit.
    1220412055 */
    12205 HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12056HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1220612057{
    1220712058    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    1220812059    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
    12209     Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
    12210 
     12060
     12061    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1221112062    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    1221212063    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     
    1222112072                             == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
    1222212073    bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification);
    12223     bool     fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
     12074    bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
    1222412075    bool     fDbgStepping = pVCpu->hm.s.fSingleInstruction;
    1222512076    AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
     
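The uIOWidth assertion above reflects the VMX encoding of the I/O access size in exit-qualification bits 2:0 as size minus one, so only 0 (byte), 1 (word) and 3 (dword) can occur. The s_aIOOpAnd table used later supplies the corresponding merge masks; a sketch of the merge the IN path performs:

    #include <stdint.h>

    /* Index 2 is unused because width 2 cannot be encoded. */
    static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, UINT32_MAX };

    /* Store an IN result into EAX without touching the bytes above the
     * access width (mirrors the eax merge further down in the handler). */
    static uint32_t mergeInResult(uint32_t uEax, uint32_t uResult, unsigned uIOWidth)
    {
        uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
        return (uEax & ~uAndVal) | (uResult & uAndVal);
    }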
    1225812109             * interpreting the instruction.
    1225912110             */
    12260             Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
     12111            Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue,
    1226112112                  fIOWrite ? 'w' : 'r'));
    12262             AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
     12113            AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
    1226312114            if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
    1226412115            {
     
    1229412145             * IN/OUT - I/O instruction.
    1229512146             */
    12296             Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
     12147            Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue,
    1229712148                  fIOWrite ? 'w' : 'r'));
    1229812149            uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
     
    1230012151            if (fIOWrite)
    1230112152            {
    12302                 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
     12153                rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
    1230312154                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
    1230412155            }
     
    1231012161                {
    1231112162                    /* Save result of I/O IN instr. in AL/AX/EAX. */
    12312                     pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
     12163                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
    1231312164                }
    1231412165                else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
    12315                     HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
     12166                    HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
    1231612167                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
    1231712168            }
     
    1232212173            if (!fUpdateRipAlready)
    1232312174            {
    12324                 hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, cbInstr);
     12175                hmR0VmxAdvanceGuestRipBy(pVCpu, cbInstr);
    1232512176                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
    1232612177            }
     
    1234012191                     && fGstStepping)
    1234112192            {
    12342                 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     12193                rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
    1234312194                AssertRCReturn(rc, rc);
    1234412195            }
     
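When the guest single-steps (EFLAGS.TF) over an instruction that was emulated rather than executed, the #DB the hardware would have raised must be queued by hand, which is what hmR0VmxSetPendingDebugXcptVmcs does. A sketch of the underlying VMCS write; the field encoding and BS bit are per the Intel SDM, and vmcsWrite64() is a hypothetical VMWRITE wrapper:

    #include <stdint.h>

    #define VMCS_GUEST_PENDING_DEBUG_XCPTS UINT32_C(0x6822)
    #define VMCS_PENDING_DBG_XCPT_BS       (UINT64_C(1) << 14) /* single step */

    extern int vmcsWrite64(uint32_t uFieldEnc, uint64_t u64Val); /* hypothetical */

    /* Queue the #DB the CPU would have delivered after the emulated,
     * single-stepped instruction. */
    static int setPendingSingleStepXcpt(void)
    {
        return vmcsWrite64(VMCS_GUEST_PENDING_DEBUG_XCPTS, VMCS_PENDING_DBG_XCPT_BS);
    }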
    1235412205            /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
    1235512206             *  execution engines about whether hyper BPs and such are pending. */
    12356             uint32_t const uDr7 = pMixedCtx->dr[7];
     12207            uint32_t const uDr7 = pCtx->dr[7];
    1235712208            if (RT_UNLIKELY(   (   (uDr7 & X86_DR7_ENABLED_MASK)
    1235812209                                && X86_DR7_ANY_RW_IO(uDr7)
    12359                                 && (pMixedCtx->cr4 & X86_CR4_DE))
     12210                                && (pCtx->cr4 & X86_CR4_DE))
    1236012211                            || DBGFBpIsHwIoArmed(pVM)))
    1236112212            {
     
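The DR7 test above gates the more expensive DBGFBpCheckIo path. Its logic as a standalone sketch, with constants per the Intel SDM and a hypothetical helper name; guest I/O breakpoints need an enabled breakpoint, CR4.DE, and an R/W field selecting the I/O type, the same conditions X86_DR7_ENABLED_MASK and X86_DR7_ANY_RW_IO check:

    #include <stdbool.h>
    #include <stdint.h>

    #define DR7_L_G_ENABLE_MASK UINT64_C(0x00ff) /* L0-3/G0-3, bits 7:0  */
    #define CR4_DE              UINT64_C(0x0008) /* debugging extensions */

    static bool isGuestIoBpArmed(uint64_t uDr7, uint64_t uCr4)
    {
        if (!(uDr7 & DR7_L_G_ENABLE_MASK) || !(uCr4 & CR4_DE))
            return false;
        for (unsigned i = 0; i < 4; i++) /* R/W fields at bits 17:16, 21:20, 25:24, 29:28 */
            if (((uDr7 >> (16 + i * 4)) & 0x3) == 0x2) /* 0b10 = I/O read/write */
                return true;
        return false;
    }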
    1236812219                bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
    1236912220
    12370                 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
     12221                VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
    1237112222                if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
    1237212223                {
    1237312224                    /* Raise #DB. */
    1237412225                    if (fIsGuestDbgActive)
    12375                         ASMSetDR6(pMixedCtx->dr[6]);
    12376                     if (pMixedCtx->dr[7] != uDr7)
     12226                        ASMSetDR6(pCtx->dr[6]);
     12227                    if (pCtx->dr[7] != uDr7)
    1237712228                        pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
    1237812229
    12379                     hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
     12230                    hmR0VmxSetPendingXcptDB(pVCpu);
    1238012231                }
    1238112232                /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
     
    1244212293 * VM-exit.
    1244312294 */
    12444 HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12295HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1244512296{
    1244612297    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1247112322            if (   uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
    1247212323                && uVector == X86_XCPT_PF)
    12473                 GCPtrFaultAddress = pMixedCtx->cr2;
     12324                GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
    1247412325            else
    1247512326                GCPtrFaultAddress = 0;
     
    1249312344 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
    1249412345 */
    12495 HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12346HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1249612347{
    1249712348    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1250812359 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
    1250912360 */
    12510 HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12361HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1251112362{
    1251212363    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1251512366
    1251612367    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
    12517     VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
     12368    VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    1251812369    if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
    1251912370    {
     
    1255612407                 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
    1255712408
     12409            PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1255812410            rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu,
    1255912411                                           uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
    12560                                            CPUMCTX2CORE(pMixedCtx), GCPhys);
     12412                                           CPUMCTX2CORE(pCtx), GCPhys);
    1256112413            Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
    1256212414            if (   rcStrict2 == VINF_SUCCESS
     
    1258712439 * VM-exit.
    1258812440 */
    12589 HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12441HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1259012442{
    1259112443    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1259412446    if (pVmxTransient->fWasGuestDebugStateActive)
    1259512447    {
    12596         AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     12448        AssertMsgFailed(("Unexpected MOV DRx exit\n"));
    1259712449        HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
    1259812450    }
     
    1263612488     * Update the segment registers and DR7 from the CPU.
    1263712489     */
     12490    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1263812491    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    1263912492    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
    1264012493    AssertRCReturn(rc, rc);
    12641     Log4Func(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
     12494    Log4Func(("CS:RIP=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));
    1264212495
    1264312496    PVM pVM = pVCpu->CTX_SUFF(pVM);
    1264412497    if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    1264512498    {
    12646         rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
     12499        rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    1264712500                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification),
    1264812501                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification));
     
    1265312506    else
    1265412507    {
    12655         rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
     12508        rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    1265612509                                VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification),
    1265712510                                VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification));
     
    1266212515    if (RT_SUCCESS(rc))
    1266312516    {
    12664         int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     12517        int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    1266512518        AssertRCReturn(rc2, rc2);
    1266612519        return VINF_SUCCESS;
     
    1267412527 * Conditional VM-exit.
    1267512528 */
    12676 HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12529HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1267712530{
    1267812531    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1268012533
    1268112534    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
    12682     VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
     12535    VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    1268312536    if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
    1268412537    {
     
    1271912572         * weird case. See @bugref{6043}.
    1272012573         */
    12721         PVM pVM = pVCpu->CTX_SUFF(pVM);
    12722         rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
    12723         Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pMixedCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
     12574        PVM      pVM  = pVCpu->CTX_SUFF(pVM);
     12575        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     12576        rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
     12577        Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
    1272412578        if (   rcStrict == VINF_SUCCESS
    1272512579            || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
     
    1273712591         * Frequent exit or something needing probing.  Get state and call EMHistoryExec.
    1273812592         */
    12739         Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
    1274012593        int rc2 = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1274112594        AssertRCReturn(rc2, rc2);
     
    1275912612 * VM-exit.
    1276012613 */
    12761 HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12614HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1276212615{
    1276312616    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1276512618
    1276612619    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
    12767     VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
     12620    VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    1276812621    if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
    1276912622    {
     
    1279812651    TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
    1279912652
     12653
     12654    /* Handle the pagefault trap for the nested shadow table. */
     12655    PVM      pVM  = pVCpu->CTX_SUFF(pVM);
     12656    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     12657
    1280012658    Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
    12801               uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
    12802 
    12803     /* Handle the pagefault trap for the nested shadow table. */
    12804     PVM pVM = pVCpu->CTX_SUFF(pVM);
    12805     VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
     12659              uErrorCode, pCtx->cs.Sel, pCtx->rip));
     12660
     12661    VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
    1280612662    TRPMResetTrap(pVCpu);
    1280712663
     
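The uErrorCode handed to TRPMAssertXcptPF and PGMR0Trap0eHandlerNestedPaging above is derived from the EPT-violation exit qualification. A rough sketch of that mapping, with hypothetical constant names and the bit layout per the Intel SDM:

    #include <stdint.h>

    #define EPT_QUAL_READ        (UINT64_C(1) << 0) /* data read          */
    #define EPT_QUAL_WRITE       (UINT64_C(1) << 1) /* data write         */
    #define EPT_QUAL_INSTR_FETCH (UINT64_C(1) << 2) /* instruction fetch  */
    #define EPT_QUAL_READABLE    (UINT64_C(1) << 3) /* GPA was readable   */
    #define EPT_QUAL_WRITABLE    (UINT64_C(1) << 4)
    #define EPT_QUAL_EXECUTABLE  (UINT64_C(1) << 5)

    #define PF_ERR_P  UINT32_C(0x01) /* protection violation, not absent */
    #define PF_ERR_RW UINT32_C(0x02) /* write access                     */
    #define PF_ERR_ID UINT32_C(0x10) /* instruction fetch                */

    static uint32_t eptQualToPfErrCode(uint64_t uQual)
    {
        uint32_t uErr = 0;
        if (uQual & EPT_QUAL_WRITE)
            uErr |= PF_ERR_RW;
        if (uQual & EPT_QUAL_INSTR_FETCH)
            uErr |= PF_ERR_ID;
        if (uQual & (EPT_QUAL_READABLE | EPT_QUAL_WRITABLE | EPT_QUAL_EXECUTABLE))
            uErr |= PF_ERR_P; /* a translation existed; permissions failed */
        return uErr;
    }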
    1283412690 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
    1283512691 */
    12836 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12692static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1283712693{
    1283812694    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
     
    1284212698    AssertRCReturn(rc, rc);
    1284312699
    12844     if (!(pMixedCtx->cr0 & X86_CR0_NE))
     12700    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
    1284512701    {
    1284612702        /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
     
    1285012706         *        provides VM-exit instruction length. If this causes problem later,
    1285112707         *        disassemble the instruction like it's done on AMD-V. */
    12852         int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     12708        int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    1285312709        AssertRCReturn(rc2, rc2);
    1285412710        return rc;
     
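A sketch of the CR0.NE distinction the #MF handler above implements, with hypothetical helper names: when NE is clear, the guest expects legacy FERR# signalling (IRQ 13 through the PIC) for x87 errors instead of a #MF exception.

    #include <stdint.h>

    #define CR0_NE UINT32_C(0x20) /* numeric error, CR0 bit 5 */

    extern void raiseGuestIrq13(void); /* hypothetical: virtual PIC line      */
    extern void injectXcptMF(void);    /* hypothetical: queue #MF for entry   */

    static void deliverX87Error(uint32_t uGstCr0)
    {
        if (!(uGstCr0 & CR0_NE))
            raiseGuestIrq13();
        else
            injectXcptMF();
    }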
    1286412720 * VM-exit exception handler for \#BP (Breakpoint exception).
    1286512721 */
    12866 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12722static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1286712723{
    1286812724    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
     
    1287212728    AssertRCReturn(rc, rc);
    1287312729
    12874     rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx));
     12730    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     12731    rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    1287512732    if (rc == VINF_EM_RAW_GUEST_TRAP)
    1287612733    {
     
    1289212749 * VM-exit exception handler for \#AC (alignment check exception).
    1289312750 */
    12894 static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    12895 {
    12896     RT_NOREF_PV(pMixedCtx);
     12751static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     12752{
    1289712753    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    1289812754
     
    1291412770 * VM-exit exception handler for \#DB (Debug exception).
    1291512771 */
    12916 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12772static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1291712773{
    1291812774    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
     
    1293012786                     & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
    1293112787
    12932     rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
     12788    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     12789    rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
    1293312790    Log6Func(("rc=%Rrc\n", rc));
    1293412791    if (rc == VINF_EM_RAW_GUEST_TRAP)
     
    1294212799        HM_DISABLE_PREEMPT();
    1294312800
    12944         pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
    12945         pMixedCtx->dr[6] |= uDR6;
     12801        pCtx->dr[6] &= ~X86_DR6_B_MASK;
     12802        pCtx->dr[6] |= uDR6;
    1294612803        if (CPUMIsGuestDebugStateActive(pVCpu))
    12947             ASMSetDR6(pMixedCtx->dr[6]);
     12804            ASMSetDR6(pCtx->dr[6]);
    1294812805
    1294912806        HM_RESTORE_PREEMPT();
     
    1295412811
    1295512812        /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
    12956         pMixedCtx->dr[7] &= ~X86_DR7_GD;
     12813        pCtx->dr[7] &= ~X86_DR7_GD;
    1295712814
    1295812815        /* Paranoia. */
    12959         pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
    12960         pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
    12961 
    12962         rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
     12816        pCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
     12817        pCtx->dr[7] |= X86_DR7_RA1_MASK;
     12818
     12819        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pCtx->dr[7]);
    1296312820        AssertRCReturn(rc, rc);
    1296412821
     
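The DR6/DR7 bookkeeping above in sketch form, with masks per the Intel SDM: DR6 hit bits accumulate (the CPU sets new ones without clearing old ones), so the emulation replaces only the B0-B3 group, and clearing DR7.GD stops the guest's own DRx accesses from re-trapping with #DB.

    #include <stdint.h>

    #define DR6_B_MASK UINT64_C(0x000f) /* B0-B3          */
    #define DR7_GD     UINT64_C(0x2000) /* general detect */

    static void foldGuestDr6Dr7(uint64_t *puDr6, uint64_t *puDr7, uint64_t uNewDr6)
    {
        *puDr6 = (*puDr6 & ~DR6_B_MASK) | uNewDr6; /* merge new hit bits  */
        *puDr7 &= ~DR7_GD;                         /* stop recursive #DB  */
    }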
    1299912856 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
    1300012857 */
    13001 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     12858static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1300212859{
    1300312860    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
     
    1300512862
    1300612863    int rc;
     12864    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1300712865    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    1300812866    { /* likely */ }
     
    1301812876        rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1301912877        AssertRCReturn(rc, rc);
    13020         Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
    13021                   pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
     12878        Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,
     12879                  pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
    1302212880        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    1302312881                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     
    1302512883    }
    1302612884
    13027     Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
     12885    Assert(CPUMIsGuestInRealModeEx(pCtx));
    1302812886    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
    1302912887
     
    1304112899        rc = VINF_SUCCESS;
    1304212900        Assert(cbOp == pDis->cbInstr);
    13043         Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
     12901        Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pCtx->cs.Sel, pCtx->rip));
    1304412902        switch (pDis->pCurInstr->uOpcode)
    1304512903        {
    1304612904            case OP_CLI:
    1304712905            {
    13048                 pMixedCtx->eflags.Bits.u1IF = 0;
    13049                 pMixedCtx->eflags.Bits.u1RF = 0;
    13050                 pMixedCtx->rip += pDis->cbInstr;
     12906                pCtx->eflags.Bits.u1IF = 0;
     12907                pCtx->eflags.Bits.u1RF = 0;
     12908                pCtx->rip += pDis->cbInstr;
    1305112909                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1305212910                if (   !fDbgStepping
    13053                     && pMixedCtx->eflags.Bits.u1TF)
     12911                    && pCtx->eflags.Bits.u1TF)
    1305412912                {
    13055                     rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     12913                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
    1305612914                    AssertRCReturn(rc, rc);
    1305712915                }
     
    1306212920            case OP_STI:
    1306312921            {
    13064                 bool fOldIF = pMixedCtx->eflags.Bits.u1IF;
    13065                 pMixedCtx->eflags.Bits.u1IF = 1;
    13066                 pMixedCtx->eflags.Bits.u1RF = 0;
    13067                 pMixedCtx->rip += pDis->cbInstr;
     12922                bool fOldIF = pCtx->eflags.Bits.u1IF;
     12923                pCtx->eflags.Bits.u1IF = 1;
     12924                pCtx->eflags.Bits.u1RF = 0;
     12925                pCtx->rip += pDis->cbInstr;
    1306812926                if (!fOldIF)
    1306912927                {
    13070                     EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
     12928                    EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
    1307112929                    Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    1307212930                }
    1307312931                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1307412932                if (   !fDbgStepping
    13075                     && pMixedCtx->eflags.Bits.u1TF)
     12933                    && pCtx->eflags.Bits.u1TF)
    1307612934                {
    13077                     rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     12935                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
    1307812936                    AssertRCReturn(rc, rc);
    1307912937                }
     
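The STI arm above records the one-instruction interrupt shadow via EMSetInhibitInterruptsPC; on the next VM-entry that state is expected to surface in the guest interruptibility-state VMCS field. A sketch, with the encodings per the Intel SDM and a hypothetical vmcsWrite32():

    #include <stdint.h>

    #define VMCS_GUEST_INT_STATE     UINT32_C(0x4824)
    #define VMCS_INT_STATE_BLOCK_STI UINT32_C(0x1) /* bit 0 */

    extern int vmcsWrite32(uint32_t uFieldEnc, uint32_t u32Val); /* hypothetical */

    /* Reflect the one-instruction shadow after STI flips IF from 0 to 1. */
    static int setStiInterruptShadow(void)
    {
        return vmcsWrite32(VMCS_GUEST_INT_STATE, VMCS_INT_STATE_BLOCK_STI);
    }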
    1308512943            {
    1308612944                rc = VINF_EM_HALT;
    13087                 pMixedCtx->rip += pDis->cbInstr;
    13088                 pMixedCtx->eflags.Bits.u1RF = 0;
     12945                pCtx->rip += pDis->cbInstr;
     12946                pCtx->eflags.Bits.u1RF = 0;
    1308912947                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1309012948                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
     
    1309412952            case OP_POPF:
    1309512953            {
    13096                 Log4Func(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
     12954                Log4Func(("POPF CS:EIP %04x:%04RX64\n", pCtx->cs.Sel, pCtx->rip));
    1309712955                uint32_t cbParm;
    1309812956                uint32_t uMask;
    13099                 bool     fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
     12957                bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
    1310012958                if (pDis->fPrefix & DISPREFIX_OPSIZE)
    1310112959                {
     
    1311312971                X86EFLAGS Eflags;
    1311412972                Eflags.u32 = 0;
    13115                 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
     12973                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
    1311612974                                  &GCPtrStack);
    1311712975                if (RT_SUCCESS(rc))
     
    1312612984                    break;
    1312712985                }
    13128                 Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
    13129                 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
    13130                                       | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
    13131                 pMixedCtx->esp += cbParm;
    13132                 pMixedCtx->esp &= uMask;
    13133                 pMixedCtx->rip += pDis->cbInstr;
     12986                Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pCtx->rsp, uMask, pCtx->rip));
     12987                pCtx->eflags.u32 = (pCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
     12988                                 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
     12989                pCtx->esp += cbParm;
     12990                pCtx->esp &= uMask;
     12991                pCtx->rip += pDis->cbInstr;
    1313412992                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
    1313512993                /* Generate a pending-debug exception when the guest stepping over POPF regardless of how
     
    1313812996                    && fGstStepping)
    1313912997                {
    13140                     rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     12998                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
    1314112999                    AssertRCReturn(rc, rc);
    1314213000                }
     
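The POPF arm's flag merge in isolation: only the bits permitted by X86_EFL_POPF_BITS (the set of flags POPF may change, from iprt/x86.h) and the operand-size mask are taken from the popped image, the rest of EFLAGS is preserved, and RF is always cleared. Sketch:

    #include <stdint.h>

    #define EFL_RF UINT32_C(0x00010000) /* resume flag, bit 16 */

    static uint32_t popfMerge(uint32_t uEflags, uint32_t uPopped, uint32_t fPopfBits, uint32_t uMask)
    {
        return (uEflags & ~((fPopfBits & uMask) | EFL_RF))
             | (uPopped &    (fPopfBits & uMask));
    }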
    1316213020                /* Get the stack pointer & push the contents of eflags onto the stack. */
    1316313021                RTGCPTR GCPtrStack = 0;
    13164                 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
     13022                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask,
    1316513023                                  SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
    1316613024                if (RT_FAILURE(rc))
     
    1316913027                    break;
    1317013028                }
    13171                 X86EFLAGS Eflags = pMixedCtx->eflags;
     13029                X86EFLAGS Eflags = pCtx->eflags;
    1317213030                /* The RF & VM bits are cleared on image stored on stack; see Intel Instruction reference for PUSHF. */
    1317313031                Eflags.Bits.u1RF = 0;
     
    1318213040                }
    1318313041                Log4Func(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
    13184                 pMixedCtx->esp -= cbParm;
    13185                 pMixedCtx->esp &= uMask;
    13186                 pMixedCtx->rip += pDis->cbInstr;
    13187                 pMixedCtx->eflags.Bits.u1RF = 0;
     13042                pCtx->esp -= cbParm;
     13043                pCtx->esp &= uMask;
     13044                pCtx->rip += pDis->cbInstr;
     13045                pCtx->eflags.Bits.u1RF = 0;
    1318813046                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
    1318913047                if (  !fDbgStepping
    13190                     && pMixedCtx->eflags.Bits.u1TF)
     13048                    && pCtx->eflags.Bits.u1TF)
    1319113049                {
    13192                     rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     13050                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
    1319313051                    AssertRCReturn(rc, rc);
    1319413052                }
     
    1320313061                RTGCPTR  GCPtrStack    = 0;
    1320413062                uint32_t uMask         = 0xffff;
    13205                 bool     fGstStepping  = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
     13063                bool     fGstStepping  = RT_BOOL(pCtx->eflags.Bits.u1TF);
    1320613064                uint16_t aIretFrame[3];
    1320713065                if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
     
    1321013068                    break;
    1321113069                }
    13212                 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
     13070                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
    1321313071                                  &GCPtrStack);
    1321413072                if (RT_SUCCESS(rc))
     
    1322313081                    break;
    1322413082                }
    13225                 pMixedCtx->eip                = 0;
    13226                 pMixedCtx->ip                 = aIretFrame[0];
    13227                 pMixedCtx->cs.Sel             = aIretFrame[1];
    13228                 pMixedCtx->cs.ValidSel        = aIretFrame[1];
    13229                 pMixedCtx->cs.u64Base         = (uint64_t)pMixedCtx->cs.Sel << 4;
    13230                 pMixedCtx->eflags.u32         = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
    13231                                               | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
    13232                 pMixedCtx->sp                += sizeof(aIretFrame);
     13083                pCtx->eip                = 0;
     13084                pCtx->ip                 = aIretFrame[0];
     13085                pCtx->cs.Sel             = aIretFrame[1];
     13086                pCtx->cs.ValidSel        = aIretFrame[1];
     13087                pCtx->cs.u64Base         = (uint64_t)pCtx->cs.Sel << 4;
     13088                pCtx->eflags.u32         = (pCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
     13089                                         | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
     13090                pCtx->sp                += sizeof(aIretFrame);
    1323313091                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
    1323413092                                                         | HM_CHANGED_GUEST_CS);
     
    1323713095                    && fGstStepping)
    1323813096                {
    13239                     rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     13097                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
    1324013098                    AssertRCReturn(rc, rc);
    1324113099                }
    13242                 Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
     13100                Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pCtx->cs.Sel, pCtx->ip));
    1324313101                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
    1324413102                break;
     
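The IRET arm pops the classic three-word real-mode frame (IP, CS, FLAGS), exactly what aIretFrame[] holds above. A reduced sketch of applying it; the structure is hypothetical and the flag merge is simplified relative to the POPF-style merge the handler performs:

    #include <stdint.h>

    struct RealCtx { uint16_t ip, cs, sp; uint32_t csBase, eflags; };

    static void applyIretFrame(struct RealCtx *pCtx, uint16_t const aFrame[3], uint32_t fFlagMask)
    {
        pCtx->ip     = aFrame[0];
        pCtx->cs     = aFrame[1];
        pCtx->csBase = (uint32_t)aFrame[1] << 4; /* real-mode segment base */
        pCtx->eflags = (pCtx->eflags & ~fFlagMask) | (aFrame[2] & fFlagMask);
        pCtx->sp    += 3 * sizeof(uint16_t);     /* pop the frame */
    }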
    1324813106            {
    1324913107                uint16_t uVector = pDis->Param1.uValue & 0xff;
    13250                 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
     13108                hmR0VmxSetPendingIntN(pVCpu, uVector, pDis->cbInstr);
    1325113109                /* INT clears EFLAGS.TF, we must not set any pending debug exceptions here. */
    1325213110                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
     
    1325613114            case OP_INTO:
    1325713115            {
    13258                 if (pMixedCtx->eflags.Bits.u1OF)
     13116                if (pCtx->eflags.Bits.u1OF)
    1325913117                {
    13260                     hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
     13118                    hmR0VmxSetPendingXcptOF(pVCpu, pDis->cbInstr);
    1326113119                    /* INTO clears EFLAGS.TF, we must not set any pending debug exceptions here. */
    1326213120                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
     
    1326413122                else
    1326513123                {
    13266                     pMixedCtx->eflags.Bits.u1RF = 0;
     13124                    pCtx->eflags.Bits.u1RF = 0;
    1326713125                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
    1326813126                }
     
    1327213130            default:
    1327313131            {
    13274                 pMixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
    13275                 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
     13132                pCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
     13133                VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pCtx), 0 /* pvFault */,
    1327613134                                                                    EMCODETYPE_SUPERVISOR);
    1327713135                rc = VBOXSTRICTRC_VAL(rc2);
     
    1330013158 *          up-to-date.
    1330113159 */
    13302 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    13303 {
    13304     RT_NOREF_PV(pMixedCtx);
     13160static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     13161{
    1330513162    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    1330613163#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     
    1333213189 * VM-exit exception handler for \#PF (Page-fault exception).
    1333313190 */
    13334 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     13191static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    1333513192{
    1333613193    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
     
    1335713214        {
    1335813215            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
    13359             hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
     13216            hmR0VmxSetPendingXcptDF(pVCpu);
    1336013217            Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
    1336113218        }
     
    1337213229    }
    1337313230
     13231    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1337413232    rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1337513233    AssertRCReturn(rc, rc);
    1337613234
    1337713235    Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
    13378               pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
     13236              pCtx->cs.Sel, pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
    1337913237
    1338013238    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
    13381     rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
     13239    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx),
    1338213240                          (RTGCPTR)pVmxTransient->uExitQualification);
    1338313241
     
    1341113269            TRPMResetTrap(pVCpu);
    1341213270            pVCpu->hm.s.Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
    13413             hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
     13271            hmR0VmxSetPendingXcptDF(pVCpu);
    1341413272            Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
    1341513273        }
TabularUnified trunk/src/VBox/VMM/VMMR3/HM.cpp

    r72966 r72983  
    26252625 * @param   pVM         The cross context VM structure.
    26262626 * @param   pVCpu       The cross context virtual CPU structure.
    2627  * @param   pCtx        Pointer to the guest CPU context.
    2628  */
    2629 VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    2630 {
    2631     NOREF(pCtx);
     2627 */
     2628VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu)
     2629{
    26322630    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
    26332631                                pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
     
    30343032     * when the unrestricted guest execution feature is missing (VT-x only).
    30353033     */
    3036     if (   pVM->hm.s.vmx.fEnabled
     3034    if (    pVM->hm.s.vmx.fEnabled
    30373035        && !pVM->hm.s.vmx.fUnrestrictedGuest
    3038         && CPUMIsGuestInRealModeEx(pCtx)
     3036        &&  CPUMIsGuestInRealModeEx(pCtx)
    30393037        && !PDMVmmDevHeapIsEnabled(pVM))
    30403038    {
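The condition being realigned here is the VT-x reschedule test: without unrestricted guest execution, real-mode code can only run through the real-on-v86 setup, which needs the VMM device heap (used for the real-mode TSS and related structures); when the heap is unavailable the VM must move to another execution engine. As a sketch:

    #include <stdbool.h>

    static bool mustRescheduleForRealMode(bool fVmxEnabled, bool fUnrestrictedGuest,
                                          bool fGuestInRealMode, bool fDevHeapEnabled)
    {
        return fVmxEnabled && !fUnrestrictedGuest && fGuestInRealMode && !fDevHeapEnabled;
    }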
TabularUnified trunk/src/VBox/VMM/include/EMHandleRCTmpl.h

    r72634 r72983  
    261261
    262262        case VINF_EM_HM_PATCH_TPR_INSTR:
    263             rc = HMR3PatchTprInstr(pVM, pVCpu, &pVCpu->cpum.GstCtx);
     263            rc = HMR3PatchTprInstr(pVM, pVCpu);
    264264            break;
    265265#endif
TabularUnified trunk/src/VBox/VMM/include/HMInternal.h

    r72967 r72983  
    11171117
    11181118# ifdef VBOX_STRICT
    1119 VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu, PCPUMCTX pCtx);
     1119VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu);
    11201120VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
    11211121# endif