Changeset 72983 in vbox
- Timestamp: Jul 8, 2018 4:15:47 PM
- svn:sync-xref-src-repo-rev: 123535
- Location: trunk
- Files: 7 edited
trunk/include/VBox/vmm/hm.h
r72967 → r72983

@@ -261,5 +261,5 @@
 VMMR3_INT_DECL(int)     HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
 VMMR3_INT_DECL(int)     HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem);
-VMMR3_INT_DECL(int)     HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+VMMR3_INT_DECL(int)     HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu);
 VMMR3_INT_DECL(bool)    HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx);
 VMMR3_INT_DECL(bool)    HMR3IsVmxPreemptionTimerUsed(PVM pVM);
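
The only interface change in this header is that HMR3PatchTprInstr() loses its separate guest-CPU context parameter; the function is now expected to reach the guest state through the VMCPU structure (pVCpu->cpum.GstCtx). A minimal sketch of how a call site changes, assuming a hypothetical ring-3 caller that previously fetched the context with CPUMQueryGuestCtxPtr():

    /* Before r72983 (hypothetical caller): the guest-CPU context is passed explicitly. */
    rc = HMR3PatchTprInstr(pVM, pVCpu, CPUMQueryGuestCtxPtr(pVCpu));

    /* After r72983: the context is implied by pVCpu, so the extra parameter is gone. */
    rc = HMR3PatchTprInstr(pVM, pVCpu);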
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r72967 → r72983

@@ -165,5 +165,5 @@
  * actions when the host is being suspended to speed up the suspending and
  * avoid trouble. */
-    volatile bool                   fSuspended;
+    bool volatile                   fSuspended;

     /** Whether we've already initialized all CPUs.
@@ -1961,7 +1961,6 @@
  *
  * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the CPU context.
- */
-VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+ */
+VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu)
 {
     /*
@@ -1991,4 +1990,5 @@
     char szEFlags[80];
     char *psz = szEFlags;
+    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     uint32_t uEFlags = pCtx->eflags.u32;
     for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
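
The hmR0DumpRegs() hunks above show the pattern applied throughout this changeset: the PCPUMCTX parameter is dropped and the function derives the guest context locally from the cross-context VMCPU structure, so callers only need to pass pVCpu. A condensed before/after sketch of that shape (bodies abbreviated, not the complete function):

    /* Before: the caller had to supply the guest-CPU context. */
    VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        uint32_t uEFlags = pCtx->eflags.u32;    /* ...register dump continues... */
    }

    /* After: the context is looked up from pVCpu itself. */
    VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu)
    {
        PCCPUMCTX pCtx    = &pVCpu->cpum.GstCtx;
        uint32_t  uEFlags = pCtx->eflags.u32;   /* ...register dump continues... */
    }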
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r72970 → r72983

@@ -4059,5 +4059,5 @@
 {
 #ifdef VBOX_STRICT
-        hmR0DumpRegs(pVCpu, &pVCpu->cpum.GstCtx);
+        hmR0DumpRegs(pVCpu);
         PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
         Log4(("ctrl.u32VmcbCleanBits             %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits));
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72967 r72983 332 332 * @returns Strict VBox status code (i.e. informational status codes too). 333 333 * @param pVCpu The cross context virtual CPU structure. 334 * @param pMixedCtx Pointer to the guest-CPU context. The data may be335 * out-of-sync. Make sure to update the required336 * fields before using them.337 334 * @param pVmxTransient Pointer to the VMX-transient structure. 338 335 */ 339 336 #ifndef HMVMX_USE_FUNCTION_TABLE 340 typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);337 typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 341 338 #else 342 typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);339 typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 343 340 /** Pointer to VM-exit handler. */ 344 341 typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER; … … 352 349 * @returns VBox status code, no informational status code returned. 353 350 * @param pVCpu The cross context virtual CPU structure. 354 * @param pMixedCtx Pointer to the guest-CPU context. The data may be355 * out-of-sync. Make sure to update the required356 * fields before using them.357 351 * @param pVmxTransient Pointer to the VMX-transient structure. 358 352 * … … 362 356 */ 363 357 #ifndef HMVMX_USE_FUNCTION_TABLE 364 typedef int FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);358 typedef int FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 365 359 #else 366 360 typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC; … … 381 375 #endif 382 376 #ifndef HMVMX_USE_FUNCTION_TABLE 383 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);377 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason); 384 378 # define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC) 385 379 # define HMVMX_EXIT_NSRC_DECL DECLINLINE(int) … … 440 434 /** @} */ 441 435 442 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);443 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);444 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);445 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);446 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);447 static int hmR0VmxExitXcptAC(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);448 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);449 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu , PCPUMCTX pCtx);436 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 437 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 438 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 439 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 440 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 441 static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 442 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient); 443 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu); 450 444 451 445 … … 1535 1529 * 1536 1530 * @param pVCpu The cross context virtual CPU structure. 
1537 * @param pMixedCtx Pointer to the guest-CPU context. The data may be1538 * out-of-sync. Make sure to update the required fields1539 * before using them.1540 1531 * 1541 1532 * @remarks No-long-jump zone!!! 1542 1533 */ 1543 static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu , PCPUMCTX pMixedCtx)1534 static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu) 1544 1535 { 1545 1536 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 1560 1551 * CPU, see @bugref{8728}. 1561 1552 */ 1553 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1562 1554 if ( !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST) 1563 && p MixedCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr1564 && p MixedCtx->msrLSTAR == pVCpu->hm.s.vmx.u64HostLStarMsr1565 && p MixedCtx->msrSTAR == pVCpu->hm.s.vmx.u64HostStarMsr1566 && p MixedCtx->msrSFMASK == pVCpu->hm.s.vmx.u64HostSFMaskMsr)1555 && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr 1556 && pCtx->msrLSTAR == pVCpu->hm.s.vmx.u64HostLStarMsr 1557 && pCtx->msrSTAR == pVCpu->hm.s.vmx.u64HostStarMsr 1558 && pCtx->msrSFMASK == pVCpu->hm.s.vmx.u64HostSFMaskMsr) 1567 1559 { 1568 1560 #ifdef VBOX_STRICT 1569 Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == p MixedCtx->msrKERNELGSBASE);1570 Assert(ASMRdMsr(MSR_K8_LSTAR) == p MixedCtx->msrLSTAR);1571 Assert(ASMRdMsr(MSR_K6_STAR) == p MixedCtx->msrSTAR);1572 Assert(ASMRdMsr(MSR_K8_SF_MASK) == p MixedCtx->msrSFMASK);1561 Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pCtx->msrKERNELGSBASE); 1562 Assert(ASMRdMsr(MSR_K8_LSTAR) == pCtx->msrLSTAR); 1563 Assert(ASMRdMsr(MSR_K6_STAR) == pCtx->msrSTAR); 1564 Assert(ASMRdMsr(MSR_K8_SF_MASK) == pCtx->msrSFMASK); 1573 1565 #endif 1574 1566 } 1575 1567 else 1576 1568 { 1577 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE); 1578 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR); 1579 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR); 1580 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK); 1581 } 1582 } 1583 #else 1584 RT_NOREF(pMixedCtx); 1569 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE); 1570 ASMWrMsr(MSR_K8_LSTAR, pCtx->msrLSTAR); 1571 ASMWrMsr(MSR_K6_STAR, pCtx->msrSTAR); 1572 ASMWrMsr(MSR_K8_SF_MASK, pCtx->msrSFMASK); 1573 } 1574 } 1585 1575 #endif 1586 1576 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST; … … 3175 3165 * @returns true if we need to load guest EFER, false otherwise. 3176 3166 * @param pVCpu The cross context virtual CPU structure. 3177 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3178 * out-of-sync. Make sure to update the required fields3179 * before using them.3180 3167 * 3181 3168 * @remarks Requires EFER, CR4. 3182 3169 * @remarks No-long-jump zone!!! 3183 3170 */ 3184 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)3171 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu) 3185 3172 { 3186 3173 #ifdef HMVMX_ALWAYS_SWAP_EFER … … 3188 3175 #endif 3189 3176 3177 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 3190 3178 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 3191 3179 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. 
*/ 3192 if (CPUMIsGuestInLongModeEx(p MixedCtx))3180 if (CPUMIsGuestInLongModeEx(pCtx)) 3193 3181 return false; 3194 3182 #endif … … 3196 3184 PVM pVM = pVCpu->CTX_SUFF(pVM); 3197 3185 uint64_t const u64HostEfer = pVM->hm.s.vmx.u64HostEfer; 3198 uint64_t const u64GuestEfer = p MixedCtx->msrEFER;3186 uint64_t const u64GuestEfer = pCtx->msrEFER; 3199 3187 3200 3188 /* … … 3202 3190 * guest's SYSCALL behaviour isn't broken, see @bugref{7386}. 3203 3191 */ 3204 if ( CPUMIsGuestInLongModeEx(p MixedCtx)3192 if ( CPUMIsGuestInLongModeEx(pCtx) 3205 3193 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE)) 3206 3194 { … … 3213 3201 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes". 3214 3202 */ 3215 if ( (p MixedCtx->cr4 & X86_CR4_PAE)3216 && (p MixedCtx->cr0 & X86_CR0_PG)3203 if ( (pCtx->cr4 & X86_CR4_PAE) 3204 && (pCtx->cr0 & X86_CR0_PG) 3217 3205 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE)) 3218 3206 { … … 3234 3222 * @returns VBox status code. 3235 3223 * @param pVCpu The cross context virtual CPU structure. 3236 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3237 * out-of-sync. Make sure to update the required fields3238 * before using them.3239 3224 * 3240 3225 * @remarks Requires EFER. 3241 3226 * @remarks No-long-jump zone!!! 3242 3227 */ 3243 static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)3228 static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu) 3244 3229 { 3245 3230 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS) … … 3253 3238 3254 3239 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */ 3255 if (CPUMIsGuestInLongModeEx( pMixedCtx))3240 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)) 3256 3241 { 3257 3242 fVal |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST; … … 3263 3248 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */ 3264 3249 if ( pVM->hm.s.vmx.fSupportsVmcsEfer 3265 && hmR0VmxShouldSwapEferMsr(pVCpu , pMixedCtx))3250 && hmR0VmxShouldSwapEferMsr(pVCpu)) 3266 3251 { 3267 3252 fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR; … … 3305 3290 * @returns VBox status code. 3306 3291 * @param pVCpu The cross context virtual CPU structure. 3307 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3308 * out-of-sync. Make sure to update the required fields3309 * before using them.3310 3292 * 3311 3293 * @remarks Requires EFER. 3312 3294 */ 3313 static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)3295 static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu) 3314 3296 { 3315 3297 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS) … … 3346 3328 /* If the newer VMCS fields for managing EFER exists, use it. */ 3347 3329 if ( pVM->hm.s.vmx.fSupportsVmcsEfer 3348 && hmR0VmxShouldSwapEferMsr(pVCpu , pMixedCtx))3330 && hmR0VmxShouldSwapEferMsr(pVCpu)) 3349 3331 { 3350 3332 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR … … 3465 3447 * @returns Guest's interruptibility-state. 3466 3448 * @param pVCpu The cross context virtual CPU structure. 3467 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3468 * out-of-sync. Make sure to update the required fields3469 * before using them.3470 3449 * 3471 3450 * @remarks No-long-jump zone!!! 
3472 3451 */ 3473 static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu , PCPUMCTX pMixedCtx)3452 static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu) 3474 3453 { 3475 3454 /* … … 3481 3460 /* If inhibition is active, RIP & RFLAGS should've been accessed 3482 3461 (i.e. read previously from the VMCS or from ring-3). */ 3462 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 3483 3463 #ifdef VBOX_STRICT 3484 uint64_t const fExtrn = ASMAtomicUoReadU64(&p MixedCtx->fExtrn);3464 uint64_t const fExtrn = ASMAtomicUoReadU64(&pCtx->fExtrn); 3485 3465 AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn)); 3486 3466 #endif 3487 if (p MixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))3488 { 3489 if (p MixedCtx->eflags.Bits.u1IF)3467 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu)) 3468 { 3469 if (pCtx->eflags.Bits.u1IF) 3490 3470 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI; 3491 3471 else … … 3581 3561 * @returns VBox status code. 3582 3562 * @param pVCpu The cross context virtual CPU structure. 3583 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3584 * out-of-sync. Make sure to update the required fields3585 * before using them.3586 3563 * 3587 3564 * @remarks No-long-jump zone!!! 3588 3565 */ 3589 static int hmR0VmxExportGuestRip(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)3566 static int hmR0VmxExportGuestRip(PVMCPU pVCpu) 3590 3567 { 3591 3568 int rc = VINF_SUCCESS; … … 3594 3571 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP); 3595 3572 3596 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, p MixedCtx->rip);3573 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip); 3597 3574 AssertRCReturn(rc, rc); 3598 3575 3599 3576 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP); 3600 Log4Func(("RIP=%#RX64\n", p MixedCtx->rip));3577 Log4Func(("RIP=%#RX64\n", pVCpu->cpum.GstCtx.rip)); 3601 3578 } 3602 3579 return rc; … … 3609 3586 * @returns VBox status code. 3610 3587 * @param pVCpu The cross context virtual CPU structure. 3611 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3612 * out-of-sync. Make sure to update the required fields3613 * before using them.3614 3588 * 3615 3589 * @remarks No-long-jump zone!!! 3616 3590 */ 3617 static int hmR0VmxExportGuestRsp(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)3591 static int hmR0VmxExportGuestRsp(PVMCPU pVCpu) 3618 3592 { 3619 3593 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP) … … 3621 3595 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP); 3622 3596 3623 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, p MixedCtx->rsp);3597 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pVCpu->cpum.GstCtx.rsp); 3624 3598 AssertRCReturn(rc, rc); 3625 3599 … … 3635 3609 * @returns VBox status code. 3636 3610 * @param pVCpu The cross context virtual CPU structure. 3637 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3638 * out-of-sync. Make sure to update the required fields3639 * before using them.3640 3611 * 3641 3612 * @remarks No-long-jump zone!!! 3642 3613 */ 3643 static int hmR0VmxExportGuestRflags(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)3614 static int hmR0VmxExportGuestRflags(PVMCPU pVCpu) 3644 3615 { 3645 3616 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS) … … 3649 3620 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ). 3650 3621 Let us assert it as such and use 32-bit VMWRITE. 
*/ 3651 Assert(!RT_HI_U32(p MixedCtx->rflags.u64));3652 X86EFLAGS fEFlags = p MixedCtx->eflags;3622 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64)); 3623 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags; 3653 3624 Assert(fEFlags.u32 & X86_EFL_RA1_MASK); 3654 3625 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK))); … … 3686 3657 * @returns VBox status code. 3687 3658 * @param pVCpu The cross context virtual CPU structure. 3688 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3689 * out-of-sync. Make sure to update the required fields3690 * before using them.3691 3659 * 3692 3660 * @remarks No-long-jump zone!!! 3693 3661 */ 3694 static int hmR0VmxExportGuestCR0(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)3662 static int hmR0VmxExportGuestCR0(PVMCPU pVCpu) 3695 3663 { 3696 3664 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0) … … 3698 3666 PVM pVM = pVCpu->CTX_SUFF(pVM); 3699 3667 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 3700 Assert(!RT_HI_U32(p MixedCtx->cr0));3701 3702 uint32_t const u32ShadowCr0 = p MixedCtx->cr0;3703 uint32_t u32GuestCr0 = p MixedCtx->cr0;3668 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.cr0)); 3669 3670 uint32_t const u32ShadowCr0 = pVCpu->cpum.GstCtx.cr0; 3671 uint32_t u32GuestCr0 = pVCpu->cpum.GstCtx.cr0; 3704 3672 3705 3673 /* … … 3856 3824 * 3857 3825 * @param pVCpu The cross context virtual CPU structure. 3858 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3859 * out-of-sync. Make sure to update the required fields3860 * before using them.3861 3826 * 3862 3827 * @remarks No-long-jump zone!!! 3863 3828 */ 3864 static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)3829 static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu) 3865 3830 { 3866 3831 int rc = VINF_SUCCESS; … … 3904 3869 AssertRCReturn(rc, rc); 3905 3870 3871 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 3906 3872 if ( pVM->hm.s.vmx.fUnrestrictedGuest 3907 || CPUMIsGuestPagingEnabledEx(p MixedCtx))3873 || CPUMIsGuestPagingEnabledEx(pCtx)) 3908 3874 { 3909 3875 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */ 3910 if (CPUMIsGuestInPAEModeEx(p MixedCtx))3876 if (CPUMIsGuestInPAEModeEx(pCtx)) 3911 3877 { 3912 3878 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); … … 3924 3890 * the guest when it's not using paging. 3925 3891 */ 3926 GCPhysGuestCR3 = p MixedCtx->cr3;3892 GCPhysGuestCR3 = pCtx->cr3; 3927 3893 } 3928 3894 else … … 3975 3941 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4) 3976 3942 { 3943 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 3977 3944 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 3978 Assert(!RT_HI_U32(p MixedCtx->cr4));3979 3980 uint32_t u32GuestCr4 = p MixedCtx->cr4;3981 uint32_t const u32ShadowCr4 = p MixedCtx->cr4;3945 Assert(!RT_HI_U32(pCtx->cr4)); 3946 3947 uint32_t u32GuestCr4 = pCtx->cr4; 3948 uint32_t const u32ShadowCr4 = pCtx->cr4; 3982 3949 3983 3950 /* … … 3999 3966 if (pVM->hm.s.fNestedPaging) 4000 3967 { 4001 if ( !CPUMIsGuestPagingEnabledEx(p MixedCtx)3968 if ( !CPUMIsGuestPagingEnabledEx(pCtx) 4002 3969 && !pVM->hm.s.vmx.fUnrestrictedGuest) 4003 3970 { … … 4071 4038 4072 4039 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. 
*/ 4073 pVCpu->hm.s.fLoadSaveGuestXcr0 = (p MixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();4040 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 4074 4041 4075 4042 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4); … … 4090 4057 * @returns VBox status code. 4091 4058 * @param pVCpu The cross context virtual CPU structure. 4092 * @param pMixedCtx Pointer to the guest-CPU context. The data may be4093 * out-of-sync. Make sure to update the required fields4094 * before using them.4095 4059 * 4096 4060 * @remarks No-long-jump zone!!! 4097 4061 */ 4098 static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu , PCPUMCTX pMixedCtx)4062 static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu) 4099 4063 { 4100 4064 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 4105 4069 { 4106 4070 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */ 4107 Assert((p MixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */4108 Assert((p MixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */4071 Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); 4072 Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); 4109 4073 } 4110 4074 #endif … … 4124 4088 else 4125 4089 { 4126 p MixedCtx->eflags.u32 |= X86_EFL_TF;4090 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF; 4127 4091 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS; 4128 4092 pVCpu->hm.s.fClearTrapFlag = true; … … 4143 4107 */ 4144 4108 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 4145 if ( CPUMIsGuestInLongModeEx( pMixedCtx)4109 if ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx) 4146 4110 && !CPUMIsHyperDebugStateActivePending(pVCpu)) 4147 4111 { … … 4170 4134 * executing guest code so they'll trigger at the right time. 4171 4135 */ 4172 if (p MixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))4136 if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) 4173 4137 { 4174 4138 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 4175 if ( CPUMIsGuestInLongModeEx( pMixedCtx)4139 if ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx) 4176 4140 && !CPUMIsGuestDebugStateActivePending(pVCpu)) 4177 4141 { … … 4209 4173 4210 4174 /* Update DR7 with the actual guest value. */ 4211 u32GuestDr7 = p MixedCtx->dr[7];4175 u32GuestDr7 = pVCpu->cpum.GstCtx.dr[7]; 4212 4176 pVCpu->hm.s.fUsingHyperDR7 = false; 4213 4177 } … … 4249 4213 * segments. 4250 4214 */ 4251 static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu , PCCPUMCTX pCtx)4215 static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu) 4252 4216 { 4253 4217 /* … … 4258 4222 * and doesn't change the guest-context value. 4259 4223 */ 4260 PVM pVM = pVCpu->CTX_SUFF(pVM); 4224 PVM pVM = pVCpu->CTX_SUFF(pVM); 4225 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4261 4226 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); 4262 4227 if ( !pVM->hm.s.vmx.fUnrestrictedGuest … … 4477 4442 * @returns VBox status code. 4478 4443 * @param pVCpu The cross context virtual CPU structure. 4479 * @param pMixedCtx Pointer to the guest-CPU context. The data may be4480 * out-of-sync. Make sure to update the required fields4481 * before using them.4482 4444 * 4483 4445 * @remarks Will import guest CR0 on strict builds during validation of … … 4485 4447 * @remarks No-long-jump zone!!! 
4486 4448 */ 4487 static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 4488 { 4489 int rc = VERR_INTERNAL_ERROR_5; 4490 PVM pVM = pVCpu->CTX_SUFF(pVM); 4449 static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu) 4450 { 4451 int rc = VERR_INTERNAL_ERROR_5; 4452 PVM pVM = pVCpu->CTX_SUFF(pVM); 4453 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4491 4454 4492 4455 /* … … 4515 4478 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS); 4516 4479 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4517 pVCpu->hm.s.vmx.RealMode.AttrCS.u = p MixedCtx->cs.Attr.u;4518 rc = HMVMX_EXPORT_SREG(CS, &p MixedCtx->cs);4480 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pCtx->cs.Attr.u; 4481 rc = HMVMX_EXPORT_SREG(CS, &pCtx->cs); 4519 4482 AssertRCReturn(rc, rc); 4520 4483 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS); … … 4525 4488 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS); 4526 4489 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4527 pVCpu->hm.s.vmx.RealMode.AttrSS.u = p MixedCtx->ss.Attr.u;4528 rc = HMVMX_EXPORT_SREG(SS, &p MixedCtx->ss);4490 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pCtx->ss.Attr.u; 4491 rc = HMVMX_EXPORT_SREG(SS, &pCtx->ss); 4529 4492 AssertRCReturn(rc, rc); 4530 4493 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS); … … 4535 4498 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS); 4536 4499 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4537 pVCpu->hm.s.vmx.RealMode.AttrDS.u = p MixedCtx->ds.Attr.u;4538 rc = HMVMX_EXPORT_SREG(DS, &p MixedCtx->ds);4500 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pCtx->ds.Attr.u; 4501 rc = HMVMX_EXPORT_SREG(DS, &pCtx->ds); 4539 4502 AssertRCReturn(rc, rc); 4540 4503 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS); … … 4545 4508 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES); 4546 4509 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4547 pVCpu->hm.s.vmx.RealMode.AttrES.u = p MixedCtx->es.Attr.u;4548 rc = HMVMX_EXPORT_SREG(ES, &p MixedCtx->es);4510 pVCpu->hm.s.vmx.RealMode.AttrES.u = pCtx->es.Attr.u; 4511 rc = HMVMX_EXPORT_SREG(ES, &pCtx->es); 4549 4512 AssertRCReturn(rc, rc); 4550 4513 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES); … … 4555 4518 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS); 4556 4519 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4557 pVCpu->hm.s.vmx.RealMode.AttrFS.u = p MixedCtx->fs.Attr.u;4558 rc = HMVMX_EXPORT_SREG(FS, &p MixedCtx->fs);4520 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pCtx->fs.Attr.u; 4521 rc = HMVMX_EXPORT_SREG(FS, &pCtx->fs); 4559 4522 AssertRCReturn(rc, rc); 4560 4523 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS); … … 4565 4528 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS); 4566 4529 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4567 pVCpu->hm.s.vmx.RealMode.AttrGS.u = p MixedCtx->gs.Attr.u;4568 rc = HMVMX_EXPORT_SREG(GS, &p MixedCtx->gs);4530 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pCtx->gs.Attr.u; 4531 rc = HMVMX_EXPORT_SREG(GS, &pCtx->gs); 4569 4532 AssertRCReturn(rc, rc); 4570 4533 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS); … … 4572 4535 4573 4536 #ifdef VBOX_STRICT 4574 hmR0VmxValidateSegmentRegs(pVCpu , pMixedCtx);4537 hmR0VmxValidateSegmentRegs(pVCpu); 4575 4538 #endif 4576 4539 4577 4540 /* Update the exit history entry with the correct CS.BASE + RIP. 
*/ 4578 4541 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP) 4579 EMR0HistoryUpdatePC(pVCpu, p MixedCtx->cs.u64Base + pMixedCtx->rip, true);4580 4581 Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", p MixedCtx->cs.Sel, pMixedCtx->cs.u64Base,4582 p MixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));4542 EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true); 4543 4544 Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pCtx->cs.Sel, pCtx->cs.u64Base, 4545 pCtx->cs.u32Limit, pCtx->cs.Attr.u)); 4583 4546 } 4584 4547 … … 4602 4565 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 4603 4566 { 4604 u16Sel = p MixedCtx->tr.Sel;4605 u32Limit = p MixedCtx->tr.u32Limit;4606 u64Base = p MixedCtx->tr.u64Base;4607 u32AccessRights = p MixedCtx->tr.Attr.u;4567 u16Sel = pCtx->tr.Sel; 4568 u32Limit = pCtx->tr.u32Limit; 4569 u64Base = pCtx->tr.u64Base; 4570 u32AccessRights = pCtx->tr.Attr.u; 4608 4571 } 4609 4572 else … … 4639 4602 Assert( (u32Limit & 0xfff) == 0xfff 4640 4603 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */ 4641 Assert( !(p MixedCtx->tr.u32Limit & 0xfff00000)4604 Assert( !(pCtx->tr.u32Limit & 0xfff00000) 4642 4605 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */ 4643 4606 … … 4649 4612 4650 4613 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR); 4651 Log4Func(("TR base=%#RX64\n", p MixedCtx->tr.u64Base));4614 Log4Func(("TR base=%#RX64\n", pCtx->tr.u64Base)); 4652 4615 } 4653 4616 … … 4659 4622 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR); 4660 4623 4661 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, p MixedCtx->gdtr.cbGdt);4662 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, p MixedCtx->gdtr.pGdt);4624 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); 4625 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); 4663 4626 AssertRCReturn(rc, rc); 4664 4627 4665 4628 /* Validate. */ 4666 Assert(!(p MixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */4629 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 4667 4630 4668 4631 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR); 4669 Log4Func(("GDTR base=%#RX64\n", p MixedCtx->gdtr.pGdt));4632 Log4Func(("GDTR base=%#RX64\n", pCtx->gdtr.pGdt)); 4670 4633 } 4671 4634 … … 4679 4642 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */ 4680 4643 uint32_t u32Access = 0; 4681 if (!p MixedCtx->ldtr.Attr.u)4644 if (!pCtx->ldtr.Attr.u) 4682 4645 u32Access = X86DESCATTR_UNUSABLE; 4683 4646 else 4684 u32Access = p MixedCtx->ldtr.Attr.u;4685 4686 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, p MixedCtx->ldtr.Sel);4687 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, p MixedCtx->ldtr.u32Limit);4647 u32Access = pCtx->ldtr.Attr.u; 4648 4649 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); 4650 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); 4688 4651 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); 4689 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, p MixedCtx->ldtr.u64Base);4652 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); 4690 4653 AssertRCReturn(rc, rc); 4691 4654 … … 4693 4656 if (!(u32Access & X86DESCATTR_UNUSABLE)) 4694 4657 { 4695 Assert(!(p MixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */4696 Assert(p MixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */4697 Assert(!p MixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. 
*/4698 Assert(p MixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */4699 Assert(!p MixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */4700 Assert(!(p MixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */4701 Assert( (p MixedCtx->ldtr.u32Limit & 0xfff) == 0xfff4702 || !p MixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */4703 Assert( !(p MixedCtx->ldtr.u32Limit & 0xfff00000)4704 || p MixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */4658 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */ 4659 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */ 4660 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */ 4661 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */ 4662 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */ 4663 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */ 4664 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff 4665 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */ 4666 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000) 4667 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */ 4705 4668 } 4706 4669 4707 4670 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR); 4708 Log4Func(("LDTR base=%#RX64\n", p MixedCtx->ldtr.u64Base));4671 Log4Func(("LDTR base=%#RX64\n", pCtx->ldtr.u64Base)); 4709 4672 } 4710 4673 … … 4716 4679 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR); 4717 4680 4718 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, p MixedCtx->idtr.cbIdt);4719 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, p MixedCtx->idtr.pIdt);4681 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); 4682 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); 4720 4683 AssertRCReturn(rc, rc); 4721 4684 4722 4685 /* Validate. */ 4723 Assert(!(p MixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */4686 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 4724 4687 4725 4688 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR); 4726 Log4Func(("IDTR base=%#RX64\n", p MixedCtx->idtr.pIdt));4689 Log4Func(("IDTR base=%#RX64\n", pCtx->idtr.pIdt)); 4727 4690 } 4728 4691 … … 4744 4707 * @returns VBox status code. 4745 4708 * @param pVCpu The cross context virtual CPU structure. 4746 * @param pMixedCtx Pointer to the guest-CPU context. The data may be4747 * out-of-sync. Make sure to update the required fields4748 * before using them.4749 4709 * 4750 4710 * @remarks No-long-jump zone!!! 4751 4711 */ 4752 static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)4712 static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu) 4753 4713 { 4754 4714 AssertPtr(pVCpu); … … 4759 4719 * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). 
4760 4720 */ 4761 PVM pVM = pVCpu->CTX_SUFF(pVM); 4721 PVM pVM = pVCpu->CTX_SUFF(pVM); 4722 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4762 4723 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS) 4763 4724 { … … 4767 4728 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE); 4768 4729 4769 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, p MixedCtx->msrLSTAR, false, NULL);4770 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, p MixedCtx->msrSTAR, false, NULL);4771 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, p MixedCtx->msrSFMASK, false, NULL);4772 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, p MixedCtx->msrKERNELGSBASE, false, NULL);4730 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pCtx->msrLSTAR, false, NULL); 4731 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pCtx->msrSTAR, false, NULL); 4732 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pCtx->msrSFMASK, false, NULL); 4733 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE, false, NULL); 4773 4734 AssertRCReturn(rc, rc); 4774 4735 # ifdef LOG_ENABLED … … 4793 4754 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR) 4794 4755 { 4795 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, p MixedCtx->SysEnter.cs);4756 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs); 4796 4757 AssertRCReturn(rc, rc); 4797 4758 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR); … … 4800 4761 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR) 4801 4762 { 4802 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, p MixedCtx->SysEnter.eip);4763 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip); 4803 4764 AssertRCReturn(rc, rc); 4804 4765 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR); … … 4807 4768 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR) 4808 4769 { 4809 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, p MixedCtx->SysEnter.esp);4770 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp); 4810 4771 AssertRCReturn(rc, rc); 4811 4772 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR); … … 4817 4778 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER); 4818 4779 4819 if (hmR0VmxShouldSwapEferMsr(pVCpu , pMixedCtx))4780 if (hmR0VmxShouldSwapEferMsr(pVCpu)) 4820 4781 { 4821 4782 /* … … 4825 4786 if (pVM->hm.s.vmx.fSupportsVmcsEfer) 4826 4787 { 4827 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, p MixedCtx->msrEFER);4788 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER); 4828 4789 AssertRCReturn(rc,rc); 4829 Log4Func(("EFER=%#RX64\n", p MixedCtx->msrEFER));4790 Log4Func(("EFER=%#RX64\n", pCtx->msrEFER)); 4830 4791 } 4831 4792 else 4832 4793 { 4833 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, p MixedCtx->msrEFER, false /* fUpdateHostMsr */,4794 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pCtx->msrEFER, false /* fUpdateHostMsr */, 4834 4795 NULL /* pfAddedAndUpdated */); 4835 4796 AssertRCReturn(rc, rc); … … 4838 4799 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 4839 4800 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE); 4840 Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, p 
MixedCtx->msrEFER,4801 Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pCtx->msrEFER, 4841 4802 pVCpu->hm.s.vmx.cMsrs)); 4842 4803 } … … 4860 4821 * 4861 4822 * @returns true if safe, false if must continue to use the 64-bit switcher. 4862 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 4863 * out-of-sync. Make sure to update the required fields 4864 * before using them. 4823 * @param pCtx Pointer to the guest-CPU context. 4865 4824 * 4866 4825 * @remarks No-long-jump zone!!! 4867 4826 */ 4868 static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX p MixedCtx)4869 { 4870 if (p MixedCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false;4871 if (p MixedCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false;4872 if (p MixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false;4873 if (p MixedCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false;4874 if (p MixedCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false;4875 if (p MixedCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false;4876 if (p MixedCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false;4877 if (p MixedCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false;4878 if (p MixedCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false;4879 if (p MixedCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false;4827 static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pCtx) 4828 { 4829 if (pCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false; 4830 if (pCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false; 4831 if (pCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false; 4832 if (pCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false; 4833 if (pCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false; 4834 if (pCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false; 4835 if (pCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false; 4836 if (pCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false; 4837 if (pCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false; 4838 if (pCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false; 4880 4839 4881 4840 /* All good, bases are 32-bit. */ … … 4890 4849 * @returns VBox status code. 4891 4850 * @param pVCpu The cross context virtual CPU structure. 4892 * @param pMixedCtx Pointer to the guest-CPU context. The data may be4893 * out-of-sync. Make sure to update the required fields4894 * before using them.4895 4851 * 4896 4852 * @remarks No-long-jump zone!!! 4897 4853 */ 4898 static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 4899 { 4900 if (CPUMIsGuestInLongModeEx(pMixedCtx)) 4854 static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu) 4855 { 4856 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 4857 if (CPUMIsGuestInLongModeEx(pCtx)) 4901 4858 { 4902 4859 #ifndef VBOX_ENABLE_64_BITS_GUESTS … … 4966 4923 Assert(pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64); 4967 4924 if ( pVCpu->hm.s.vmx.RealMode.fRealOnV86Active 4968 || hmR0VmxIs32BitSwitcherSafe(p MixedCtx))4925 || hmR0VmxIs32BitSwitcherSafe(pCtx)) 4969 4926 { 4970 4927 pVCpu->hm.s.vmx.fSwitchedTo64on32 = false; … … 4994 4951 * @returns VBox status code, no informational status codes. 4995 4952 * @param pVCpu The cross context virtual CPU structure. 4996 * @param pCtx Pointer to the guest-CPU context.4997 4953 * 4998 4954 * @remarks No-long-jump zone!!! 
4999 4955 */ 5000 DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu , PCPUMCTX pCtx)4956 DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu) 5001 4957 { 5002 4958 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */ 4959 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 5003 4960 pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM; 5004 4961 … … 5028 4985 * @param pVCpu The cross context virtual CPU structure. 5029 4986 * @param rcVMRun The return code from VMLAUNCH/VMRESUME. 5030 * @param pCtx Pointer to the guest-CPU context.5031 4987 * @param pVmxTransient Pointer to the VMX transient structure (only 5032 4988 * exitReason updated). 5033 4989 */ 5034 static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, P CPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)4990 static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PVMXTRANSIENT pVmxTransient) 5035 4991 { 5036 4992 Assert(pVCpu); 5037 Assert(pCtx);5038 4993 Assert(pVmxTransient); 5039 4994 HMVMX_ASSERT_PREEMPT_SAFE(); … … 5124 5079 /* Guest bits. */ 5125 5080 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc); 5126 Log4(("Old Guest Rip %#RX64 New %#RX64\n", p Ctx->rip, u64Val));5081 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rip, u64Val)); 5127 5082 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc); 5128 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", p Ctx->rsp, u64Val));5083 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rsp, u64Val)); 5129 5084 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc); 5130 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", p Ctx->eflags.u32, u32Val));5085 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pVCpu->cpum.GstCtx.eflags.u32, u32Val)); 5131 5086 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid) 5132 5087 { … … 5235 5190 break; 5236 5191 } 5237 NOREF(pCtx);5238 5192 } 5239 5193 … … 5839 5793 * 5840 5794 * @param pVCpu The cross context virtual CPU structure. 5841 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 5842 * out-of-sync. Make sure to update the required fields 5843 * before using them. 5844 */ 5845 DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 5846 { 5847 NOREF(pMixedCtx); 5795 */ 5796 DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu) 5797 { 5848 5798 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID; 5849 5799 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 5865 5815 * 5866 5816 * @param pVCpu The cross context virtual CPU structure. 5867 * @param pMixedCtx Pointer to the guest-CPU context. The data may be5868 * out-of-sync. Make sure to update the required fields5869 * before using them.5870 5817 * @param pVmxTransient Pointer to the VMX transient structure. 5871 5818 * 5872 5819 * @remarks No-long-jump zone!!! 
5873 5820 */ 5874 static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)5821 static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 5875 5822 { 5876 5823 uint32_t const uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo); … … 5979 5926 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect); 5980 5927 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo), 5981 0 /* cbInstr */, u32ErrCode, p MixedCtx->cr2);5928 0 /* cbInstr */, u32ErrCode, pVCpu->cpum.GstCtx.cr2); 5982 5929 5983 5930 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo, … … 6001 5948 pVmxTransient->fVectoringDoublePF = true; 6002 5949 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo, 6003 p MixedCtx->cr2));5950 pVCpu->cpum.GstCtx.cr2)); 6004 5951 rcStrict = VINF_SUCCESS; 6005 5952 } … … 6007 5954 { 6008 5955 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect); 6009 hmR0VmxSetPendingXcptDF(pVCpu , pMixedCtx);5956 hmR0VmxSetPendingXcptDF(pVCpu); 6010 5957 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo, 6011 5958 uIdtVector, uExitVector)); … … 6625 6572 * 6626 6573 * @param pVCpu The cross context virtual CPU structure. 6627 * @param pMixedCtx Pointer to the guest-CPU context. The data may be6628 * out-of-sync. Make sure to update the required fields6629 * before using them.6630 6574 * @param fStepping Running in hmR0VmxRunGuestCodeStep(). 6631 6575 */ 6632 static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pMixedCtx,bool fStepping)6576 static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, bool fStepping) 6633 6577 { 6634 6578 Assert(VMMRZCallRing3IsEnabled(pVCpu)); … … 6648 6592 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)) 6649 6593 { 6650 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4))); 6651 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, 6594 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6595 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4))); 6596 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, 6652 6597 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); 6653 6598 if (rcStrict2 != VINF_SUCCESS) … … 6943 6888 * @returns VBox status code. 6944 6889 * @param pVCpu The cross context virtual CPU structure. 6945 * @param pMixedCtx Pointer to the guest-CPU context. The data may be6946 * out-of-sync. Make sure to update the required fields6947 * before using them.6948 6890 * 6949 6891 * @remarks No-long-jmp zone!!! 6950 6892 */ 6951 static int hmR0VmxLeaveSession(PVMCPU pVCpu , PCPUMCTX pMixedCtx)6893 static int hmR0VmxLeaveSession(PVMCPU pVCpu) 6952 6894 { 6953 6895 HM_DISABLE_PREEMPT(); … … 6964 6906 pVCpu->hm.s.fLeaveDone = true; 6965 6907 } 6966 Assert(!p MixedCtx->fExtrn); NOREF(pMixedCtx);6908 Assert(!pVCpu->cpum.GstCtx.fExtrn); 6967 6909 6968 6910 /* … … 6990 6932 * @returns VBox status code. 6991 6933 * @param pVCpu The cross context virtual CPU structure. 6992 * @param pMixedCtx Pointer to the guest-CPU context. The data may be6993 * out-of-sync. Make sure to update the required fields6994 * before using them.6995 6934 * 6996 6935 * @remarks No-long-jmp zone!!! 
6997 6936 */ 6998 DECLINLINE(int) hmR0VmxLongJmpToRing3(PVMCPU pVCpu , PCPUMCTX pMixedCtx)6999 { 7000 return hmR0VmxLeaveSession(pVCpu , pMixedCtx);6937 DECLINLINE(int) hmR0VmxLongJmpToRing3(PVMCPU pVCpu) 6938 { 6939 return hmR0VmxLeaveSession(pVCpu); 7001 6940 } 7002 6941 … … 7012 6951 * @returns VBox status code. 7013 6952 * @param pVCpu The cross context virtual CPU structure. 7014 * @param pMixedCtx Pointer to the guest-CPU context. The data may be7015 * out-of-sync. Make sure to update the required fields7016 * before using them.7017 6953 * @param rcExit The reason for exiting to ring-3. Can be 7018 6954 * VINF_VMM_UNKNOWN_RING3_CALL. 7019 6955 */ 7020 static int hmR0VmxExitToRing3(PVMCPU pVCpu, PCPUMCTX pMixedCtx,VBOXSTRICTRC rcExit)6956 static int hmR0VmxExitToRing3(PVMCPU pVCpu, VBOXSTRICTRC rcExit) 7021 6957 { 7022 6958 Assert(pVCpu); 7023 Assert(pMixedCtx);7024 6959 HMVMX_ASSERT_PREEMPT_SAFE(); 7025 6960 … … 7054 6989 7055 6990 /* Save guest state and restore host state bits. */ 7056 int rc = hmR0VmxLeaveSession(pVCpu , pMixedCtx);6991 int rc = hmR0VmxLeaveSession(pVCpu); 7057 6992 AssertRCReturn(rc, rc); 7058 6993 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3); … … 7068 7003 | CPUM_CHANGED_HIDDEN_SEL_REGS); 7069 7004 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging 7070 && CPUMIsGuestPagingEnabledEx( pMixedCtx))7005 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)) 7071 7006 { 7072 7007 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH); … … 7159 7094 Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation)); 7160 7095 7161 int rc = hmR0VmxLongJmpToRing3(pVCpu , (PCPUMCTX)pvUser);7096 int rc = hmR0VmxLongJmpToRing3(pVCpu); 7162 7097 AssertRCReturn(rc, rc); 7163 7098 … … 7245 7180 * @returns The VT-x guest-interruptibility state. 7246 7181 * @param pVCpu The cross context virtual CPU structure. 7247 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 7248 * out-of-sync. Make sure to update the required fields 7249 * before using them. 7250 */ 7251 static uint32_t hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7182 */ 7183 static uint32_t hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu) 7252 7184 { 7253 7185 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */ 7254 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx); 7186 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7187 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu); 7255 7188 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS); 7256 7189 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 7257 7190 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI); 7258 7191 7259 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&p MixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));7192 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 7260 7193 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/ 7261 Assert(!fBlockSti || p MixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */7194 Assert(!fBlockSti || pCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. 
*/ 7262 7195 Assert(!TRPMHasTrap(pVCpu)); 7263 7196 … … 7298 7231 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS); 7299 7232 AssertRCReturn(rc, 0); 7300 bool const fBlockInt = !(p MixedCtx->eflags.u32 & X86_EFL_IF);7233 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF); 7301 7234 if ( !pVCpu->hm.s.Event.fPending 7302 7235 && !fBlockInt … … 7343 7276 * 7344 7277 * @param pVCpu The cross context virtual CPU structure. 7345 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 7346 * out-of-sync. Make sure to update the required fields 7347 * before using them. 7348 */ 7349 DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7350 { 7278 */ 7279 DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu) 7280 { 7281 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 7351 7282 RT_NOREF(pVCpu); 7352 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); NOREF(pMixedCtx);7353 7283 return VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS); 7354 7284 } … … 7361 7291 * @returns Strict VBox status code (i.e. informational status codes too). 7362 7292 * @param pVCpu The cross context virtual CPU structure. 7363 * @param pMixedCtx Pointer to the guest-CPU context. The data may be7364 * out-of-sync. Make sure to update the required fields7365 * before using them.7366 7293 * @param fIntrState The VT-x guest-interruptibility state. 7367 7294 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should … … 7369 7296 * dispatched directly. 7370 7297 */ 7371 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx,uint32_t fIntrState, bool fStepping)7298 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, uint32_t fIntrState, bool fStepping) 7372 7299 { 7373 7300 HMVMX_ASSERT_PREEMPT_SAFE(); … … 7377 7304 bool fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 7378 7305 7379 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&p MixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));7380 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/7381 Assert(!fBlockSti || p MixedCtx->eflags.Bits.u1IF);/* Cannot set block-by-STI when interrupts are disabled. */7306 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 7307 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/ 7308 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. 
*/ 7382 7309 Assert(!TRPMHasTrap(pVCpu)); 7383 7310 7311 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7384 7312 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 7385 7313 if (pVCpu->hm.s.Event.fPending) … … 7396 7324 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT) 7397 7325 { 7398 bool const fBlockInt = !(p MixedCtx->eflags.u32 & X86_EFL_IF);7326 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF); 7399 7327 Assert(!fBlockInt); 7400 7328 Assert(!fBlockSti); … … 7441 7369 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS); 7442 7370 AssertRCReturn(rc, rc); 7443 if (p MixedCtx->eflags.Bits.u1TF)7371 if (pCtx->eflags.Bits.u1TF) 7444 7372 { 7445 int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu , pMixedCtx);7373 int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 7446 7374 AssertRCReturn(rc2, rc2); 7447 7375 } 7448 7376 } 7449 else if (p MixedCtx->eflags.Bits.u1TF)7377 else if (pCtx->eflags.Bits.u1TF) 7450 7378 { 7451 7379 /* … … 7475 7403 * 7476 7404 * @param pVCpu The cross context virtual CPU structure. 7477 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 7478 * out-of-sync. Make sure to update the required fields 7479 * before using them. 7480 */ 7481 DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7482 { 7483 NOREF(pMixedCtx); 7405 */ 7406 DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu) 7407 { 7484 7408 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID; 7485 7409 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */); … … 7492 7416 * @returns Strict VBox status code (i.e. informational status codes too). 7493 7417 * @param pVCpu The cross context virtual CPU structure. 7494 * @param pMixedCtx Pointer to the guest-CPU context. The data may be7495 * out-of-sync. Make sure to update the required fields7496 * before using them.7497 7418 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep() 7498 7419 * and should return VINF_EM_DBG_STEPPED if the event … … 7503 7424 * necessary. This cannot not be NULL. 7504 7425 */ 7505 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fStepping, uint32_t *pfIntrState) 7506 { 7507 NOREF(pMixedCtx); 7426 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, bool fStepping, uint32_t *pfIntrState) 7427 { 7508 7428 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID; 7509 7429 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 7518 7438 * 7519 7439 * @param pVCpu The cross context virtual CPU structure. 7520 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 7521 * out-of-sync. Make sure to update the required fields 7522 * before using them. 7523 */ 7524 DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7525 { 7526 NOREF(pMixedCtx); 7440 */ 7441 DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu) 7442 { 7527 7443 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID; 7528 7444 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 7535 7451 * 7536 7452 * @param pVCpu The cross context virtual CPU structure. 7537 * @param pMixedCtx Pointer to the guest-CPU context. The data may be7538 * out-of-sync. Make sure to update the required fields7539 * before using them.7540 7453 * @param cbInstr The value of RIP that is to be pushed on the guest 7541 7454 * stack. 
7542 7455 */ 7543 DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr) 7544 { 7545 NOREF(pMixedCtx); 7456 DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, uint32_t cbInstr) 7457 { 7546 7458 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID; 7547 7459 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 7555 7467 * @returns Strict VBox status code (i.e. informational status codes too). 7556 7468 * @param pVCpu The cross context virtual CPU structure. 7557 * @param pMixedCtx Pointer to the guest-CPU context. The data may be7558 * out-of-sync. Make sure to update the required fields7559 * before using them.7560 7469 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU 7561 7470 * mode, i.e. in real-mode it's not valid). … … 7570 7479 * necessary. This cannot not be NULL. 7571 7480 */ 7572 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode, 7573 bool fStepping, uint32_t *pfIntrState) 7574 { 7575 NOREF(pMixedCtx); 7481 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, bool fErrorCodeValid, uint32_t u32ErrorCode, bool fStepping, 7482 uint32_t *pfIntrState) 7483 { 7576 7484 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID; 7577 7485 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 7583 7491 7584 7492 7585 #if 0 /* unused */ 7586 /** 7587 * Sets a general-protection (\#GP) exception as pending-for-injection into the 7588 * VM. 7493 /** 7494 * Sets a software interrupt (INTn) as pending-for-injection into the VM. 7589 7495 * 7590 7496 * @param pVCpu The cross context virtual CPU structure. 7591 * @param pMixedCtx Pointer to the guest-CPU context. The data may be7592 * out-of-sync. Make sure to update the required fields7593 * before using them.7594 * @param u32ErrorCode The error code associated with the \#GP.7595 */7596 DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)7597 {7598 NOREF(pMixedCtx);7599 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;7600 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);7601 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;7602 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);7603 }7604 #endif /* unused */7605 7606 7607 /**7608 * Sets a software interrupt (INTn) as pending-for-injection into the VM.7609 *7610 * @param pVCpu The cross context virtual CPU structure.7611 * @param pMixedCtx Pointer to the guest-CPU context. The data may be7612 * out-of-sync. Make sure to update the required fields7613 * before using them.7614 7497 * @param uVector The software interrupt vector number. 7615 7498 * @param cbInstr The value of RIP that is to be pushed on the guest 7616 7499 * stack. 7617 7500 */ 7618 DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr) 7619 { 7620 NOREF(pMixedCtx); 7501 DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, uint16_t uVector, uint32_t cbInstr) 7502 { 7621 7503 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID; 7622 7504 if ( uVector == X86_XCPT_BP … … 7635 7517 * @returns Strict VBox status code (i.e. informational status codes too). 
7636 7518 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault. 7637 * @param pVM The cross context VM structure. 7638 * @param pMixedCtx Pointer to the guest-CPU context. 7519 * @param pVCpu The cross context virtual CPU structure. 7639 7520 * @param uValue The value to push to the guest stack. 7640 7521 */ 7641 DECLINLINE(VBOXSTRICTRC) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)7522 static VBOXSTRICTRC hmR0VmxRealModeGuestStackPush(PVMCPU pVCpu, uint16_t uValue) 7642 7523 { 7643 7524 /* … … 7646 7527 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound". 7647 7528 */ 7648 if (pMixedCtx->sp == 1) 7529 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7530 if (pCtx->sp == 1) 7649 7531 return VINF_EM_RESET; 7650 p MixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */7651 int rc = PGMPhysSimpleWriteGCPhys(pV M, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));7532 pCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */ 7533 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t)); 7652 7534 AssertRC(rc); 7653 7535 return rc; … … 7686 7568 Assert(pfIntrState); 7687 7569 7688 PCPUMCTX p MixedCtx= &pVCpu->cpum.GstCtx;7570 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7689 7571 uint32_t u32IntInfo = (uint32_t)u64IntInfo; 7690 7572 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo); … … 7699 7581 */ 7700 7582 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT 7701 && !CPUMIsGuestInRealModeEx(p MixedCtx))7583 && !CPUMIsGuestInRealModeEx(pCtx)) 7702 7584 { 7703 7585 switch (uVector) … … 7733 7615 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling. 7734 7616 */ 7735 if (CPUMIsGuestInRealModeEx(p MixedCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */7617 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */ 7736 7618 { 7737 7619 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest) … … 7758 7640 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */ 7759 7641 size_t const cbIdtEntry = sizeof(X86IDTR16); 7760 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > p MixedCtx->idtr.cbIdt)7642 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt) 7761 7643 { 7762 7644 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */ … … 7766 7648 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */ 7767 7649 if (uVector == X86_XCPT_GP) 7768 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx,fStepping, pfIntrState);7650 return hmR0VmxInjectXcptDF(pVCpu, fStepping, pfIntrState); 7769 7651 7770 7652 /* … … 7774 7656 * See Intel spec. 
20.1.4 "Interrupt and Exception Handling" 7775 7657 */ 7776 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping, 7777 pfIntrState); 7658 return hmR0VmxInjectXcptGP(pVCpu, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping, pfIntrState); 7778 7659 } 7779 7660 7780 7661 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */ 7781 uint16_t uGuestIp = p MixedCtx->ip;7662 uint16_t uGuestIp = pCtx->ip; 7782 7663 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT) 7783 7664 { 7784 7665 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF); 7785 7666 /* #BP and #OF are both benign traps, we need to resume the next instruction. */ 7786 uGuestIp = p MixedCtx->ip + (uint16_t)cbInstr;7667 uGuestIp = pCtx->ip + (uint16_t)cbInstr; 7787 7668 } 7788 7669 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT) 7789 uGuestIp = p MixedCtx->ip + (uint16_t)cbInstr;7670 uGuestIp = pCtx->ip + (uint16_t)cbInstr; 7790 7671 7791 7672 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */ 7792 7673 X86IDTR16 IdtEntry; 7793 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)p MixedCtx->idtr.pIdt + uVector * cbIdtEntry;7674 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry; 7794 7675 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry); 7795 7676 AssertRCReturn(rc2, rc2); … … 7797 7678 /* Construct the stack frame for the interrupt/exception handler. */ 7798 7679 VBOXSTRICTRC rcStrict; 7799 rcStrict = hmR0VmxRealModeGuestStackPush(pV M, pMixedCtx, pMixedCtx->eflags.u32);7680 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32); 7800 7681 if (rcStrict == VINF_SUCCESS) 7801 rcStrict = hmR0VmxRealModeGuestStackPush(pV M, pMixedCtx, pMixedCtx->cs.Sel);7682 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel); 7802 7683 if (rcStrict == VINF_SUCCESS) 7803 rcStrict = hmR0VmxRealModeGuestStackPush(pV M, pMixedCtx, uGuestIp);7684 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp); 7804 7685 7805 7686 /* Clear the required eflag bits and jump to the interrupt/exception handler. */ 7806 7687 if (rcStrict == VINF_SUCCESS) 7807 7688 { 7808 p MixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);7809 p MixedCtx->rip = IdtEntry.offSel;7810 p MixedCtx->cs.Sel = IdtEntry.uSel;7811 p MixedCtx->cs.ValidSel = IdtEntry.uSel;7812 p MixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;7689 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC); 7690 pCtx->rip = IdtEntry.offSel; 7691 pCtx->cs.Sel = IdtEntry.uSel; 7692 pCtx->cs.ValidSel = IdtEntry.uSel; 7693 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry; 7813 7694 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT 7814 7695 && uVector == X86_XCPT_PF) 7815 p MixedCtx->cr2 = GCPtrFaultAddress;7696 pCtx->cr2 = GCPtrFaultAddress; 7816 7697 7817 7698 /* If any other guest-state bits are changed here, make sure to update … … 7830 7711 } 7831 7712 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n", 7832 u32IntInfo, u32ErrCode, cbInstr, p MixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));7713 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip)); 7833 7714 7834 7715 /* The event has been truly dispatched. 
Mark it as no longer pending so we don't attempt to 'undo' … … 7861 7742 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT 7862 7743 && uVector == X86_XCPT_PF) 7863 p MixedCtx->cr2 = GCPtrFaultAddress;7864 7865 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, p MixedCtx->cr2));7744 pCtx->cr2 = GCPtrFaultAddress; 7745 7746 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2)); 7866 7747 7867 7748 return VINF_SUCCESS; … … 8097 7978 * 8098 7979 * @param pVCpu The cross context virtual CPU structure. 8099 * @param pMixedCtx Pointer to the guest-CPU context. The data may be8100 * out-of-sync. Make sure to update the required fields8101 * before using them.8102 7980 * 8103 7981 * @remarks No-long-jump zone!!! 8104 7982 */ 8105 static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)7983 static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu) 8106 7984 { 8107 7985 AssertPtr(pVCpu); 8108 AssertPtr(pMixedCtx);8109 7986 HMVMX_ASSERT_PREEMPT_SAFE(); 8110 7987 … … 8116 7993 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false; 8117 7994 if ( !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest 8118 && CPUMIsGuestInRealModeEx( pMixedCtx))7995 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)) 8119 7996 { 8120 7997 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true; … … 8125 8002 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it. 8126 8003 */ 8127 int rc = hmR0VmxSelectVMRunHandler(pVCpu , pMixedCtx);8004 int rc = hmR0VmxSelectVMRunHandler(pVCpu); 8128 8005 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8129 8006 8130 8007 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */ 8131 rc = hmR0VmxExportGuestEntryCtls(pVCpu , pMixedCtx);8008 rc = hmR0VmxExportGuestEntryCtls(pVCpu); 8132 8009 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8133 8010 8134 8011 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */ 8135 rc = hmR0VmxExportGuestExitCtls(pVCpu , pMixedCtx);8012 rc = hmR0VmxExportGuestExitCtls(pVCpu); 8136 8013 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8137 8014 8138 rc = hmR0VmxExportGuestCR0(pVCpu , pMixedCtx);8015 rc = hmR0VmxExportGuestCR0(pVCpu); 8139 8016 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8140 8017 8141 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu , pMixedCtx);8018 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu); 8142 8019 if (rcStrict == VINF_SUCCESS) 8143 8020 { /* likely */ } … … 8148 8025 } 8149 8026 8150 rc = hmR0VmxExportGuestSegmentRegs(pVCpu , pMixedCtx);8027 rc = hmR0VmxExportGuestSegmentRegs(pVCpu); 8151 8028 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8152 8029 8153 8030 /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it 8154 8031 may alter controls if we determine we don't have to swap EFER after all. */ 8155 rc = hmR0VmxExportGuestMsrs(pVCpu , pMixedCtx);8032 rc = hmR0VmxExportGuestMsrs(pVCpu); 8156 8033 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8157 8034 … … 8164 8041 /* Exporting RFLAGS here is fine, even though RFLAGS.TF might depend on guest debug state which is 8165 8042 not exported here. It is re-evaluated and updated if necessary in hmR0VmxExportSharedState(). 
*/ 8166 rc = hmR0VmxExportGuestRip(pVCpu , pMixedCtx);8167 rc |= hmR0VmxExportGuestRsp(pVCpu , pMixedCtx);8168 rc |= hmR0VmxExportGuestRflags(pVCpu , pMixedCtx);8043 rc = hmR0VmxExportGuestRip(pVCpu); 8044 rc |= hmR0VmxExportGuestRsp(pVCpu); 8045 rc |= hmR0VmxExportGuestRflags(pVCpu); 8169 8046 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8170 8047 … … 8193 8070 * 8194 8071 * @param pVCpu The cross context virtual CPU structure. 8195 * @param pCtx Pointer to the guest-CPU context.8196 8072 * 8197 8073 * @remarks No-long-jump zone!!! 8198 8074 */ 8199 static void hmR0VmxExportSharedState(PVMCPU pVCpu , PCPUMCTX pCtx)8075 static void hmR0VmxExportSharedState(PVMCPU pVCpu) 8200 8076 { 8201 8077 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 8204 8080 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK) 8205 8081 { 8206 int rc = hmR0VmxExportSharedDebugState(pVCpu , pCtx);8082 int rc = hmR0VmxExportSharedDebugState(pVCpu); 8207 8083 AssertRC(rc); 8208 8084 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK; … … 8211 8087 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS) 8212 8088 { 8213 rc = hmR0VmxExportGuestRflags(pVCpu , pCtx);8089 rc = hmR0VmxExportGuestRflags(pVCpu); 8214 8090 AssertRC(rc); 8215 8091 } … … 8218 8094 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS) 8219 8095 { 8220 hmR0VmxLazyLoadGuestMsrs(pVCpu , pCtx);8096 hmR0VmxLazyLoadGuestMsrs(pVCpu); 8221 8097 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS; 8222 8098 } … … 8236 8112 * 8237 8113 * @param pVCpu The cross context virtual CPU structure. 8238 * @param pMixedCtx Pointer to the guest-CPU context. The data may be8239 * out-of-sync. Make sure to update the required fields8240 * before using them.8241 8114 * 8242 8115 * @remarks No-long-jump zone!!! 8243 8116 */ 8244 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu , PCCPUMCTX pMixedCtx)8117 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu) 8245 8118 { 8246 8119 HMVMX_ASSERT_PREEMPT_SAFE(); … … 8261 8134 if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP) 8262 8135 { 8263 rcStrict = hmR0VmxExportGuestRip(pVCpu , pMixedCtx);8136 rcStrict = hmR0VmxExportGuestRip(pVCpu); 8264 8137 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 8265 8138 { /* likely */} … … 8270 8143 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 8271 8144 { 8272 rcStrict = hmR0VmxExportGuestState(pVCpu , pMixedCtx);8145 rcStrict = hmR0VmxExportGuestState(pVCpu); 8273 8146 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 8274 8147 { /* likely */} … … 8319 8192 * 8320 8193 * @param pVCpu The cross context virtual CPU structure. 8321 * @param pMixedCtx Pointer to the guest-CPU context. The data may be8322 * out-of-sync. Make sure to update the required fields8323 * before using them.8324 8194 * @param pVmxTransient Pointer to the VMX transient structure. 8325 8195 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes … … 8328 8198 * dispatching took place. 8329 8199 */ 8330 static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)8200 static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping) 8331 8201 { 8332 8202 Assert(VMMRZCallRing3IsEnabled(pVCpu)); … … 8337 8207 8338 8208 /* Check force flag actions that might require us to go back to ring-3. 
*/ 8339 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, pMixedCtx,fStepping);8209 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, fStepping); 8340 8210 if (rcStrict == VINF_SUCCESS) 8341 8211 { /* FFs doesn't get set all the time. */ } … … 8377 8247 if (TRPMHasTrap(pVCpu)) 8378 8248 hmR0VmxTrpmTrapToPendingEvent(pVCpu); 8379 uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu , pMixedCtx);8249 uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu); 8380 8250 8381 8251 /* … … 8384 8254 * also result in triple-faulting the VM. 8385 8255 */ 8386 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx,fIntrState, fStepping);8256 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, fIntrState, fStepping); 8387 8257 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 8388 8258 { /* likely */ } … … 8402 8272 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 8403 8273 { 8404 Assert(!(ASMAtomicUoReadU64(&p MixedCtx->fExtrn) & CPUMCTX_EXTRN_CR3));8274 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3)); 8405 8275 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu)); 8406 8276 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3, … … 8432 8302 * Hence, loading of the guest state needs to be done -after- injection of events. 8433 8303 */ 8434 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu , pMixedCtx);8304 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu); 8435 8305 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 8436 8306 { /* likely */ } … … 8497 8367 * 8498 8368 * @param pVCpu The cross context virtual CPU structure. 8499 * @param pMixedCtx Pointer to the guest-CPU context. The data may be8500 * out-of-sync. Make sure to update the required fields8501 * before using them.8502 8369 * @param pVmxTransient Pointer to the VMX transient structure. 8503 8370 * … … 8505 8372 * @remarks No-long-jump zone!!! 8506 8373 */ 8507 static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)8374 static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 8508 8375 { 8509 8376 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); … … 8553 8420 */ 8554 8421 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE) 8555 hmR0VmxExportSharedState(pVCpu , pMixedCtx);8422 hmR0VmxExportSharedState(pVCpu); 8556 8423 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged)); 8557 8424 8558 8425 /* Store status of the shared guest-host state at the time of VM-entry. */ 8559 8426 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 8560 if (CPUMIsGuestInLongModeEx( pMixedCtx))8427 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)) 8561 8428 { 8562 8429 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu); … … 8638 8505 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)) 8639 8506 { 8640 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu , pMixedCtx);8507 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu); 8641 8508 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND) 8642 8509 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason)); … … 8775 8642 * @returns VBox status code. 8776 8643 * @param pVCpu The cross context virtual CPU structure. 8777 * @param pCtx Pointer to the guest-CPU context.8778 8644 * 8779 8645 * @note Mostly the same as hmR0VmxRunGuestCodeStep(). 
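Stepping back from the individual hunks: the signature changes running through this file all follow one shape, dropping the separate guest-context parameter and reading the context from the VMCPU instead. A stripped-down sketch of that pattern (the function name is hypothetical; the types, fields and flag are the ones used above):

/* Before: handlers took (PVMCPU pVCpu, PCPUMCTX pMixedCtx).
   After:  they take only pVCpu and fetch the context from pVCpu->cpum.GstCtx. */
static bool hmR0VmxExampleGuestIntsEnabled(PVMCPU pVCpu)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;    /* the single authoritative guest context */
    return RT_BOOL(pCtx->eflags.u32 & X86_EFL_IF);
}

Whether a given field is actually up to date remains a separate question, tracked by fExtrn and resolved with hmR0VmxImportGuestState(), as the debug-loop checks further down show.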
8780 8646 */ 8781 static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu , PCPUMCTX pCtx)8647 static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu) 8782 8648 { 8783 8649 VMXTRANSIENT VmxTransient; … … 8794 8660 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */ 8795 8661 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 8796 rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx,&VmxTransient, false /* fStepping */);8662 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */); 8797 8663 if (rcStrict != VINF_SUCCESS) 8798 8664 break; 8799 8665 8800 hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient); 8801 int rcRun = hmR0VmxRunGuest(pVCpu, pCtx); 8802 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */ 8666 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient); 8667 int rcRun = hmR0VmxRunGuest(pVCpu); 8803 8668 8804 8669 /* Restore any residual host-state and save any bits shared between host … … 8812 8677 { 8813 8678 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x); 8814 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx,&VmxTransient);8679 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient); 8815 8680 return rcRun; 8816 8681 } … … 8823 8688 HMVMX_START_EXIT_DISPATCH_PROF(); 8824 8689 8825 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);8690 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason); 8826 8691 8827 8692 /* Handle the VM-exit. */ 8828 8693 #ifdef HMVMX_USE_FUNCTION_TABLE 8829 rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx,&VmxTransient);8694 rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, &VmxTransient); 8830 8695 #else 8831 rcStrict = hmR0VmxHandleExit(pVCpu, pCtx,&VmxTransient, VmxTransient.uExitReason);8696 rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient, VmxTransient.uExitReason); 8832 8697 #endif 8833 8698 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); … … 8916 8781 * @param pVCpu The cross context virtual CPU structure of the 8917 8782 * calling EMT. 8918 * @param pCtx The CPU register context to go with @a pVCpu.8919 8783 * @param pDbgState The structure to initialize. 8920 8784 */ 8921 static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, P CCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)8922 { 8923 pDbgState->uRipStart = p Ctx->rip;8924 pDbgState->uCsStart = p Ctx->cs.Sel;8785 static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState) 8786 { 8787 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip; 8788 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel; 8925 8789 8926 8790 pDbgState->fModifiedProcCtls = false; … … 9337 9201 * @returns Strict VBox status code (i.e. informational status codes too). 9338 9202 * @param pVCpu The cross context virtual CPU structure. 9339 * @param pMixedCtx Pointer to the guest-CPU context.9340 9203 * @param pVmxTransient Pointer to the VMX-transient structure. 9341 9204 * @param uExitReason The VM-exit reason. … … 9344 9207 * and to the point. No longer than 33 chars long, please. 
9345 9208 */ 9346 static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, 9347 uint32_t uExitReason) 9209 static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason) 9348 9210 { 9349 9211 /* … … 9377 9239 { 9378 9240 case VMX_EXIT_MTF: 9379 return hmR0VmxExitMtf(pVCpu, p MixedCtx, pVmxTransient);9241 return hmR0VmxExitMtf(pVCpu, pVmxTransient); 9380 9242 9381 9243 case VMX_EXIT_XCPT_OR_NMI: … … 9552 9414 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 9553 9415 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 9416 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 9554 9417 switch (enmEvent1) 9555 9418 { 9556 9419 /** @todo consider which extra parameters would be helpful for each probe. */ 9557 9420 case DBGFEVENT_END: break; 9558 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, p MixedCtx); break;9559 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, p MixedCtx, pMixedCtx->dr[6]); break;9560 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, p MixedCtx); break;9561 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, p MixedCtx); break;9562 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, p MixedCtx); break;9563 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, p MixedCtx); break;9564 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, p MixedCtx); break;9565 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, p MixedCtx); break;9566 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, p MixedCtx, uEventArg); break;9567 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, p MixedCtx, uEventArg); break;9568 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, p MixedCtx, uEventArg); break;9569 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, p MixedCtx, uEventArg); break;9570 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, p MixedCtx, uEventArg, pMixedCtx->cr2); break;9571 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, p MixedCtx); break;9572 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, p MixedCtx); break;9573 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, p MixedCtx); break;9574 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, p MixedCtx); break;9575 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, p MixedCtx, uEventArg); break;9576 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, p MixedCtx, (uint8_t)uEventArg); break;9577 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, p MixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;9578 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, p MixedCtx); break;9579 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, p MixedCtx); break;9580 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, p MixedCtx); break;9581 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, p MixedCtx); break;9582 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, p MixedCtx); break;9583 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, p MixedCtx); break;9584 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, p MixedCtx); break;9585 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, p MixedCtx, (uint8_t)uEventArg); break;9586 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, p MixedCtx, (uint8_t)uEventArg); break;9587 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, p MixedCtx, (uint8_t)uEventArg); break;9588 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, p MixedCtx, (uint8_t)uEventArg); break;9589 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, p MixedCtx, pMixedCtx->ecx); break;9590 case DBGFEVENT_INSTR_WRMSR: 
VBOXVMM_INSTR_WRMSR(pVCpu, p MixedCtx, pMixedCtx->ecx,9591 RT_MAKE_U64(p MixedCtx->eax, pMixedCtx->edx)); break;9592 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, p MixedCtx); break;9593 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, p MixedCtx); break;9594 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, p MixedCtx); break;9595 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, p MixedCtx); break;9596 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, p MixedCtx); break;9597 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, p MixedCtx); break;9598 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, p MixedCtx); break;9599 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, p MixedCtx); break;9600 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, p MixedCtx); break;9601 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, p MixedCtx); break;9602 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, p MixedCtx); break;9603 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, p MixedCtx); break;9604 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, p MixedCtx); break;9605 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, p MixedCtx); break;9606 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, p MixedCtx); break;9607 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, p MixedCtx); break;9608 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, p MixedCtx); break;9609 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, p MixedCtx); break;9610 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, p MixedCtx); break;9611 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, p MixedCtx); break;9612 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, p MixedCtx); break;9613 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, p MixedCtx); break;9614 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, p MixedCtx); break;9615 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, p MixedCtx); break;9616 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, p MixedCtx); break;9617 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, p MixedCtx); break;9618 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, p MixedCtx); break;9619 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, p MixedCtx); break;9620 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, p MixedCtx); break;9621 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, p MixedCtx); break;9622 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, p MixedCtx); break;9623 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, p MixedCtx); break;9421 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break; 9422 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break; 9423 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break; 9424 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break; 9425 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break; 9426 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break; 9427 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break; 9428 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break; 9429 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break; 9430 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break; 9431 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, 
uEventArg); break; 9432 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break; 9433 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break; 9434 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break; 9435 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break; 9436 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break; 9437 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break; 9438 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break; 9439 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break; 9440 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break; 9441 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break; 9442 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break; 9443 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break; 9444 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break; 9445 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break; 9446 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break; 9447 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break; 9448 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break; 9449 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break; 9450 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break; 9451 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break; 9452 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break; 9453 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx, 9454 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break; 9455 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break; 9456 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break; 9457 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break; 9458 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break; 9459 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break; 9460 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break; 9461 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break; 9462 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break; 9463 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break; 9464 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break; 9465 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break; 9466 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break; 9467 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break; 9468 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break; 9469 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break; 9470 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break; 9471 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break; 9472 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break; 9473 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break; 9474 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break; 9475 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break; 9476 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break; 9477 case 
DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break; 9478 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break; 9479 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break; 9480 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break; 9481 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break; 9482 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break; 9483 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break; 9484 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break; 9485 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break; 9486 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break; 9624 9487 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break; 9625 9488 } … … 9628 9491 /** @todo consider which extra parameters would be helpful for each probe. */ 9629 9492 case DBGFEVENT_END: break; 9630 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, p MixedCtx); break;9631 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, p MixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;9632 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, p MixedCtx); break;9633 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, p MixedCtx); break;9634 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, p MixedCtx); break;9635 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, p MixedCtx); break;9636 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, p MixedCtx); break;9637 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, p MixedCtx); break;9638 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, p MixedCtx); break;9639 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, p MixedCtx, (uint8_t)uEventArg); break;9640 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, p MixedCtx, (uint8_t)uEventArg); break;9641 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, p MixedCtx, (uint8_t)uEventArg); break;9642 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, p MixedCtx, (uint8_t)uEventArg); break;9643 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, p MixedCtx, pMixedCtx->ecx); break;9644 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, p MixedCtx, pMixedCtx->ecx,9645 RT_MAKE_U64(p MixedCtx->eax, pMixedCtx->edx)); break;9646 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, p MixedCtx); break;9647 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, p MixedCtx); break;9648 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, p MixedCtx); break;9649 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, p MixedCtx); break;9650 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, p MixedCtx); break;9651 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, p MixedCtx); break;9652 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, p MixedCtx); break;9653 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, p MixedCtx); break;9654 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, p MixedCtx); break;9655 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, p MixedCtx); break;9656 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, p MixedCtx); break;9657 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, p MixedCtx); break;9658 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, p MixedCtx); break;9659 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, p MixedCtx); break;9660 case 
DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, p MixedCtx); break;9661 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, p MixedCtx); break;9662 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, p MixedCtx); break;9663 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, p MixedCtx); break;9664 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, p MixedCtx); break;9665 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, p MixedCtx); break;9666 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, p MixedCtx); break;9667 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, p MixedCtx); break;9668 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, p MixedCtx); break;9669 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, p MixedCtx); break;9670 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, p MixedCtx); break;9671 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, p MixedCtx); break;9672 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, p MixedCtx); break;9673 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, p MixedCtx); break;9674 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, p MixedCtx); break;9675 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, p MixedCtx); break;9676 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, p MixedCtx); break;9677 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, p MixedCtx); break;9678 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, p MixedCtx); break;9679 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, p MixedCtx); break;9680 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, p MixedCtx); break;9681 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, p MixedCtx); break;9493 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break; 9494 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break; 9495 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break; 9496 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break; 9497 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break; 9498 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break; 9499 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break; 9500 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break; 9501 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break; 9502 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break; 9503 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break; 9504 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break; 9505 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break; 9506 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break; 9507 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx, 9508 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break; 9509 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break; 9510 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break; 9511 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break; 9512 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break; 9513 case DBGFEVENT_EXIT_SIDT: 
VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break; 9514 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break; 9515 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break; 9516 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break; 9517 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break; 9518 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break; 9519 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break; 9520 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break; 9521 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break; 9522 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break; 9523 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break; 9524 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break; 9525 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break; 9526 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break; 9527 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break; 9528 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break; 9529 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break; 9530 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break; 9531 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break; 9532 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break; 9533 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break; 9534 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break; 9535 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break; 9536 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break; 9537 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break; 9538 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break; 9539 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break; 9540 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break; 9541 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break; 9542 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break; 9543 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break; 9544 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break; 9682 9545 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break; 9683 9546 } … … 9721 9584 * @returns Strict VBox status code (i.e. informational status codes too). 9722 9585 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 9723 * @param pMixedCtx Pointer to the guest-CPU context. The data may be9724 * out-of-sync. Make sure to update the required9725 * fields before using them.9726 9586 * @param pVmxTransient Pointer to the VMX-transient structure. 9727 * @param uExitReason The VM-exit reason.9728 9587 * @param pDbgState The debug state. 9729 9588 */ 9730 DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, 9731 uint32_t uExitReason, PVMXRUNDBGSTATE pDbgState) 9589 DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState) 9732 9590 { 9733 9591 /* 9734 9592 * Expensive (saves context) generic dtrace VM-exit probe. 
9735 9593 */ 9594 uint32_t const uExitReason = pVmxTransient->uExitReason; 9736 9595 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED()) 9737 9596 { /* more likely */ } … … 9741 9600 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 9742 9601 AssertRC(rc); 9743 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pMixedCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);9602 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification); 9744 9603 } 9745 9604 … … 9755 9614 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo); 9756 9615 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI) 9757 return hmR0VmxExitXcptOrNmi(pVCpu, p MixedCtx, pVmxTransient);9616 return hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient); 9758 9617 } 9759 9618 … … 9766 9625 { 9767 9626 case VMX_EXIT_MTF: 9768 return hmR0VmxExitMtf(pVCpu, p MixedCtx, pVmxTransient);9627 return hmR0VmxExitMtf(pVCpu, pVmxTransient); 9769 9628 9770 9629 /* Various events: */ … … 9824 9683 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 9825 9684 AssertRCReturn(rc, rc); 9826 if ( p MixedCtx->rip != pDbgState->uRipStart9827 || p MixedCtx->cs.Sel != pDbgState->uCsStart)9685 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart 9686 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart) 9828 9687 return VINF_EM_DBG_STEPPED; 9829 9688 break; … … 9853 9712 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) ) 9854 9713 { 9855 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, p MixedCtx, pVmxTransient, uExitReason);9714 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason); 9856 9715 if (rcStrict != VINF_SUCCESS) 9857 9716 return rcStrict; … … 9862 9721 */ 9863 9722 #ifdef HMVMX_USE_FUNCTION_TABLE 9864 return g_apfnVMExitHandlers[uExitReason](pVCpu, p MixedCtx, pVmxTransient);9723 return g_apfnVMExitHandlers[uExitReason](pVCpu, pVmxTransient); 9865 9724 #else 9866 return hmR0VmxHandleExit(pVCpu, p MixedCtx, pVmxTransient, uExitReason);9725 return hmR0VmxHandleExit(pVCpu, pVmxTransient, uExitReason); 9867 9726 #endif 9868 9727 } … … 9874 9733 * @returns Strict VBox status code (i.e. informational status codes too). 9875 9734 * @param pVCpu The cross context virtual CPU structure. 9876 * @param pCtx Pointer to the guest-CPU context.9877 9735 * 9878 9736 * @note Mostly the same as hmR0VmxRunGuestCodeNormal(). 9879 9737 */ 9880 static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu , PCPUMCTX pCtx)9738 static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu) 9881 9739 { 9882 9740 VMXTRANSIENT VmxTransient; … … 9891 9749 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */ 9892 9750 VMXRUNDBGSTATE DbgState; 9893 hmR0VmxRunDebugStateInit(pVCpu, pCtx,&DbgState);9751 hmR0VmxRunDebugStateInit(pVCpu, &DbgState); 9894 9752 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient); 9895 9753 … … 9910 9768 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 9911 9769 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Set up execute controls the next to can respond to. 
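One consequence of reading the context straight out of pVCpu->cpum.GstCtx, visible in the stepping checks above, is that fields may still be marked external in fExtrn and must be imported from the VMCS before they are read. That idiom, reduced to a minimal sketch reusing the calls and flags from the surrounding code (the helper name is hypothetical):

/* Import CS and RIP before comparing them, exactly as the stepping checks do;
   reading pVCpu->cpum.GstCtx without the import could return stale values. */
static int hmR0VmxExampleHasGuestMoved(PVMCPU pVCpu, uint64_t uRipStart, uint16_t uCsStart, bool *pfMoved)
{
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    AssertRCReturn(rc, rc);
    *pfMoved = pVCpu->cpum.GstCtx.rip != uRipStart
            || pVCpu->cpum.GstCtx.cs.Sel != uCsStart;
    return VINF_SUCCESS;
}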
*/ 9912 rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx,&VmxTransient, fStepping);9770 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, fStepping); 9913 9771 if (rcStrict != VINF_SUCCESS) 9914 9772 break; 9915 9773 9916 hmR0VmxPreRunGuestCommitted(pVCpu, pCtx,&VmxTransient);9774 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient); 9917 9775 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */ 9918 9776 … … 9920 9778 * Now we can run the guest code. 9921 9779 */ 9922 int rcRun = hmR0VmxRunGuest(pVCpu, pCtx); 9923 9924 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */ 9780 int rcRun = hmR0VmxRunGuest(pVCpu); 9925 9781 9926 9782 /* … … 9936 9792 { 9937 9793 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x); 9938 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx,&VmxTransient);9794 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient); 9939 9795 return rcRun; 9940 9796 } … … 9947 9803 HMVMX_START_EXIT_DISPATCH_PROF(); 9948 9804 9949 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);9805 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason); 9950 9806 9951 9807 /* 9952 9808 * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitDebug(). 9953 9809 */ 9954 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState);9810 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState); 9955 9811 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); 9956 9812 if (rcStrict != VINF_SUCCESS) … … 9971 9827 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 9972 9828 AssertRC(rc); 9973 if ( p Ctx->rip != DbgState.uRipStart9974 || p Ctx->cs.Sel != DbgState.uCsStart)9829 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart 9830 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart) 9975 9831 { 9976 9832 rcStrict = VINF_EM_DBG_STEPPED; … … 9995 9851 AssertRC(rc); 9996 9852 pVCpu->hm.s.fClearTrapFlag = false; 9997 p Ctx->eflags.Bits.u1TF = 0;9853 pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0; 9998 9854 } 9999 9855 /** @todo there seems to be issues with the resume flag when the monitor trap … … 10177 10033 && !DBGFIsStepping(pVCpu) 10178 10034 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints) 10179 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu , pCtx);10035 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu); 10180 10036 else 10181 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu , pCtx);10037 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu); 10182 10038 10183 10039 if (rcStrict == VERR_EM_INTERPRETER) … … 10186 10042 rcStrict = VINF_EM_TRIPLE_FAULT; 10187 10043 10188 int rc2 = hmR0VmxExitToRing3(pVCpu, pCtx,rcStrict);10044 int rc2 = hmR0VmxExitToRing3(pVCpu, rcStrict); 10189 10045 if (RT_FAILURE(rc2)) 10190 10046 { … … 10199 10055 10200 10056 #ifndef HMVMX_USE_FUNCTION_TABLE 10201 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)10057 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t rcReason) 10202 10058 { 10203 10059 #ifdef DEBUG_ramshankar … … 10216 10072 switch (rcReason) 10217 10073 { 10218 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, p MixedCtx, pVmxTransient));10219 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, p MixedCtx, pVmxTransient));10220 case VMX_EXIT_IO_INSTR: 
VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, p MixedCtx, pVmxTransient));10221 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, p MixedCtx, pVmxTransient));10222 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, p MixedCtx, pVmxTransient));10223 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, p MixedCtx, pVmxTransient));10224 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, p MixedCtx, pVmxTransient));10225 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, p MixedCtx, pVmxTransient));10226 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, p MixedCtx, pVmxTransient));10227 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, p MixedCtx, pVmxTransient));10228 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, p MixedCtx, pVmxTransient));10229 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, p MixedCtx, pVmxTransient));10230 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, p MixedCtx, pVmxTransient));10231 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, p MixedCtx, pVmxTransient));10232 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, p MixedCtx, pVmxTransient));10233 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, p MixedCtx, pVmxTransient));10234 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, p MixedCtx, pVmxTransient));10235 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, p MixedCtx, pVmxTransient));10236 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, p MixedCtx, pVmxTransient));10237 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, p MixedCtx, pVmxTransient));10238 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, p MixedCtx, pVmxTransient));10239 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, p MixedCtx, pVmxTransient));10240 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, p MixedCtx, pVmxTransient));10241 case VMX_EXIT_RSM: VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, p MixedCtx, pVmxTransient));10242 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, p MixedCtx, pVmxTransient));10243 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, p MixedCtx, pVmxTransient));10244 case VMX_EXIT_XDTR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, p MixedCtx, pVmxTransient));10245 case VMX_EXIT_TR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, p MixedCtx, pVmxTransient));10246 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, p MixedCtx, pVmxTransient));10247 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, p MixedCtx, pVmxTransient));10248 case VMX_EXIT_RDRAND: VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, p MixedCtx, pVmxTransient));10249 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, p MixedCtx, pVmxTransient));10250 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, p MixedCtx, pVmxTransient));10251 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, p MixedCtx, pVmxTransient));10252 10253 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, p MixedCtx, pVmxTransient);10254 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindow(pVCpu, p MixedCtx, pVmxTransient);10255 case VMX_EXIT_INIT_SIGNAL: return hmR0VmxExitInitSignal(pVCpu, p MixedCtx, pVmxTransient);10256 case VMX_EXIT_SIPI: 
return hmR0VmxExitSipi(pVCpu, p MixedCtx, pVmxTransient);10257 case VMX_EXIT_IO_SMI: return hmR0VmxExitIoSmi(pVCpu, p MixedCtx, pVmxTransient);10258 case VMX_EXIT_SMI: return hmR0VmxExitSmi(pVCpu, p MixedCtx, pVmxTransient);10259 case VMX_EXIT_ERR_MSR_LOAD: return hmR0VmxExitErrMsrLoad(pVCpu, p MixedCtx, pVmxTransient);10260 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, p MixedCtx, pVmxTransient);10261 case VMX_EXIT_ERR_MACHINE_CHECK: return hmR0VmxExitErrMachineCheck(pVCpu, p MixedCtx, pVmxTransient);10074 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient)); 10075 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pVmxTransient)); 10076 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pVmxTransient)); 10077 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pVmxTransient)); 10078 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pVmxTransient)); 10079 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pVmxTransient)); 10080 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pVmxTransient)); 10081 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient)); 10082 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pVmxTransient)); 10083 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pVmxTransient)); 10084 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pVmxTransient)); 10085 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient)); 10086 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pVmxTransient)); 10087 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pVmxTransient)); 10088 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pVmxTransient)); 10089 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pVmxTransient)); 10090 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pVmxTransient)); 10091 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pVmxTransient)); 10092 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pVmxTransient)); 10093 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pVmxTransient)); 10094 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pVmxTransient)); 10095 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pVmxTransient)); 10096 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pVmxTransient)); 10097 case VMX_EXIT_RSM: VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pVmxTransient)); 10098 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pVmxTransient)); 10099 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pVmxTransient)); 10100 case VMX_EXIT_XDTR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient)); 10101 case VMX_EXIT_TR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient)); 10102 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pVmxTransient)); 10103 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pVmxTransient)); 10104 case VMX_EXIT_RDRAND: VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pVmxTransient)); 10105 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pVmxTransient)); 10106 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, 
pVmxTransient)); 10107 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pVmxTransient)); 10108 10109 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pVmxTransient); 10110 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient); 10111 case VMX_EXIT_INIT_SIGNAL: return hmR0VmxExitInitSignal(pVCpu, pVmxTransient); 10112 case VMX_EXIT_SIPI: return hmR0VmxExitSipi(pVCpu, pVmxTransient); 10113 case VMX_EXIT_IO_SMI: return hmR0VmxExitIoSmi(pVCpu, pVmxTransient); 10114 case VMX_EXIT_SMI: return hmR0VmxExitSmi(pVCpu, pVmxTransient); 10115 case VMX_EXIT_ERR_MSR_LOAD: return hmR0VmxExitErrMsrLoad(pVCpu, pVmxTransient); 10116 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient); 10117 case VMX_EXIT_ERR_MACHINE_CHECK: return hmR0VmxExitErrMachineCheck(pVCpu, pVmxTransient); 10262 10118 10263 10119 case VMX_EXIT_VMCLEAR: … … 10275 10131 case VMX_EXIT_XSAVES: 10276 10132 case VMX_EXIT_XRSTORS: 10277 return hmR0VmxExitSetPendingXcptUD(pVCpu, p MixedCtx, pVmxTransient);10133 return hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient); 10278 10134 10279 10135 case VMX_EXIT_ENCLS: … … 10281 10137 case VMX_EXIT_PML_FULL: 10282 10138 default: 10283 return hmR0VmxExitErrUndefined(pVCpu, p MixedCtx, pVmxTransient);10139 return hmR0VmxExitErrUndefined(pVCpu, pVmxTransient); 10284 10140 } 10285 10141 #undef VMEXIT_CALL_RET … … 10302 10158 do { \ 10303 10159 AssertPtr(pVCpu); \ 10304 AssertPtr(pMixedCtx); \10305 10160 AssertPtr(pVmxTransient); \ 10306 10161 Assert(pVmxTransient->fVMEntryFailed == false); \ … … 10323 10178 do { \ 10324 10179 HMVMX_STOP_EXIT_DISPATCH_PROF(); \ 10325 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \10180 RT_NOREF2(pVCpu, pVmxTransient); \ 10326 10181 } while (0) 10327 10182 # define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0) … … 10333 10188 * 10334 10189 * @param pVCpu The cross context virtual CPU structure. 10335 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe10336 * out-of-sync. Make sure to update the required fields10337 * before using them.10338 10190 * @param cbInstr Number of bytes to advance the RIP by. 10339 10191 * 10340 10192 * @remarks No-long-jump zone!!! 10341 10193 */ 10342 DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPU pVCpu, PCPUMCTX pMixedCtx,uint32_t cbInstr)10194 DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPU pVCpu, uint32_t cbInstr) 10343 10195 { 10344 10196 /* Advance the RIP. */ 10345 p MixedCtx->rip += cbInstr;10197 pVCpu->cpum.GstCtx.rip += cbInstr; 10346 10198 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP); 10347 10199 10348 10200 /* Update interrupt inhibition. */ 10349 10201 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 10350 && p MixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))10202 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu)) 10351 10203 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 10352 10204 } … … 10358 10210 * @returns VBox status code, no informational status codes. 10359 10211 * @param pVCpu The cross context virtual CPU structure. 10360 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe10361 * out-of-sync. Make sure to update the required fields10362 * before using them.10363 10212 * @param pVmxTransient Pointer to the VMX transient structure. 10364 10213 * 10365 10214 * @remarks No-long-jump zone!!! 
10366 10215 */ 10367 static int hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)10216 static int hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 10368 10217 { 10369 10218 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); … … 10371 10220 AssertRCReturn(rc, rc); 10372 10221 10373 hmR0VmxAdvanceGuestRipBy(pVCpu, p MixedCtx, pVmxTransient->cbInstr);10222 hmR0VmxAdvanceGuestRipBy(pVCpu, pVmxTransient->cbInstr); 10374 10223 10375 10224 /* … … 10380 10229 */ 10381 10230 if ( !pVCpu->hm.s.fSingleInstruction 10382 && p MixedCtx->eflags.Bits.u1TF)10383 { 10384 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu , pMixedCtx);10231 && pVCpu->cpum.GstCtx.eflags.Bits.u1TF) 10232 { 10233 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 10385 10234 AssertRCReturn(rc, rc); 10386 10235 } … … 10399 10248 * 10400 10249 * @param pVCpu The cross context virtual CPU structure. 10401 * @param pCtx Pointer to the guest-CPU state.10402 10250 * 10403 10251 * @remarks This function assumes our cache of the VMCS controls 10404 10252 * are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded. 10405 10253 */ 10406 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu , PCPUMCTX pCtx)10254 static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu) 10407 10255 { 10408 10256 #define HMVMX_ERROR_BREAK(err) { uError = (err); break; } … … 10414 10262 int rc; 10415 10263 PVM pVM = pVCpu->CTX_SUFF(pVM); 10264 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 10416 10265 uint32_t uError = VMX_IGS_ERROR; 10417 10266 uint32_t u32Val; … … 11001 10850 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT). 11002 10851 */ 11003 HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)10852 HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11004 10853 { 11005 10854 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11015 10864 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). 11016 10865 */ 11017 HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)10866 HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11018 10867 { 11019 10868 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11045 10894 11046 10895 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. 
*/ 11047 VBOXSTRICTRC rcStrictRc1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, p MixedCtx, pVmxTransient);10896 VBOXSTRICTRC rcStrictRc1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient); 11048 10897 if (RT_UNLIKELY(rcStrictRc1 == VINF_SUCCESS)) 11049 10898 { /* likely */ } … … 11084 10933 switch (uVector) 11085 10934 { 11086 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, p MixedCtx, pVmxTransient); break;11087 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, p MixedCtx, pVmxTransient); break;11088 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, p MixedCtx, pVmxTransient); break;11089 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, p MixedCtx, pVmxTransient); break;11090 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, p MixedCtx, pVmxTransient); break;11091 case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, p MixedCtx, pVmxTransient); break;10935 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pVmxTransient); break; 10936 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pVmxTransient); break; 10937 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pVmxTransient); break; 10938 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pVmxTransient); break; 10939 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pVmxTransient); break; 10940 case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pVmxTransient); break; 11092 10941 11093 10942 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM); 11094 rc = hmR0VmxExitXcptGeneric(pVCpu, p MixedCtx, pVmxTransient); break;10943 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break; 11095 10944 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); 11096 rc = hmR0VmxExitXcptGeneric(pVCpu, p MixedCtx, pVmxTransient); break;10945 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break; 11097 10946 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); 11098 rc = hmR0VmxExitXcptGeneric(pVCpu, p MixedCtx, pVmxTransient); break;10947 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break; 11099 10948 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); 11100 rc = hmR0VmxExitXcptGeneric(pVCpu, p MixedCtx, pVmxTransient); break;10949 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break; 11101 10950 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); 11102 rc = hmR0VmxExitXcptGeneric(pVCpu, p MixedCtx, pVmxTransient); break;10951 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break; 11103 10952 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); 11104 rc = hmR0VmxExitXcptGeneric(pVCpu, p MixedCtx, pVmxTransient); break;10953 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break; 11105 10954 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS); 11106 rc = hmR0VmxExitXcptGeneric(pVCpu, p MixedCtx, pVmxTransient); break;10955 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break; 11107 10956 default: 11108 10957 { … … 11112 10961 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS); 11113 10962 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM))); 11114 Assert(CPUMIsGuestInRealModeEx( pMixedCtx));10963 Assert(CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)); 11115 10964 11116 10965 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); … … 11150 10999 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW). 
11151 11000 */ 11152 HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11001 HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11153 11002 { 11154 11003 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11166 11015 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW). 11167 11016 */ 11168 HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11017 HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11169 11018 { 11170 11019 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11203 11052 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit. 11204 11053 */ 11205 HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11054 HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11206 11055 { 11207 11056 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11208 return hmR0VmxAdvanceGuestRip(pVCpu, p MixedCtx, pVmxTransient);11057 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 11209 11058 } 11210 11059 … … 11213 11062 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit. 11214 11063 */ 11215 HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11064 HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11216 11065 { 11217 11066 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11218 return hmR0VmxAdvanceGuestRip(pVCpu, p MixedCtx, pVmxTransient);11067 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 11219 11068 } 11220 11069 … … 11223 11072 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit. 11224 11073 */ 11225 HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11074 HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11226 11075 { 11227 11076 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11228 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);11229 11077 11230 11078 /* … … 11259 11107 * Frequent exit or something needing probing. Get state and call EMHistoryExec. 11260 11108 */ 11261 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);11262 11109 int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 11263 11110 AssertRCReturn(rc2, rc2); … … 11280 11127 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit. 11281 11128 */ 11282 HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11129 HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11283 11130 { 11284 11131 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11286 11133 AssertRCReturn(rc, rc); 11287 11134 11288 if (p MixedCtx->cr4 & X86_CR4_SMXE)11135 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE) 11289 11136 return VINF_EM_RAW_EMULATE_INSTR; 11290 11137 … … 11297 11144 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit. 11298 11145 */ 11299 HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11146 HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11300 11147 { 11301 11148 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11326 11173 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit. 
11327 11174 */ 11328 HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11175 HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11329 11176 { 11330 11177 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11355 11202 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit. 11356 11203 */ 11357 HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11204 HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11358 11205 { 11359 11206 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11361 11208 AssertRCReturn(rc, rc); 11362 11209 11363 PVM pVM = pVCpu->CTX_SUFF(pVM); 11364 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx)); 11210 PVM pVM = pVCpu->CTX_SUFF(pVM); 11211 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 11212 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 11365 11213 if (RT_LIKELY(rc == VINF_SUCCESS)) 11366 11214 { 11367 rc = hmR0VmxAdvanceGuestRip(pVCpu, p MixedCtx, pVmxTransient);11215 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 11368 11216 Assert(pVmxTransient->cbInstr == 2); 11369 11217 } … … 11380 11228 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit. 11381 11229 */ 11382 HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11230 HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11383 11231 { 11384 11232 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11392 11240 11393 11241 /* Perform the hypercall. */ 11394 rcStrict = GIMHypercall(pVCpu, pMixedCtx);11242 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx); 11395 11243 if (rcStrict == VINF_SUCCESS) 11396 11244 { 11397 rc = hmR0VmxAdvanceGuestRip(pVCpu, p MixedCtx, pVmxTransient);11245 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 11398 11246 AssertRCReturn(rc, rc); 11399 11247 } … … 11412 11260 if (RT_FAILURE(rcStrict)) 11413 11261 { 11414 hmR0VmxSetPendingXcptUD(pVCpu , pMixedCtx);11262 hmR0VmxSetPendingXcptUD(pVCpu); 11415 11263 rcStrict = VINF_SUCCESS; 11416 11264 } … … 11423 11271 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit. 11424 11272 */ 11425 HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11273 HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11426 11274 { 11427 11275 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11452 11300 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit. 11453 11301 */ 11454 HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11302 HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11455 11303 { 11456 11304 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11458 11306 AssertRCReturn(rc, rc); 11459 11307 11460 PVM pVM = pVCpu->CTX_SUFF(pVM); 11461 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx)); 11308 PVM pVM = pVCpu->CTX_SUFF(pVM); 11309 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 11310 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 11462 11311 if (RT_LIKELY(rc == VINF_SUCCESS)) 11463 rc = hmR0VmxAdvanceGuestRip(pVCpu, p MixedCtx, pVmxTransient);11312 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 11464 11313 else 11465 11314 { … … 11475 11324 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit. 
11476 11325 */ 11477 HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11326 HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11478 11327 { 11479 11328 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11481 11330 AssertRCReturn(rc, rc); 11482 11331 11483 PVM pVM = pVCpu->CTX_SUFF(pVM); 11484 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx)); 11332 PVM pVM = pVCpu->CTX_SUFF(pVM); 11333 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 11334 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 11485 11335 rc = VBOXSTRICTRC_VAL(rc2); 11486 11336 if (RT_LIKELY( rc == VINF_SUCCESS 11487 11337 || rc == VINF_EM_HALT)) 11488 11338 { 11489 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, p MixedCtx, pVmxTransient);11339 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 11490 11340 AssertRCReturn(rc3, rc3); 11491 11341 11492 11342 if ( rc == VINF_EM_HALT 11493 && EMMonitorWaitShouldContinue(pVCpu, p MixedCtx))11343 && EMMonitorWaitShouldContinue(pVCpu, pCtx)) 11494 11344 rc = VINF_SUCCESS; 11495 11345 } … … 11509 11359 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit. 11510 11360 */ 11511 HMVMX_EXIT_NSRC_DECL hmR0VmxExitRsm(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11361 HMVMX_EXIT_NSRC_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11512 11362 { 11513 11363 /* … … 11520 11370 */ 11521 11371 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11522 AssertMsgFailed(("Unexpected RSM VM-exit . pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));11372 AssertMsgFailed(("Unexpected RSM VM-exit\n")); 11523 11373 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 11524 11374 } … … 11528 11378 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit. 11529 11379 */ 11530 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSmi(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11380 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11531 11381 { 11532 11382 /* … … 11540 11390 */ 11541 11391 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11542 AssertMsgFailed(("Unexpected SMI VM-exit . pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));11392 AssertMsgFailed(("Unexpected SMI VM-exit\n")); 11543 11393 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 11544 11394 } … … 11548 11398 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit. 11549 11399 */ 11550 HMVMX_EXIT_NSRC_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11400 HMVMX_EXIT_NSRC_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11551 11401 { 11552 11402 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */ 11553 11403 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11554 AssertMsgFailed(("Unexpected IO SMI VM-exit . pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));11404 AssertMsgFailed(("Unexpected IO SMI VM-exit\n")); 11555 11405 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 11556 11406 } … … 11560 11410 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit. 11561 11411 */ 11562 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSipi(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11412 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11563 11413 { 11564 11414 /* … … 11568 11418 */ 11569 11419 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11570 AssertMsgFailed(("Unexpected SIPI VM-exit . 
pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));11420 AssertMsgFailed(("Unexpected SIPI VM-exit\n")); 11571 11421 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 11572 11422 } … … 11577 11427 * VM-exit. 11578 11428 */ 11579 HMVMX_EXIT_NSRC_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11429 HMVMX_EXIT_NSRC_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11580 11430 { 11581 11431 /* … … 11595 11445 * VM-exit. 11596 11446 */ 11597 HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11447 HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11598 11448 { 11599 11449 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11605 11455 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit. 11606 11456 */ 11607 HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11457 HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11608 11458 { 11609 11459 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11610 11460 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT); 11611 11461 11612 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient); 11462 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 11463 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS); 11613 11464 AssertRCReturn(rc, rc); 11614 11465 11615 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */11466 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */ 11616 11467 rc = VINF_SUCCESS; 11617 11468 else … … 11629 11480 * the guest. 11630 11481 */ 11631 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11482 HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11632 11483 { 11633 11484 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11634 hmR0VmxSetPendingXcptUD(pVCpu , pMixedCtx);11485 hmR0VmxSetPendingXcptUD(pVCpu); 11635 11486 return VINF_SUCCESS; 11636 11487 } … … 11640 11491 * VM-exit handler for expiry of the VMX preemption timer. 11641 11492 */ 11642 HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11493 HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11643 11494 { 11644 11495 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11658 11509 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit. 11659 11510 */ 11660 HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11511 HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11661 11512 { 11662 11513 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11670 11521 : HM_CHANGED_XCPT_RAISED_MASK); 11671 11522 11672 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0(); 11523 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 11524 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 11673 11525 11674 11526 return rcStrict; … … 11679 11531 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit. 11680 11532 */ 11681 HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11533 HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11682 11534 { 11683 11535 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11691 11543 * Error VM-exit. 
11692 11544 */ 11693 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11545 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11694 11546 { 11695 11547 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); … … 11699 11551 return rc; 11700 11552 11701 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu , pMixedCtx);11553 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu); 11702 11554 NOREF(uInvalidReason); 11703 11555 … … 11733 11585 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val)); 11734 11586 11735 hmR0DumpRegs(pVCpu , pMixedCtx);11587 hmR0DumpRegs(pVCpu); 11736 11588 #else 11737 11589 NOREF(pVmxTransient); … … 11746 11598 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit. 11747 11599 */ 11748 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 11749 { 11750 NOREF(pVmxTransient); 11751 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx); 11600 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11601 { 11602 AssertMsgFailed(("Unexpected MSR-load exit\n")); 11752 11603 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 11753 11604 } … … 11758 11609 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit. 11759 11610 */ 11760 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 11761 { 11762 NOREF(pVmxTransient); 11763 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx); 11611 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11612 { 11613 AssertMsgFailed(("Unexpected machine-check event exit\n")); 11764 11614 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 11765 11615 } … … 11770 11620 * theory. 11771 11621 */ 11772 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11773 { 11774 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));11775 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);11622 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11623 { 11624 RT_NOREF2(pVCpu, pVmxTransient); 11625 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d\n", pVmxTransient->uExitReason)); 11776 11626 return VERR_VMX_UNDEFINED_EXIT_CODE; 11777 11627 } … … 11783 11633 * Conditional VM-exit. 11784 11634 */ 11785 HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11635 HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11786 11636 { 11787 11637 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11791 11641 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT) 11792 11642 return VERR_EM_INTERPRETER; 11793 AssertMsgFailed(("Unexpected XDTR access . pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));11643 AssertMsgFailed(("Unexpected XDTR access\n")); 11794 11644 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 11795 11645 } … … 11799 11649 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit. 
11800 11650 */ 11801 HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11651 HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11802 11652 { 11803 11653 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11806 11656 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT) 11807 11657 return VERR_EM_INTERPRETER; 11808 AssertMsgFailed(("Unexpected RDRAND exit . pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));11658 AssertMsgFailed(("Unexpected RDRAND exit\n")); 11809 11659 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 11810 11660 } … … 11814 11664 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR). 11815 11665 */ 11816 HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11666 HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11817 11667 { 11818 11668 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11822 11672 * MSRs required. That would require changes to IEM and possibly CPUM too. 11823 11673 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */ 11824 uint32_t const idMsr = p MixedCtx->ecx; NOREF(idMsr); /* Save it. */11674 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx; NOREF(idMsr); /* Save it. */ 11825 11675 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11826 11676 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS); … … 11873 11723 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR). 11874 11724 */ 11875 HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11725 HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 11876 11726 { 11877 11727 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 11881 11731 * MSRs required. That would require changes to IEM and possibly CPUM too. 11882 11732 * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */ 11883 uint32_t const idMsr = p MixedCtx->ecx; /* Save it. */11733 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx; /* Save it. */ 11884 11734 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11885 11735 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS); 11886 11736 AssertRCReturn(rc, rc); 11887 11737 11888 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, p MixedCtx->edx, pMixedCtx->eax));11738 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax)); 11889 11739 11890 11740 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbInstr); … … 11924 11774 switch (idMsr) 11925 11775 { 11926 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); 11927 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 11928 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 11929 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); 11930 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); 11931 case MSR_K6_EFER: /* Nothing to do, already handled above. 
*/ break;11776 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break; 11777 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break; 11778 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break; 11779 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break; 11780 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break; 11781 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break; 11932 11782 default: 11933 11783 { … … 12003 11853 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit. 12004 11854 */ 12005 HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11855 HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12006 11856 { 12007 11857 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12016 11866 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit. 12017 11867 */ 12018 HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11868 HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12019 11869 { 12020 11870 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12040 11890 * interpreter. 12041 11891 */ 12042 HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)11892 HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12043 11893 { 12044 11894 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12051 11901 12052 11902 VBOXSTRICTRC rcStrict; 12053 PVM pVM = pVCpu->CTX_SUFF(pVM); 11903 PVM pVM = pVCpu->CTX_SUFF(pVM); 11904 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12054 11905 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification; 12055 11906 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification); … … 12073 11924 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0); 12074 11925 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write); 12075 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), p MixedCtx->cr0));11926 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pCtx->cr0)); 12076 11927 break; 12077 11928 } … … 12086 11937 case 3: 12087 11938 { 12088 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(p MixedCtx) || pVCpu->hm.s.fUsingDebugLoop);11939 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pCtx) || pVCpu->hm.s.fUsingDebugLoop); 12089 11940 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write); 12090 11941 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, 12091 11942 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3); 12092 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), p MixedCtx->cr3));11943 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pCtx->cr3)); 12093 11944 break; 12094 11945 } … … 12099 11950 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, 12100 11951 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4); 12101 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict), 12102 p MixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));11952 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict), pCtx->cr4, 11953 
pVCpu->hm.s.fLoadSaveGuestXcr0)); 12103 11954 break; 12104 11955 } … … 12122 11973 { 12123 11974 Assert( !pVM->hm.s.fNestedPaging 12124 || !CPUMIsGuestPagingEnabledEx(p MixedCtx)11975 || !CPUMIsGuestPagingEnabledEx(pCtx) 12125 11976 || pVCpu->hm.s.fUsingDebugLoop 12126 11977 || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3); … … 12203 12054 * VM-exit. 12204 12055 */ 12205 HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12056 HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12206 12057 { 12207 12058 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12208 12059 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1); 12209 Assert(pMixedCtx == &pVCpu->cpum.GstCtx); 12210 12060 12061 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12211 12062 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 12212 12063 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); … … 12221 12072 == VMX_EXIT_QUAL_IO_DIRECTION_OUT); 12222 12073 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification); 12223 bool fGstStepping = RT_BOOL(p MixedCtx->eflags.Bits.u1TF);12074 bool fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF); 12224 12075 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction; 12225 12076 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1); … … 12258 12109 * interpreting the instruction. 12259 12110 */ 12260 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", p MixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,12111 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, 12261 12112 fIOWrite ? 'w' : 'r')); 12262 AssertReturn(p MixedCtx->dx == uIOPort, VERR_VMX_IPE_2);12113 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2); 12263 12114 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo)) 12264 12115 { … … 12294 12145 * IN/OUT - I/O instruction. 12295 12146 */ 12296 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", p MixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,12147 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, 12297 12148 fIOWrite ? 'w' : 'r')); 12298 12149 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth]; … … 12300 12151 if (fIOWrite) 12301 12152 { 12302 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, p MixedCtx->eax & uAndVal, cbValue);12153 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue); 12303 12154 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite); 12304 12155 } … … 12310 12161 { 12311 12162 /* Save result of I/O IN instr. in AL/AX/EAX. 
*/ 12312 p MixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);12163 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal); 12313 12164 } 12314 12165 else if (rcStrict == VINF_IOM_R3_IOPORT_READ) 12315 HMR0SavePendingIOPortRead(pVCpu, p MixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);12166 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uIOPort, uAndVal, cbValue); 12316 12167 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead); 12317 12168 } … … 12322 12173 if (!fUpdateRipAlready) 12323 12174 { 12324 hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx,cbInstr);12175 hmR0VmxAdvanceGuestRipBy(pVCpu, cbInstr); 12325 12176 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP); 12326 12177 } … … 12340 12191 && fGstStepping) 12341 12192 { 12342 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu , pMixedCtx);12193 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 12343 12194 AssertRCReturn(rc, rc); 12344 12195 } … … 12354 12205 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the 12355 12206 * execution engines about whether hyper BPs and such are pending. */ 12356 uint32_t const uDr7 = p MixedCtx->dr[7];12207 uint32_t const uDr7 = pCtx->dr[7]; 12357 12208 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK) 12358 12209 && X86_DR7_ANY_RW_IO(uDr7) 12359 && (p MixedCtx->cr4 & X86_CR4_DE))12210 && (pCtx->cr4 & X86_CR4_DE)) 12360 12211 || DBGFBpIsHwIoArmed(pVM))) 12361 12212 { … … 12368 12219 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */); 12369 12220 12370 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, p MixedCtx, uIOPort, cbValue);12221 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue); 12371 12222 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP) 12372 12223 { 12373 12224 /* Raise #DB. */ 12374 12225 if (fIsGuestDbgActive) 12375 ASMSetDR6(p MixedCtx->dr[6]);12376 if (p MixedCtx->dr[7] != uDr7)12226 ASMSetDR6(pCtx->dr[6]); 12227 if (pCtx->dr[7] != uDr7) 12377 12228 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7; 12378 12229 12379 hmR0VmxSetPendingXcptDB(pVCpu , pMixedCtx);12230 hmR0VmxSetPendingXcptDB(pVCpu); 12380 12231 } 12381 12232 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST], … … 12442 12293 * VM-exit. 12443 12294 */ 12444 HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12295 HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12445 12296 { 12446 12297 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12471 12322 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT 12472 12323 && uVector == X86_XCPT_PF) 12473 GCPtrFaultAddress = p MixedCtx->cr2;12324 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2; 12474 12325 else 12475 12326 GCPtrFaultAddress = 0; … … 12493 12344 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit. 12494 12345 */ 12495 HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12346 HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12496 12347 { 12497 12348 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12508 12359 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit. 
12509 12360 */ 12510 HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12361 HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12511 12362 { 12512 12363 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12515 12366 12516 12367 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */ 12517 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, p MixedCtx, pVmxTransient);12368 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient); 12518 12369 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS)) 12519 12370 { … … 12556 12407 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification))); 12557 12408 12409 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12558 12410 rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu, 12559 12411 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, 12560 CPUMCTX2CORE(p MixedCtx), GCPhys);12412 CPUMCTX2CORE(pCtx), GCPhys); 12561 12413 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2))); 12562 12414 if ( rcStrict2 == VINF_SUCCESS … … 12587 12439 * VM-exit. 12588 12440 */ 12589 HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12441 HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12590 12442 { 12591 12443 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12594 12446 if (pVmxTransient->fWasGuestDebugStateActive) 12595 12447 { 12596 AssertMsgFailed(("Unexpected MOV DRx exit . pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));12448 AssertMsgFailed(("Unexpected MOV DRx exit\n")); 12597 12449 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient); 12598 12450 } … … 12636 12488 * Update the segment registers and DR7 from the CPU. 12637 12489 */ 12490 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12638 12491 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 12639 12492 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7); 12640 12493 AssertRCReturn(rc, rc); 12641 Log4Func(("CS:RIP=%04x:%08RX64\n", p MixedCtx->cs.Sel, pMixedCtx->rip));12494 Log4Func(("CS:RIP=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip)); 12642 12495 12643 12496 PVM pVM = pVCpu->CTX_SUFF(pVM); 12644 12497 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE) 12645 12498 { 12646 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(p MixedCtx),12499 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx), 12647 12500 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification), 12648 12501 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification)); … … 12653 12506 else 12654 12507 { 12655 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(p MixedCtx),12508 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx), 12656 12509 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification), 12657 12510 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification)); … … 12662 12515 if (RT_SUCCESS(rc)) 12663 12516 { 12664 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, p MixedCtx, pVmxTransient);12517 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 12665 12518 AssertRCReturn(rc2, rc2); 12666 12519 return VINF_SUCCESS; … … 12674 12527 * Conditional VM-exit. 
12675 12528 */ 12676 HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12529 HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12677 12530 { 12678 12531 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12680 12533 12681 12534 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */ 12682 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, p MixedCtx, pVmxTransient);12535 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient); 12683 12536 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS)) 12684 12537 { … … 12719 12572 * weird case. See @bugref{6043}. 12720 12573 */ 12721 PVM pVM = pVCpu->CTX_SUFF(pVM); 12722 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX); 12723 Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pMixedCtx->rip, VBOXSTRICTRC_VAL(rcStrict))); 12574 PVM pVM = pVCpu->CTX_SUFF(pVM); 12575 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12576 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX); 12577 Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict))); 12724 12578 if ( rcStrict == VINF_SUCCESS 12725 12579 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT … … 12737 12591 * Frequent exit or something needing probing. Get state and call EMHistoryExec. 12738 12592 */ 12739 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);12740 12593 int rc2 = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 12741 12594 AssertRCReturn(rc2, rc2); … … 12759 12612 * VM-exit. 12760 12613 */ 12761 HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12614 HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12762 12615 { 12763 12616 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12765 12618 12766 12619 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */ 12767 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, p MixedCtx, pVmxTransient);12620 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient); 12768 12621 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS)) 12769 12622 { … … 12798 12651 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode); 12799 12652 12653 12654 /* Handle the pagefault trap for the nested shadow table. */ 12655 PVM pVM = pVCpu->CTX_SUFF(pVM); 12656 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12657 12800 12658 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys, 12801 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip)); 12802 12803 /* Handle the pagefault trap for the nested shadow table. */ 12804 PVM pVM = pVCpu->CTX_SUFF(pVM); 12805 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys); 12659 uErrorCode, pCtx->cs.Sel, pCtx->rip)); 12660 12661 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys); 12806 12662 TRPMResetTrap(pVCpu); 12807 12663 … … 12834 12690 * VM-exit exception handler for \#MF (Math Fault: floating point exception). 
12835 12691 */ 12836 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12692 static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12837 12693 { 12838 12694 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); … … 12842 12698 AssertRCReturn(rc, rc); 12843 12699 12844 if (!(p MixedCtx->cr0 & X86_CR0_NE))12700 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)) 12845 12701 { 12846 12702 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */ … … 12850 12706 * provides VM-exit instruction length. If this causes problem later, 12851 12707 * disassemble the instruction like it's done on AMD-V. */ 12852 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, p MixedCtx, pVmxTransient);12708 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient); 12853 12709 AssertRCReturn(rc2, rc2); 12854 12710 return rc; … … 12864 12720 * VM-exit exception handler for \#BP (Breakpoint exception). 12865 12721 */ 12866 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12722 static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12867 12723 { 12868 12724 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); … … 12872 12728 AssertRCReturn(rc, rc); 12873 12729 12874 rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx)); 12730 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12731 rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 12875 12732 if (rc == VINF_EM_RAW_GUEST_TRAP) 12876 12733 { … … 12892 12749 * VM-exit exception handler for \#AC (alignment check exception). 12893 12750 */ 12894 static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 12895 { 12896 RT_NOREF_PV(pMixedCtx); 12751 static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12752 { 12897 12753 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); 12898 12754 … … 12914 12770 * VM-exit exception handler for \#DB (Debug exception). 12915 12771 */ 12916 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12772 static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 12917 12773 { 12918 12774 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); … … 12930 12786 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS)); 12931 12787 12932 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction); 12788 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 12789 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction); 12933 12790 Log6Func(("rc=%Rrc\n", rc)); 12934 12791 if (rc == VINF_EM_RAW_GUEST_TRAP) … … 12942 12799 HM_DISABLE_PREEMPT(); 12943 12800 12944 p MixedCtx->dr[6] &= ~X86_DR6_B_MASK;12945 p MixedCtx->dr[6] |= uDR6;12801 pCtx->dr[6] &= ~X86_DR6_B_MASK; 12802 pCtx->dr[6] |= uDR6; 12946 12803 if (CPUMIsGuestDebugStateActive(pVCpu)) 12947 ASMSetDR6(p MixedCtx->dr[6]);12804 ASMSetDR6(pCtx->dr[6]); 12948 12805 12949 12806 HM_RESTORE_PREEMPT(); … … 12954 12811 12955 12812 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */ 12956 p MixedCtx->dr[7] &= ~X86_DR7_GD;12813 pCtx->dr[7] &= ~X86_DR7_GD; 12957 12814 12958 12815 /* Paranoia. 
*/ 12959 p MixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;12960 p MixedCtx->dr[7] |= X86_DR7_RA1_MASK;12961 12962 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)p MixedCtx->dr[7]);12816 pCtx->dr[7] &= ~X86_DR7_RAZ_MASK; 12817 pCtx->dr[7] |= X86_DR7_RA1_MASK; 12818 12819 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pCtx->dr[7]); 12963 12820 AssertRCReturn(rc, rc); 12964 12821 … … 12999 12856 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date. 13000 12857 */ 13001 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)12858 static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13002 12859 { 13003 12860 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); … … 13005 12862 13006 12863 int rc; 12864 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 13007 12865 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 13008 12866 { /* likely */ } … … 13018 12876 rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 13019 12877 AssertRCReturn(rc, rc); 13020 Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", p MixedCtx->cs.Sel, pMixedCtx->rip,13021 pVmxTransient->uExitIntErrorCode, p MixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));12878 Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip, 12879 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel)); 13022 12880 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 13023 12881 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); … … 13025 12883 } 13026 12884 13027 Assert(CPUMIsGuestInRealModeEx(p MixedCtx));12885 Assert(CPUMIsGuestInRealModeEx(pCtx)); 13028 12886 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest); 13029 12887 … … 13041 12899 rc = VINF_SUCCESS; 13042 12900 Assert(cbOp == pDis->cbInstr); 13043 Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, p MixedCtx->cs.Sel, pMixedCtx->rip));12901 Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pCtx->cs.Sel, pCtx->rip)); 13044 12902 switch (pDis->pCurInstr->uOpcode) 13045 12903 { 13046 12904 case OP_CLI: 13047 12905 { 13048 p MixedCtx->eflags.Bits.u1IF = 0;13049 p MixedCtx->eflags.Bits.u1RF = 0;13050 p MixedCtx->rip += pDis->cbInstr;12906 pCtx->eflags.Bits.u1IF = 0; 12907 pCtx->eflags.Bits.u1RF = 0; 12908 pCtx->rip += pDis->cbInstr; 13051 12909 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13052 12910 if ( !fDbgStepping 13053 && p MixedCtx->eflags.Bits.u1TF)12911 && pCtx->eflags.Bits.u1TF) 13054 12912 { 13055 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu , pMixedCtx);12913 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 13056 12914 AssertRCReturn(rc, rc); 13057 12915 } … … 13062 12920 case OP_STI: 13063 12921 { 13064 bool fOldIF = p MixedCtx->eflags.Bits.u1IF;13065 p MixedCtx->eflags.Bits.u1IF = 1;13066 p MixedCtx->eflags.Bits.u1RF = 0;13067 p MixedCtx->rip += pDis->cbInstr;12922 bool fOldIF = pCtx->eflags.Bits.u1IF; 12923 pCtx->eflags.Bits.u1IF = 1; 12924 pCtx->eflags.Bits.u1RF = 0; 12925 pCtx->rip += pDis->cbInstr; 13068 12926 if (!fOldIF) 13069 12927 { 13070 EMSetInhibitInterruptsPC(pVCpu, p MixedCtx->rip);12928 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 13071 12929 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); 13072 12930 } 13073 12931 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 
13074 12932 if ( !fDbgStepping 13075 && p MixedCtx->eflags.Bits.u1TF)12933 && pCtx->eflags.Bits.u1TF) 13076 12934 { 13077 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu , pMixedCtx);12935 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 13078 12936 AssertRCReturn(rc, rc); 13079 12937 } … … 13085 12943 { 13086 12944 rc = VINF_EM_HALT; 13087 p MixedCtx->rip += pDis->cbInstr;13088 p MixedCtx->eflags.Bits.u1RF = 0;12945 pCtx->rip += pDis->cbInstr; 12946 pCtx->eflags.Bits.u1RF = 0; 13089 12947 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 13090 12948 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); … … 13094 12952 case OP_POPF: 13095 12953 { 13096 Log4Func(("POPF CS:EIP %04x:%04RX64\n", p MixedCtx->cs.Sel, pMixedCtx->rip));12954 Log4Func(("POPF CS:EIP %04x:%04RX64\n", pCtx->cs.Sel, pCtx->rip)); 13097 12955 uint32_t cbParm; 13098 12956 uint32_t uMask; 13099 bool fGstStepping = RT_BOOL(p MixedCtx->eflags.Bits.u1TF);12957 bool fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF); 13100 12958 if (pDis->fPrefix & DISPREFIX_OPSIZE) 13101 12959 { … … 13113 12971 X86EFLAGS Eflags; 13114 12972 Eflags.u32 = 0; 13115 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(p MixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,12973 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0, 13116 12974 &GCPtrStack); 13117 12975 if (RT_SUCCESS(rc)) … … 13126 12984 break; 13127 12985 } 13128 Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, p MixedCtx->rsp, uMask, pMixedCtx->rip));13129 p MixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))13130 13131 p MixedCtx->esp += cbParm;13132 p MixedCtx->esp &= uMask;13133 p MixedCtx->rip += pDis->cbInstr;12986 Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pCtx->rsp, uMask, pCtx->rip)); 12987 pCtx->eflags.u32 = (pCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF)) 12988 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask); 12989 pCtx->esp += cbParm; 12990 pCtx->esp &= uMask; 12991 pCtx->rip += pDis->cbInstr; 13134 12992 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS); 13135 12993 /* Generate a pending-debug exception when the guest stepping over POPF regardless of how … … 13138 12996 && fGstStepping) 13139 12997 { 13140 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu , pMixedCtx);12998 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 13141 12999 AssertRCReturn(rc, rc); 13142 13000 } … … 13162 13020 /* Get the stack pointer & push the contents of eflags onto the stack. */ 13163 13021 RTGCPTR GCPtrStack = 0; 13164 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(p MixedCtx), (pMixedCtx->esp - cbParm) & uMask,13022 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask, 13165 13023 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack); 13166 13024 if (RT_FAILURE(rc)) … … 13169 13027 break; 13170 13028 } 13171 X86EFLAGS Eflags = p MixedCtx->eflags;13029 X86EFLAGS Eflags = pCtx->eflags; 13172 13030 /* The RF & VM bits are cleared on image stored on stack; see Intel Instruction reference for PUSHF. 
*/ 13173 13031 Eflags.Bits.u1RF = 0; … … 13182 13040 } 13183 13041 Log4Func(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack)); 13184 p MixedCtx->esp -= cbParm;13185 p MixedCtx->esp &= uMask;13186 p MixedCtx->rip += pDis->cbInstr;13187 p MixedCtx->eflags.Bits.u1RF = 0;13042 pCtx->esp -= cbParm; 13043 pCtx->esp &= uMask; 13044 pCtx->rip += pDis->cbInstr; 13045 pCtx->eflags.Bits.u1RF = 0; 13188 13046 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS); 13189 13047 if ( !fDbgStepping 13190 && p MixedCtx->eflags.Bits.u1TF)13048 && pCtx->eflags.Bits.u1TF) 13191 13049 { 13192 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu , pMixedCtx);13050 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 13193 13051 AssertRCReturn(rc, rc); 13194 13052 } … … 13203 13061 RTGCPTR GCPtrStack = 0; 13204 13062 uint32_t uMask = 0xffff; 13205 bool fGstStepping = RT_BOOL(p MixedCtx->eflags.Bits.u1TF);13063 bool fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF); 13206 13064 uint16_t aIretFrame[3]; 13207 13065 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE)) … … 13210 13068 break; 13211 13069 } 13212 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(p MixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,13070 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0, 13213 13071 &GCPtrStack); 13214 13072 if (RT_SUCCESS(rc)) … … 13223 13081 break; 13224 13082 } 13225 p MixedCtx->eip = 0;13226 p MixedCtx->ip = aIretFrame[0];13227 p MixedCtx->cs.Sel = aIretFrame[1];13228 p MixedCtx->cs.ValidSel = aIretFrame[1];13229 p MixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;13230 p MixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))13231 13232 p MixedCtx->sp += sizeof(aIretFrame);13083 pCtx->eip = 0; 13084 pCtx->ip = aIretFrame[0]; 13085 pCtx->cs.Sel = aIretFrame[1]; 13086 pCtx->cs.ValidSel = aIretFrame[1]; 13087 pCtx->cs.u64Base = (uint64_t)pCtx->cs.Sel << 4; 13088 pCtx->eflags.u32 = (pCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF)) 13089 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask); 13090 pCtx->sp += sizeof(aIretFrame); 13233 13091 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS 13234 13092 | HM_CHANGED_GUEST_CS); … … 13237 13095 && fGstStepping) 13238 13096 { 13239 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu , pMixedCtx);13097 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 13240 13098 AssertRCReturn(rc, rc); 13241 13099 } 13242 Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, p MixedCtx->cs.Sel, pMixedCtx->ip));13100 Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pCtx->cs.Sel, pCtx->ip)); 13243 13101 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret); 13244 13102 break; … … 13248 13106 { 13249 13107 uint16_t uVector = pDis->Param1.uValue & 0xff; 13250 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx,uVector, pDis->cbInstr);13108 hmR0VmxSetPendingIntN(pVCpu, uVector, pDis->cbInstr); 13251 13109 /* INT clears EFLAGS.TF, we must not set any pending debug exceptions here. */ 13252 13110 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt); … … 13256 13114 case OP_INTO: 13257 13115 { 13258 if (p MixedCtx->eflags.Bits.u1OF)13116 if (pCtx->eflags.Bits.u1OF) 13259 13117 { 13260 hmR0VmxSetPendingXcptOF(pVCpu, p MixedCtx, pDis->cbInstr);13118 hmR0VmxSetPendingXcptOF(pVCpu, pDis->cbInstr); 13261 13119 /* INTO clears EFLAGS.TF, we must not set any pending debug exceptions here. 
*/ 13262 13120 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt); … … 13264 13122 else 13265 13123 { 13266 p MixedCtx->eflags.Bits.u1RF = 0;13124 pCtx->eflags.Bits.u1RF = 0; 13267 13125 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS); 13268 13126 } … … 13272 13130 default: 13273 13131 { 13274 p MixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */13275 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(p MixedCtx), 0 /* pvFault */,13132 pCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */ 13133 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pCtx), 0 /* pvFault */, 13276 13134 EMCODETYPE_SUPERVISOR); 13277 13135 rc = VBOXSTRICTRC_VAL(rc2); … … 13300 13158 * up-to-date. 13301 13159 */ 13302 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 13303 { 13304 RT_NOREF_PV(pMixedCtx); 13160 static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13161 { 13305 13162 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); 13306 13163 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS … … 13332 13189 * VM-exit exception handler for \#PF (Page-fault exception). 13333 13190 */ 13334 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, P CPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)13191 static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 13335 13192 { 13336 13193 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); … … 13357 13214 { 13358 13215 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */ 13359 hmR0VmxSetPendingXcptDF(pVCpu , pMixedCtx);13216 hmR0VmxSetPendingXcptDF(pVCpu); 13360 13217 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n")); 13361 13218 } … … 13372 13229 } 13373 13230 13231 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 13374 13232 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 13375 13233 AssertRCReturn(rc, rc); 13376 13234 13377 13235 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification, 13378 p MixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));13236 pCtx->cs.Sel, pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3)); 13379 13237 13380 13238 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode); 13381 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(p MixedCtx),13239 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), 13382 13240 (RTGCPTR)pVmxTransient->uExitQualification); 13383 13241 … … 13411 13269 TRPMResetTrap(pVCpu); 13412 13270 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */ 13413 hmR0VmxSetPendingXcptDF(pVCpu , pMixedCtx);13271 hmR0VmxSetPendingXcptDF(pVCpu); 13414 13272 Log4Func(("#PF: Pending #DF due to vectoring #PF\n")); 13415 13273 } -
TabularUnified trunk/src/VBox/VMM/VMMR3/HM.cpp ¶
r72966 r72983 2625 2625 * @param pVM The cross context VM structure. 2626 2626 * @param pVCpu The cross context virtual CPU structure. 2627 * @param pCtx Pointer to the guest CPU context. 2628 */ 2629 VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2630 { 2631 NOREF(pCtx); 2627 */ 2628 VMMR3_INT_DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu) 2629 { 2632 2630 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, 2633 2631 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr, … … 3034 3032 * when the unrestricted guest execution feature is missing (VT-x only). 3035 3033 */ 3036 if ( pVM->hm.s.vmx.fEnabled3034 if ( pVM->hm.s.vmx.fEnabled 3037 3035 && !pVM->hm.s.vmx.fUnrestrictedGuest 3038 && CPUMIsGuestInRealModeEx(pCtx)3036 && CPUMIsGuestInRealModeEx(pCtx) 3039 3037 && !PDMVmmDevHeapIsEnabled(pVM)) 3040 3038 { -
TabularUnified trunk/src/VBox/VMM/include/EMHandleRCTmpl.h ¶
r72634 r72983 261 261 262 262 case VINF_EM_HM_PATCH_TPR_INSTR: 263 rc = HMR3PatchTprInstr(pVM, pVCpu , &pVCpu->cpum.GstCtx);263 rc = HMR3PatchTprInstr(pVM, pVCpu); 264 264 break; 265 265 #endif -
TabularUnified trunk/src/VBox/VMM/include/HMInternal.h ¶
r72967 r72983 1117 1117 1118 1118 # ifdef VBOX_STRICT 1119 VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu , PCPUMCTX pCtx);1119 VMMR0_INT_DECL(void) hmR0DumpRegs(PVMCPU pVCpu); 1120 1120 VMMR0_INT_DECL(void) hmR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg); 1121 1121 # endif
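The ring-3 and header hunks apply the same simplification at the call sites: HMR3PatchTprInstr() and hmR0DumpRegs() drop their context arguments, so callers such as the VINF_EM_HM_PATCH_TPR_INSTR case in EMHandleRCTmpl.h no longer pass &pVCpu->cpum.GstCtx. A hedged caller-side sketch, using stand-in forward declarations and a hypothetical wrapper name:

/* Sketch of the caller-side effect (hypothetical wrapper name, stand-in
 * forward declarations; the real prototypes live in VBox/vmm/hm.h). */
typedef struct VM    VM;
typedef struct VMCPU VMCPU;

int HMR3PatchTprInstr(VM *pVM, VMCPU *pVCpu);   /* pCtx parameter dropped in this changeset */

static int emR3PatchTprExample(VM *pVM, VMCPU *pVCpu)
{
    /* Before: rc = HMR3PatchTprInstr(pVM, pVCpu, &pVCpu->cpum.GstCtx);
     * After:  the callee fetches the guest context itself. */
    return HMR3PatchTprInstr(pVM, pVCpu);
}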