Changeset 77563 in vbox for trunk/src/VBox
Timestamp: Mar 5, 2019 5:47:43 AM (6 years ago)
Location: trunk/src/VBox/VMM
Files: 3 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r77493)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r77563)
@@ -1305,5 +1305,5 @@

     /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
         hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);

@@ -1389,5 +1389,5 @@

     /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
         hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);

@@ -1622,6 +1622,6 @@
     int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
     AssertRCReturn(rc, rc);
-    AssertMsgReturnStmt(pVCpu->hm.s.vmx.GstCtls.u32EntryCtls == u32Val,
-                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.GstCtls.u32EntryCtls, u32Val),
+    AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32EntryCtls == u32Val,
+                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32EntryCtls, u32Val),
                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
@@ -1629,6 +1629,6 @@
     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
     AssertRCReturn(rc, rc);
-    AssertMsgReturnStmt(pVCpu->hm.s.vmx.GstCtls.u32ExitCtls == u32Val,
-                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.GstCtls.u32ExitCtls, u32Val),
+    AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32ExitCtls == u32Val,
+                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ExitCtls, u32Val),
                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
@@ -1636,6 +1636,6 @@
     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
     AssertRCReturn(rc, rc);
-    AssertMsgReturnStmt(pVCpu->hm.s.vmx.GstCtls.u32PinCtls == u32Val,
-                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.GstCtls.u32PinCtls, u32Val),
+    AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32PinCtls == u32Val,
+                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32PinCtls, u32Val),
                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
@@ -1643,15 +1643,15 @@
     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
     AssertRCReturn(rc, rc);
-    AssertMsgReturnStmt(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls == u32Val,
-                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.GstCtls.u32ProcCtls, u32Val),
+    AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32ProcCtls == u32Val,
+                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls, u32Val),
                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);

-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     {
         rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
         AssertRCReturn(rc, rc);
-        AssertMsgReturnStmt(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 == u32Val,
-                            ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2, u32Val),
+        AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 == u32Val,
+                            ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls2, u32Val),
                             pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
                             VERR_VMX_VMCS_FIELD_CACHE_INVALID);
@@ -1660,6 +1660,6 @@
     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
     AssertRCReturn(rc, rc);
-    AssertMsgReturnStmt(pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap == u32Val,
-                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap, u32Val),
+    AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u32XcptBitmap == u32Val,
+                        ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.Ctls.u32XcptBitmap, u32Val),
                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
@@ -1668,6 +1668,6 @@
     rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
     AssertRCReturn(rc, rc);
-    AssertMsgReturnStmt(pVCpu->hm.s.vmx.GstCtls.u64TscOffset == u64Val,
-                        ("Cache=%#RX64 VMCS=%#RX64\n", pVCpu->hm.s.vmx.GstCtls.u64TscOffset, u64Val),
+    AssertMsgReturnStmt(pVCpu->hm.s.vmx.Ctls.u64TscOffset == u64Val,
+                        ("Cache=%#RX64 VMCS=%#RX64\n", pVCpu->hm.s.vmx.Ctls.u64TscOffset, u64Val),
                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);
@@ -1688,5 +1688,5 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

-    if (pVCpu->hm.s.vmx.GstCtls.u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
+    if (pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
     {
         uint64_t u64Val;
@@ -1734,5 +1734,5 @@

     /* Verify that the permissions are as expected in the MSR bitmap. */
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     {
         VMXMSREXITREAD enmRead;
@@ -2325,5 +2325,5 @@
     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
     AssertRCReturn(rc, rc);
-    pVCpu->hm.s.vmx.GstCtls.u32PinCtls = fVal;
+    pVCpu->hm.s.vmx.Ctls.u32PinCtls = fVal;

     return VINF_SUCCESS;
@@ -2422,5 +2422,5 @@
     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
     AssertRCReturn(rc, rc);
-    pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 = fVal;
+    pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 = fVal;

     return VINF_SUCCESS;
@@ -2556,8 +2556,8 @@
     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
     AssertRCReturn(rc, rc);
-    pVCpu->hm.s.vmx.GstCtls.u32ProcCtls = fVal;
+    pVCpu->hm.s.vmx.Ctls.u32ProcCtls = fVal;

     /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
         return hmR0VmxSetupProcCtls2(pVCpu);

@@ -2674,5 +2674,5 @@

     /* Update our cache of the exception bitmap. */
-    pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap = uXcptBitmap;
+    pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;
     return VINF_SUCCESS;
 }
@@ -2959,5 +2959,5 @@
     /* Assertion is right but we would not have updated u32ExitCtls yet. */
 #if 0
-    if (!(pVCpu->hm.s.vmx.GstCtls.u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE))
+    if (!(pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE))
         Assert(uSelSS != 0);
 #endif
@@ -3271,9 +3271,9 @@

     /* Commit it to the VMCS and update our cache. */
-    if (pVCpu->hm.s.vmx.GstCtls.u32EntryCtls != fVal)
+    if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls != fVal)
     {
         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
         AssertRCReturn(rc, rc);
-        pVCpu->hm.s.vmx.GstCtls.u32EntryCtls = fVal;
+        pVCpu->hm.s.vmx.Ctls.u32EntryCtls = fVal;
     }

@@ -3355,9 +3355,9 @@

     /* Commit it to the VMCS and update our cache. */
-    if (pVCpu->hm.s.vmx.GstCtls.u32ExitCtls != fVal)
+    if (pVCpu->hm.s.vmx.Ctls.u32ExitCtls != fVal)
     {
         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
         AssertRCReturn(rc, rc);
-        pVCpu->hm.s.vmx.GstCtls.u32ExitCtls = fVal;
+        pVCpu->hm.s.vmx.Ctls.u32ExitCtls = fVal;
     }

@@ -3378,5 +3378,5 @@
 {
     Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
-    Assert(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); RT_NOREF_PV(pVCpu);
+    Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); RT_NOREF_PV(pVCpu);
     return VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
 }
@@ -3403,5 +3403,5 @@
      * Setup TPR shadowing.
      */
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
     {
         Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
@@ -3490,5 +3490,5 @@
      */
     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
-        && (pVCpu->hm.s.vmx.GstCtls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
+        && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
     {
         fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
@@ -3511,5 +3511,5 @@
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS)
     {
-        uint32_t uXcptBitmap = pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap;
+        uint32_t uXcptBitmap = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap;

         /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxExportGuestCR0(). */
@@ -3524,9 +3524,9 @@
         Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_DB));

-        if (uXcptBitmap != pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap)
+        if (uXcptBitmap != pVCpu->hm.s.vmx.Ctls.u32XcptBitmap)
         {
             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
             AssertRCReturn(rc, rc);
-            pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap = uXcptBitmap;
+            pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;
         }

@@ -3672,5 +3672,5 @@
      * Minimize VM-exits due to CR3 changes when we have NestedPaging.
      */
-    uint32_t uProcCtls = pVCpu->hm.s.vmx.GstCtls.u32ProcCtls;
+    uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
     if (pVM->hm.s.fNestedPaging)
     {
@@ -3715,5 +3715,5 @@
      * Update exception intercepts.
      */
-    uint32_t uXcptBitmap = pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap;
+    uint32_t uXcptBitmap = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap;
     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     {
@@ -3790,16 +3790,16 @@
     int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCr0);
     rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32ShadowCr0);
-    if (u32Cr0Mask != pVCpu->hm.s.vmx.GstCtls.u32Cr0Mask)
+    if (u32Cr0Mask != pVCpu->hm.s.vmx.Ctls.u32Cr0Mask)
         rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32Cr0Mask);
-    if (uProcCtls != pVCpu->hm.s.vmx.GstCtls.u32ProcCtls)
+    if (uProcCtls != pVCpu->hm.s.vmx.Ctls.u32ProcCtls)
         rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
-    if (uXcptBitmap != pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap)
+    if (uXcptBitmap != pVCpu->hm.s.vmx.Ctls.u32XcptBitmap)
         rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
     AssertRCReturn(rc, rc);

     /* Update our caches. */
-    pVCpu->hm.s.vmx.GstCtls.u32Cr0Mask = u32Cr0Mask;
-    pVCpu->hm.s.vmx.GstCtls.u32ProcCtls = uProcCtls;
-    pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap = uXcptBitmap;
+    pVCpu->hm.s.vmx.Ctls.u32Cr0Mask = u32Cr0Mask;
+    pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
+    pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = uXcptBitmap;

     ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
@@ -4031,8 +4031,8 @@
     rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCr4);
     rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32ShadowCr4);
-    if (pVCpu->hm.s.vmx.GstCtls.u32Cr4Mask != u32Cr4Mask)
+    if (pVCpu->hm.s.vmx.Ctls.u32Cr4Mask != u32Cr4Mask)
         rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32Cr4Mask);
     AssertRCReturn(rc, rc);
-    pVCpu->hm.s.vmx.GstCtls.u32Cr4Mask = u32Cr4Mask;
+    pVCpu->hm.s.vmx.Ctls.u32Cr4Mask = u32Cr4Mask;

     /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
@@ -4065,5 +4065,5 @@
 #ifdef VBOX_STRICT
     /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
-    if (pVCpu->hm.s.vmx.GstCtls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+    if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
     {
         /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
@@ -4075,5 +4075,5 @@
     bool fSteppingDB = false;
     bool fInterceptMovDRx = false;
-    uint32_t uProcCtls = pVCpu->hm.s.vmx.GstCtls.u32ProcCtls;
+    uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
     if (pVCpu->hm.s.fSingleInstruction)
     {
@@ -4185,9 +4185,9 @@
      * monitor-trap flag and update our cache.
      */
-    if (uProcCtls != pVCpu->hm.s.vmx.GstCtls.u32ProcCtls)
+    if (uProcCtls != pVCpu->hm.s.vmx.Ctls.u32ProcCtls)
     {
         int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
         AssertRCReturn(rc2, rc2);
-        pVCpu->hm.s.vmx.GstCtls.u32ProcCtls = uProcCtls;
+        pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
     }

@@ -5049,5 +5049,5 @@
     rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);   AssertRC(rc);
     Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     {
         rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);   AssertRC(rc);
@@ -5680,13 +5680,13 @@
     }

-    uint32_t uProcCtls = pVCpu->hm.s.vmx.GstCtls.u32ProcCtls;
+    uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
     if (   fOffsettedTsc
         && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
     {
-        if (pVCpu->hm.s.vmx.GstCtls.u64TscOffset != uTscOffset)
+        if (pVCpu->hm.s.vmx.Ctls.u64TscOffset != uTscOffset)
         {
             int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
             AssertRC(rc);
-            pVCpu->hm.s.vmx.GstCtls.u64TscOffset = uTscOffset;
+            pVCpu->hm.s.vmx.Ctls.u64TscOffset = uTscOffset;
         }

@@ -5696,5 +5696,5 @@
             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
             AssertRC(rc);
-            pVCpu->hm.s.vmx.GstCtls.u32ProcCtls = uProcCtls;
+            pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
         }
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
@@ -5708,5 +5708,5 @@
             int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
             AssertRC(rc);
-            pVCpu->hm.s.vmx.GstCtls.u32ProcCtls = uProcCtls;
+            pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
         }
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
@@ -6240,5 +6240,5 @@
         && (   enmRaise == IEMXCPTRAISE_PREV_EVENT
             || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF))
-        && (pVCpu->hm.s.vmx.GstCtls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
+        && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
     {
         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
@@ -6331,5 +6331,5 @@
         && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
         && uExitVector != X86_XCPT_DF
-        && (pVCpu->hm.s.vmx.GstCtls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
+        && (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
     {
         /*
@@ -6786,7 +6786,7 @@
     rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
     VMXLOCAL_BREAK_RC(rc);
-    u32Val = (u32Val & ~pVCpu->hm.s.vmx.GstCtls.u32Cr0Mask)
-           | (u32Shadow & pVCpu->hm.s.vmx.GstCtls.u32Cr0Mask);
+    u32Val = (u32Val & ~pVCpu->hm.s.vmx.Ctls.u32Cr0Mask)
+           | (u32Shadow & pVCpu->hm.s.vmx.Ctls.u32Cr0Mask);
     VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
     CPUMSetGuestCR0(pVCpu, u32Val);
@@ -6798,6 +6798,6 @@
     rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
     VMXLOCAL_BREAK_RC(rc);
-    u32Val = (u32Val & ~pVCpu->hm.s.vmx.GstCtls.u32Cr4Mask)
-           | (u32Shadow & pVCpu->hm.s.vmx.GstCtls.u32Cr4Mask);
+    u32Val = (u32Val & ~pVCpu->hm.s.vmx.Ctls.u32Cr4Mask)
+           | (u32Shadow & pVCpu->hm.s.vmx.Ctls.u32Cr4Mask);
     CPUMSetGuestCR4(pVCpu, u32Val);
 }
@@ -7160,5 +7160,5 @@
 #ifdef VBOX_STRICT
     if (CPUMIsHyperDebugStateActive(pVCpu))
-        Assert(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
+        Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
 #endif
     CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
@@ -7455,8 +7455,8 @@
     if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT))
     {
-        if (!(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
-        {
-            pVCpu->hm.s.vmx.GstCtls.u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
-            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls);
+        if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
+        {
+            pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
             AssertRC(rc);
             Log4Func(("Setup interrupt-window exiting\n"));
@@ -7473,7 +7473,7 @@
 DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
 {
-    Assert(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT);
-    pVCpu->hm.s.vmx.GstCtls.u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
-    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls);
+    Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT);
+    pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
     AssertRC(rc);
     Log4Func(("Cleared interrupt-window exiting\n"));
@@ -7491,8 +7491,8 @@
     if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
     {
-        if (!(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
-        {
-            pVCpu->hm.s.vmx.GstCtls.u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
-            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls);
+        if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
+        {
+            pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
             AssertRC(rc);
             Log4Func(("Setup NMI-window exiting\n"));
@@ -7509,7 +7509,7 @@
 DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
 {
-    Assert(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT);
-    pVCpu->hm.s.vmx.GstCtls.u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
-    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls);
+    Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT);
+    pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
     AssertRC(rc);
     Log4Func(("Cleared NMI-window exiting\n"));
@@ -7593,5 +7593,5 @@
     else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
     {
-        if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+        if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
             hmR0VmxApicSetTprThreshold(pVCpu, u8Interrupt >> 4);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
@@ -8009,5 +8009,5 @@
 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu)
 {
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
     {
         hmR0VmxClearIntWindowExitVmcs(pVCpu);
@@ -8015,5 +8015,5 @@
     }

-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
     {
         hmR0VmxClearNmiWindowExitVmcs(pVCpu);
@@ -8481,5 +8481,5 @@
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (   !pVCpu->hm.s.vmx.u64MsrApicBase
-        && (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
+        && (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
         && PDMHasApic(pVM))
     {
@@ -8698,5 +8698,5 @@
      * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
      */
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
         pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR];

@@ -8723,7 +8723,7 @@
      * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
      */
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)
-    {
-        if (!(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)
+    {
+        if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
         {
             bool fMsrUpdated;
@@ -8761,5 +8761,5 @@
 #endif
 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
-    if (!(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
+    if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
     {
         uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu);
@@ -8796,6 +8796,6 @@
     pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */

-    if (!(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
-        TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVCpu->hm.s.vmx.GstCtls.u64TscOffset);
+    if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
+        TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVCpu->hm.s.vmx.Ctls.u64TscOffset);

     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
@@ -8875,5 +8875,5 @@
      * Sync the TPR shadow with our APIC state.
      */
-    if (   (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+    if (   (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
         && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR])
     {
@@ -9054,6 +9054,6 @@
     pDbgState->fCpe2Extra = 0;
     pDbgState->bmXcptExtra = 0;
-    pDbgState->fProcCtlsInitial = pVCpu->hm.s.vmx.GstCtls.u32ProcCtls;
-    pDbgState->fProcCtls2Initial = pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2;
-    pDbgState->bmXcptInitial = pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap;
+    pDbgState->fProcCtlsInitial = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
+    pDbgState->fProcCtls2Initial = pVCpu->hm.s.vmx.Ctls.u32ProcCtls2;
+    pDbgState->bmXcptInitial = pVCpu->hm.s.vmx.Ctls.u32XcptBitmap;
 }
@@ -9080,40 +9080,40 @@
      * there should be no stale data in pCtx at this point.
      */
-    if (   (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
-        || (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & pDbgState->fCpe1Unwanted))
-    {
-        pVCpu->hm.s.vmx.GstCtls.u32ProcCtls |= pDbgState->fCpe1Extra;
-        pVCpu->hm.s.vmx.GstCtls.u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
-        VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls);
-        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVCpu->hm.s.vmx.GstCtls.u32ProcCtls));
+    if (   (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
+        || (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & pDbgState->fCpe1Unwanted))
+    {
+        pVCpu->hm.s.vmx.Ctls.u32ProcCtls |= pDbgState->fCpe1Extra;
+        pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
+        VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
+        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls));
         pDbgState->fModifiedProcCtls = true;
     }

-    if ((pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
-    {
-        pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 |= pDbgState->fCpe2Extra;
-        VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2);
-        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2));
+    if ((pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
+    {
+        pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 |= pDbgState->fCpe2Extra;
+        VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVCpu->hm.s.vmx.Ctls.u32ProcCtls2);
+        Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32ProcCtls2));
         pDbgState->fModifiedProcCtls2 = true;
     }

-    if ((pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
-    {
-        pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap |= pDbgState->bmXcptExtra;
-        VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap);
-        Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap));
+    if ((pVCpu->hm.s.vmx.Ctls.u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
+    {
+        pVCpu->hm.s.vmx.Ctls.u32XcptBitmap |= pDbgState->bmXcptExtra;
+        VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.Ctls.u32XcptBitmap);
+        Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVCpu->hm.s.vmx.Ctls.u32XcptBitmap));
         pDbgState->fModifiedXcptBitmap = true;
     }

-    if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.GstCtls.u32Cr0Mask != 0)
-    {
-        pVCpu->hm.s.vmx.GstCtls.u32Cr0Mask = 0;
+    if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.Ctls.u32Cr0Mask != 0)
+    {
+        pVCpu->hm.s.vmx.Ctls.u32Cr0Mask = 0;
         VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, 0);
         Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
     }

-    if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.GstCtls.u32Cr4Mask != 0)
-    {
-        pVCpu->hm.s.vmx.GstCtls.u32Cr4Mask = 0;
+    if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.Ctls.u32Cr4Mask != 0)
+    {
+        pVCpu->hm.s.vmx.Ctls.u32Cr4Mask = 0;
         VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, 0);
         Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
@@ -9146,5 +9146,5 @@
         int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
         AssertRCReturn(rc2, rc2);
-        pVCpu->hm.s.vmx.GstCtls.u32ProcCtls = pDbgState->fProcCtlsInitial;
+        pVCpu->hm.s.vmx.Ctls.u32ProcCtls = pDbgState->fProcCtlsInitial;
     }

@@ -9152,9 +9152,9 @@
       cached value and reload the field. */
     if (   pDbgState->fModifiedProcCtls2
-        && pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 != pDbgState->fProcCtls2Initial)
+        && pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 != pDbgState->fProcCtls2Initial)
     {
         int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
         AssertRCReturn(rc2, rc2);
-        pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 = pDbgState->fProcCtls2Initial;
+        pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 = pDbgState->fProcCtls2Initial;
     }

@@ -9162,5 +9162,5 @@
       reloading and partial recalculation the next time around. */
     if (pDbgState->fModifiedXcptBitmap)
-        pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap = pDbgState->bmXcptInitial;
+        pVCpu->hm.s.vmx.Ctls.u32XcptBitmap = pDbgState->bmXcptInitial;

     return rcStrict;
@@ -10577,5 +10577,5 @@
         rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
         AssertRCBreak(rc);
-        if (   (pVCpu->hm.s.vmx.GstCtls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+        if (   (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
             && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
         {
@@ -10587,6 +10587,6 @@
         rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
         AssertRCBreak(rc);
-        Assert(u32Val == pVCpu->hm.s.vmx.GstCtls.u32EntryCtls);
+        Assert(u32Val == pVCpu->hm.s.vmx.Ctls.u32EntryCtls);
 #endif
-        bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.GstCtls.u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
+        bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);

@@ -10658,5 +10658,5 @@
          * 51:32 beyond the processor's physical-address width are 0. */

-        if (   (pVCpu->hm.s.vmx.GstCtls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
+        if (   (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
             && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
         {
@@ -10676,5 +10676,5 @@
          * PERF_GLOBAL MSR.
          */
-        if (pVCpu->hm.s.vmx.GstCtls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
+        if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
         {
             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
@@ -10687,5 +10687,5 @@
          * PAT MSR.
          */
-        if (pVCpu->hm.s.vmx.GstCtls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
+        if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
         {
             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
@@ -10711,5 +10711,5 @@
          * EFER MSR.
          */
-        if (pVCpu->hm.s.vmx.GstCtls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
+        if (pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
         {
             Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
@@ -10718,5 +10718,5 @@
             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
                               VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
-            HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(  pVCpu->hm.s.vmx.GstCtls.u32EntryCtls
+            HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(  pVCpu->hm.s.vmx.Ctls.u32EntryCtls
                                                                            & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
                               VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
@@ -10987,4 +10987,4 @@
          * currently don't use activity states but ACTIVE. */

-        HMVMX_CHECK_BREAK(   !(pVCpu->hm.s.vmx.GstCtls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
+        HMVMX_CHECK_BREAK(   !(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
                           || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
@@ -11017,8 +11017,8 @@
             HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
                               VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
-            HMVMX_CHECK_BREAK(   !(pVCpu->hm.s.vmx.GstCtls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
+            HMVMX_CHECK_BREAK(   !(pVCpu->hm.s.vmx.Ctls.u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
                               || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
                               VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
-            if (   (pVCpu->hm.s.vmx.GstCtls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
+            if (   (pVCpu->hm.s.vmx.Ctls.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
                 && VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
                 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
@@ -11141,5 +11141,5 @@

     uint32_t uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
-    Assert(   !(pVCpu->hm.s.vmx.GstCtls.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
+    Assert(   !(pVCpu->hm.s.vmx.Ctls.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
            && uIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
     Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
@@ -11286,5 +11286,5 @@
 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
-    if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)))
+    if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)))
     {
         AssertMsgFailed(("Unexpected NMI-window exit.\n"));
@@ -11426,5 +11426,5 @@
         /* If we get a spurious VM-exit when offsetting is enabled,
            we must reset offsetting on VM-reentry. See @bugref{6634}. */
-        if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
+        if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
             pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
@@ -11454,5 +11454,5 @@
         /* If we get a spurious VM-exit when offsetting is enabled,
            we must reset offsetting on VM-reentry. See @bugref{6634}. */
-        if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
+        if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
             pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
@@ -11726,5 +11726,5 @@
 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
-    Assert(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_HLT_EXIT);
+    Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_HLT_EXIT);

     int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
@@ -11907,5 +11907,5 @@
     /* By default, we don't enable VMX_PROC_CTLS2_DESCRIPTOR_TABLE_EXIT. */
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT)
         return VERR_EM_INTERPRETER;
     AssertMsgFailed(("Unexpected XDTR access\n"));
@@ -11922,5 +11922,5 @@

     /* By default, we don't enable VMX_PROC_CTLS2_RDRAND_EXIT. */
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT)
         return VERR_EM_INTERPRETER;
     AssertMsgFailed(("Unexpected RDRAND exit\n"));
@@ -11954,5 +11954,5 @@

 #ifdef VBOX_STRICT
-    if (pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
+    if (pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     {
         if (   hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr)
@@ -12057,5 +12057,5 @@

     /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
-    if (!(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
+    if (!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
     {
         switch (idMsr)
@@ -12156,5 +12156,5 @@
 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
-    Assert(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
+    Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);

     /*
@@ -12265,5 +12265,5 @@
         {
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
-            Assert(!(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
+            Assert(!(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
                              HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
@@ -12285,5 +12285,5 @@
             /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
             Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8
-                   || !(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
+                   || !(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));

             rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
@@ -12654,7 +12654,7 @@
 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
-    Assert(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
-    pVCpu->hm.s.vmx.GstCtls.u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
-    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls);
+    Assert(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG);
+    pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
     AssertRCReturn(rc, rc);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
@@ -12703,5 +12703,5 @@
         case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
         {
-            AssertMsg(   !(pVCpu->hm.s.vmx.GstCtls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
+            AssertMsg(   !(pVCpu->hm.s.vmx.Ctls.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
                       || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
                       ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
@@ -12761,8 +12761,8 @@
     {
         Assert(!DBGFIsStepping(pVCpu));
-        Assert(pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
+        Assert(pVCpu->hm.s.vmx.Ctls.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));

         /* Don't intercept MOV DRx any more. */
-        pVCpu->hm.s.vmx.GstCtls.u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
-        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls);
+        pVCpu->hm.s.vmx.Ctls.u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
+        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.Ctls.u32ProcCtls);
         AssertRCReturn(rc, rc);
@@ -13312,5 +13312,5 @@
     AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active,
               ("uVector=%#x u32XcptBitmap=%#X32\n",
-               VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.GstCtls.u32XcptBitmap));
+               VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.Ctls.u32XcptBitmap));
 #endif
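Every hunk in this file follows the same cache discipline: a control word is read from the per-VCPU cache, modified, committed to the hardware VMCS with VMXWriteVmcs32/VMXWriteVmcs64, and only then written back to the cache so the two copies never diverge. A minimal sketch of that pattern follows; hmR0VmxSetProcCtlsBit is a hypothetical name used only for illustration, since the changeset itself merely renames the cache from GstCtls to Ctls:

    /* Hypothetical helper illustrating the read-modify-commit-then-cache
     * pattern seen throughout this diff; not part of the changeset. */
    static int hmR0VmxSetProcCtlsBit(PVMCPU pVCpu, uint32_t fBit)
    {
        uint32_t uProcCtls = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;  /* read the cached value */
        if (!(uProcCtls & fBit))
        {
            uProcCtls |= fBit;
            /* Commit to the hardware VMCS first... */
            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
            AssertRCReturn(rc, rc);
            /* ...then update the cache so it mirrors the VMCS. */
            pVCpu->hm.s.vmx.Ctls.u32ProcCtls = uProcCtls;
        }
        return VINF_SUCCESS;
    }

This is exactly the shape of hmR0VmxClearIntWindowExitVmcs and hmR0VmxClearNmiWindowExitVmcs above, and it is the invariant the AssertMsgReturnStmt block around lines 1622-1673 verifies: each cached value must equal what VMXReadVmcs32/VMXReadVmcs64 returns from the hardware VMCS.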
trunk/src/VBox/VMM/VMMR3/HM.cpp
--- trunk/src/VBox/VMM/VMMR3/HM.cpp (r77494)
+++ trunk/src/VBox/VMM/VMMR3/HM.cpp (r77563)
@@ -2914,7 +2914,7 @@
             else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
             {
-                LogRel(("HM: CPU[%u] PinCtls %#RX32\n", i, pVCpu->hm.s.vmx.GstCtls.u32PinCtls));
+                LogRel(("HM: CPU[%u] PinCtls %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32PinCtls));
                 {
-                    uint32_t const u32Val = pVCpu->hm.s.vmx.GstCtls.u32PinCtls;
+                    uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32PinCtls;
                     HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT );
                     HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT );
@@ -2923,7 +2923,7 @@
                     HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT );
                 }
-                LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", i, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls));
+                LogRel(("HM: CPU[%u] ProcCtls %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ProcCtls));
                 {
-                    uint32_t const u32Val = pVCpu->hm.s.vmx.GstCtls.u32ProcCtls;
+                    uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
                     HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT );
                     HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING);
@@ -2948,7 +2948,7 @@
                     HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS);
                 }
-                LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", i, pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2));
+                LogRel(("HM: CPU[%u] ProcCtls2 %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ProcCtls2));
                 {
-                    uint32_t const u32Val = pVCpu->hm.s.vmx.GstCtls.u32ProcCtls2;
+                    uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32ProcCtls2;
                     HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS );
                     HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT );
@@ -2974,7 +2974,7 @@
                     HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING );
                 }
-                LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", i, pVCpu->hm.s.vmx.GstCtls.u32EntryCtls));
+                LogRel(("HM: CPU[%u] EntryCtls %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32EntryCtls));
                 {
-                    uint32_t const u32Val = pVCpu->hm.s.vmx.GstCtls.u32EntryCtls;
+                    uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32EntryCtls;
                     HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG );
                     HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST );
@@ -2985,7 +2985,7 @@
                     HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR );
                 }
-                LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", i, pVCpu->hm.s.vmx.GstCtls.u32ExitCtls));
+                LogRel(("HM: CPU[%u] ExitCtls %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ExitCtls));
                 {
-                    uint32_t const u32Val = pVCpu->hm.s.vmx.GstCtls.u32ExitCtls;
+                    uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32ExitCtls;
                     HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG );
                     HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE );
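The HM.cpp hunks touch only release logging: when VM-entry fails with VMXINSTRERR_VMENTRY_INVALID_CTLS, each cached control word is dumped and HMVMX_LOGREL_FEAT lists its individual feature bits. The macro's definition is not part of this diff; one plausible shape, assuming it stringifies the flag name and tests it against the value, would be:

    /* Assumed shape only; the real definition lives elsewhere in HM.cpp. */
    #define HMVMX_LOGREL_FEAT(a_u32Val, a_fFeat) \
        do { \
            if ((a_u32Val) & (a_fFeat)) \
                LogRel(("HM:   %s\n", #a_fFeat)); \
        } while (0)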
trunk/src/VBox/VMM/include/HMInternal.h
--- trunk/src/VBox/VMM/include/HMInternal.h (r77493)
+++ trunk/src/VBox/VMM/include/HMInternal.h (r77563)
@@ -782,8 +782,9 @@
 #endif

-    /** Cache of guest VMCS control fields. */
-    VMXVMCSCTLSCACHE            GstCtls;
-    /** Cache of nested-guest VMCS control fields. */
-    VMXVMCSCTLSCACHE            NstGstCtls;
+    /** Cache of the executing guest (or nested-guest) VMCS control fields. */
+    VMXVMCSCTLSCACHE            Ctls;
+    /** Cache of guest (level 1) VMCS control fields when executing a nested-guest
+     *  (level 2). */
+    VMXVMCSCTLSCACHE            Level1Ctls;

     /** Physical address of the VM control structure (VMCS). */
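The HMInternal.h hunk is the semantic core of the changeset: the cache formerly named GstCtls now tracks whichever VMCS is currently being executed (guest or nested-guest), while the new Level1Ctls slot preserves the outer guest's (level 1) controls while a nested-guest (level 2) runs. The layout of VMXVMCSCTLSCACHE itself is not shown in this diff; the sketch below is inferred purely from the fields the other two files reference, and the real structure may contain more members:

    /* Inferred from usage in this changeset; not the authoritative definition. */
    typedef struct VMXVMCSCTLSCACHE
    {
        uint32_t u32PinCtls;        /* Pin-based VM-execution controls. */
        uint32_t u32ProcCtls;       /* Processor-based VM-execution controls. */
        uint32_t u32ProcCtls2;      /* Secondary processor-based VM-execution controls. */
        uint32_t u32EntryCtls;      /* VM-entry controls. */
        uint32_t u32ExitCtls;       /* VM-exit controls. */
        uint32_t u32XcptBitmap;     /* Exception bitmap. */
        uint32_t u32Cr0Mask;        /* CR0 guest/host mask. */
        uint32_t u32Cr4Mask;        /* CR4 guest/host mask. */
        uint64_t u64TscOffset;      /* TSC offset. */
    } VMXVMCSCTLSCACHE;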