Changeset 72811 in vbox for trunk/src/VBox/VMM

- Timestamp: Jul 3, 2018 8:01:59 AM
- svn:sync-xref-src-repo-rev: 123354
- Location: trunk/src/VBox/VMM
- Files: 3 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- r72810
+++ r72811

 PVM pVM = pVCpu->CTX_SUFF(pVM);
 Assert(!RT_HI_U32(pMixedCtx->cr0));
-uint32_t const uShadowCR0 = pMixedCtx->cr0;
-uint32_t       uGuestCR0  = pMixedCtx->cr0;
+uint32_t const u32ShadowCr0 = pMixedCtx->cr0;
+uint32_t       u32GuestCr0  = pMixedCtx->cr0;

 /*
…
 {
     /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
-    uGuestCR0 |= X86_CR0_WP;
+    u32GuestCr0 |= X86_CR0_WP;
 }
…
  * set on the first CPUs to support VT-x and no mention of with regards to UX in VM-entry checks.
  */
-uGuestCR0 |= X86_CR0_NE;
+u32GuestCr0 |= X86_CR0_NE;

 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
-bool const fInterceptMF = !(uShadowCR0 & X86_CR0_NE);
+bool const fInterceptMF = !(u32ShadowCr0 & X86_CR0_NE);

 /*
…
  * Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW).
  */
-uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
-uint32_t fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
+uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
+uint32_t fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
-    fSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
+    fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
 else
-    Assert((fSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
+    Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));

-uGuestCR0 |= fSetCR0;
-uGuestCR0 &= fZapCR0;
-uGuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
+u32GuestCr0 |= fSetCr0;
+u32GuestCr0 &= fZapCr0;
+u32GuestCr0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */

 /*
…
  * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
  */
-uint32_t uCR0Mask = X86_CR0_PE
-                  | X86_CR0_NE
-                  | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
-                  | X86_CR0_PG
-                  | X86_CR0_ET  /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
-                  | X86_CR0_CD  /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
-                  | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
+uint32_t u32Cr0Mask = X86_CR0_PE
+                    | X86_CR0_NE
+                    | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
+                    | X86_CR0_PG
+                    | X86_CR0_ET  /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
+                    | X86_CR0_CD  /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
+                    | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */

 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
…
 #if 0
 if (pVM->hm.s.vmx.fUnrestrictedGuest)
-    uCr0Mask &= ~X86_CR0_PE;
+    u32Cr0Mask &= ~X86_CR0_PE;
 #endif
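For reference, the fSetCr0/fZapCr0 pair renamed above implements VT-x's fixed-bit rule from the IA32_VMX_CR0_FIXED0/FIXED1 MSR pair: a bit set in both MSRs must be 1 in CR0 while VMX operation is on, a bit clear in both must be 0, and only the remaining bits are free for the guest. A minimal standalone sketch of that pattern; the helper name and raw parameters are illustrative, not VirtualBox API:

    #include <stdint.h>

    /* Clamp a CR0 value to the VT-x fixed-bit constraints reported by the
     * IA32_VMX_CR0_FIXED0/FIXED1 MSR pair (same fSet/fZap idiom as above). */
    static uint32_t vmxApplyCr0FixedBits(uint32_t uCr0, uint64_t uFixed0, uint64_t uFixed1)
    {
        uint32_t const fSet = (uint32_t)(uFixed0 & uFixed1); /* bits that must be 1 */
        uint32_t const fZap = (uint32_t)(uFixed0 | uFixed1); /* bits outside this set must be 0 */
        uCr0 |= fSet;   /* force the mandatory-one bits on */
        uCr0 &= fZap;   /* strip the mandatory-zero bits */
        return uCr0;
    }

Unrestricted-guest mode relaxes exactly PE and PG out of the must-be-one set, which is why the hunk above clears them from fSetCr0 when fUnrestrictedGuest is true.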
 /*
  * Finally, update VMCS fields with the CR0 values.
  */
-int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, uGuestCR0);
-rc    |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, uShadowCR0);
-if (uCR0Mask != pVCpu->hm.s.vmx.u32CR0Mask)
-    rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, uCR0Mask);
+int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCr0);
+rc    |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32ShadowCr0);
+if (u32Cr0Mask != pVCpu->hm.s.vmx.u32Cr0Mask)
+    rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32Cr0Mask);
 if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
…
 /* Update our caches. */
-pVCpu->hm.s.vmx.u32CR0Mask  = uCR0Mask;
+pVCpu->hm.s.vmx.u32Cr0Mask  = u32Cr0Mask;
 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;

 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);

-Log4Func(("uCr0Mask=%#RX32 uShadowCR0=%#RX32 uGuestCR0=%#RX32 (fSetCR0=%#RX32 fZapCR0=%#RX32\n", uCR0Mask, uShadowCR0,
-          uGuestCR0, fSetCR0, fZapCR0));
+Log4Func(("u32Cr0Mask=%#RX32 u32ShadowCr0=%#RX32 u32GuestCr0=%#RX32 (fSetCr0=%#RX32 fZapCr0=%#RX32\n", u32Cr0Mask,
+          u32ShadowCr0, u32GuestCr0, fSetCr0, fZapCr0));
 }
…
-Log4Func(("uGuestCR3=%#RGp (GstN)\n", GCPhysGuestCR3));
+Log4Func(("u32GuestCr3=%#RGp (GstN)\n", GCPhysGuestCR3));
 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
 AssertRCReturn(rc, rc);
…
 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);

-Log4Func(("uGuestCR3=%#RHv (HstN)\n", HCPhysGuestCR3));
+Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCR3));
 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
 AssertRCReturn(rc, rc);
…
 {
     Assert(!RT_HI_U32(pMixedCtx->cr4));
-    uint32_t       uGuestCR4  = pMixedCtx->cr4;
-    uint32_t const uShadowCR4 = pMixedCtx->cr4;
+    uint32_t       u32GuestCr4  = pMixedCtx->cr4;
+    uint32_t const u32ShadowCr4 = pMixedCtx->cr4;

 /*
…
     Assert(pVM->hm.s.vmx.pRealModeTSS);
     Assert(PDMVmmDevHeapIsEnabled(pVM));
-    uGuestCR4 &= ~X86_CR4_VME;
+    u32GuestCr4 &= ~X86_CR4_VME;
 }
…
 {
     /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
-    uGuestCR4 |= X86_CR4_PSE;
+    u32GuestCr4 |= X86_CR4_PSE;
     /* Our identity mapping is a 32-bit page directory. */
-    uGuestCR4 &= ~X86_CR4_PAE;
+    u32GuestCr4 &= ~X86_CR4_PAE;
 }
 /* else use guest CR4.*/
…
 case PGMMODE_32_BIT:   /* 32-bit paging. */
 {
-    uGuestCR4 &= ~X86_CR4_PAE;
+    u32GuestCr4 &= ~X86_CR4_PAE;
     break;
 }
…
 case PGMMODE_PAE_NX:   /* PAE paging with NX. */
 {
-    uGuestCR4 |= X86_CR4_PAE;
+    u32GuestCr4 |= X86_CR4_PAE;
     break;
 }
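The PGMMODE switch above keeps the CR4 value VT-x runs the guest with in agreement with the paging structures PGM actually uses for the guest. A simplified standalone sketch; the enum subset, bit constants, and helper name are illustrative, and the real switch handles more modes:

    #include <stdint.h>

    #define X86_CR4_PAE  UINT32_C(0x00000020)   /* physical address extension, CR4 bit 5 */

    /* Illustrative subset of PGM's paging modes. */
    typedef enum PGMMODE { PGMMODE_32_BIT, PGMMODE_PAE, PGMMODE_PAE_NX } PGMMODE;

    /* 32-bit page tables cannot be walked with CR4.PAE set, and PAE tables
     * require it, so the effective CR4 is adjusted to match (simplified;
     * the real code also covers AMD64 and nested-paging modes). */
    static uint32_t vmxAdjustCr4ForPagingMode(uint32_t uCr4, PGMMODE enmMode)
    {
        switch (enmMode)
        {
            case PGMMODE_32_BIT:
                uCr4 &= ~X86_CR4_PAE;
                break;
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
                uCr4 |= X86_CR4_PAE;
                break;
            default:
                break; /* leave other modes untouched in this sketch */
        }
        return uCr4;
    }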
 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit).
  */
-uint64_t const fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
-uint64_t const fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
-uGuestCR4 |= fSetCR4;
-uGuestCR4 &= fZapCR4;
+uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+u32GuestCr4 |= fSetCr4;
+u32GuestCr4 &= fZapCr4;

 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them,
    that would cause a VM-exit. */
-uint32_t u32CR4Mask = X86_CR4_VME
+uint32_t u32Cr4Mask = X86_CR4_VME
                     | X86_CR4_PAE
                     | X86_CR4_PGE
…
                     | X86_CR4_VMXE;
 if (pVM->cpum.ro.HostFeatures.fXSaveRstor)
-    u32CR4Mask |= X86_CR4_OSXSAVE;
+    u32Cr4Mask |= X86_CR4_OSXSAVE;
 if (pVM->cpum.ro.GuestFeatures.fPcid)
-    u32CR4Mask |= X86_CR4_PCIDE;
+    u32Cr4Mask |= X86_CR4_PCIDE;

 /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow
    into the VMCS and update our cache. */
-rc  = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4);
-rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uShadowCR4);
-if (pVCpu->hm.s.vmx.u32CR4Mask != u32CR4Mask)
-    rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
+rc  = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCr4);
+rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32ShadowCr4);
+if (pVCpu->hm.s.vmx.u32Cr4Mask != u32Cr4Mask)
+    rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32Cr4Mask);
 AssertRCReturn(rc, rc);
-pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
+pVCpu->hm.s.vmx.u32Cr4Mask = u32Cr4Mask;

 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
…
 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);

-Log4Func(("uGuestCR4=%#RX32 uShadowCR4=%#RX32 (fSetCR4=%#RX32 fZapCR4=%#RX32)\n", uGuestCR4, uShadowCR4, fSetCR4,
-          fZapCR4));
+Log4Func(("u32GuestCr4=%#RX32 u32ShadowCr4=%#RX32 (fSetCr4=%#RX32 fZapCr4=%#RX32)\n", u32GuestCr4, u32ShadowCr4, fSetCr4,
+          fZapCr4));
 }
 return rc;
…
-uint32_t uGuestDR7;
+uint32_t u32GuestDr7;
 if (   fSteppingDB
     || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
…
 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
-uGuestDR7 = (uint32_t)CPUMGetHyperDR7(pVCpu);
+u32GuestDr7 = (uint32_t)CPUMGetHyperDR7(pVCpu);
 pVCpu->hm.s.fUsingHyperDR7 = true;
 fInterceptMovDRx = true;
…
 /* Update DR7 with the actual guest value. */
-uGuestDR7 = pMixedCtx->dr[7];
+u32GuestDr7 = pMixedCtx->dr[7];
 pVCpu->hm.s.fUsingHyperDR7 = false;
 }
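The DR7 hunks above choose between the hypervisor's and the guest's debug state. A heavily simplified sketch of that selection; the helper name is illustrative, and the real code has further reasons to intercept MOV DRx (e.g. when not using nested paging):

    #include <stdbool.h>
    #include <stdint.h>

    /* When the hypervisor has armed hardware breakpoints (or is single-
     * stepping), its DR7 is loaded and every guest MOV DRx access must be
     * intercepted so the guest can neither read nor clobber the hypervisor's
     * debug state; otherwise the guest's own DR7 is used. */
    static uint32_t vmxSelectDr7(bool fHyperDebugActive, uint32_t uHyperDr7,
                                 uint32_t uGuestDr7, bool *pfInterceptMovDRx)
    {
        if (fHyperDebugActive)
        {
            *pfInterceptMovDRx = true;
            return uHyperDr7;       /* corresponds to fUsingHyperDR7 = true above */
        }
        *pfInterceptMovDRx = false;
        return uGuestDr7;           /* guest owns its debug registers */
    }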
 /*
  * Update guest DR7.
  */
-int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, uGuestDR7);
+int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, u32GuestDr7);
 AssertRCReturn(rc, rc);
…
 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
 VMXLOCAL_BREAK_RC(rc);
-u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR0Mask)
-       | (u32Shadow & pVCpu->hm.s.vmx.u32CR0Mask);
+u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32Cr0Mask)
+       | (u32Shadow & pVCpu->hm.s.vmx.u32Cr0Mask);
 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
 CPUMSetGuestCR0(pVCpu, u32Val);
…
 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
 VMXLOCAL_BREAK_RC(rc);
-u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR4Mask)
-       | (u32Shadow & pVCpu->hm.s.vmx.u32CR4Mask);
+u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32Cr4Mask)
+       | (u32Shadow & pVCpu->hm.s.vmx.u32Cr4Mask);
 CPUMSetGuestCR4(pVCpu, u32Val);
 }
…
-if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.u32CR0Mask != 0)
+if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.u32Cr0Mask != 0)
 {
-    pVCpu->hm.s.vmx.u32CR0Mask = 0;
+    pVCpu->hm.s.vmx.u32Cr0Mask = 0;
     VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, 0);
     Log6(("hmR0VmxRunDebugStateRevert: VMX_VMCS_CTRL_CR0_MASK: 0\n"));
 }

-if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.u32CR4Mask != 0)
+if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.u32Cr4Mask != 0)
 {
-    pVCpu->hm.s.vmx.u32CR4Mask = 0;
+    pVCpu->hm.s.vmx.u32Cr4Mask = 0;
     VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, 0);
     Log6(("hmR0VmxRunDebugStateRevert: VMX_VMCS_CTRL_CR4_MASK: 0\n"));
…
 /*
  * CR0.
  */
-uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
-uint32_t const fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
+uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
+uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
    See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
 if (fUnrestrictedGuest)
-    fSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
+    fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);

-uint32_t uGuestCR0;
-rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uGuestCR0);
+uint32_t u32GuestCr0;
+rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0);
 AssertRCBreak(rc);
-HMVMX_CHECK_BREAK((uGuestCR0 & fSetCR0) == fSetCR0, VMX_IGS_CR0_FIXED1);
-HMVMX_CHECK_BREAK(!(uGuestCR0 & ~fZapCR0), VMX_IGS_CR0_FIXED0);
+HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
+HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
 if (   !fUnrestrictedGuest
-    && (uGuestCR0 & X86_CR0_PG)
-    && !(uGuestCR0 & X86_CR0_PE))
+    && (u32GuestCr0 & X86_CR0_PG)
+    && !(u32GuestCr0 & X86_CR0_PE))
 {
     HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
 }
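The VMX_IGS_CR0_FIXED0/FIXED1 checks just above are the validation counterpart of the fSet/fZap application shown earlier. The same predicate, collapsed into a standalone sketch (illustrative helper name); the CR4 hunks below use the identical shape:

    #include <stdbool.h>
    #include <stdint.h>

    /* A control-register value satisfies the VT-x fixed-bit constraints iff
     * every must-be-one bit is set and no must-be-zero bit is set. */
    static bool vmxIsCrValid(uint32_t uCr, uint32_t fSet, uint32_t fZap)
    {
        return (uCr &  fSet) == fSet    /* FIXED1 check: mandatory-one bits present */
            && (uCr & ~fZap) == 0;      /* FIXED0 check: no mandatory-zero bit set  */
    }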
 /*
  * CR4.
  */
-uint64_t const fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
-uint64_t const fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);

-uint32_t uGuestCR4;
-rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uGuestCR4);
+uint32_t u32GuestCr4;
+rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4);
 AssertRCBreak(rc);
-HMVMX_CHECK_BREAK((uGuestCR4 & fSetCR4) == fSetCR4, VMX_IGS_CR4_FIXED1);
-HMVMX_CHECK_BREAK(!(uGuestCR4 & ~fZapCR4), VMX_IGS_CR4_FIXED0);
+HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
+HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);

 /*
…
 if (   fLongModeGuest
     || (   fUnrestrictedGuest
-        && !(uGuestCR0 & X86_CR0_PE)))
+        && !(u32GuestCr0 & X86_CR0_PE)))
 {
     HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
 }
…
 if (fLongModeGuest)
 {
-    HMVMX_CHECK_BREAK(uGuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
-    HMVMX_CHECK_BREAK(uGuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
+    HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
+    HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
 }

 if (   !fLongModeGuest
-    && (uGuestCR4 & X86_CR4_PCIDE))
+    && (u32GuestCr4 & X86_CR4_PCIDE))
 {
     HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
 }
…
                   VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
 HMVMX_CHECK_BREAK(   fUnrestrictedGuest
-                  || !(uGuestCR0 & X86_CR0_PG)
+                  || !(u32GuestCr0 & X86_CR0_PG)
                   || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
                   VMX_IGS_EFER_LMA_LME_MISMATCH);
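On the read-back side (the VMX_VMCS_CTRL_CR0_READ_SHADOW / CR4_READ_SHADOW hunks earlier in this file), the guest-visible register is reassembled from the VMCS guest field and the read shadow using the same u32Cr0Mask/u32Cr4Mask renamed throughout this changeset. A compact sketch of that reassembly; the helper name is illustrative, not the VirtualBox function:

    #include <stdint.h>

    /* Bits in the guest/host mask are host-owned, so the guest's intended
     * value lives in the read shadow; the remaining bits are guest-owned and
     * the VMCS guest field holds their live value. */
    static uint32_t vmxMergeCrWithShadow(uint32_t uVmcsGuestCr, uint32_t uReadShadow, uint32_t fMask)
    {
        return (uVmcsGuestCr & ~fMask)   /* guest-owned bits: VMCS value is live      */
             | (uReadShadow  &  fMask);  /* host-owned bits: shadow holds guest view  */
    }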
trunk/src/VBox/VMM/VMMR3/HM.cpp
--- r72805
+++ r72811

 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;

-pVCpu->hm.s.vmx.u32CR0Mask = 0;
-pVCpu->hm.s.vmx.u32CR4Mask = 0;
+pVCpu->hm.s.vmx.u32Cr0Mask = 0;
+pVCpu->hm.s.vmx.u32Cr4Mask = 0;
 pVCpu->hm.s.fActive = false;
 pVCpu->hm.s.Event.fPending = false;
trunk/src/VBox/VMM/include/HMInternal.h
--- r72810
+++ r72811

 /** Current CR0 mask. */
-uint32_t                    u32CR0Mask;
+uint32_t                    u32Cr0Mask;
 /** Current CR4 mask. */
-uint32_t                    u32CR4Mask;
+uint32_t                    u32Cr4Mask;
 /** Current exception bitmap. */
 uint32_t                    u32XcptBitmap;