Changeset 48624 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
- Timestamp: Sep 23, 2013 7:50:17 AM
- svn:sync-xref-src-repo-rev: 89203
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r48621 r48624 1229 1229 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt)); 1230 1230 1231 bool fFlushPending = VMCPU_FF_IS_ SET(pVCpu, VMCPU_FF_TLB_FLUSH);1231 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH); 1232 1232 if (!fFlushPending) 1233 1233 { … … 2630 2630 { 2631 2631 int rc = VINF_SUCCESS; 2632 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)2632 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS)) 2633 2633 { 2634 2634 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 2668 2668 /* Update VCPU with the currently set VM-exit controls. */ 2669 2669 pVCpu->hm.s.vmx.u32EntryCtls = val; 2670 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;2670 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS); 2671 2671 } 2672 2672 return rc; … … 2689 2689 { 2690 2690 int rc = VINF_SUCCESS; 2691 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)2691 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS)) 2692 2692 { 2693 2693 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 2739 2739 /* Update VCPU with the currently set VM-exit controls. */ 2740 2740 pVCpu->hm.s.vmx.u32ExitCtls = val; 2741 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;2741 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS); 2742 2742 } 2743 2743 return rc; … … 2758 2758 { 2759 2759 int rc = VINF_SUCCESS; 2760 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)2760 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE)) 2761 2761 { 2762 2762 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */ … … 2795 2795 } 2796 2796 2797 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;2797 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE); 2798 2798 } 2799 2799 return rc; … … 2820 2820 */ 2821 2821 uint32_t uIntrState = 0; 2822 if (VMCPU_FF_IS_ SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))2822 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 2823 2823 { 2824 2824 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */ … … 2874 2874 { 2875 2875 int rc = VINF_SUCCESS; 2876 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)2876 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP)) 2877 2877 { 2878 2878 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip); 2879 2879 AssertRCReturn(rc, rc); 2880 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP; 2881 Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#x\n", pMixedCtx->rip, pVCpu->hm.s.fContextUseFlags)); 2880 2881 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP); 2882 Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pMixedCtx->rip, VMCPU_HMCF_VALUE(pVCpu))); 2882 2883 } 2883 2884 return rc; … … 2899 2900 { 2900 2901 int rc = VINF_SUCCESS; 2901 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)2902 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP)) 2902 2903 { 2903 2904 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp); 2904 2905 AssertRCReturn(rc, rc); 2905 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP; 2906 2907 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP); 2906 2908 Log4(("Load: VMX_VMCS_GUEST_RSP=%#RX64\n", pMixedCtx->rsp)); 2907 2909 } … … 2924 2926 { 2925 2927 int rc = VINF_SUCCESS; 2926 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)2928 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS)) 2927 2929 { 2928 2930 /* Intel spec. 
2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ). … … 2949 2951 AssertRCReturn(rc, rc); 2950 2952 2951 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;2953 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS); 2952 2954 Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", Eflags.u32)); 2953 2955 } … … 2999 3001 */ 3000 3002 int rc = VINF_SUCCESS; 3001 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)3003 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0)) 3002 3004 { 3003 3005 Assert(!(pMixedCtx->cr0 >> 32)); … … 3160 3162 Log4(("Load: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", u32CR0Mask)); 3161 3163 3162 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;3164 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0); 3163 3165 } 3164 3166 return rc; … … 3192 3194 * Guest CR3. 3193 3195 */ 3194 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)3196 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3)) 3195 3197 { 3196 3198 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS; … … 3265 3267 AssertRCReturn(rc, rc); 3266 3268 3267 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;3269 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3); 3268 3270 } 3269 3271 … … 3271 3273 * Guest CR4. 3272 3274 */ 3273 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)3275 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4)) 3274 3276 { 3275 3277 Assert(!(pMixedCtx->cr4 >> 32)); … … 3362 3364 AssertRCReturn(rc, rc); 3363 3365 3364 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;3366 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4); 3365 3367 } 3366 3368 return rc; … … 3384 3386 static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3385 3387 { 3386 if (! (pVCpu->hm.s.fContextUseFlags &HM_CHANGED_GUEST_DEBUG))3388 if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG)) 3387 3389 return VINF_SUCCESS; 3388 3390 … … 3415 3417 pMixedCtx->eflags.u32 |= X86_EFL_TF; 3416 3418 pVCpu->hm.s.fClearTrapFlag = true; 3417 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RFLAGS;3419 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS); 3418 3420 fInterceptDB = true; 3419 3421 } … … 3528 3530 AssertRCReturn(rc, rc); 3529 3531 3530 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;3532 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG); 3531 3533 return VINF_SUCCESS; 3532 3534 } … … 3779 3781 * Guest Segment registers: CS, SS, DS, ES, FS, GS. 3780 3782 */ 3781 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)3783 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS)) 3782 3784 { 3783 3785 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */ … … 3827 3829 AssertRCReturn(rc, rc); 3828 3830 3831 #ifdef VBOX_STRICT 3832 /* Validate. */ 3833 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx); 3834 #endif 3835 3836 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); 3829 3837 Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base, 3830 3838 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u)); 3831 #ifdef VBOX_STRICT3832 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);3833 #endif3834 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;3835 3839 } 3836 3840 … … 3838 3842 * Guest TR. 
3839 3843 */ 3840 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)3844 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR)) 3841 3845 { 3842 3846 /* … … 3897 3901 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc); 3898 3902 3903 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR); 3899 3904 Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base)); 3900 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;3901 3905 } 3902 3906 … … 3904 3908 * Guest GDTR. 3905 3909 */ 3906 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)3910 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR)) 3907 3911 { 3908 3912 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc); 3909 3913 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc); 3910 3914 3915 /* Validate. */ 3911 3916 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 3917 3918 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR); 3912 3919 Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt)); 3913 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;3914 3920 } 3915 3921 … … 3917 3923 * Guest LDTR. 3918 3924 */ 3919 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)3925 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR)) 3920 3926 { 3921 3927 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */ … … 3946 3952 } 3947 3953 3954 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR); 3948 3955 Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base)); 3949 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;3950 3956 } 3951 3957 … … 3953 3959 * Guest IDTR. 3954 3960 */ 3955 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)3961 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR)) 3956 3962 { 3957 3963 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc); 3958 3964 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc); 3959 3965 3966 /* Validate. */ 3960 3967 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 3968 3969 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR); 3961 3970 Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt)); 3962 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;3963 3971 } 3964 3972 … … 3990 3998 */ 3991 3999 int rc = VINF_SUCCESS; 3992 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)4000 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS)) 3993 4001 { 3994 4002 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE … … 4049 4057 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 4050 4058 4051 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;4059 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 4052 4060 } 4053 4061 … … 4057 4065 * VM-exits on WRMSRs for these MSRs. 
4058 4066 */ 4059 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)4067 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR)) 4060 4068 { 4061 4069 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc); 4062 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR; 4063 } 4064 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR) 4070 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); 4071 } 4072 4073 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR)) 4065 4074 { 4066 4075 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc); 4067 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR; 4068 } 4069 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR) 4076 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 4077 } 4078 4079 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR)) 4070 4080 { 4071 4081 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc); 4072 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;4082 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 4073 4083 } 4074 4084 … … 4092 4102 /** @todo See if we can make use of other states, e.g. 4093 4103 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */ 4094 int rc = VINF_SUCCESS; 4095 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE) 4096 { 4097 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE); 4104 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)) 4105 { 4106 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE); 4098 4107 AssertRCReturn(rc, rc); 4099 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE; 4100 } 4101 return rc; 4108 4109 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE); 4110 } 4111 return VINF_SUCCESS; 4102 4112 } 4103 4113 … … 4127 4137 { 4128 4138 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64; 4129 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS;4139 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS); 4130 4140 } 4131 4141 #else … … 4141 4151 { 4142 4152 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32; 4143 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS;4153 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS); 4144 4154 } 4145 4155 #else … … 5336 5346 5337 5347 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip); 5338 Assert(VMCPU_FF_IS_ SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));5348 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); 5339 5349 } 5340 5350 } … … 5964 5974 { 5965 5975 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, 5966 VMCPU_FF_IS_ SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));5976 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); 5967 5977 if (rc2 != VINF_SUCCESS) 5968 5978 { … … 6179 6189 if (CPUMIsGuestFPUStateActive(pVCpu)) 6180 6190 { 6191 /* We shouldn't reload CR0 without saving it first. 
*/ 6181 6192 if (!fSaveGuestState) 6182 6193 { … … 6186 6197 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx); 6187 6198 Assert(!CPUMIsGuestFPUStateActive(pVCpu)); 6188 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;6199 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 6189 6200 } 6190 6201 … … 6195 6206 #endif 6196 6207 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */)) 6197 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;6208 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG); 6198 6209 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu)); 6199 6210 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu)); … … 6380 6391 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */ 6381 6392 if (rcExit != VINF_EM_RAW_INTERRUPT) 6382 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;6393 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 6383 6394 6384 6395 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3); … … 6503 6514 Assert(!TRPMHasTrap(pVCpu)); 6504 6515 6505 /** @todo SMI. SMIs take priority over NMIs. */6506 if (VMCPU_FF_IS_ SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts . */6516 /** @todo SMI. SMIs take priority over NMIs. */ 6517 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts . */ 6507 6518 { 6508 6519 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */ … … 6948 6959 /* If any other guest-state bits are changed here, make sure to update 6949 6960 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */ 6950 pVCpu->hm.s.fContextUseFlags |=HM_CHANGED_GUEST_SEGMENT_REGS6951 6952 6953 | HM_CHANGED_GUEST_RSP;6961 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS 6962 | HM_CHANGED_GUEST_RIP 6963 | HM_CHANGED_GUEST_RFLAGS 6964 | HM_CHANGED_GUEST_RSP); 6954 6965 6955 6966 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */ … … 7066 7077 7067 7078 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu)); 7068 Assert((pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)) 7069 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 7079 Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 7070 7080 7071 7081 #ifdef VBOX_STRICT … … 7155 7165 int rc = HMR0EnterCpu(pVCpu); 7156 7166 AssertRC(rc); 7157 Assert((pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)) 7158 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 7167 Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 7159 7168 7160 7169 /* Load the active VMCS as the current one. */ … … 7195 7204 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 7196 7205 7197 if (! (pVCpu->hm.s.fContextUseFlags &HM_CHANGED_HOST_CONTEXT))7206 if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT)) 7198 7207 return VINF_SUCCESS; 7199 7208 … … 7207 7216 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 7208 7217 7209 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;7218 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT); 7210 7219 return rc; 7211 7220 } … … 7323 7332 7324 7333 /* Clear any unused and reserved bits. 
*/ 7325 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;7334 VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2); 7326 7335 7327 7336 #ifdef LOG_ENABLED … … 7350 7359 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 7351 7360 7352 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)7361 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0)) 7353 7362 { 7354 7363 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx); … … 7356 7365 } 7357 7366 7358 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)7367 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG)) 7359 7368 { 7360 7369 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx); … … 7362 7371 7363 7372 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */ 7364 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)7373 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS)) 7365 7374 { 7366 7375 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx); … … 7369 7378 } 7370 7379 7371 AssertMsg(! (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#x\n",7372 pVCpu->hm.s.fContextUseFlags));7380 AssertMsg(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE), 7381 ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu))); 7373 7382 } 7374 7383 … … 7387 7396 HMVMX_ASSERT_PREEMPT_SAFE(); 7388 7397 7389 Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));7398 Log5(("LoadFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu))); 7390 7399 #ifdef HMVMX_SYNC_FULL_GUEST_STATE 7391 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;7392 #endif 7393 7394 if ( pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)7400 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 7401 #endif 7402 7403 if (VMCPU_HMCF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP)) 7395 7404 { 7396 7405 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx); … … 7398 7407 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal); 7399 7408 } 7400 else if ( pVCpu->hm.s.fContextUseFlags)7409 else if (VMCPU_HMCF_VALUE(pVCpu)) 7401 7410 { 7402 7411 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx); … … 7405 7414 } 7406 7415 7407 /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */7408 AssertMsg( ! (pVCpu->hm.s.fContextUseFlags &HM_CHANGED_ALL_GUEST)7409 || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),7410 ("fContextUseFlags=%# x\n", pVCpu->hm.s.fContextUseFlags));7416 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */ 7417 AssertMsg( !VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST) 7418 || VMCPU_HMCF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE), 7419 ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu))); 7411 7420 7412 7421 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE … … 7588 7597 if (!CPUMIsGuestFPUStateActive(pVCpu)) 7589 7598 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx); 7590 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;7599 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 7591 7600 #endif 7592 7601 … … 7595 7604 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM). 7596 7605 */ 7597 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)7606 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT)) 7598 7607 { 7599 7608 /* This ASSUMES that pfnStartVM has been set up already. */ … … 7602 7611 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptSaveHostState); 7603 7612 } 7604 Assert(! 
(pVCpu->hm.s.fContextUseFlags &HM_CHANGED_HOST_CONTEXT));7613 Assert(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT)); 7605 7614 7606 7615 /* 7607 7616 * Load the state shared between host and guest (FPU, debug). 7608 7617 */ 7609 if ( pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)7618 if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE)) 7610 7619 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx); 7611 AssertMsg(! pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));7620 AssertMsg(!VMCPU_HMCF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu))); 7612 7621 7613 7622 /* Store status of the shared guest-host state at the time of VM-entry. */ … … 7718 7727 hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 7719 7728 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx); 7720 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;7729 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 7721 7730 } 7722 7731 #endif … … 7762 7771 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]); 7763 7772 AssertRC(rc); 7764 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;7773 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE); 7765 7774 } 7766 7775 } … … 7924 7933 break; 7925 7934 } 7926 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;7935 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG); 7927 7936 } 7928 7937 … … 8102 8111 8103 8112 pMixedCtx->rip += pVmxTransient->cbInstr; 8104 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;8113 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP); 8105 8114 return rc; 8106 8115 } … … 9222 9231 9223 9232 pMixedCtx->rip++; 9224 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;9233 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP); 9225 9234 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */ 9226 9235 rc = VINF_SUCCESS; … … 9467 9476 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before 9468 9477 EMInterpretWrmsr() changes it. */ 9469 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;9478 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE); 9470 9479 } 9471 9480 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */ … … 9473 9482 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 9474 9483 AssertRCReturn(rc, rc); 9475 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;9484 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 9476 9485 } 9477 9486 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. 
*/ … … 9483 9492 switch (pMixedCtx->ecx) 9484 9493 { 9485 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;9486 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;9487 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;9494 case MSR_IA32_SYSENTER_CS: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break; 9495 case MSR_IA32_SYSENTER_EIP: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break; 9496 case MSR_IA32_SYSENTER_ESP: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break; 9488 9497 case MSR_K8_FS_BASE: /* no break */ 9489 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;9490 case MSR_K8_KERNEL_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS; break;9498 case MSR_K8_GS_BASE: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break; 9499 case MSR_K8_KERNEL_GS_BASE: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS); break; 9491 9500 } 9492 9501 } … … 9555 9564 * resume guest execution. 9556 9565 */ 9557 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;9566 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE); 9558 9567 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold); 9559 9568 return VINF_SUCCESS; … … 9603 9612 { 9604 9613 case 0: /* CR0 */ 9614 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 9605 9615 Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0)); 9606 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;9607 9616 break; 9608 9617 case 2: /* C2 **/ … … 9611 9620 case 3: /* CR3 */ 9612 9621 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx)); 9622 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR3); 9613 9623 Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3)); 9614 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;9615 9624 break; 9616 9625 case 4: /* CR4 */ 9626 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR4); 9617 9627 Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4)); 9618 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;9619 9628 break; 9620 9629 case 8: /* CR8 */ 9621 9630 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)); 9622 9631 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */ 9623 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;9632 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE); 9624 9633 break; 9625 9634 default: … … 9660 9669 rc = EMInterpretCLTS(pVM, pVCpu); 9661 9670 AssertRCReturn(rc, rc); 9662 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;9671 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 9663 9672 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts); 9664 9673 Log4(("CRX CLTS write rc=%d\n", rc)); … … 9672 9681 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification)); 9673 9682 if (RT_LIKELY(rc == VINF_SUCCESS)) 9674 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;9683 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 9675 9684 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw); 9676 9685 Log4(("CRX LMSW write rc=%d\n", rc)); … … 9781 9790 } 9782 9791 /** @todo IEM needs to be setting these flags somehow. 
*/ 9783 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;9792 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP); 9784 9793 fUpdateRipAlready = true; 9785 9794 #else … … 9843 9852 { 9844 9853 pMixedCtx->rip += cbInstr; 9845 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;9854 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP); 9846 9855 } 9847 9856 … … 9877 9886 ASMSetDR6(pMixedCtx->dr[6]); 9878 9887 if (pMixedCtx->dr[7] != uDr7) 9879 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;9888 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG); 9880 9889 9881 9890 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx); … … 10041 10050 || rc == VERR_PAGE_NOT_PRESENT) 10042 10051 { 10043 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS 10044 | HM_CHANGED_VMX_GUEST_APIC_STATE; 10052 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP 10053 | HM_CHANGED_GUEST_RSP 10054 | HM_CHANGED_GUEST_RFLAGS 10055 | HM_CHANGED_VMX_GUEST_APIC_STATE); 10045 10056 rc = VINF_SUCCESS; 10046 10057 } … … 10133 10144 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification)); 10134 10145 if (RT_SUCCESS(rc)) 10135 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;10146 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG); 10136 10147 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 10137 10148 } … … 10199 10210 { 10200 10211 /* Successfully handled MMIO operation. */ 10201 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS 10202 | HM_CHANGED_VMX_GUEST_APIC_STATE; 10212 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP 10213 | HM_CHANGED_GUEST_RSP 10214 | HM_CHANGED_GUEST_RFLAGS 10215 | HM_CHANGED_VMX_GUEST_APIC_STATE); 10203 10216 rc = VINF_SUCCESS; 10204 10217 } … … 10264 10277 /* Successfully synced our nested page tables. 
*/ 10265 10278 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); 10266 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS; 10279 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP 10280 | HM_CHANGED_GUEST_RSP 10281 | HM_CHANGED_GUEST_RFLAGS); 10267 10282 return VINF_SUCCESS; 10268 10283 } … … 10434 10449 { 10435 10450 rc = VINF_EM_RAW_GUEST_TRAP; 10436 Assert(CPUMIsGuestFPUStateActive(pVCpu) || (pVCpu->hm.s.fContextUseFlags &HM_CHANGED_GUEST_CR0));10451 Assert(CPUMIsGuestFPUStateActive(pVCpu) || VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0)); 10437 10452 } 10438 10453 else … … 10451 10466 if (rc == VINF_SUCCESS) 10452 10467 { 10453 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;10468 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 10454 10469 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM); 10455 10470 } … … 10523 10538 pMixedCtx->eflags.Bits.u1IF = 0; 10524 10539 pMixedCtx->rip += pDis->cbInstr; 10525 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;10540 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 10526 10541 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli); 10527 10542 break; … … 10533 10548 pMixedCtx->rip += pDis->cbInstr; 10534 10549 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip); 10535 Assert(VMCPU_FF_IS_ SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));10536 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;10550 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); 10551 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 10537 10552 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti); 10538 10553 break; … … 10543 10558 rc = VINF_EM_HALT; 10544 10559 pMixedCtx->rip += pDis->cbInstr; 10545 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;10560 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP); 10546 10561 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 10547 10562 break; … … 10583 10598 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask)) 10584 10599 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask); 10585 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */ 10586 pMixedCtx->eflags.Bits.u1RF = 0; 10587 pMixedCtx->esp += cbParm; 10588 pMixedCtx->esp &= uMask; 10589 pMixedCtx->rip += pDis->cbInstr; 10590 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS; 10600 pMixedCtx->eflags.Bits.u1RF = 0; /* The RF bit is always cleared by POPF; see Intel Instruction reference. 
*/ 10601 pMixedCtx->esp += cbParm; 10602 pMixedCtx->esp &= uMask; 10603 pMixedCtx->rip += pDis->cbInstr; 10604 10605 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP 10606 | HM_CHANGED_GUEST_RSP 10607 | HM_CHANGED_GUEST_RFLAGS); 10591 10608 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf); 10592 10609 break; … … 10632 10649 pMixedCtx->esp &= uMask; 10633 10650 pMixedCtx->rip += pDis->cbInstr; 10634 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;10651 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP); 10635 10652 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf); 10636 10653 break; … … 10666 10683 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask); 10667 10684 pMixedCtx->sp += sizeof(aIretFrame); 10668 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP 10669 | HM_CHANGED_GUEST_RFLAGS; 10685 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP 10686 | HM_CHANGED_GUEST_SEGMENT_REGS 10687 | HM_CHANGED_GUEST_RSP 10688 | HM_CHANGED_GUEST_RFLAGS); 10670 10689 Log4(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip)); 10671 10690 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret); … … 10696 10715 EMCODETYPE_SUPERVISOR); 10697 10716 rc = VBOXSTRICTRC_VAL(rc2); 10698 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;10717 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 10699 10718 Log4(("#GP rc=%Rrc\n", rc)); 10700 10719 break; … … 10786 10805 /** @todo this isn't quite right, what if guest does lgdt with some MMIO 10787 10806 * memory? We don't update the whole state here... */ 10788 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS 10789 | HM_CHANGED_VMX_GUEST_APIC_STATE; 10807 VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP 10808 | HM_CHANGED_GUEST_RSP 10809 | HM_CHANGED_GUEST_RFLAGS 10810 | HM_CHANGED_VMX_GUEST_APIC_STATE); 10790 10811 TRPMResetTrap(pVCpu); 10791 10812 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
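Taken together, the hunks above make one mechanical change: every direct test, set and clear of pVCpu->hm.s.fContextUseFlags is routed through the VMCPU_HMCF_* accessor macros, and the force-flag checks switch from VMCPU_FF_IS_SET() to VMCPU_FF_IS_PENDING(). The sketch below illustrates the pattern; the macro bodies shown are only an assumption inferred from how the call sites change in this diff (the real definitions live in the HM internal headers, which are not part of this changeset, and may add stricter or atomic semantics).

    /* Assumed definitions, for illustration only -- inferred from the call sites in this diff. */
    #define VMCPU_HMCF_SET(pVCpu, fFlags)             ((pVCpu)->hm.s.fContextUseFlags |=  (fFlags))
    #define VMCPU_HMCF_CLEAR(pVCpu, fFlags)           ((pVCpu)->hm.s.fContextUseFlags &= ~(fFlags))
    #define VMCPU_HMCF_VALUE(pVCpu)                   ((pVCpu)->hm.s.fContextUseFlags)
    #define VMCPU_HMCF_IS_PENDING(pVCpu, fFlags)      (((pVCpu)->hm.s.fContextUseFlags & (fFlags)) != 0)        /* any of the bits set */
    #define VMCPU_HMCF_IS_SET(pVCpu, fFlags)          (((pVCpu)->hm.s.fContextUseFlags & (fFlags)) == (fFlags)) /* all of the bits set */
    #define VMCPU_HMCF_IS_PENDING_ONLY(pVCpu, fFlags) (!((pVCpu)->hm.s.fContextUseFlags & ~(fFlags)))           /* no other bits set */
    #define VMCPU_HMCF_IS_SET_ONLY(pVCpu, fFlags)     ((pVCpu)->hm.s.fContextUseFlags == (fFlags))              /* exactly these bits set */

    /* Typical before/after, condensed from one of the HM_CHANGED_GUEST_RIP hunks above. */

    /* Before: open-coded flag manipulation. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
    {
        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
        AssertRCReturn(rc, rc);
        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
    }

    /* After: the same logic expressed through the accessors. */
    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
    {
        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
        AssertRCReturn(rc, rc);
        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
    }

Funnelling all updates of the context-use flags through one accessor family keeps the call sites uniform and leaves a single place to tighten the semantics later (assertions, atomic updates) without touching every user again.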