Changeset 48624 in vbox
- Timestamp: Sep 23, 2013 7:50:17 AM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (diff from r48621 to r48624)

The edit is a mechanical conversion: every direct access to pVCpu->hm.s.fContextUseFlags is routed through the VMCPU_HMCF_* accessor macros. Line numbers below are those of r48621; flag names drop the HM_CHANGED_ prefix for brevity.

- Flag tests of the form "if (pVCpu->hm.s.fContextUseFlags & <flag>)" become "if (VMCPU_HMCF_IS_PENDING(pVCpu, <flag>))": 1105 (GUEST_CR0), 1180 (GUEST_CR2), 1190 (GUEST_CR3), 1217 (GUEST_CR4), 1276 (GUEST_SEGMENT_REGS), 1290 (GUEST_TR), 1297 (GUEST_LDTR), 1304 (GUEST_GDTR), 1313 (GUEST_IDTR), 1344 (SVM_GUEST_EFER_MSR), 1828 (GUEST_CR0), 1831 (GUEST_DEBUG) and 2922 (HOST_GUEST_SHARED_STATE). The negated early-return tests at 1391 (GUEST_DEBUG) and 1555 (SVM_GUEST_APIC_STATE) become "if (!VMCPU_HMCF_IS_PENDING(pVCpu, <flag>))".
- Flag clears of the form "pVCpu->hm.s.fContextUseFlags &= ~<flag>;" become "VMCPU_HMCF_CLEAR(pVCpu, <flag>);": 1158 (GUEST_CR0), 1184 (GUEST_CR2), 1211 (GUEST_CR3), 1256 (GUEST_CR4), 1286 (GUEST_SEGMENT_REGS), 1293 (GUEST_TR), 1300 (GUEST_LDTR), 1309 (GUEST_GDTR), 1318 (GUEST_IDTR), 1348 (SVM_GUEST_EFER_MSR), 1541 (GUEST_DEBUG), 1599 (SVM_GUEST_APIC_STATE), 1745 (HOST_CONTEXT) and 2924 (HOST_CONTEXT).
- Flag sets of the form "pVCpu->hm.s.fContextUseFlags |= <flag>;" become "VMCPU_HMCF_SET(pVCpu, <flag>);": 1997 (GUEST_CR0), 2012 (GUEST_DEBUG), 2169 and 2838 (ALL_GUEST), 3077, 3083, 3666, 4159, 4210, 4245, 4592 and 4776 (SVM_GUEST_APIC_STATE), 3555 (GUEST_CR2), 4146 (GUEST_CR0), 4151 (GUEST_CR3), 4155 (GUEST_CR4), 4248 (SVM_GUEST_EFER_MSR), 4338 (GUEST_DEBUG) and 4844 (GUEST_CR0).
- The assertions at 1655 and 1716 change from "Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE))" to "Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE))". The assertion at 4827 becomes "Assert(CPUMIsGuestFPUStateActive(pVCpu) || VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))".
- 1754-1755: the doxygen remark now reads "Also sets up the appropriate VMRUN function to execute guest code based on the guest CPU mode."
- 1790-1804: the clearing of the unused and reserved bits (GUEST_RIP, GUEST_RSP, GUEST_RFLAGS, the three GUEST_SYSENTER MSR flags and SVM_RESERVED1/2/3) is rewritten as a single VMCPU_HMCF_CLEAR(pVCpu, ...) call, the comment on the following assertion now says "except maybe the host context and/or shared host/guest bits", and the assertion itself becomes
      AssertMsg(   !VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
                || VMCPU_HMCF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
                ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
  (The old assertion additionally logged pVM and pVCpu; a blank line before STAM_PROFILE_ADV_STOP is dropped.)
- 1834-1835: the assertion in the shared-state loader becomes "AssertMsg(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)))".
- 2925: the pre-VMRUN assertion becomes "AssertMsg(!VMCPU_HMCF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)))".
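The macro definitions themselves are not part of this changeset. Purely as an illustration of their apparent semantics, inferred from the one-to-one replacements listed above, the accessors would look roughly like the sketch below; the names come from the diff, but the bodies are assumptions and the real definitions in the VMM headers may differ.

    /* Sketch only: inferred behaviour of the accessors used by r48624, not the actual definitions. */
    #define VMCPU_HMCF_SET(pVCpu, fFlags)             ((pVCpu)->hm.s.fContextUseFlags |=  (fFlags))
    #define VMCPU_HMCF_CLEAR(pVCpu, fFlags)           ((pVCpu)->hm.s.fContextUseFlags &= ~(fFlags))
    #define VMCPU_HMCF_IS_PENDING(pVCpu, fFlags)      (!!((pVCpu)->hm.s.fContextUseFlags & (fFlags)))            /* any of the given bits */
    #define VMCPU_HMCF_IS_SET(pVCpu, fFlags)          (((pVCpu)->hm.s.fContextUseFlags & (fFlags)) == (fFlags))  /* all of the given bits */
    #define VMCPU_HMCF_IS_PENDING_ONLY(pVCpu, fFlags) (!((pVCpu)->hm.s.fContextUseFlags & ~(fFlags)))            /* nothing outside the mask */
    #define VMCPU_HMCF_IS_SET_ONLY(pVCpu, fFlags)     ((pVCpu)->hm.s.fContextUseFlags == (fFlags))               /* exactly this value */
    #define VMCPU_HMCF_VALUE(pVCpu)                   ((pVCpu)->hm.s.fContextUseFlags)

Funnelling the bit-twiddling through accessors like these lets the flag word be renamed, re-typed or instrumented later without touching every call site, which appears to be the point of the mechanical conversion in both files.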
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (diff from r48621 to r48624)

The same conversion to the VMCPU_HMCF_* accessors is applied here, plus a handful of VMCPU_FF_IS_SET force-flag tests switched to VMCPU_FF_IS_PENDING and some reordering so that flags are cleared (and descriptor values validated) before the Log4 statements. Line numbers below are those of r48621; flag names drop the HM_CHANGED_ prefix.

- "pVCpu->hm.s.fContextUseFlags & <flag>" tests become VMCPU_HMCF_IS_PENDING(pVCpu, <flag>): 2632 (VMX_ENTRY_CTLS), 2691 (VMX_EXIT_CTLS), 2760 (VMX_GUEST_APIC_STATE), 2876 (GUEST_RIP), 2901 (GUEST_RSP), 2926 (GUEST_RFLAGS), 3001 (GUEST_CR0), 3194 (GUEST_CR3), 3273 (GUEST_CR4), 3781 (GUEST_SEGMENT_REGS), 3840 (GUEST_TR), 3906 (GUEST_GDTR), 3919 (GUEST_LDTR), 3955 (GUEST_IDTR), 3992 (VMX_GUEST_AUTO_MSRS), 4059 (GUEST_SYSENTER_CS_MSR), 4064 and 4069 (GUEST_SYSENTER_EIP/ESP_MSR), 7352 (GUEST_CR0), 7358 (GUEST_DEBUG), 7364 (GUEST_RFLAGS), 7597 (HOST_CONTEXT) and 7609 (HOST_GUEST_SHARED_STATE); the negated tests at 3386 (GUEST_DEBUG) and 7197 (HOST_CONTEXT) become !VMCPU_HMCF_IS_PENDING(...).
- "&= ~<flag>" becomes VMCPU_HMCF_CLEAR(pVCpu, <flag>): 2670 (VMX_ENTRY_CTLS), 2741 (VMX_EXIT_CTLS), 2797 (VMX_GUEST_APIC_STATE), 2880 (GUEST_RIP), 2905 (GUEST_RSP), 2951 (GUEST_RFLAGS), 3162 (GUEST_CR0), 3267 (GUEST_CR3), 3364 (GUEST_CR4), 3530 (GUEST_DEBUG), 4051 (VMX_GUEST_AUTO_MSRS), 4062, 4067 and 4072 (the three GUEST_SYSENTER MSR flags), 7209 (HOST_CONTEXT) and 7325 (GUEST_CR2).
- In the segment-register, TR, GDTR, LDTR and IDTR blocks of the segment loader the clear (old lines 3834, 3900, 3913, 3949, 3962) likewise becomes VMCPU_HMCF_CLEAR and is moved ahead of the Log4 statement; the GDTR/IDTR limit asserts gain a "/* Validate. */" comment, and the VBOX_STRICT call to hmR0VmxValidateSegmentRegs moves ahead of the Log4 as well.
- "|= <flag>" becomes VMCPU_HMCF_SET(pVCpu, <flag>): 3417 (GUEST_RFLAGS), 4129 and 4143 (HOST_CONTEXT | VMX_EXIT_CTLS | VMX_ENTRY_CTLS), 6188 (GUEST_CR0, with a new comment "We shouldn't reload CR0 without saving it first."), 6197 (GUEST_DEBUG), 6382 (ALL_GUEST), 6950-6953 (GUEST_SEGMENT_REGS | GUEST_RIP | GUEST_RFLAGS | GUEST_RSP, reflowed one flag per line), 7391 (ALL_GUEST, under HMVMX_SYNC_FULL_GUEST_STATE), 7590 and 7720 (GUEST_CR0), 7764 (VMX_GUEST_APIC_STATE), 7926 (GUEST_DEBUG), 8104, 9224, 9783 and 9845 (GUEST_RIP), 9469 and 9557 (VMX_GUEST_APIC_STATE), 9475 (VMX_GUEST_AUTO_MSRS), 9485-9490 (the WRMSR switch: the SYSENTER CS/EIP/ESP MSR flags, GUEST_SEGMENT_REGS for FS/GS base, VMX_GUEST_AUTO_MSRS for KERNEL_GS_BASE), 9606, 9614 and 9618 (GUEST_CR0/CR3/CR4 in the CRx-write switch, each moved ahead of its Log4), 9623 (VMX_GUEST_APIC_STATE for CR8), 9662 and 9674 (GUEST_CR0 for CLTS and LMSW), 9879 and 10135 (GUEST_DEBUG), 10043, 10201 and 10788 (GUEST_RIP | GUEST_RSP | GUEST_RFLAGS | VMX_GUEST_APIC_STATE, reflowed one flag per line), 10266 (GUEST_RIP | GUEST_RSP | GUEST_RFLAGS), 10453 (GUEST_CR0), 10525 and 10536 (GUEST_RIP | GUEST_RFLAGS for CLI/STI), 10545 (GUEST_RIP for HLT), 10590 (GUEST_RIP | GUEST_RSP | GUEST_RFLAGS for POPF, with the RF-bit comment moved onto the assignment line), 10634 (GUEST_RIP | GUEST_RSP for PUSHF), 10668 (GUEST_RIP | GUEST_SEGMENT_REGS | GUEST_RSP | GUEST_RFLAGS for IRET) and 10698 (ALL_GUEST after interpreting a #GP).
- Whole-value reads: the Log4 at 2881, the Log5 at 7389 and the assertions at 2925-style sites now use VMCPU_HMCF_VALUE(pVCpu) with the %#RX32 format; "if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)" at 7394 becomes "if (VMCPU_HMCF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))" and "else if (pVCpu->hm.s.fContextUseFlags)" at 7400 becomes "else if (VMCPU_HMCF_VALUE(pVCpu))"; the two-line asserts at 7068-7069 and 7157-7158 collapse to "Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE))"; 7604 becomes "Assert(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))"; the AssertMsg at 7371-7372 becomes "AssertMsg(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)))" and the one at 7611 becomes "AssertMsg(!VMCPU_HMCF_VALUE(pVCpu), ...)"; 10436 becomes "Assert(CPUMIsGuestFPUStateActive(pVCpu) || VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))"; and the AssertMsg at 7407-7410 is rewritten the same way as in HMSVMR0.cpp, its comment now reading "except maybe the host context and/or the shared host/guest bits".
- Force-flag checks switch from VMCPU_FF_IS_SET to VMCPU_FF_IS_PENDING: 1231 (VMCPU_FF_TLB_FLUSH), 2822, 5338 and 10535 (VMCPU_FF_INHIBIT_INTERRUPTS), 5966 (VMCPU_FF_PGM_SYNC_CR3) and 6506 (VMCPU_FF_INTERRUPT_NMI, with the "@todo SMI" comment re-indented).
- 4094-4101: in the activity-state loader the early "int rc = VINF_SUCCESS;" is dropped, the test and clear use VMCPU_HMCF_IS_PENDING/VMCPU_HMCF_CLEAR, rc is declared from the VMXWriteVmcs32 call inside the if-block, and the function returns VINF_SUCCESS directly.
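For readability, here is how one of the converted blocks reads after the change. This is simply the guest-RIP load hunk of HMVMXR0.cpp (around old lines 2875-2883 above) re-assembled from the diff, with comments noting the pre-change form; the enclosing function is not shown in the diff.

    int rc = VINF_SUCCESS;
    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))       /* was: pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP */
    {
        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
        AssertRCReturn(rc, rc);

        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);            /* was: fContextUseFlags &= ~HM_CHANGED_GUEST_RIP */
        Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pMixedCtx->rip, VMCPU_HMCF_VALUE(pVCpu)));
    }
    return rc;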