Changeset 49729 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
- Timestamp: Nov 29, 2013 2:20:44 PM (11 years ago)
- File: trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (1 edited)
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r49727 → r49729 (changed lines only; "-" = r49727, "+" = r49729; unchanged context omitted)

 3071  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
 3071  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
 3109  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
 3109  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
 3132  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
 3132  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
 3182  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
 3182  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
 3203  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
 3203  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
 3240  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
 3240  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
 3320  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
 3320  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
 3325  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
 3326  - Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pMixedCtx->rip, VMCPU_HMCF_VALUE(pVCpu)));
 3325  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
 3326  + Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pMixedCtx->rip, HMCPU_CF_VALUE(pVCpu)));
 3346  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
 3346  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
 3351  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
 3351  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
 3372  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
 3372  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
 3397  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
 3397  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
 3447  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
 3447  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
 3608  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
 3608  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
 3640  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
 3640  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
 3713  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
 3713  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
 3719  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
 3719  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
 3810  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
 3810  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
 3832  - if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
 3832  + if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
 3864  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
 3864  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
 3979  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
 3979  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
 4229  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
 4229  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
 4282  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
 4282  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
 4290  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
 4290  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
 4349  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
 4349  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
 4356  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
 4356  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
 4364  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
 4364  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
 4371  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
 4371  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
 4400  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
 4400  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
 4407  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
 4407  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
 4415  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
 4415  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
 4451  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
 4451  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
 4470  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
 4470  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
 4478  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
 4478  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
 4481  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
 4481  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
 4484  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
 4484  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
 4487  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
 4487  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
 4490  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
 4490  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
 4493  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
 4493  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
 4516  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
 4516  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
 4521  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
 4521  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
 4551  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
 4551  + HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
 4565  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
 4565  + HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
 6614  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 6614  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 6623  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
 6623  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
 6815  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 6815  + HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 7400  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
 7401  -                | HM_CHANGED_GUEST_RIP
 7402  -                | HM_CHANGED_GUEST_RFLAGS
 7403  -                | HM_CHANGED_GUEST_RSP);
 7400  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
 7401  +                | HM_CHANGED_GUEST_RIP
 7402  +                | HM_CHANGED_GUEST_RFLAGS
 7403  +                | HM_CHANGED_GUEST_RSP);
 7518  - Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
 7518  + Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
 7608  - Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
 7608  + Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
 7647  - if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
 7647  + if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
 7659  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
 7659  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
 7775  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
 7775  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
 7804  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
 7804  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
 7810  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
 7810  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
 7816  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
 7816  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
 7823  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
 7823  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
 7832  - VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
 7832  + HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
 7835  - AssertMsg(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
 7836  -           ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
 7835  + AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
 7836  +           ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
 7853  - Log5(("LoadFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
 7853  + Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
 7855  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 7855  + HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 7858  - if (VMCPU_HMCF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
 7858  + if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
 7864  - else if (VMCPU_HMCF_VALUE(pVCpu))
 7864  + else if (HMCPU_CF_VALUE(pVCpu))
 7872  - AssertMsg(   !VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
 7873  -           || VMCPU_HMCF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
 7874  -           ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
 7872  + AssertMsg(   !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
 7873  +           || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
 7874  +           ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
 8051  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 8051  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 8059  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 8059  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 8069  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);
 8069  + HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);
 8076  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
 8076  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
 8083  - Assert(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
 8083  + Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
 8088  - if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
 8088  + if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
 8090  - AssertMsg(!VMCPU_HMCF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
 8090  + AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
 8202  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 8202  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 8246  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
 8246  + HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
 8406  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
 8406  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
 8471  - # define LDVMCS() do { VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
 8471  + # define LDVMCS() do { HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
 8604  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
 8604  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
 9716  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
 9716  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
 9994  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
 9994  + HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10004  - case MSR_IA32_SYSENTER_CS:  VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
10005  - case MSR_IA32_SYSENTER_EIP: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
10006  - case MSR_IA32_SYSENTER_ESP: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
10004  + case MSR_IA32_SYSENTER_CS:  HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
10005  + case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
10006  + case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
10008  - case MSR_K8_GS_BASE: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
10008  + case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
10012  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
10012  + HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
10017  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
10017  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
10098  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10098  + HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10146  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10146  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10154  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
10154  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
10158  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
10158  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
10164  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10164  + HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10203  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10203  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10215  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10215  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10322  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10322  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10384  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10384  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10392  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
10392  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
10425  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
10425  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
10589  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP
10590  -                | HM_CHANGED_GUEST_RSP
10591  -                | HM_CHANGED_GUEST_RFLAGS
10592  -                | HM_CHANGED_VMX_GUEST_APIC_STATE);
10589  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
10590  +                | HM_CHANGED_GUEST_RSP
10591  +                | HM_CHANGED_GUEST_RFLAGS
10592  +                | HM_CHANGED_VMX_GUEST_APIC_STATE);
10683  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
10683  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
10749  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP
10749  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
10816  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP
10817  -                | HM_CHANGED_GUEST_RSP
10818  -                | HM_CHANGED_GUEST_RFLAGS);
10816  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
10817  +                | HM_CHANGED_GUEST_RSP
10818  +                | HM_CHANGED_GUEST_RFLAGS);
10990  - Assert(CPUMIsGuestFPUStateActive(pVCpu) || VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
10990  + Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
11007  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11007  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11081  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11081  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11092  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11092  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11101  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11101  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11146  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11146  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11192  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
11192  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
11226  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11227  -                | HM_CHANGED_GUEST_SEGMENT_REGS
11228  -                | HM_CHANGED_GUEST_RSP
11229  -                | HM_CHANGED_GUEST_RFLAGS);
11226  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11227  +                | HM_CHANGED_GUEST_SEGMENT_REGS
11228  +                | HM_CHANGED_GUEST_RSP
11229  +                | HM_CHANGED_GUEST_RFLAGS);
11258  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
11258  + HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
11351  - VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11352  -                | HM_CHANGED_GUEST_RSP
11353  -                | HM_CHANGED_GUEST_RFLAGS
11354  -                | HM_CHANGED_VMX_GUEST_APIC_STATE);
11351  + HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11352  +                | HM_CHANGED_GUEST_RSP
11353  +                | HM_CHANGED_GUEST_RFLAGS
11354  +                | HM_CHANGED_VMX_GUEST_APIC_STATE);
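Every hunk above is the same mechanical substitution: the per-VCPU context-changed-flag accessors lose the VMCPU_HMCF_ prefix in favour of HMCPU_CF_, while the flag being set, cleared or tested at each call site is untouched. The sketch below only illustrates that set/clear/test-pending pattern on a fContextUseFlags-style bit mask (the field name is visible in the log strings above); the struct, bit values and macro bodies here are placeholder assumptions, not the real definitions from VirtualBox's HMInternal.h.

    /* Illustrative sketch only; names and bit values are assumptions. */
    #include <stdint.h>
    #include <stdio.h>

    #define HM_CHANGED_GUEST_RIP     UINT32_C(0x00000001)  /* placeholder bit */
    #define HM_CHANGED_GUEST_RFLAGS  UINT32_C(0x00000002)  /* placeholder bit */

    typedef struct SKETCHVCPU { uint32_t fContextUseFlags; } SKETCHVCPU;

    #define HMCPU_CF_SET(pVCpu, fFlags)        ((pVCpu)->fContextUseFlags |= (fFlags))
    #define HMCPU_CF_CLEAR(pVCpu, fFlags)      ((pVCpu)->fContextUseFlags &= ~(uint32_t)(fFlags))
    #define HMCPU_CF_IS_PENDING(pVCpu, fFlags) (!!((pVCpu)->fContextUseFlags & (fFlags)))
    #define HMCPU_CF_VALUE(pVCpu)              ((pVCpu)->fContextUseFlags)

    int main(void)
    {
        SKETCHVCPU VCpu = { 0 };

        /* A VM-exit handler marks guest RIP as needing to be reloaded ... */
        HMCPU_CF_SET(&VCpu, HM_CHANGED_GUEST_RIP);

        /* ... and the state loader syncs it into the VMCS, then clears the bit. */
        if (HMCPU_CF_IS_PENDING(&VCpu, HM_CHANGED_GUEST_RIP))
            HMCPU_CF_CLEAR(&VCpu, HM_CHANGED_GUEST_RIP);

        printf("fContextUseFlags=%#x\n", (unsigned)HMCPU_CF_VALUE(&VCpu));
        return 0;
    }

As the call sites above suggest, the real mask lives in pVCpu->hm.s and the same bits decide which parts of the guest state the load path (hmR0VmxLoadGuestState and friends) re-syncs into the VMCS before the next VM-entry.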