VirtualBox

Timestamp: Nov 29, 2013 2:20:44 PM
Author: vboxsync
Message: VMM/HM: VMCPU_HMCF -> HMCPU_CF macro renaming.
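
The macros renamed here manage the per-VCPU fContextUseFlags mask of HM_CHANGED_* dirty bits, which the VT-x code tests, sets, and clears around each VM-entry, as the diff below shows. As a reading aid, here is a minimal compilable sketch of the renamed macro family; the struct layout, the flag values, and the plain non-atomic bit operations are illustrative assumptions (the real definitions live elsewhere in the VMM sources, e.g. HMInternal.h), and only the macro names and their general semantics are taken from this changeset.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag values; the real HM_CHANGED_* bits are defined elsewhere in VBox. */
#define HM_CHANGED_GUEST_RIP     UINT32_C(0x00000001)
#define HM_CHANGED_GUEST_RFLAGS  UINT32_C(0x00000002)

/* Minimal stand-in for the per-VCPU HM state the macros operate on. */
typedef struct VMCPU
{
    struct { struct { uint32_t fContextUseFlags; } s; } hm;
} VMCPU;
typedef VMCPU *PVMCPU;

/* The renamed macro family, sketched with plain (non-atomic) bit operations. */
#define HMCPU_CF_SET(pVCpu, fFlags)        ((pVCpu)->hm.s.fContextUseFlags |=  (fFlags))
#define HMCPU_CF_CLEAR(pVCpu, fFlags)      ((pVCpu)->hm.s.fContextUseFlags &= ~(fFlags))
#define HMCPU_CF_IS_PENDING(pVCpu, fFlags) (!!((pVCpu)->hm.s.fContextUseFlags & (fFlags)))            /* any of the bits set */
#define HMCPU_CF_IS_SET(pVCpu, fFlags)     (((pVCpu)->hm.s.fContextUseFlags & (fFlags)) == (fFlags))  /* all of the bits set */
#define HMCPU_CF_VALUE(pVCpu)              ((pVCpu)->hm.s.fContextUseFlags)

int main(void)
{
    VMCPU VCpu = { { { 0 } } };
    PVMCPU pVCpu = &VCpu;

    /* Mark RIP and RFLAGS dirty; after reloading RIP into the VMCS, clear its bit. */
    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);

    printf("fContextUseFlags=%#x\n", HMCPU_CF_VALUE(pVCpu)); /* prints 0x2: RFLAGS still pending */
    return 0;
}

With that in mind, the change is purely mechanical: every VMCPU_HMCF_* call site becomes the corresponding HMCPU_CF_* call, with no behavioral change.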

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r49727 to r49729)

The change is shown below as a unified diff: lines prefixed with "-" carry the old r49727 text, lines prefixed with "+" carry the new r49729 text, and unprefixed lines are unmodified context.
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (revision 49727)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (revision 49729)
@@ -3069,5 +3069,5 @@
 {
     int rc = VINF_SUCCESS;
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
     {
         PVM pVM      = pVCpu->CTX_SUFF(pVM);
@@ -3107,5 +3107,5 @@
         /* Update VCPU with the currently set VM-exit controls. */
         pVCpu->hm.s.vmx.u32EntryCtls = val;
-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
     }
     return rc;
@@ -3130,5 +3130,5 @@

     int rc = VINF_SUCCESS;
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
     {
         PVM pVM      = pVCpu->CTX_SUFF(pVM);
@@ -3180,5 +3180,5 @@
         /* Update VCPU with the currently set VM-exit controls. */
         pVCpu->hm.s.vmx.u32ExitCtls = val;
-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
     }
     return rc;
@@ -3201,5 +3201,5 @@

     int rc = VINF_SUCCESS;
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
     {
         /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
@@ -3238,5 +3238,5 @@
         }

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
     }
     return rc;
@@ -3318,11 +3318,11 @@
 {
     int rc = VINF_SUCCESS;
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
     {
         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
         AssertRCReturn(rc, rc);

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
-        Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pMixedCtx->rip, VMCPU_HMCF_VALUE(pVCpu)));
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
+        Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pMixedCtx->rip, HMCPU_CF_VALUE(pVCpu)));
     }
     return rc;
@@ -3344,10 +3344,10 @@
 {
     int rc = VINF_SUCCESS;
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
     {
         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
         AssertRCReturn(rc, rc);

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
         Log4(("Load: VMX_VMCS_GUEST_RSP=%#RX64\n", pMixedCtx->rsp));
     }
@@ -3370,5 +3370,5 @@
 {
     int rc = VINF_SUCCESS;
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
     {
         /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
@@ -3395,5 +3395,5 @@
         AssertRCReturn(rc, rc);

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
         Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", Eflags.u32));
     }
@@ -3445,5 +3445,5 @@
      */
     int rc = VINF_SUCCESS;
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
     {
         Assert(!(pMixedCtx->cr0 >> 32));
@@ -3606,5 +3606,5 @@
         Log4(("Load: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", u32CR0Mask));

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
     }
     return rc;
@@ -3638,5 +3638,5 @@
      * Guest CR3.
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
     {
         RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
@@ -3711,5 +3711,5 @@
         AssertRCReturn(rc, rc);

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
     }

@@ -3717,5 +3717,5 @@
      * Guest CR4.
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
     {
         Assert(!(pMixedCtx->cr4 >> 32));
@@ -3808,5 +3808,5 @@
         AssertRCReturn(rc, rc);

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
     }
     return rc;
@@ -3830,5 +3830,5 @@
 static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-    if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
+    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
         return VINF_SUCCESS;

@@ -3862,5 +3862,5 @@
             pMixedCtx->eflags.u32 |= X86_EFL_TF;
             pVCpu->hm.s.fClearTrapFlag = true;
-            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
             fInterceptDB = true;
         }
@@ -3977,5 +3977,5 @@
     AssertRCReturn(rc, rc);

-    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
+    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
     return VINF_SUCCESS;
 }
@@ -4227,5 +4227,5 @@
      * Guest Segment registers: CS, SS, DS, ES, FS, GS.
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
     {
         /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
@@ -4280,5 +4280,5 @@
 #endif

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
         Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
              pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
@@ -4288,5 +4288,5 @@
      * Guest TR.
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
     {
         /*
@@ -4347,5 +4347,5 @@
         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);       AssertRCReturn(rc, rc);

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
         Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
     }
@@ -4354,5 +4354,5 @@
      * Guest GDTR.
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
     {
         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);        AssertRCReturn(rc, rc);
@@ -4362,5 +4362,5 @@
         Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000));          /* Bits 31:16 MBZ. */

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
         Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
     }
@@ -4369,5 +4369,5 @@
      * Guest LDTR.
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
     {
         /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
@@ -4398,5 +4398,5 @@
         }

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
         Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
     }
@@ -4405,5 +4405,5 @@
      * Guest IDTR.
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
     {
         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);         AssertRCReturn(rc, rc);
@@ -4413,5 +4413,5 @@
         Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000));          /* Bits 31:16 MBZ. */

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
         Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
     }
@@ -4449,5 +4449,5 @@
      */
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
     {
         if (pVM->hm.s.fAllow64BitGuests)
@@ -4468,5 +4468,5 @@
 #endif
         }
-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     }

@@ -4476,20 +4476,20 @@
      * VM-exits on WRMSRs for these MSRs.
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
     {
         int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);      AssertRCReturn(rc, rc);
-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
-    }
-
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
+    }
+
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
     {
         int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);    AssertRCReturn(rc, rc);
-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
-    }
-
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
+    }
+
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
     {
         int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);    AssertRCReturn(rc, rc);
-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
     }

@@ -4514,10 +4514,10 @@
     /** @todo See if we can make use of other states, e.g.
      *        VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT.  */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
     {
         int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
         AssertRCReturn(rc, rc);

-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
     }
     return VINF_SUCCESS;
@@ -4549,5 +4549,5 @@
         {
             pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
-            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
         }
 #else
@@ -4563,5 +4563,5 @@
         {
             pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
-            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
         }
 #else
@@ -6612,5 +6612,5 @@
         CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
         Assert(!CPUMIsGuestFPUStateActive(pVCpu));
-        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     }

@@ -6621,5 +6621,5 @@
 #endif
     if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
-        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
     Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
     Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
@@ -6813,5 +6813,5 @@
     /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
     if (rcExit != VINF_EM_RAW_INTERRUPT)
-        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);

     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
@@ -7398,8 +7398,8 @@
                 /* If any other guest-state bits are changed here, make sure to update
                    hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
-                VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_SEGMENT_REGS
-                                      | HM_CHANGED_GUEST_RIP
-                                      | HM_CHANGED_GUEST_RFLAGS
-                                      | HM_CHANGED_GUEST_RSP);
+                HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_SEGMENT_REGS
+                                    | HM_CHANGED_GUEST_RIP
+                                    | HM_CHANGED_GUEST_RFLAGS
+                                    | HM_CHANGED_GUEST_RSP);

                 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
@@ -7516,5 +7516,5 @@

     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
-    Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
+    Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));

 #ifdef VBOX_STRICT
@@ -7606,5 +7606,5 @@
             int rc = HMR0EnterCpu(pVCpu);
             AssertRC(rc);
-            Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
+            Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));

             /* Load the active VMCS as the current one. */
@@ -7645,5 +7645,5 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

-    if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
+    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
         return VINF_SUCCESS;

@@ -7657,5 +7657,5 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
+    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
     return rc;
 }
@@ -7773,5 +7773,5 @@

     /* Clear any unused and reserved bits. */
-    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
+    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);

 #ifdef LOG_ENABLED
@@ -7802,5 +7802,5 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));

-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
     {
         int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
@@ -7808,5 +7808,5 @@
     }

-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
     {
         int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
@@ -7814,5 +7814,5 @@

         /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
-        if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
+        if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
         {
             rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
@@ -7821,5 +7821,5 @@
     }

-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
     {
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
@@ -7830,9 +7830,9 @@
         }
 #endif
-        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
-    }
-
-    AssertMsg(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
-              ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
+    }
+
+    AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
+              ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
 }

@@ -7851,10 +7851,10 @@
     HMVMX_ASSERT_PREEMPT_SAFE();

-    Log5(("LoadFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
+    Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
 #ifdef HMVMX_SYNC_FULL_GUEST_STATE
-    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
-#endif
-
-    if (VMCPU_HMCF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+#endif
+
+    if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
     {
         int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
@@ -7862,5 +7862,5 @@
         STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
     }
-    else if (VMCPU_HMCF_VALUE(pVCpu))
+    else if (HMCPU_CF_VALUE(pVCpu))
     {
         int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
@@ -7870,7 +7870,7 @@

     /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
-    AssertMsg(   !VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
-              ||  VMCPU_HMCF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
-              ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
+    AssertMsg(   !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
+              ||  HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
+              ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));

 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
@@ -8049,5 +8049,5 @@
     if (!CPUMIsGuestFPUStateActive(pVCpu))
         CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
-    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
 #endif

@@ -8057,5 +8057,5 @@
         CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
         Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
-        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     }

@@ -8067,5 +8067,5 @@
         && pVCpu->hm.s.vmx.cMsrs > 0)
     {
-        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);
     }

@@ -8074,5 +8074,5 @@
      * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
     {
         /* This ASSUMES that pfnStartVM has been set up already. */
@@ -8081,12 +8081,12 @@
         STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptSaveHostState);
     }
-    Assert(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
+    Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));

     /*
      * Load the state shared between host and guest (FPU, debug, lazy MSRs).
      */
-    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
         hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
-    AssertMsg(!VMCPU_HMCF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
+    AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));

     /* Store status of the shared guest-host state at the time of VM-entry. */
@@ -8200,5 +8200,5 @@
         hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
         CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
-        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     }
 #endif
@@ -8244,5 +8244,5 @@
             rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
             AssertRC(rc);
-            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
         }
     }
@@ -8404,5 +8404,5 @@
             break;
         }
-        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
     }

@@ -8469,5 +8469,5 @@
 #ifdef DEBUG_ramshankar
 # define SVVMCS()       do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0)
-# define LDVMCS()       do { VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
+# define LDVMCS()       do { HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
 #endif
     int rc;
@@ -8602,5 +8602,5 @@

     pMixedCtx->rip += pVmxTransient->cbInstr;
-    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
     return rc;
 }
@@ -9714,5 +9714,5 @@

     pMixedCtx->rip++;
-    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
     if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx))    /* Requires eflags. */
         rc = VINF_SUCCESS;
@@ -9992,5 +9992,5 @@
              * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before
                EMInterpretWrmsr() changes it. */
-            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
         }
         else if (pMixedCtx->ecx == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
@@ -10002,18 +10002,18 @@
             switch (pMixedCtx->ecx)
             {
-                case MSR_IA32_SYSENTER_CS:  VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
-                case MSR_IA32_SYSENTER_EIP: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
-                case MSR_IA32_SYSENTER_ESP: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
+                case MSR_IA32_SYSENTER_CS:  HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
+                case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
+                case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
                 case MSR_K8_FS_BASE:        /* no break */
-                case MSR_K8_GS_BASE:        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);     break;
+                case MSR_K8_GS_BASE:        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);     break;
                 default:
                 {
                     if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
-                        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
+                        HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
                     else if (   HMVMX_IS_64BIT_HOST_MODE()
                              && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                     {
-                        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
+                        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
                     }
 #endif
@@ -10096,5 +10096,5 @@
      * resume guest execution.
      */
-    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
     return VINF_SUCCESS;
@@ -10144,5 +10144,5 @@
             {
                 case 0: /* CR0 */
-                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
+                    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
                     Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
                     break;
@@ -10152,9 +10152,9 @@
                 case 3: /* CR3 */
                     Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
-                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
+                    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
                     Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
                     break;
                 case 4: /* CR4 */
-                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
+                    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
                     Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
                     break;
@@ -10162,5 +10162,5 @@
                     Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
                     /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
-                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
+                    HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
                     break;
                 default:
@@ -10201,5 +10201,5 @@
             rc = EMInterpretCLTS(pVM, pVCpu);
             AssertRCReturn(rc, rc);
-            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
             Log4(("CRX CLTS write rc=%d\n", rc));
@@ -10213,5 +10213,5 @@
             rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
             if (RT_LIKELY(rc == VINF_SUCCESS))
-                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
             Log4(("CRX LMSW write rc=%d\n", rc));
@@ -10320,5 +10320,5 @@
         }
         /** @todo IEM needs to be setting these flags somehow. */
-        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
         fUpdateRipAlready = true;
 #else
@@ -10382,5 +10382,5 @@
         {
             pMixedCtx->rip += cbInstr;
-            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
         }

@@ -10390,5 +10390,5 @@
          */
         if (fIOString)
-            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);

         /*
@@ -10423,5 +10423,5 @@
                     ASMSetDR6(pMixedCtx->dr[6]);
                 if (pMixedCtx->dr[7] != uDr7)
-                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
+                    HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);

                 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
@@ -10587,8 +10587,8 @@
                 || rc == VERR_PAGE_NOT_PRESENT)
             {
-                VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
-                                      | HM_CHANGED_GUEST_RSP
-                                      | HM_CHANGED_GUEST_RFLAGS
-                                      | HM_CHANGED_VMX_GUEST_APIC_STATE);
+                HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
+                                    | HM_CHANGED_GUEST_RSP
+                                    | HM_CHANGED_GUEST_RFLAGS
+                                    | HM_CHANGED_VMX_GUEST_APIC_STATE);
                 rc = VINF_SUCCESS;
             }
@@ -10681,5 +10681,5 @@
                                  VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
         if (RT_SUCCESS(rc))
-            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
     }
@@ -10747,5 +10747,5 @@
     {
         /* Successfully handled MMIO operation. */
-        VMCPU_HMCF_SET(pVCpu,  HM_CHANGED_GUEST_RIP
+        HMCPU_CF_SET(pVCpu,  HM_CHANGED_GUEST_RIP
                              | HM_CHANGED_GUEST_RSP
                             | HM_CHANGED_GUEST_RFLAGS
@@ -10814,7 +10814,7 @@
         /* Successfully synced our nested page tables. */
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
-        VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
-                              | HM_CHANGED_GUEST_RSP
-                              | HM_CHANGED_GUEST_RFLAGS);
+        HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
+                            | HM_CHANGED_GUEST_RSP
+                            | HM_CHANGED_GUEST_RFLAGS);
         return VINF_SUCCESS;
     }
@@ -10988,5 +10988,5 @@
     {
         rc = VINF_EM_RAW_GUEST_TRAP;
-        Assert(CPUMIsGuestFPUStateActive(pVCpu) || VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
+        Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
     }
     else
@@ -11005,5 +11005,5 @@
     {
         /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
-        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
         pVCpu->hm.s.fUseGuestFpu = true;
@@ -11079,5 +11079,5 @@
                 pMixedCtx->eflags.Bits.u1IF = 0;
                 pMixedCtx->rip += pDis->cbInstr;
-                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
                 break;
@@ -11090,5 +11090,5 @@
                 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
                 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
-                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
                 break;
@@ -11099,5 +11099,5 @@
                 rc = VINF_EM_HALT;
                 pMixedCtx->rip += pDis->cbInstr;
-                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
                 break;
@@ -11144,5 +11144,5 @@
                 pMixedCtx->rip              += pDis->cbInstr;

-                VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
+                HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
                                       | HM_CHANGED_GUEST_RSP
                                       | HM_CHANGED_GUEST_RFLAGS);
@@ -11190,5 +11190,5 @@
                 pMixedCtx->esp               &= uMask;
                 pMixedCtx->rip               += pDis->cbInstr;
-                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
                 break;
@@ -11224,8 +11224,8 @@
                                                 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
                 pMixedCtx->sp                += sizeof(aIretFrame);
-                VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
-                                      | HM_CHANGED_GUEST_SEGMENT_REGS
-                                      | HM_CHANGED_GUEST_RSP
-                                      | HM_CHANGED_GUEST_RFLAGS);
+                HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
+                                    | HM_CHANGED_GUEST_SEGMENT_REGS
+                                    | HM_CHANGED_GUEST_RSP
+                                    | HM_CHANGED_GUEST_RFLAGS);
                 Log4(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
@@ -11256,5 +11256,5 @@
                                                                     EMCODETYPE_SUPERVISOR);
                 rc = VBOXSTRICTRC_VAL(rc2);
-                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
                 Log4(("#GP rc=%Rrc\n", rc));
                 break;
@@ -11349,8 +11349,8 @@
         /** @todo this isn't quite right, what if guest does lgdt with some MMIO
          *        memory? We don't update the whole state here... */
-        VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
-                              | HM_CHANGED_GUEST_RSP
-                              | HM_CHANGED_GUEST_RFLAGS
-                              | HM_CHANGED_VMX_GUEST_APIC_STATE);
+        HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
+                            | HM_CHANGED_GUEST_RSP
+                            | HM_CHANGED_GUEST_RFLAGS
+                            | HM_CHANGED_VMX_GUEST_APIC_STATE);
         TRPMResetTrap(pVCpu);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);