VirtualBox

Changeset 48624 in vbox


Timestamp:
Sep 23, 2013 7:50:17 AM
Author:
vboxsync
Message:

VMM/HMVMXR0, HMSVMR0: Use HMCF macros.

Location:
trunk/src/VBox/VMM/VMMR0
Files:
2 edited
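
For context: this changeset replaces direct bit-twiddling of pVCpu->hm.s.fContextUseFlags with the VMCPU_HMCF_* accessor macros. The before/after lines in the hunks below imply roughly the following mapping; this is a sketch inferred from the diff only, the authoritative definitions live in the HM internal headers and may carry additional checks:

    /* Sketch inferred from this changeset's before/after lines; not the real header. */
    #define VMCPU_HMCF_SET(pVCpu, fFlags)             ((pVCpu)->hm.s.fContextUseFlags |= (fFlags))
    #define VMCPU_HMCF_CLEAR(pVCpu, fFlags)           ((pVCpu)->hm.s.fContextUseFlags &= ~(fFlags))
    #define VMCPU_HMCF_VALUE(pVCpu)                   ((pVCpu)->hm.s.fContextUseFlags)
    /* Any of the given bits still dirty? */
    #define VMCPU_HMCF_IS_PENDING(pVCpu, fFlags)      (!!((pVCpu)->hm.s.fContextUseFlags & (fFlags)))
    /* No bits outside the given mask dirty? */
    #define VMCPU_HMCF_IS_PENDING_ONLY(pVCpu, fFlags) (!((pVCpu)->hm.s.fContextUseFlags & ~(fFlags)))
    /* All of the given bits dirty? */
    #define VMCPU_HMCF_IS_SET(pVCpu, fFlags)          (((pVCpu)->hm.s.fContextUseFlags & (fFlags)) == (fFlags))
    /* Exactly the given bits dirty and nothing else? */
    #define VMCPU_HMCF_IS_SET_ONLY(pVCpu, fFlags)     ((pVCpu)->hm.s.fContextUseFlags == (fFlags))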

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r48621 r48624  
    11031103     */
    11041104    PVM pVM = pVCpu->CTX_SUFF(pVM);
    1105     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     1105    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
    11061106    {
    11071107        uint64_t u64GuestCR0 = pCtx->cr0;
     
    11561156        pVmcb->guest.u64CR0 = u64GuestCR0;
    11571157        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    1158         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     1158        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
    11591159    }
    11601160}
     
    11781178     * Guest CR2.
    11791179     */
    1180     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR2)
     1180    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR2))
    11811181    {
    11821182        pVmcb->guest.u64CR2 = pCtx->cr2;
    11831183        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
    1184         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
     1184        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
    11851185    }
    11861186
     
    11881188     * Guest CR3.
    11891189     */
    1190     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
     1190    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
    11911191    {
    11921192        if (pVM->hm.s.fNestedPaging)
     
    12091209
    12101210        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    1211         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
     1211        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
    12121212    }
    12131213
     
    12151215     * Guest CR4.
    12161216     */
    1217     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
     1217    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
    12181218    {
    12191219        uint64_t u64GuestCR4 = pCtx->cr4;
     
    12541254        pVmcb->guest.u64CR4 = u64GuestCR4;
    12551255        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    1256         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
     1256        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
    12571257    }
    12581258
     
    12741274{
    12751275    /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
    1276     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
     1276    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
    12771277    {
    12781278        HMSVM_LOAD_SEG_REG(CS, cs);
     
    12841284
    12851285        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
    1286         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
     1286        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
    12871287    }
    12881288
    12891289    /* Guest TR. */
    1290     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
     1290    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
    12911291    {
    12921292        HMSVM_LOAD_SEG_REG(TR, tr);
    1293         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
     1293        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
    12941294    }
    12951295
    12961296    /* Guest LDTR. */
    1297     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
     1297    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
    12981298    {
    12991299        HMSVM_LOAD_SEG_REG(LDTR, ldtr);
    1300         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
     1300        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
    13011301    }
    13021302
    13031303    /* Guest GDTR. */
    1304     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
     1304    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
    13051305    {
    13061306        pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
    13071307        pVmcb->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
    13081308        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
    1309         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
     1309        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
    13101310    }
    13111311
    13121312    /* Guest IDTR. */
    1313     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
     1313    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
    13141314    {
    13151315        pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
    13161316        pVmcb->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
    13171317        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT;
    1318         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
     1318        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
    13191319    }
    13201320}
     
    13421342     * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
    13431343     */
    1344     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_EFER_MSR)
     1344    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR))
    13451345    {
    13461346        pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
    13471347        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
    1348         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_SVM_GUEST_EFER_MSR;
     1348        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR);
    13491349    }
    13501350
     
    13891389static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    13901390{
    1391     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
     1391    if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
    13921392        return;
    13931393    Assert((pCtx->dr[6] & X86_DR6_RA1_MASK) == X86_DR6_RA1_MASK); Assert((pCtx->dr[6] & X86_DR6_RAZ_MASK) == 0);
     
    15391539    }
    15401540
    1541     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
     1541    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
    15421542}
    15431543
     
    15531553static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    15541554{
    1555     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_APIC_STATE))
     1555    if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE))
    15561556        return VINF_SUCCESS;
    15571557
     
    15971597    }
    15981598
    1599     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_SVM_GUEST_APIC_STATE;
     1599    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    16001600    return rc;
    16011601}
     
    16531653
    16541654    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
    1655     Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
     1655    Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
    16561656
    16571657    pVCpu->hm.s.fLeaveDone = false;
     
    17141714            int rc = HMR0EnterCpu(pVCpu);
    17151715            AssertRC(rc); NOREF(rc);
    1716             Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
     1716            Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
    17171717
    17181718            pVCpu->hm.s.fLeaveDone = false;
     
    17431743    NOREF(pVCpu);
    17441744    /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
    1745     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
     1745    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
    17461746    return VINF_SUCCESS;
    17471747}
     
    17521752 * fields on every successful VM-entry.
    17531753 *
    1754  * Sets up the appropriate VMRUN function to execute guest code based
    1755  * on the guest CPU mode.
     1754 * Also sets up the appropriate VMRUN function to execute guest code based on
     1755 * the guest CPU mode.
    17561756 *
    17571757 * @returns VBox status code.
     
    17881788
    17891789    /* Clear any unused and reserved bits. */
    1790     pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_RIP                /* Unused (loaded unconditionally). */
    1791                                       | HM_CHANGED_GUEST_RSP
    1792                                       | HM_CHANGED_GUEST_RFLAGS
    1793                                       | HM_CHANGED_GUEST_SYSENTER_CS_MSR
    1794                                       | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
    1795                                       | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
    1796                                       | HM_CHANGED_SVM_RESERVED1            /* Reserved. */
    1797                                       | HM_CHANGED_SVM_RESERVED2
    1798                                       | HM_CHANGED_SVM_RESERVED3);
    1799 
    1800     /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
    1801     AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
    1802               || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
    1803                ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
    1804                 pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
     1790    VMCPU_HMCF_CLEAR(pVCpu,   HM_CHANGED_GUEST_RIP                /* Unused (loaded unconditionally). */
     1791                            | HM_CHANGED_GUEST_RSP
     1792                            | HM_CHANGED_GUEST_RFLAGS
     1793                            | HM_CHANGED_GUEST_SYSENTER_CS_MSR
     1794                            | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
     1795                            | HM_CHANGED_GUEST_SYSENTER_ESP_MSR
     1796                            | HM_CHANGED_SVM_RESERVED1            /* Reserved. */
     1797                            | HM_CHANGED_SVM_RESERVED2
     1798                            | HM_CHANGED_SVM_RESERVED3);
     1799
     1800    /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */
     1801    AssertMsg(   !VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
     1802              ||  VMCPU_HMCF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
     1803               ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
    18051804
    18061805    Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss, pCtx->rsp));
    1807 
    18081806    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
    18091807    return rc;
     
    18261824    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    18271825
    1828     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     1826    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
    18291827        hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
    18301828
    1831     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
     1829    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
    18321830        hmR0SvmLoadSharedDebugState(pVCpu, pVmcb, pCtx);
    18331831
    1834     AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#x\n",
    1835                                                                                      pVCpu->hm.s.fContextUseFlags));
     1832    AssertMsg(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
     1833              ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
    18361834}
    18371835
     
    19951993        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
    19961994        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    1997         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     1995        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    19981996    }
    19991997
     
    20102008#endif
    20112009    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
    2012         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     2010        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
    20132011
    20142012    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     
    21672165    /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
    21682166    if (rcExit != VINF_EM_RAW_INTERRUPT)
    2169         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
     2167        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    21702168
    21712169    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
     
    28362834
    28372835#ifdef HMSVM_SYNC_FULL_GUEST_STATE
    2838     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
     2836    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    28392837#endif
    28402838
     
    29202918    /* Load the state shared between host and guest (FPU, debug). */
    29212919    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    2922     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
     2920    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
    29232921        hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
    2924     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;       /* Preemption might set this, nothing to do on AMD-V. */
    2925     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
     2922    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);           /* Preemption might set this, nothing to do on AMD-V. */
     2923    AssertMsg(!VMCPU_HMCF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
    29262924
    29272925    /* If VMCB Clean Bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
     
    30753073                int rc = PDMApicSetTPR(pVCpu, pMixedCtx->msrLSTAR & 0xff);
    30763074                AssertRC(rc);
    3077                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
     3075                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    30783076            }
    30793077            else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
     
    30813079                int rc = PDMApicSetTPR(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
    30823080                AssertRC(rc);
    3083                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
     3081                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    30843082            }
    30853083        }
     
    35533551    {
    35543552        pCtx->cr2 = uFaultAddress;
    3555         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR2;
     3553        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR2);
    35563554    }
    35573555
     
    36643662                int rc2 = PDMApicSetTPR(pVCpu, u8Tpr);
    36653663                AssertRC(rc2);
    3666                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
     3664                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    36673665
    36683666                pCtx->rip += pPatch->cbOp;
     
    41444142        {
    41454143            case 0:     /* CR0. */
    4146                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     4144                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    41474145                break;
    41484146
    41494147            case 3:     /* CR3. */
    41504148                Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
    4151                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
     4149                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
    41524150                break;
    41534151
    41544152            case 4:     /* CR4. */
    4155                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
     4153                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
    41564154                break;
    41574155
    41584156            case 8:     /* CR8 (TPR). */
    4159                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
     4157                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    41604158                break;
    41614159
     
    42084206                int rc2 = PDMApicSetTPR(pVCpu, pCtx->eax & 0xff);
    42094207                AssertRC(rc2);
    4210                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
     4208                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    42114209            }
    42124210            hmR0SvmUpdateRip(pVCpu, pCtx, 2);
     
    42434241             * virtualization is implemented we'll have to make sure APIC state is saved from the VMCB before
    42444242               EMInterpretWrmsr() changes it. */
    4245             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
     4243            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    42464244        }
    42474245        else if (pCtx->ecx == MSR_K6_EFER)
    4248             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_EFER_MSR;
     4246            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR);
    42494247        else if (pCtx->ecx == MSR_IA32_TSC)
    42504248            pSvmTransient->fUpdateTscOffsetting = true;
     
    43364334        /* Not necessary for read accesses but whatever doesn't hurt for now, will be fixed with decode assist. */
    43374335        /** @todo CPUM should set this flag! */
    4338         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     4336        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
    43394337        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
    43404338    }
     
    45904588        {
    45914589            /* Successfully handled MMIO operation. */
    4592             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
     4590            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    45934591            rc = VINF_SUCCESS;
    45944592        }
     
    47744772        TRPMResetTrap(pVCpu);
    47754773        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    4776         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
     4774        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
    47774775        return rc;
    47784776    }
     
    48254823    {
    48264824        rc = VINF_EM_RAW_GUEST_TRAP;
    4827         Assert(CPUMIsGuestFPUStateActive(pVCpu) || (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
     4825        Assert(CPUMIsGuestFPUStateActive(pVCpu) || VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
    48284826    }
    48294827    else
     
    48424840    if (rc == VINF_SUCCESS)
    48434841    {
    4844         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     4842        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    48454843        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    48464844    }
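
The HMSVMR0.cpp hunks above all follow the same dirty-flag pattern: test whether a piece of guest state is still marked as changed, write it into the VMCB, mark the corresponding VMCB clean-bit dirty, and clear the flag. A condensed sketch of the post-change shape, modeled on the CR2 hunk above (the function name here is illustrative only, not the one used in the file):

    /* Sketch of the dirty-flag load pattern after this change. */
    static void hmSvmLoadGuestCR2Sketch(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    {
        if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR2))
        {
            pVmcb->guest.u64CR2 = pCtx->cr2;                          /* Push the guest value into the VMCB. */
            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;    /* Tell the CPU this VMCB area changed. */
            VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);            /* State is in sync again, clear the dirty bit. */
        }
    }

The HMVMXR0.cpp part of the changeset below applies the same treatment to the VMCS loaders and, in addition, switches a few force-flag checks from VMCPU_FF_IS_SET to VMCPU_FF_IS_PENDING.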
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r48621 r48624  
    12291229    LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
    12301230
    1231     bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
     1231    bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
    12321232    if (!fFlushPending)
    12331233    {
     
    26302630{
    26312631    int rc = VINF_SUCCESS;
    2632     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
     2632    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
    26332633    {
    26342634        PVM pVM      = pVCpu->CTX_SUFF(pVM);
     
    26682668        /* Update VCPU with the currently set VM-exit controls. */
    26692669        pVCpu->hm.s.vmx.u32EntryCtls = val;
    2670         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
     2670        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
    26712671    }
    26722672    return rc;
     
    26892689{
    26902690    int rc = VINF_SUCCESS;
    2691     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
     2691    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
    26922692    {
    26932693        PVM pVM      = pVCpu->CTX_SUFF(pVM);
     
    27392739        /* Update VCPU with the currently set VM-exit controls. */
    27402740        pVCpu->hm.s.vmx.u32ExitCtls = val;
    2741         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
     2741        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
    27422742    }
    27432743    return rc;
     
    27582758{
    27592759    int rc = VINF_SUCCESS;
    2760     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
     2760    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
    27612761    {
    27622762        /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
     
    27952795        }
    27962796
    2797         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
     2797        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    27982798    }
    27992799    return rc;
     
    28202820     */
    28212821    uint32_t uIntrState = 0;
    2822     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     2822    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    28232823    {
    28242824        /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
     
    28742874{
    28752875    int rc = VINF_SUCCESS;
    2876     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
     2876    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
    28772877    {
    28782878        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
    28792879        AssertRCReturn(rc, rc);
    2880         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
    2881         Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#x\n", pMixedCtx->rip, pVCpu->hm.s.fContextUseFlags));
     2880
     2881        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
     2882        Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pMixedCtx->rip, VMCPU_HMCF_VALUE(pVCpu)));
    28822883    }
    28832884    return rc;
     
    28992900{
    29002901    int rc = VINF_SUCCESS;
    2901     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
     2902    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
    29022903    {
    29032904        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
    29042905        AssertRCReturn(rc, rc);
    2905         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
     2906
     2907        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
    29062908        Log4(("Load: VMX_VMCS_GUEST_RSP=%#RX64\n", pMixedCtx->rsp));
    29072909    }
     
    29242926{
    29252927    int rc = VINF_SUCCESS;
    2926     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
     2928    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
    29272929    {
    29282930        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
     
    29492951        AssertRCReturn(rc, rc);
    29502952
    2951         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
     2953        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
    29522954        Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", Eflags.u32));
    29532955    }
     
    29993001     */
    30003002    int rc = VINF_SUCCESS;
    3001     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     3003    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
    30023004    {
    30033005        Assert(!(pMixedCtx->cr0 >> 32));
     
    31603162        Log4(("Load: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", u32CR0Mask));
    31613163
    3162         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     3164        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
    31633165    }
    31643166    return rc;
     
    31923194     * Guest CR3.
    31933195     */
    3194     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
     3196    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
    31953197    {
    31963198        RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
     
    32653267        AssertRCReturn(rc, rc);
    32663268
    3267         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
     3269        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
    32683270    }
    32693271
     
    32713273     * Guest CR4.
    32723274     */
    3273     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
     3275    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
    32743276    {
    32753277        Assert(!(pMixedCtx->cr4 >> 32));
     
    33623364        AssertRCReturn(rc, rc);
    33633365
    3364         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
     3366        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
    33653367    }
    33663368    return rc;
     
    33843386static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    33853387{
    3386     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
     3388    if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
    33873389        return VINF_SUCCESS;
    33883390
     
    34153417            pMixedCtx->eflags.u32 |= X86_EFL_TF;
    34163418            pVCpu->hm.s.fClearTrapFlag = true;
    3417             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RFLAGS;
     3419            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
    34183420            fInterceptDB = true;
    34193421        }
     
    35283530    AssertRCReturn(rc, rc);
    35293531
    3530     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
     3532    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
    35313533    return VINF_SUCCESS;
    35323534}
     
    37793781     * Guest Segment registers: CS, SS, DS, ES, FS, GS.
    37803782     */
    3781     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
     3783    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
    37823784    {
    37833785        /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
     
    38273829        AssertRCReturn(rc, rc);
    38283830
     3831#ifdef VBOX_STRICT
     3832        /* Validate. */
     3833        hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
     3834#endif
     3835
     3836        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
    38293837        Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
    38303838             pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
    3831 #ifdef VBOX_STRICT
    3832         hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
    3833 #endif
    3834         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
    38353839    }
    38363840
     
    38383842     * Guest TR.
    38393843     */
    3840     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
     3844    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
    38413845    {
    38423846        /*
     
    38973901        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);       AssertRCReturn(rc, rc);
    38983902
     3903        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
    38993904        Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
    3900         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
    39013905    }
    39023906
     
    39043908     * Guest GDTR.
    39053909     */
    3906     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
     3910    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
    39073911    {
    39083912        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);        AssertRCReturn(rc, rc);
    39093913        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  pMixedCtx->gdtr.pGdt);         AssertRCReturn(rc, rc);
    39103914
     3915        /* Validate. */
    39113916        Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000));          /* Bits 31:16 MBZ. */
     3917
     3918        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
    39123919        Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
    3913         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
    39143920    }
    39153921
     
    39173923     * Guest LDTR.
    39183924     */
    3919     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
     3925    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
    39203926    {
    39213927        /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
     
    39463952        }
    39473953
     3954        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
    39483955        Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n",  pMixedCtx->ldtr.u64Base));
    3949         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
    39503956    }
    39513957
     
    39533959     * Guest IDTR.
    39543960     */
    3955     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
     3961    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
    39563962    {
    39573963        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);         AssertRCReturn(rc, rc);
    39583964        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  pMixedCtx->idtr.pIdt);          AssertRCReturn(rc, rc);
    39593965
     3966        /* Validate. */
    39603967        Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000));          /* Bits 31:16 MBZ. */
     3968
     3969        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
    39613970        Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
    3962         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
    39633971    }
    39643972
     
    39903998     */
    39913999    int rc = VINF_SUCCESS;
    3992     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
     4000    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
    39934001    {
    39944002#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     
    40494057#endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    40504058
    4051         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
     4059        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    40524060    }
    40534061
     
    40574065     * VM-exits on WRMSRs for these MSRs.
    40584066     */
    4059     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
     4067    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
    40604068    {
    40614069        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);      AssertRCReturn(rc, rc);
    4062         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
    4063     }
    4064     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
     4070        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
     4071    }
     4072
     4073    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
    40654074    {
    40664075        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);    AssertRCReturn(rc, rc);
    4067         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
    4068     }
    4069     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
     4076        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
     4077    }
     4078
     4079    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
    40704080    {
    40714081        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);    AssertRCReturn(rc, rc);
    4072         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
     4082        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
    40734083    }
    40744084
     
    40924102    /** @todo See if we can make use of other states, e.g.
    40934103     *        VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT.  */
    4094     int rc = VINF_SUCCESS;
    4095     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
    4096     {
    4097         rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
     4104    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
     4105    {
     4106        int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
    40984107        AssertRCReturn(rc, rc);
    4099         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
    4100     }
    4101     return rc;
     4108
     4109        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
     4110    }
     4111    return VINF_SUCCESS;
    41024112}
    41034113
     
    41274137        {
    41284138            pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
    4129             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS;
     4139            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
    41304140        }
    41314141#else
     
    41414151        {
    41424152            pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
    4143             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS;
     4153            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS);
    41444154        }
    41454155#else
     
    53365346
    53375347        EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
    5338         Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     5348        Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    53395349    }
    53405350}
     
    59645974        {
    59655975            int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
    5966                                  VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
     5976                                 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
    59675977            if (rc2 != VINF_SUCCESS)
    59685978            {
     
    61796189    if (CPUMIsGuestFPUStateActive(pVCpu))
    61806190    {
     6191        /* We shouldn't reload CR0 without saving it first. */
    61816192        if (!fSaveGuestState)
    61826193        {
     
    61866197        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
    61876198        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    6188         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     6199        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    61896200    }
    61906201
     
    61956206#endif
    61966207    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
    6197         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     6208        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
    61986209    Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
    61996210    Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
     
    63806391    /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
    63816392    if (rcExit != VINF_EM_RAW_INTERRUPT)
    6382         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
     6393        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    63836394
    63846395    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
     
    65036514    Assert(!TRPMHasTrap(pVCpu));
    65046515
    6505                                                            /** @todo SMI. SMIs take priority over NMIs. */
    6506     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))    /* NMI. NMIs take priority over regular interrupts . */
     6516                                                               /** @todo SMI. SMIs take priority over NMIs. */
     6517    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))    /* NMI. NMIs take priority over regular interrupts . */
    65076518    {
    65086519        /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
     
    69486959                /* If any other guest-state bits are changed here, make sure to update
    69496960                   hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
    6950                 pVCpu->hm.s.fContextUseFlags |=   HM_CHANGED_GUEST_SEGMENT_REGS
    6951                                                 | HM_CHANGED_GUEST_RIP
    6952                                                 | HM_CHANGED_GUEST_RFLAGS
    6953                                                 | HM_CHANGED_GUEST_RSP;
     6961                VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_SEGMENT_REGS
     6962                                      | HM_CHANGED_GUEST_RIP
     6963                                      | HM_CHANGED_GUEST_RFLAGS
     6964                                      | HM_CHANGED_GUEST_RSP);
    69546965
    69556966                /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
     
    70667077
    70677078    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
    7068     Assert((pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE))
    7069                                         == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
     7079    Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
    70707080
    70717081#ifdef VBOX_STRICT
     
    71557165            int rc = HMR0EnterCpu(pVCpu);
    71567166            AssertRC(rc);
    7157             Assert((pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE))
    7158                                                 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
     7167            Assert(VMCPU_HMCF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
    71597168
    71607169            /* Load the active VMCS as the current one. */
     
    71957204    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    71967205
    7197     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
     7206    if (!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
    71987207        return VINF_SUCCESS;
    71997208
     
    72077216    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    72087217
    7209     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
     7218    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
    72107219    return rc;
    72117220}
     
    73237332
    73247333    /* Clear any unused and reserved bits. */
    7325     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
     7334    VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
    73267335
    73277336#ifdef LOG_ENABLED
     
    73507359    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    73517360
    7352     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     7361    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
    73537362    {
    73547363        int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
     
    73567365    }
    73577366
    7358     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
     7367    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
    73597368    {
    73607369        int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
     
    73627371
    73637372        /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
    7364         if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
     7373        if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
    73657374        {
    73667375            rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
     
    73697378    }
    73707379
    7371     AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#x\n",
    7372                                                                                      pVCpu->hm.s.fContextUseFlags));
     7380    AssertMsg(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
     7381              ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
    73737382}
    73747383
     
    73877396    HMVMX_ASSERT_PREEMPT_SAFE();
    73887397
    7389     Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
     7398    Log5(("LoadFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
    73907399#ifdef HMVMX_SYNC_FULL_GUEST_STATE
    7391     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
    7392 #endif
    7393 
    7394     if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
     7400    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     7401#endif
     7402
     7403    if (VMCPU_HMCF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
    73957404    {
    73967405        int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
     
    73987407        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
    73997408    }
    7400     else if (pVCpu->hm.s.fContextUseFlags)
     7409    else if (VMCPU_HMCF_VALUE(pVCpu))
    74017410    {
    74027411        int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
     
    74057414    }
    74067415
    7407     /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
    7408     AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
    7409               || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
    7410               ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
     7416    /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
     7417    AssertMsg(   !VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
     7418              ||  VMCPU_HMCF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
     7419              ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
    74117420
    74127421#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
     
    75887597    if (!CPUMIsGuestFPUStateActive(pVCpu))
    75897598        CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
    7590     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     7599    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    75917600#endif
    75927601
     
    75957604     * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
    75967605     */
    7597     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
     7606    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
    75987607    {
    75997608        /* This ASSUMES that pfnStartVM has been set up already. */
     
    76027611        STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptSaveHostState);
    76037612    }
    7604     Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
     7613    Assert(!VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
    76057614
    76067615    /*
    76077616     * Load the state shared between host and guest (FPU, debug).
    76087617     */
    7609     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
     7618    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
    76107619        hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
    7611     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
     7620    AssertMsg(!VMCPU_HMCF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", VMCPU_HMCF_VALUE(pVCpu)));
    76127621
    76137622    /* Store status of the shared guest-host state at the time of VM-entry. */
     
    77187727        hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    77197728        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
    7720         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     7729        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    77217730    }
    77227731#endif
     
    77627771            rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
    77637772            AssertRC(rc);
    7764             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
     7773            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    77657774        }
    77667775    }
     
    79247933            break;
    79257934        }
    7926         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     7935        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
    79277936    }
    79287937
     
    81028111
    81038112    pMixedCtx->rip += pVmxTransient->cbInstr;
    8104     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     8113    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
    81058114    return rc;
    81068115}
     
    92229231
    92239232    pMixedCtx->rip++;
    9224     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     9233    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
    92259234    if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx))    /* Requires eflags. */
    92269235        rc = VINF_SUCCESS;
     
    94679476             * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before
    94689477               EMInterpretWrmsr() changes it. */
    9469             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
     9478            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    94709479        }
    94719480        else if (pMixedCtx->ecx == MSR_K6_EFER)         /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
     
    94739482            rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    94749483            AssertRCReturn(rc, rc);
    9475             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
     9484            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    94769485        }
    94779486        else if (pMixedCtx->ecx == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
     
    94839492            switch (pMixedCtx->ecx)
    94849493            {
    9485                 case MSR_IA32_SYSENTER_CS:  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR;  break;
    9486                 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
    9487                 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
     9494                case MSR_IA32_SYSENTER_CS:  VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
     9495                case MSR_IA32_SYSENTER_EIP: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
     9496                case MSR_IA32_SYSENTER_ESP: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
    94889497                case MSR_K8_FS_BASE:        /* no break */
    9489                 case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;     break;
    9490                 case MSR_K8_KERNEL_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;    break;
     9498                case MSR_K8_GS_BASE:        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);     break;
     9499                case MSR_K8_KERNEL_GS_BASE: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);    break;
    94919500            }
    94929501        }
     
    95559564     * resume guest execution.
    95569565     */
    9557     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
     9566    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    95589567    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
    95599568    return VINF_SUCCESS;
     
    96039612            {
    96049613                case 0: /* CR0 */
     9614                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    96059615                    Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
    9606                     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    96079616                    break;
    96089617                case 2: /* C2 **/
     
    96119620                case 3: /* CR3 */
    96129621                    Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
     9622                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
    96139623                    Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
    9614                     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
    96159624                    break;
    96169625                case 4: /* CR4 */
     9626                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
    96179627                    Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
    9618                     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
    96199628                    break;
    96209629                case 8: /* CR8 */
    96219630                    Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
    96229631                    /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
    9623                     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
     9632                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    96249633                    break;
    96259634                default:
     
    96609669            rc = EMInterpretCLTS(pVM, pVCpu);
    96619670            AssertRCReturn(rc, rc);
    9662             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     9671            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    96639672            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
    96649673            Log4(("CRX CLTS write rc=%d\n", rc));
     
    96729681            rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
    96739682            if (RT_LIKELY(rc == VINF_SUCCESS))
    9674                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     9683                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    96759684            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
    96769685            Log4(("CRX LMSW write rc=%d\n", rc));
     
    97819790        }
    97829791        /** @todo IEM needs to be setting these flags somehow. */
    9783         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     9792        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
    97849793        fUpdateRipAlready = true;
    97859794#else
     
    98439852        {
    98449853            pMixedCtx->rip += cbInstr;
    9845             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     9854            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
    98469855        }
    98479856
     
    98779886                    ASMSetDR6(pMixedCtx->dr[6]);
    98789887                if (pMixedCtx->dr[7] != uDr7)
    9879                     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     9888                    VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
    98809889
    98819890                hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
     
    1004110050                || rc == VERR_PAGE_NOT_PRESENT)
    1004210051            {
    10043                 pVCpu->hm.s.fContextUseFlags |=   HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
    10044                                                 | HM_CHANGED_VMX_GUEST_APIC_STATE;
     10052                VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
     10053                                      | HM_CHANGED_GUEST_RSP
     10054                                      | HM_CHANGED_GUEST_RFLAGS
     10055                                      | HM_CHANGED_VMX_GUEST_APIC_STATE);
    1004510056                rc = VINF_SUCCESS;
    1004610057            }
     
    1013310144                                 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
    1013410145        if (RT_SUCCESS(rc))
    10135             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     10146            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
    1013610147        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    1013710148    }
     
    1019910210    {
    1020010211        /* Successfully handled MMIO operation. */
    10201         pVCpu->hm.s.fContextUseFlags |=   HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
    10202                                         | HM_CHANGED_VMX_GUEST_APIC_STATE;
     10212        VMCPU_HMCF_SET(pVCpu,  HM_CHANGED_GUEST_RIP
     10213                             | HM_CHANGED_GUEST_RSP
     10214                             | HM_CHANGED_GUEST_RFLAGS
     10215                             | HM_CHANGED_VMX_GUEST_APIC_STATE);
    1020310216        rc = VINF_SUCCESS;
    1020410217    }
     
    1026410277        /* Successfully synced our nested page tables. */
    1026510278        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
    10266         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
     10279        VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
     10280                              | HM_CHANGED_GUEST_RSP
     10281                              | HM_CHANGED_GUEST_RFLAGS);
    1026710282        return VINF_SUCCESS;
    1026810283    }
     
    1043410449    {
    1043510450        rc = VINF_EM_RAW_GUEST_TRAP;
    10436         Assert(CPUMIsGuestFPUStateActive(pVCpu) || (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
     10451        Assert(CPUMIsGuestFPUStateActive(pVCpu) || VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
    1043710452    }
    1043810453    else
     
    1045110466    if (rc == VINF_SUCCESS)
    1045210467    {
    10453         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     10468        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    1045410469        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    1045510470    }
     
    1052310538                pMixedCtx->eflags.Bits.u1IF = 0;
    1052410539                pMixedCtx->rip += pDis->cbInstr;
    10525                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
     10540                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1052610541                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
    1052710542                break;
     
    1053310548                pMixedCtx->rip += pDis->cbInstr;
    1053410549                EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
    10535                 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    10536                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
     10550                Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     10551                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1053710552                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
    1053810553                break;
     
    1054310558                rc = VINF_EM_HALT;
    1054410559                pMixedCtx->rip += pDis->cbInstr;
    10545                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     10560                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
    1054610561                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    1054710562                break;
     
    1058310598                pMixedCtx->eflags.u32 =   (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
    1058410599                                        | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
    10585                 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
    10586                 pMixedCtx->eflags.Bits.u1RF   = 0;
    10587                 pMixedCtx->esp               += cbParm;
    10588                 pMixedCtx->esp               &= uMask;
    10589                 pMixedCtx->rip               += pDis->cbInstr;
    10590                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
     10600                pMixedCtx->eflags.Bits.u1RF  = 0;    /* The RF bit is always cleared by POPF; see Intel Instruction reference. */
     10601                pMixedCtx->esp              += cbParm;
     10602                pMixedCtx->esp              &= uMask;
     10603                pMixedCtx->rip              += pDis->cbInstr;
     10604
     10605                VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
     10606                                      | HM_CHANGED_GUEST_RSP
     10607                                      | HM_CHANGED_GUEST_RFLAGS);
    1059110608                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
    1059210609                break;
     
    1063210649                pMixedCtx->esp               &= uMask;
    1063310650                pMixedCtx->rip               += pDis->cbInstr;
    10634                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
     10651                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
    1063510652                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
    1063610653                break;
     
    1066610683                                                | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
    1066710684                pMixedCtx->sp                += sizeof(aIretFrame);
    10668                 pVCpu->hm.s.fContextUseFlags |=   HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
    10669                                                 | HM_CHANGED_GUEST_RFLAGS;
     10685                VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
     10686                                      | HM_CHANGED_GUEST_SEGMENT_REGS
     10687                                      | HM_CHANGED_GUEST_RSP
     10688                                      | HM_CHANGED_GUEST_RFLAGS);
    1067010689                Log4(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
    1067110690                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
     
    1069610715                                                                    EMCODETYPE_SUPERVISOR);
    1069710716                rc = VBOXSTRICTRC_VAL(rc2);
    10698                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
     10717                VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    1069910718                Log4(("#GP rc=%Rrc\n", rc));
    1070010719                break;
     
    1078610805        /** @todo this isn't quite right, what if guest does lgdt with some MMIO
    1078710806         *        memory? We don't update the whole state here... */
    10788         pVCpu->hm.s.fContextUseFlags |=   HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
    10789                                         | HM_CHANGED_VMX_GUEST_APIC_STATE;
     10807        VMCPU_HMCF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
     10808                              | HM_CHANGED_GUEST_RSP
     10809                              | HM_CHANGED_GUEST_RFLAGS
     10810                              | HM_CHANGED_VMX_GUEST_APIC_STATE);
    1079010811        TRPMResetTrap(pVCpu);
    1079110812        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);