VirtualBox

Ignore:
Timestamp:
Dec 3, 2013 2:09:51 PM (11 years ago)
Author:
vboxsync
Message:

VMM/HMVMXR0: Safer assumptions while updating guest-state on fully preemptible kernels.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

Diff of trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp, r49752 → r49755
(reconstructed from the changeset table: fused old/new line-number columns
separated into hunk headers; "-" = removed in r49752, "+" = added in r49755)

@@ old 2541 / new 2541 @@
 {
     /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
-    pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
+    HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
     return VINF_SUCCESS;
 }

@@ old 3262 / new 3262 @@
     {
         /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
-        AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
-                   == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
+        AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
+                  ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
        if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
        {

@@ old 5578 / new 5578 @@
     NOREF(pMixedCtx);

-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
+    /* Since this can be called from our preemption hook it's safer to make the guest-CR0 update non-preemptible. */
+    VMMRZCallRing3Disable(pVCpu);
+    HM_DISABLE_PREEMPT_IF_NEEDED();
+
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
     {
         uint32_t uVal    = 0;

@@ old 5590 / new 5594 @@
         uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
         CPUMSetGuestCR0(pVCpu, uVal);
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
-    }
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
+    }
+
+    HM_RESTORE_PREEMPT_IF_NEEDED();
+    VMMRZCallRing3Enable(pVCpu);
+
     return VINF_SUCCESS;
 }

@@ old 5612 / new 5620 @@

     int rc = VINF_SUCCESS;
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
     {
         uint32_t uVal    = 0;

@@ old 5623 / new 5631 @@
         uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
         CPUMSetGuestCR4(pVCpu, uVal);
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
     }
     return rc;

@@ old 5643 / new 5651 @@
 {
     int rc = VINF_SUCCESS;
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
     {
         uint64_t u64Val = 0;

@@ old 5650 / new 5658 @@

         pMixedCtx->rip = u64Val;
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
     }
     return rc;

@@ old 5670 / new 5678 @@
 {
     int rc = VINF_SUCCESS;
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
     {
         uint64_t u64Val = 0;

@@ old 5677 / new 5685 @@

         pMixedCtx->rsp = u64Val;
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
     }
     return rc;

@@ old 5696 / new 5704 @@
 static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
     {
         uint32_t uVal = 0;

@@ old 5712 / new 5720 @@
         }

-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
     }
     return VINF_SUCCESS;

@@ old 5780 / new 5788 @@
     NOREF(pMixedCtx);
     /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
-    pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
+    HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
     return VINF_SUCCESS;
 }

@@ old 5800 / new 5808 @@
 {
     int rc = VINF_SUCCESS;
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
     {
         uint32_t u32Val = 0;
         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);     AssertRCReturn(rc, rc);
         pMixedCtx->SysEnter.cs = u32Val;
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
     }

     uint64_t u64Val = 0;
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
     {
         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val);    AssertRCReturn(rc, rc);
         pMixedCtx->SysEnter.eip = u64Val;
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
-    }
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
+    }
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
     {
         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val);    AssertRCReturn(rc, rc);
         pMixedCtx->SysEnter.esp = u64Val;
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
     }
     return rc;

@@ old 5842 / new 5850 @@
     if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
     {
-        /* We should not get preempted to a different CPU at this point while reading the MSRs. */
+        /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
         VMMRZCallRing3Disable(pVCpu);
         HM_DISABLE_PREEMPT_IF_NEEDED();

         /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
-        if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LAZY_MSRS))
+        if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
         {
             hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
-            pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LAZY_MSRS;
+            HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
         }

@@ old 5857 / new 5865 @@
     }
     else
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LAZY_MSRS;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
 #else
     NOREF(pMixedCtx);
-    pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LAZY_MSRS;
+    HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
 #endif

@@ old 5881 / new 5889 @@
 static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-    if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
+    if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
         return VINF_SUCCESS;

@@ old 5904 / new 5912 @@
     }

-    pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
+    HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
     return VINF_SUCCESS;
 }

@@ old 5933 / new 5941 @@
     /* Guest CR2 - updated always during the world-switch or in #PF. */
     /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
-    {
-        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
-        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4);
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
+    {
+        Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
+        Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));

         PVM pVM = pVCpu->CTX_SUFF(pVM);

@@ old 5981 / new 5989 @@
         }

-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
     }

@@ old 6117 / new 6125 @@
 {
     /* Guest segment registers. */
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
     {
         int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);   AssertRCReturn(rc, rc);

@@ old 6137 / new 6145 @@
             pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
         }
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
     }

@@ old 6161 / new 6169 @@

     /* Guest LDTR. */
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
     {
         rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
         AssertRCReturn(rc, rc);
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
     }

@@ old 6171 / new 6179 @@
     uint64_t u64Val = 0;
     uint32_t u32Val = 0;
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
     {
         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);        AssertRCReturn(rc, rc);

@@ old 6177 / new 6185 @@
         pMixedCtx->gdtr.pGdt  = u64Val;
         pMixedCtx->gdtr.cbGdt = u32Val;
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
     }

     /* Guest IDTR. */
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
     {
         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);        AssertRCReturn(rc, rc);

@@ old 6187 / new 6195 @@
         pMixedCtx->idtr.pIdt  = u64Val;
         pMixedCtx->idtr.cbIdt = u32Val;
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
     }

     /* Guest TR. */
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
     {
         rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);

@@ old 6202 / new 6210 @@
             AssertRCReturn(rc, rc);
         }
-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
     }
     return rc;

@@ old 6224 / new 6232 @@
 static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG))
     {
         if (!pVCpu->hm.s.fUsingHyperDR7)

@@ old 6234 / new 6242 @@
         }

-        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
+        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG);
     }
     return VINF_SUCCESS;

@@ old 6256 / new 6264 @@

     /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
-    pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
+    HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
     return VINF_SUCCESS;
 }

@@ old 6276 / new 6284 @@
     Assert(pMixedCtx);

-    if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
+    if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
         return VINF_SUCCESS;

@@ old 6317 / new 6325 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);

-    AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
-              ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
+    AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
+              ("Missed guest state bits while saving state; residue %RX32\n", HMVMXCPU_GST_VALUE(pVCpu)));

     if (VMMRZCallRing3IsEnabled(pVCpu))

@@ old 6589 / new 6597 @@
     /* Save the guest state if necessary. */
     if (   fSaveGuestState
-        && pVCpu->hm.s.vmx.fUpdatedGuestState != HMVMX_UPDATED_GUEST_ALL)
+        && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
     {
         int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
         AssertRCReturn(rc, rc);
-        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
+        Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
     }

@@ old 6642 / new 6650 @@
             AssertRCReturn(rc, rc);
         }
-        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LAZY_MSRS);
+        Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
         hmR0VmxLazyRestoreHostMsrs(pVCpu);
         Assert(!pVCpu->hm.s.vmx.fRestoreHostMsrs);

@@ old 6796 / new 6804 @@
                               | CPUM_CHANGED_TR
                               | CPUM_CHANGED_HIDDEN_SEL_REGS);
-    Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
+    Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
     if (   pVM->hm.s.fNestedPaging
         && CPUMIsGuestPagingEnabledEx(pMixedCtx))

@@ old 6938 / new 6946 @@
     bool fBlockSti      = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);

-    Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
+    Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
     Assert(   !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)      /* We don't support block-by-NMI and SMI yet.*/
            && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));

@@ old 7020 / new 7028 @@
     bool fBlockSti      = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);

-    Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
+    Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
     Assert(   !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)      /* We don't support block-by-NMI and SMI yet.*/
            && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));

@@ old 7333 / new 7341 @@
             rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
             AssertRCReturn(rc, rc);
-            Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP);
+            Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));

             /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */

@@ old 8046 / new 8054 @@
     {
         CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
-        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
+        Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     }

@@ old 8129 / new 8137 @@
             int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
             AssertRC(rc2);
-            Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
+            Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
             uint64_t u64GuestTscAuxMsr;
             rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAuxMsr);

@@ old 8169 / new 8177 @@
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
     ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
-    pVCpu->hm.s.vmx.fUpdatedGuestState = 0;                     /* Exits/longjmps to ring-3 requires saving the guest state. */
+    HMVMXCPU_GST_RESET_TO(pVCpu, 0);                            /* Exits/longjmps to ring-3 requires saving the guest state. */
     pVmxTransient->fVmcsFieldsRead     = 0;                     /* Transient fields need to be read from the VMCS. */
     pVmxTransient->fVectoringPF        = false;                 /* Vectoring page-fault needs to be determined later. */

@@ old 8427 / new 8435 @@
 {
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
-    Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
+    Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
     HMVMX_ASSERT_PREEMPT_SAFE();
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette