VirtualBox

Changeset 92219 in vbox


Timestamp: Nov 4, 2021 7:17:05 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 148052
Message:

VMM/VMMAll/VMXAllTemplate.cpp.h: Delete more code that is only usable in R0 and introduce the VCPU_2_VMXSTATE() macro, which resolves from the vCPU pointer to the VMX state of the calling component (either HM or NEM/Apple), bugref:10136
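
This template header is compiled by more than one component (the ring-0 HM code and the ring-3 NEM/Apple backend), which is why ring-0-only paths in the hunks below gain #ifdef IN_RING0 guards. As a rough sketch of the new accessor only (an assumption for illustration; the actual definition is not part of this changeset's hunks), VCPU_2_VMXSTATE() could expand along these lines:

    /* Illustrative sketch, not the committed definition: */
    #ifdef IN_RING0                       /* ring-0 HM owns the VMX state in pVCpu->hm.s */
    # define VCPU_2_VMXSTATE(a_pVCpu)   ((a_pVCpu)->hm.s)
    #else                                 /* NEM/Apple backend keeps its own copy (assumed member name) */
    # define VCPU_2_VMXSTATE(a_pVCpu)   ((a_pVCpu)->nem.s)
    #endif

Under that assumption, call sites such as pVCpu->hm.s.u32HMError become VCPU_2_VMXSTATE(pVCpu).u32HMError and compile unchanged in either context, as the replaced lines in the diff below show.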

File: 1 edited

  • trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h

    r92189 r92219  
    119119/** Profiling macro. */
    120120#ifdef HM_PROFILE_EXIT_DISPATCH
    121 # define HMVMX_START_EXIT_DISPATCH_PROF()           STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
    122 # define HMVMX_STOP_EXIT_DISPATCH_PROF()            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
     121# define HMVMX_START_EXIT_DISPATCH_PROF()           STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitDispatch, ed)
     122# define HMVMX_STOP_EXIT_DISPATCH_PROF()            STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitDispatch, ed)
    123123#else
    124124# define HMVMX_START_EXIT_DISPATCH_PROF()           do { } while (0)
     
    126126#endif
    127127
     128#ifdef IN_RING0
    128129/** Assert that preemption is disabled or covered by thread-context hooks. */
    129 #define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)          Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu))   \
    130                                                            || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     130# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)          Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu))   \
     131                                                            || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    131132
    132133/** Assert that we haven't migrated CPUs when thread-context hooks are not
    133134 *  used. */
    134 #define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)              AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
    135                                                               || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
    136                                                               ("Illegal migration! Entered on CPU %u Current %u\n", \
    137                                                               (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
     135# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)              AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
     136                                                               || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
     137                                                               ("Illegal migration! Entered on CPU %u Current %u\n", \
     138                                                               (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
     139#else
     140# define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu)          do { } while (0)
     141# define HMVMX_ASSERT_CPU_SAFE(a_pVCpu)              do { } while (0)
     142#endif
    138143
    139144/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
     
    839844
    840845
     846#ifdef IN_RING0
    841847/**
    842848 * Checks if the given MSR is part of the lastbranch-from-IP MSR stack.
     
    894900    return false;
    895901}
     902#endif
    896903
    897904
     
    919926     *        enmGuestMode to be in-sync with the current mode. See @bugref{6398}
    920927     *        and @bugref{6944}. */
     928#ifdef IN_RING0
    921929    PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
     930#else
     931    RT_NOREF(pVCpu);
     932#endif
    922933    return (  X86_CR0_PE
    923934            | X86_CR0_NE
     935#ifdef IN_RING0
    924936            | (pVM->hmr0.s.fNestedPaging ? 0 : X86_CR0_WP)
     937#endif
    925938            | X86_CR0_PG
    926939            | VMX_EXIT_HOST_CR0_IGNORE_MASK);
     
    978991                               | (fFxSaveRstor ? X86_CR4_OSFXSR   : 0));
    979992    return ~fGstMask;
    980 }
    981 
    982 
    983 /**
    984  * Gets the active (in use) VMCS info. object for the specified VCPU.
    985  *
    986  * This is either the guest or nested-guest VMCS info. and need not necessarily
    987  * pertain to the "current" VMCS (in the VMX definition of the term). For instance,
    988  * if the VM-entry failed due to an invalid-guest state, we may have "cleared" the
    989  * current VMCS while returning to ring-3. However, the VMCS info. object for that
    990  * VMCS would still be active and returned here so that we could dump the VMCS
    991  * fields to ring-3 for diagnostics. This function is thus only used to
    992  * distinguish between the nested-guest or guest VMCS.
    993  *
    994  * @returns The active VMCS information.
    995  * @param   pVCpu   The cross context virtual CPU structure.
    996  *
    997  * @thread  EMT.
    998  * @remarks This function may be called with preemption or interrupts disabled!
    999  */
    1000 DECLINLINE(PVMXVMCSINFO) hmGetVmxActiveVmcsInfo(PVMCPUCC pVCpu)
    1001 {
    1002     if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
    1003         return &pVCpu->hmr0.s.vmx.VmcsInfo;
    1004     return &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
    1005993}
    1006994
     
    11651153        {
    11661154            /* Validate we are not removing any essential exception intercepts. */
     1155#ifdef IN_RING0
    11671156            Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
     1157#else
     1158            Assert(!(uXcptMask & RT_BIT(X86_XCPT_PF)));
     1159#endif
    11681160            NOREF(pVCpu);
    11691161            Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
     
    12011193
    12021194
    1203 /**
    1204  * Loads the VMCS specified by the VMCS info. object.
    1205  *
    1206  * @returns VBox status code.
    1207  * @param   pVmcsInfo       The VMCS info. object.
    1208  *
    1209  * @remarks Can be called with interrupts disabled.
    1210  */
    1211 static int vmxHCLoadVmcs(PVMXVMCSINFO pVmcsInfo)
    1212 {
    1213     Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);
    1214     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1215 
    1216     int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);
    1217     if (RT_SUCCESS(rc))
    1218         pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
    1219     return rc;
    1220 }
    1221 
    1222 
    1223 /**
    1224  * Clears the VMCS specified by the VMCS info. object.
    1225  *
    1226  * @returns VBox status code.
    1227  * @param   pVmcsInfo   The VMCS info. object.
    1228  *
    1229  * @remarks Can be called with interrupts disabled.
    1230  */
    1231 static int vmxHCClearVmcs(PVMXVMCSINFO pVmcsInfo)
    1232 {
    1233     Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);
    1234     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1235 
    1236     int rc = VMXClearVmcs(pVmcsInfo->HCPhysVmcs);
    1237     if (RT_SUCCESS(rc))
    1238         pVmcsInfo->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
    1239     return rc;
    1240 }
    1241 
    1242 
    12431195#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    12441196/**
     
    13731325    {
    13741326        pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs           = fSwitchToNstGstVmcs;
    1375         pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
     1327        VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
    13761328
    13771329        /*
     
    13891341        { /* likely */ }
    13901342        else
    1391             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
     1343            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
    13921344
    13931345        ASMSetFlags(fEFlags);
     
    14251377    {
    14261378        AssertPtrReturnVoid(pVCpu);
    1427         VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
    1428     }
     1379        VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32InstrError);
     1380    }
     1381#if IN_RING0
    14291382    pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc;
     1383#endif
    14301384}
    14311385
     
    18261780
    18271781    LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
    1828     pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
     1782    VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
    18291783    return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    18301784}
     
    18471801 */
    18481802static int vmxHCAddAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
    1849                                       bool fSetReadWrite, bool fUpdateHostMsr)
     1803                                    bool fSetReadWrite, bool fUpdateHostMsr)
    18501804{
    18511805    PVMXVMCSINFO  pVmcsInfo     = pVmxTransient->pVmcsInfo;
     
    19041858    pHostMsr[i].u32Msr = idMsr;
    19051859
     1860#ifdef IN_RING0
    19061861    /*
    19071862     * Only if the caller requests to update the host MSR value AND we've newly added the
     
    19251880        }
    19261881    }
     1882#else
     1883    RT_NOREF(fUpdateHostMsr);
     1884#endif
    19271885    return VINF_SUCCESS;
    19281886}
     
    20271985
    20281986/**
    2029  * Updates the value of all host MSRs in the VM-exit MSR-load area.
    2030  *
    2031  * @param   pVCpu       The cross context virtual CPU structure.
    2032  * @param   pVmcsInfo   The VMCS info. object.
    2033  *
    2034  * @remarks No-long-jump zone!!!
    2035  */
    2036 static void vmxHCUpdateAutoLoadHostMsrs(PCVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
    2037 {
    2038     RT_NOREF(pVCpu);
    2039     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2040 
    2041     PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
    2042     uint32_t const cMsrs     = pVmcsInfo->cExitMsrLoad;
    2043     Assert(pHostMsrLoad);
    2044     Assert(sizeof(*pHostMsrLoad) * cMsrs <= X86_PAGE_4K_SIZE);
    2045     LogFlowFunc(("pVCpu=%p cMsrs=%u\n", pVCpu, cMsrs));
    2046     for (uint32_t i = 0; i < cMsrs; i++)
    2047     {
    2048         /*
    2049          * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
    2050          * Strict builds will catch mismatches in vmxHCCheckAutoLoadStoreMsrs(). See @bugref{7368}.
    2051          */
    2052         if (pHostMsrLoad[i].u32Msr == MSR_K6_EFER)
    2053             pHostMsrLoad[i].u64Value = g_uHmVmxHostMsrEfer;
    2054         else
    2055             pHostMsrLoad[i].u64Value = ASMRdMsr(pHostMsrLoad[i].u32Msr);
    2056     }
    2057 }
    2058 
    2059 
    2060 /**
    2061  * Saves a set of host MSRs to allow read/write passthru access to the guest and
    2062  * perform lazy restoration of the host MSRs while leaving VT-x.
    2063  *
    2064  * @param   pVCpu   The cross context virtual CPU structure.
    2065  *
    2066  * @remarks No-long-jump zone!!!
    2067  */
    2068 static void vmxHCLazySaveHostMsrs(PVMCPUCC pVCpu)
    2069 {
    2070     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2071 
    2072     /*
    2073      * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in vmxHCSetupVmcsProcCtls().
    2074      */
    2075     if (!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
    2076     {
    2077         Assert(!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST));  /* Guest MSRs better not be loaded now. */
    2078         if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
    2079         {
    2080             pVCpu->hmr0.s.vmx.u64HostMsrLStar        = ASMRdMsr(MSR_K8_LSTAR);
    2081             pVCpu->hmr0.s.vmx.u64HostMsrStar         = ASMRdMsr(MSR_K6_STAR);
    2082             pVCpu->hmr0.s.vmx.u64HostMsrSfMask       = ASMRdMsr(MSR_K8_SF_MASK);
    2083             pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
    2084         }
    2085         pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
    2086     }
    2087 }
    2088 
    2089 
    2090 /**
    2091  * Checks whether the MSR belongs to the set of guest MSRs that we restore
    2092  * lazily while leaving VT-x.
    2093  *
    2094  * @returns true if it does, false otherwise.
    2095  * @param   pVCpu   The cross context virtual CPU structure.
    2096  * @param   idMsr   The MSR to check.
    2097  */
    2098 static bool vmxHCIsLazyGuestMsr(PCVMCPUCC pVCpu, uint32_t idMsr)
    2099 {
    2100     if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
    2101     {
    2102         switch (idMsr)
    2103         {
    2104             case MSR_K8_LSTAR:
    2105             case MSR_K6_STAR:
    2106             case MSR_K8_SF_MASK:
    2107             case MSR_K8_KERNEL_GS_BASE:
    2108                 return true;
    2109         }
    2110     }
    2111     return false;
    2112 }
    2113 
    2114 
    2115 /**
    2116  * Loads a set of guests MSRs to allow read/passthru to the guest.
    2117  *
    2118  * The name of this function is slightly confusing. This function does NOT
    2119  * postpone loading, but loads the MSR right now. "vmxHCLazy" is simply a
    2120  * common prefix for functions dealing with "lazy restoration" of the shared
    2121  * MSRs.
    2122  *
    2123  * @param   pVCpu   The cross context virtual CPU structure.
    2124  *
    2125  * @remarks No-long-jump zone!!!
    2126  */
    2127 static void vmxHCLazyLoadGuestMsrs(PVMCPUCC pVCpu)
    2128 {
    2129     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2130     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    2131 
    2132     Assert(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
    2133     if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
    2134     {
    2135         /*
    2136          * If the guest MSRs are not loaded -and- if all the guest MSRs are identical
    2137          * to the MSRs on the CPU (which are the saved host MSRs, see assertion above) then
    2138          * we can skip a few MSR writes.
    2139          *
    2140          * Otherwise, it implies either 1. they're not loaded, or 2. they're loaded but the
    2141          * guest MSR values in the guest-CPU context might be different to what's currently
    2142          * loaded in the CPU. In either case, we need to write the new guest MSR values to the
    2143          * CPU, see @bugref{8728}.
    2144          */
    2145         PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    2146         if (   !(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
    2147             && pCtx->msrKERNELGSBASE == pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase
    2148             && pCtx->msrLSTAR        == pVCpu->hmr0.s.vmx.u64HostMsrLStar
    2149             && pCtx->msrSTAR         == pVCpu->hmr0.s.vmx.u64HostMsrStar
    2150             && pCtx->msrSFMASK       == pVCpu->hmr0.s.vmx.u64HostMsrSfMask)
    2151         {
    2152 #ifdef VBOX_STRICT
    2153             Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pCtx->msrKERNELGSBASE);
    2154             Assert(ASMRdMsr(MSR_K8_LSTAR)          == pCtx->msrLSTAR);
    2155             Assert(ASMRdMsr(MSR_K6_STAR)           == pCtx->msrSTAR);
    2156             Assert(ASMRdMsr(MSR_K8_SF_MASK)        == pCtx->msrSFMASK);
    2157 #endif
    2158         }
    2159         else
    2160         {
    2161             ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
    2162             ASMWrMsr(MSR_K8_LSTAR,          pCtx->msrLSTAR);
    2163             ASMWrMsr(MSR_K6_STAR,           pCtx->msrSTAR);
    2164             /* The system call flag mask register isn't as benign and accepting of all
    2165                values as the above, so mask it to avoid #GP'ing on corrupted input. */
    2166             Assert(!(pCtx->msrSFMASK & ~(uint64_t)UINT32_MAX));
    2167             ASMWrMsr(MSR_K8_SF_MASK,        pCtx->msrSFMASK & UINT32_MAX);
    2168         }
    2169     }
    2170     pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
    2171 }
    2172 
    2173 
    2174 /**
    2175  * Performs lazy restoration of the set of host MSRs if they were previously
    2176  * loaded with guest MSR values.
    2177  *
    2178  * @param   pVCpu   The cross context virtual CPU structure.
    2179  *
    2180  * @remarks No-long-jump zone!!!
    2181  * @remarks The guest MSRs should have been saved back into the guest-CPU
    2182  *          context by vmxHCImportGuestState()!!!
    2183  */
    2184 static void vmxHCLazyRestoreHostMsrs(PVMCPUCC pVCpu)
    2185 {
    2186     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2187     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    2188 
    2189     if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
    2190     {
    2191         Assert(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
    2192         if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
    2193         {
    2194             ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hmr0.s.vmx.u64HostMsrLStar);
    2195             ASMWrMsr(MSR_K6_STAR,           pVCpu->hmr0.s.vmx.u64HostMsrStar);
    2196             ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hmr0.s.vmx.u64HostMsrSfMask);
    2197             ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase);
    2198         }
    2199     }
    2200     pVCpu->hmr0.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
    2201 }
    2202 
    2203 
    2204 /**
    22051987 * Verifies that our cached values of the VMCS fields are all consistent with
    22061988 * what's actually present in the VMCS.
     
    22242006    AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
    22252007                        ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
    2226                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
     2008                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_ENTRY,
    22272009                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    22282010
     
    22312013    AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
    22322014                        ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
    2233                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
     2015                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_EXIT,
    22342016                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    22352017
     
    22382020    AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
    22392021                        ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
    2240                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
     2022                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PIN_EXEC,
    22412023                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    22422024
     
    22452027    AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
    22462028                        ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
    2247                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
     2029                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
    22482030                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    22492031
     
    22542036        AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
    22552037                            ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
    2256                             pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
     2038                            VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
    22572039                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    22582040    }
     
    22652047        AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
    22662048                            ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
    2267                             pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
     2049                            VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
    22682050                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    22692051    }
     
    22732055    AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
    22742056                        ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
    2275                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
     2057                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
    22762058                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    22772059
     
    22802062    AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
    22812063                        ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
    2282                         pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
     2064                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
    22832065                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
    22842066
     
    22872069}
    22882070
    2289 #ifdef VBOX_STRICT
    2290 
    2291 /**
    2292  * Verifies that our cached host EFER MSR value has not changed since we cached it.
    2293  *
    2294  * @param   pVmcsInfo   The VMCS info. object.
    2295  */
    2296 static void vmxHCCheckHostEferMsr(PCVMXVMCSINFO pVmcsInfo)
    2297 {
    2298     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2299 
    2300     if (pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
    2301     {
    2302         uint64_t const uHostEferMsr      = ASMRdMsr(MSR_K6_EFER);
    2303         uint64_t const uHostEferMsrCache = g_uHmVmxHostMsrEfer;
    2304         uint64_t       uVmcsEferMsrVmcs;
    2305         int rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_HOST_EFER_FULL, &uVmcsEferMsrVmcs);
    2306         AssertRC(rc);
    2307 
    2308         AssertMsgReturnVoid(uHostEferMsr == uVmcsEferMsrVmcs,
    2309                             ("EFER Host/VMCS mismatch! host=%#RX64 vmcs=%#RX64\n", uHostEferMsr, uVmcsEferMsrVmcs));
    2310         AssertMsgReturnVoid(uHostEferMsr == uHostEferMsrCache,
    2311                             ("EFER Host/Cache mismatch! host=%#RX64 cache=%#RX64\n", uHostEferMsr, uHostEferMsrCache));
    2312     }
    2313 }
    2314 
    2315 
    2316 /**
    2317  * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
    2318  * VMCS are correct.
    2319  *
    2320  * @param   pVCpu           The cross context virtual CPU structure.
    2321  * @param   pVmcsInfo       The VMCS info. object.
    2322  * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
    2323  */
    2324 static void vmxHCCheckAutoLoadStoreMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
    2325 {
    2326     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    2327 
    2328     /* Read the various MSR-area counts from the VMCS. */
    2329     uint32_t cEntryLoadMsrs;
    2330     uint32_t cExitStoreMsrs;
    2331     uint32_t cExitLoadMsrs;
    2332     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cEntryLoadMsrs);  AssertRC(rc);
    2333     rc     = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cExitStoreMsrs);  AssertRC(rc);
    2334     rc     = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  &cExitLoadMsrs);   AssertRC(rc);
    2335 
    2336     /* Verify all the MSR counts are the same. */
    2337     Assert(cEntryLoadMsrs == cExitStoreMsrs);
    2338     Assert(cExitStoreMsrs == cExitLoadMsrs);
    2339     uint32_t const cMsrs = cExitLoadMsrs;
    2340 
    2341     /* Verify the MSR counts do not exceed the maximum count supported by the hardware. */
    2342     Assert(cMsrs < VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
    2343 
    2344     /* Verify the MSR counts are within the allocated page size. */
    2345     Assert(sizeof(VMXAUTOMSR) * cMsrs <= X86_PAGE_4K_SIZE);
    2346 
    2347     /* Verify the relevant contents of the MSR areas match. */
    2348     PCVMXAUTOMSR pGuestMsrLoad  = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
    2349     PCVMXAUTOMSR pGuestMsrStore = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
    2350     PCVMXAUTOMSR pHostMsrLoad   = (PCVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
    2351     bool const   fSeparateExitMsrStorePage = vmxHCIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo);
    2352     for (uint32_t i = 0; i < cMsrs; i++)
    2353     {
    2354         /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
    2355         if (fSeparateExitMsrStorePage)
    2356         {
    2357             AssertMsgReturnVoid(pGuestMsrLoad->u32Msr == pGuestMsrStore->u32Msr,
    2358                                 ("GuestMsrLoad=%#RX32 GuestMsrStore=%#RX32 cMsrs=%u\n",
    2359                                  pGuestMsrLoad->u32Msr, pGuestMsrStore->u32Msr, cMsrs));
    2360         }
    2361 
    2362         AssertMsgReturnVoid(pHostMsrLoad->u32Msr == pGuestMsrLoad->u32Msr,
    2363                             ("HostMsrLoad=%#RX32 GuestMsrLoad=%#RX32 cMsrs=%u\n",
    2364                              pHostMsrLoad->u32Msr, pGuestMsrLoad->u32Msr, cMsrs));
    2365 
    2366         uint64_t const u64HostMsr = ASMRdMsr(pHostMsrLoad->u32Msr);
    2367         AssertMsgReturnVoid(pHostMsrLoad->u64Value == u64HostMsr,
    2368                             ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
    2369                              pHostMsrLoad->u32Msr, pHostMsrLoad->u64Value, u64HostMsr, cMsrs));
    2370 
    2371         /* Verify that cached host EFER MSR matches what's loaded on the CPU. */
    2372         bool const fIsEferMsr = RT_BOOL(pHostMsrLoad->u32Msr == MSR_K6_EFER);
    2373         AssertMsgReturnVoid(!fIsEferMsr || u64HostMsr == g_uHmVmxHostMsrEfer,
    2374                             ("Cached=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n", g_uHmVmxHostMsrEfer, u64HostMsr, cMsrs));
    2375 
    2376         /* Verify that the accesses are as expected in the MSR bitmap for auto-load/store MSRs. */
    2377         if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    2378         {
    2379             uint32_t const fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr);
    2380             if (fIsEferMsr)
    2381             {
    2382                 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_RD), ("Passthru read for EFER MSR!?\n"));
    2383                 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_WR), ("Passthru write for EFER MSR!?\n"));
    2384             }
    2385             else
    2386             {
    2387                 /* Verify LBR MSRs (used only for debugging) are intercepted. We don't passthru these MSRs to the guest yet. */
    2388                 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2389                 if (   pVM->hmr0.s.vmx.fLbr
    2390                     && (   vmxHCIsLbrBranchFromMsr(pVM, pGuestMsrLoad->u32Msr, NULL /* pidxMsr */)
    2391                         || vmxHCIsLbrBranchToMsr(pVM, pGuestMsrLoad->u32Msr, NULL /* pidxMsr */)
    2392                         || pGuestMsrLoad->u32Msr == pVM->hmr0.s.vmx.idLbrTosMsr))
    2393                 {
    2394                     AssertMsgReturnVoid((fMsrpm & VMXMSRPM_MASK) == VMXMSRPM_EXIT_RD_WR,
    2395                                         ("u32Msr=%#RX32 cMsrs=%u Passthru read/write for LBR MSRs!\n",
    2396                                          pGuestMsrLoad->u32Msr, cMsrs));
    2397                 }
    2398                 else if (!fIsNstGstVmcs)
    2399                 {
    2400                     AssertMsgReturnVoid((fMsrpm & VMXMSRPM_MASK) == VMXMSRPM_ALLOW_RD_WR,
    2401                                         ("u32Msr=%#RX32 cMsrs=%u No passthru read/write!\n", pGuestMsrLoad->u32Msr, cMsrs));
    2402                 }
    2403                 else
    2404                 {
    2405                     /*
    2406                      * A nested-guest VMCS must -also- allow read/write passthrough for the MSR for us to
    2407                      * execute a nested-guest with MSR passthrough.
    2408                      *
    2409                      * Check if the nested-guest MSR bitmap allows passthrough, and if so, assert that we
    2410                      * allow passthrough too.
    2411                      */
    2412                     void const *pvMsrBitmapNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap;
    2413                     Assert(pvMsrBitmapNstGst);
    2414                     uint32_t const fMsrpmNstGst = CPUMGetVmxMsrPermission(pvMsrBitmapNstGst, pGuestMsrLoad->u32Msr);
    2415                     AssertMsgReturnVoid(fMsrpm == fMsrpmNstGst,
    2416                                         ("u32Msr=%#RX32 cMsrs=%u Permission mismatch fMsrpm=%#x fMsrpmNstGst=%#x!\n",
    2417                                          pGuestMsrLoad->u32Msr, cMsrs, fMsrpm, fMsrpmNstGst));
    2418                 }
    2419             }
    2420         }
    2421 
    2422         /* Move to the next MSR. */
    2423         pHostMsrLoad++;
    2424         pGuestMsrLoad++;
    2425         pGuestMsrStore++;
    2426     }
    2427 }
    2428 
    2429 #endif /* VBOX_STRICT */
    2430 
    2431 /**
    2432  * Flushes the TLB using EPT.
    2433  *
    2434  * @returns VBox status code.
    2435  * @param   pVCpu           The cross context virtual CPU structure of the calling
    2436  *                          EMT.  Can be NULL depending on @a enmTlbFlush.
    2437  * @param   pVmcsInfo       The VMCS info. object. Can be NULL depending on @a
    2438  *                          enmTlbFlush.
    2439  * @param   enmTlbFlush     Type of flush.
    2440  *
    2441  * @remarks Caller is responsible for making sure this function is called only
    2442  *          when NestedPaging is supported and providing @a enmTlbFlush that is
    2443  *          supported by the CPU.
    2444  * @remarks Can be called with interrupts disabled.
    2445  */
    2446 static void vmxHCFlushEpt(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, VMXTLBFLUSHEPT enmTlbFlush)
    2447 {
    2448     uint64_t au64Descriptor[2];
    2449     if (enmTlbFlush == VMXTLBFLUSHEPT_ALL_CONTEXTS)
    2450         au64Descriptor[0] = 0;
    2451     else
    2452     {
    2453         Assert(pVCpu);
    2454         Assert(pVmcsInfo);
    2455         au64Descriptor[0] = pVmcsInfo->HCPhysEPTP;
    2456     }
    2457     au64Descriptor[1] = 0;                       /* MBZ. Intel spec. 33.3 "VMX Instructions" */
    2458 
    2459     int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]);
    2460     AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %#RHp failed. rc=%Rrc\n", enmTlbFlush, au64Descriptor[0], rc));
    2461 
    2462     if (   RT_SUCCESS(rc)
    2463         && pVCpu)
    2464         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
    2465 }
    2466 
    2467 
    2468 /**
    2469  * Flushes the TLB using VPID.
    2470  *
    2471  * @returns VBox status code.
    2472  * @param   pVCpu           The cross context virtual CPU structure of the calling
    2473  *                          EMT.  Can be NULL depending on @a enmTlbFlush.
    2474  * @param   enmTlbFlush     Type of flush.
    2475  * @param   GCPtr           Virtual address of the page to flush (can be 0 depending
    2476  *                          on @a enmTlbFlush).
    2477  *
    2478  * @remarks Can be called with interrupts disabled.
    2479  */
    2480 static void vmxHCFlushVpid(PVMCPUCC pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr)
    2481 {
    2482     Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fVpid);
    2483 
    2484     uint64_t au64Descriptor[2];
    2485     if (enmTlbFlush == VMXTLBFLUSHVPID_ALL_CONTEXTS)
    2486     {
    2487         au64Descriptor[0] = 0;
    2488         au64Descriptor[1] = 0;
    2489     }
    2490     else
    2491     {
    2492         AssertPtr(pVCpu);
    2493         AssertMsg(pVCpu->hmr0.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hmr0.s.uCurrentAsid));
    2494         AssertMsg(pVCpu->hmr0.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hmr0.s.uCurrentAsid));
    2495         au64Descriptor[0] = pVCpu->hmr0.s.uCurrentAsid;
    2496         au64Descriptor[1] = GCPtr;
    2497     }
    2498 
    2499     int rc = VMXR0InvVPID(enmTlbFlush, &au64Descriptor[0]);
    2500     AssertMsg(rc == VINF_SUCCESS,
    2501               ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hmr0.s.uCurrentAsid : 0, GCPtr, rc));
    2502 
    2503     if (   RT_SUCCESS(rc)
    2504         && pVCpu)
    2505         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
    2506     NOREF(rc);
    2507 }
    2508 
    2509 
    2510 /**
    2511  * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
    2512  * case where neither EPT nor VPID is supported by the CPU.
    2513  *
    2514  * @param   pHostCpu    The HM physical-CPU structure.
    2515  * @param   pVCpu       The cross context virtual CPU structure.
    2516  *
    2517  * @remarks Called with interrupts disabled.
    2518  */
    2519 static void vmxHCFlushTaggedTlbNone(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu)
    2520 {
    2521     AssertPtr(pVCpu);
    2522     AssertPtr(pHostCpu);
    2523 
    2524     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
    2525 
    2526     Assert(pHostCpu->idCpu != NIL_RTCPUID);
    2527     pVCpu->hmr0.s.idLastCpu     = pHostCpu->idCpu;
    2528     pVCpu->hmr0.s.cTlbFlushes   = pHostCpu->cTlbFlushes;
    2529     pVCpu->hmr0.s.fForceTLBFlush  = false;
    2530     return;
    2531 }
    2532 
    2533 
    2534 /**
    2535  * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
    2536  *
    2537  * @param   pHostCpu    The HM physical-CPU structure.
    2538  * @param   pVCpu       The cross context virtual CPU structure.
    2539  * @param   pVmcsInfo   The VMCS info. object.
    2540  *
    2541  * @remarks  All references to "ASID" in this function pertains to "VPID" in Intel's
    2542  *           nomenclature. The reason is, to avoid confusion in compare statements
    2543  *           since the host-CPU copies are named "ASID".
    2544  *
    2545  * @remarks  Called with interrupts disabled.
    2546  */
    2547 static void vmxHCFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
    2548 {
    2549 #ifdef VBOX_WITH_STATISTICS
    2550     bool fTlbFlushed = false;
    2551 # define HMVMX_SET_TAGGED_TLB_FLUSHED()       do { fTlbFlushed = true; } while (0)
    2552 # define HMVMX_UPDATE_FLUSH_SKIPPED_STAT()    do { \
    2553                                                 if (!fTlbFlushed) \
    2554                                                     STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
    2555                                               } while (0)
    2556 #else
    2557 # define HMVMX_SET_TAGGED_TLB_FLUSHED()       do { } while (0)
    2558 # define HMVMX_UPDATE_FLUSH_SKIPPED_STAT()    do { } while (0)
    2559 #endif
    2560 
    2561     AssertPtr(pVCpu);
    2562     AssertPtr(pHostCpu);
    2563     Assert(pHostCpu->idCpu != NIL_RTCPUID);
    2564 
    2565     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2566     AssertMsg(pVM->hmr0.s.fNestedPaging && pVM->hmr0.s.vmx.fVpid,
    2567               ("vmxHCFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
    2568                "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hmr0.s.fNestedPaging, pVM->hmr0.s.vmx.fVpid));
    2569 
    2570     /*
    2571      * Force a TLB flush for the first world-switch if the current CPU differs from the one we
    2572      * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
    2573      * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
    2574      * cannot reuse the current ASID anymore.
    2575      */
    2576     if (   pVCpu->hmr0.s.idLastCpu   != pHostCpu->idCpu
    2577         || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
    2578     {
    2579         ++pHostCpu->uCurrentAsid;
    2580         if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid)
    2581         {
    2582             pHostCpu->uCurrentAsid = 1;            /* Wraparound to 1; host uses 0. */
    2583             pHostCpu->cTlbFlushes++;               /* All VCPUs that run on this host CPU must use a new VPID. */
    2584             pHostCpu->fFlushAsidBeforeUse = true;  /* All VCPUs that run on this host CPU must flush their new VPID before use. */
    2585         }
    2586 
    2587         pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
    2588         pVCpu->hmr0.s.idLastCpu    = pHostCpu->idCpu;
    2589         pVCpu->hmr0.s.cTlbFlushes  = pHostCpu->cTlbFlushes;
    2590 
    2591         /*
    2592          * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
    2593          * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
    2594          */
    2595         vmxHCFlushEpt(pVCpu, pVmcsInfo, pVM->hmr0.s.vmx.enmTlbFlushEpt);
    2596         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
    2597         HMVMX_SET_TAGGED_TLB_FLUSHED();
    2598         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
    2599     }
    2600     else if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))    /* Check for explicit TLB flushes. */
    2601     {
    2602         /*
    2603          * Changes to the EPT paging structure by VMM requires flushing-by-EPT as the CPU
    2604          * creates guest-physical (ie. only EPT-tagged) mappings while traversing the EPT
    2605          * tables when EPT is in use. Flushing-by-VPID will only flush linear (only
    2606          * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical
    2607          * mappings, see @bugref{6568}.
    2608          *
    2609          * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information".
    2610          */
    2611         vmxHCFlushEpt(pVCpu, pVmcsInfo, pVM->hmr0.s.vmx.enmTlbFlushEpt);
    2612         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
    2613         HMVMX_SET_TAGGED_TLB_FLUSHED();
    2614     }
    2615     else if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
    2616     {
    2617         /*
    2618          * The nested-guest specifies its own guest-physical address to use as the APIC-access
    2619          * address which requires flushing the TLB of EPT cached structures.
    2620          *
    2621          * See Intel spec. 28.3.3.4 "Guidelines for Use of the INVEPT Instruction".
    2622          */
    2623         vmxHCFlushEpt(pVCpu, pVmcsInfo, pVM->hmr0.s.vmx.enmTlbFlushEpt);
    2624         pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
    2625         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);
    2626         HMVMX_SET_TAGGED_TLB_FLUSHED();
    2627     }
    2628 
    2629 
    2630     pVCpu->hmr0.s.fForceTLBFlush = false;
    2631     HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
    2632 
    2633     Assert(pVCpu->hmr0.s.idLastCpu == pHostCpu->idCpu);
    2634     Assert(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes);
    2635     AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes,
    2636               ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes));
    2637     AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid,
    2638               ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
    2639                pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hmr0.s.idLastCpu, pVCpu->hmr0.s.cTlbFlushes));
    2640     AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid,
    2641               ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));
    2642 
    2643     /* Update VMCS with the VPID. */
    2644     int rc  = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_VPID, pVCpu->hmr0.s.uCurrentAsid);
    2645     AssertRC(rc);
    2646 
    2647 #undef HMVMX_SET_TAGGED_TLB_FLUSHED
    2648 }
    2649 
    2650 
    2651 /**
    2652  * Flushes the tagged-TLB entries for EPT CPUs as necessary.
    2653  *
    2654  * @param   pHostCpu    The HM physical-CPU structure.
    2655  * @param   pVCpu       The cross context virtual CPU structure.
    2656  * @param   pVmcsInfo   The VMCS info. object.
    2657  *
    2658  * @remarks Called with interrupts disabled.
    2659  */
    2660 static void vmxHCFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
    2661 {
    2662     AssertPtr(pVCpu);
    2663     AssertPtr(pHostCpu);
    2664     Assert(pHostCpu->idCpu != NIL_RTCPUID);
    2665     AssertMsg(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging, ("vmxHCFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
    2666     AssertMsg(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fVpid, ("vmxHCFlushTaggedTlbEpt cannot be invoked with VPID."));
    2667 
    2668     /*
    2669      * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
    2670      * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
    2671      */
    2672     if (   pVCpu->hmr0.s.idLastCpu   != pHostCpu->idCpu
    2673         || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
    2674     {
    2675         pVCpu->hmr0.s.fForceTLBFlush = true;
    2676         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
    2677     }
    2678 
    2679     /* Check for explicit TLB flushes. */
    2680     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    2681     {
    2682         pVCpu->hmr0.s.fForceTLBFlush = true;
    2683         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
    2684     }
    2685 
    2686     /* Check for TLB flushes while switching to/from a nested-guest. */
    2687     if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
    2688     {
    2689         pVCpu->hmr0.s.fForceTLBFlush = true;
    2690         pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
    2691         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);
    2692     }
    2693 
    2694     pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
    2695     pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
    2696 
    2697     if (pVCpu->hmr0.s.fForceTLBFlush)
    2698     {
    2699         vmxHCFlushEpt(pVCpu, pVmcsInfo, pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.enmTlbFlushEpt);
    2700         pVCpu->hmr0.s.fForceTLBFlush = false;
    2701     }
    2702 }
    2703 
    2704 
    2705 /**
    2706  * Flushes the tagged-TLB entries for VPID CPUs as necessary.
    2707  *
    2708  * @param   pHostCpu    The HM physical-CPU structure.
    2709  * @param   pVCpu       The cross context virtual CPU structure.
    2710  *
    2711  * @remarks Called with interrupts disabled.
    2712  */
    2713 static void vmxHCFlushTaggedTlbVpid(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu)
    2714 {
    2715     AssertPtr(pVCpu);
    2716     AssertPtr(pHostCpu);
    2717     Assert(pHostCpu->idCpu != NIL_RTCPUID);
    2718     AssertMsg(pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fVpid, ("vmxHCFlushTlbVpid cannot be invoked without VPID."));
    2719     AssertMsg(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging, ("vmxHCFlushTlbVpid cannot be invoked with NestedPaging"));
    2720 
    2721     /*
    2722      * Force a TLB flush for the first world switch if the current CPU differs from the one we
    2723      * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
    2724      * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
    2725      * cannot reuse the current ASID anymore.
    2726      */
    2727     if (   pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
    2728         || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
    2729     {
    2730         pVCpu->hmr0.s.fForceTLBFlush = true;
    2731         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
    2732     }
    2733 
    2734     /* Check for explicit TLB flushes. */
    2735     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    2736     {
    2737         /*
    2738          * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
    2739          * vmxHCSetupTaggedTlb()) we would need to explicitly flush in this case (add an
    2740          * fExplicitFlush = true here and change the pHostCpu->fFlushAsidBeforeUse check below to
    2741          * include fExplicitFlush's too) - an obscure corner case.
    2742          */
    2743         pVCpu->hmr0.s.fForceTLBFlush = true;
    2744         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
    2745     }
    2746 
    2747     /* Check for TLB flushes while switching to/from a nested-guest. */
    2748     if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
    2749     {
    2750         pVCpu->hmr0.s.fForceTLBFlush = true;
    2751         pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
    2752         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);
    2753     }
    2754 
    2755     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2756     pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
    2757     if (pVCpu->hmr0.s.fForceTLBFlush)
    2758     {
    2759         ++pHostCpu->uCurrentAsid;
    2760         if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid)
    2761         {
    2762             pHostCpu->uCurrentAsid        = 1;     /* Wraparound to 1; host uses 0 */
    2763             pHostCpu->cTlbFlushes++;               /* All VCPUs that run on this host CPU must use a new VPID. */
    2764             pHostCpu->fFlushAsidBeforeUse = true;  /* All VCPUs that run on this host CPU must flush their new VPID before use. */
    2765         }
    2766 
    2767         pVCpu->hmr0.s.fForceTLBFlush = false;
    2768         pVCpu->hmr0.s.cTlbFlushes    = pHostCpu->cTlbFlushes;
    2769         pVCpu->hmr0.s.uCurrentAsid   = pHostCpu->uCurrentAsid;
    2770         if (pHostCpu->fFlushAsidBeforeUse)
    2771         {
    2772             if (pVM->hmr0.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
    2773                 vmxHCFlushVpid(pVCpu, VMXTLBFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
    2774             else if (pVM->hmr0.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
    2775             {
    2776                 vmxHCFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
    2777                 pHostCpu->fFlushAsidBeforeUse = false;
    2778             }
    2779             else
    2780             {
    2781                 /* vmxHCSetupTaggedTlb() ensures we never get here. Paranoia. */
    2782                 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
    2783             }
    2784         }
    2785     }
    2786 
    2787     AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes,
    2788               ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes));
    2789     AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid,
    2790               ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
    2791                pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hmr0.s.idLastCpu, pVCpu->hmr0.s.cTlbFlushes));
    2792     AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid,
    2793               ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));
    2794 
    2795     int rc  = VMX_VMCS_WRITE_16(pVCpu, VMX_VMCS16_VPID, pVCpu->hmr0.s.uCurrentAsid);
    2796     AssertRC(rc);
    2797 }
    2798 
    2799 
    2800 /**
    2801  * Flushes the guest TLB entry based on CPU capabilities.
    2802  *
    2803  * @param   pHostCpu    The HM physical-CPU structure.
    2804  * @param   pVCpu       The cross context virtual CPU structure.
    2805  * @param   pVmcsInfo   The VMCS info. object.
    2806  *
    2807  * @remarks Called with interrupts disabled.
    2808  */
    2809 static void vmxHCFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    2810 {
    2811 #ifdef HMVMX_ALWAYS_FLUSH_TLB
    2812     VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    2813 #endif
    2814     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2815     switch (pVM->hmr0.s.vmx.enmTlbFlushType)
    2816     {
    2817         case VMXTLBFLUSHTYPE_EPT_VPID: vmxHCFlushTaggedTlbBoth(pHostCpu, pVCpu, pVmcsInfo); break;
    2818         case VMXTLBFLUSHTYPE_EPT:      vmxHCFlushTaggedTlbEpt(pHostCpu, pVCpu, pVmcsInfo);  break;
    2819         case VMXTLBFLUSHTYPE_VPID:     vmxHCFlushTaggedTlbVpid(pHostCpu, pVCpu);            break;
    2820         case VMXTLBFLUSHTYPE_NONE:     vmxHCFlushTaggedTlbNone(pHostCpu, pVCpu);            break;
    2821         default:
    2822             AssertMsgFailed(("Invalid flush-tag function identifier\n"));
    2823             break;
    2824     }
    2825     /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
    2826 }
    2827 
    2828 
    2829 /**
    2830  * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
    2831  * TLB entries from the host TLB before VM-entry.
    2832  *
    2833  * @returns VBox status code.
    2834  * @param   pVM     The cross context VM structure.
    2835  */
    2836 static int vmxHCSetupTaggedTlb(PVMCC pVM)
    2837 {
    2838     /*
    2839      * Determine optimal flush type for nested paging.
    2840      * We cannot ignore EPT if no suitable flush-types is supported by the CPU as we've already setup
    2841      * unrestricted guest execution (see hmR3InitFinalizeR0()).
    2842      */
    2843     if (pVM->hmr0.s.fNestedPaging)
    2844     {
    2845         if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
    2846         {
    2847             if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
    2848                 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_SINGLE_CONTEXT;
    2849             else if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
    2850                 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_ALL_CONTEXTS;
    2851             else
    2852             {
    2853                 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
    2854                 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
    2855                 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED;
    2856                 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2857             }
    2858 
    2859             /* Make sure the write-back cacheable memory type for EPT is supported. */
    2860             if (RT_UNLIKELY(!(g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_WB)))
    2861             {
    2862                 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
    2863                 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB;
    2864                 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2865             }
    2866 
    2867             /* EPT requires a page-walk length of 4. */
    2868             if (RT_UNLIKELY(!(g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4)))
    2869             {
    2870                 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
    2871                 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED;
    2872                 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2873             }
    2874         }
    2875         else
    2876         {
    2877             /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
    2878             pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
    2879             VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE;
    2880             return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    2881         }
    2882     }
    2883 
    2884     /*
    2885      * Determine optimal flush type for VPID.
    2886      */
    2887     if (pVM->hmr0.s.vmx.fVpid)
    2888     {
    2889         if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
    2890         {
    2891             if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
    2892                 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_SINGLE_CONTEXT;
    2893             else if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
    2894                 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_ALL_CONTEXTS;
    2895             else
    2896             {
    2897                 /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */
    2898                 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
    2899                     LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n"));
    2900                 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
    2901                     LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
    2902                 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
    2903                 pVM->hmr0.s.vmx.fVpid           = false;
    2904             }
    2905         }
    2906         else
    2907         {
    2908             /*  Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
    2909             Log4Func(("VPID supported without INVEPT support. Ignoring VPID.\n"));
    2910             pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
    2911             pVM->hmr0.s.vmx.fVpid           = false;
    2912         }
    2913     }
    2914 
    2915     /*
    2916      * Setup the handler for flushing tagged-TLBs.
    2917      */
    2918     if (pVM->hmr0.s.fNestedPaging && pVM->hmr0.s.vmx.fVpid)
    2919         pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT_VPID;
    2920     else if (pVM->hmr0.s.fNestedPaging)
    2921         pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT;
    2922     else if (pVM->hmr0.s.vmx.fVpid)
    2923         pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_VPID;
    2924     else
    2925         pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_NONE;
    2926 
    2927 
    2928     /*
    2929      * Copy out the result to ring-3.
    2930      */
    2931     pVM->hm.s.ForR3.vmx.fVpid           = pVM->hmr0.s.vmx.fVpid;
    2932     pVM->hm.s.ForR3.vmx.enmTlbFlushType = pVM->hmr0.s.vmx.enmTlbFlushType;
    2933     pVM->hm.s.ForR3.vmx.enmTlbFlushEpt  = pVM->hmr0.s.vmx.enmTlbFlushEpt;
    2934     pVM->hm.s.ForR3.vmx.enmTlbFlushVpid = pVM->hmr0.s.vmx.enmTlbFlushVpid;
    2935     return VINF_SUCCESS;
    2936 }
    2937 
    2938 
     2071
     2072#ifdef IN_RING0
    29392073/**
    29402074 * Sets up the LBR MSR ranges based on the host CPU.
     
    31662300
    31672301/**
    3168  * Sets up the virtual-APIC page address for the VMCS.
    3169  *
    3170  * @param   pVmcsInfo   The VMCS info. object.
    3171  */
    3172 DECLINLINE(void) vmxHCSetupVmcsVirtApicAddr(PCVMXVMCSINFO pVmcsInfo)
    3173 {
    3174     RTHCPHYS const HCPhysVirtApic = pVmcsInfo->HCPhysVirtApic;
    3175     Assert(HCPhysVirtApic != NIL_RTHCPHYS);
    3176     Assert(!(HCPhysVirtApic & 0xfff));                       /* Bits 11:0 MBZ. */
    3177     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
    3178     AssertRC(rc);
    3179 }
    3180 
    3181 
    3182 /**
    3183  * Sets up the MSR-bitmap address for the VMCS.
    3184  *
    3185  * @param   pVmcsInfo   The VMCS info. object.
    3186  */
    3187 DECLINLINE(void) vmxHCSetupVmcsMsrBitmapAddr(PCVMXVMCSINFO pVmcsInfo)
    3188 {
    3189     RTHCPHYS const HCPhysMsrBitmap = pVmcsInfo->HCPhysMsrBitmap;
    3190     Assert(HCPhysMsrBitmap != NIL_RTHCPHYS);
    3191     Assert(!(HCPhysMsrBitmap & 0xfff));                      /* Bits 11:0 MBZ. */
    3192     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_MSR_BITMAP_FULL, HCPhysMsrBitmap);
    3193     AssertRC(rc);
    3194 }
    3195 
    3196 
    3197 /**
    31982302 * Sets up the APIC-access page address for the VMCS.
    31992303 *
     
    32412345
    32422346#endif
    3243 
    3244 /**
    3245  * Sets up the VM-entry MSR load, VM-exit MSR-store and VM-exit MSR-load addresses
    3246  * in the VMCS.
    3247  *
    3248  * @returns VBox status code.
    3249  * @param   pVmcsInfo   The VMCS info. object.
    3250  */
    3251 DECLINLINE(int) vmxHCSetupVmcsAutoLoadStoreMsrAddrs(PVMXVMCSINFO pVmcsInfo)
    3252 {
    3253     RTHCPHYS const HCPhysGuestMsrLoad = pVmcsInfo->HCPhysGuestMsrLoad;
    3254     Assert(HCPhysGuestMsrLoad != NIL_RTHCPHYS);
    3255     Assert(!(HCPhysGuestMsrLoad & 0xf));                     /* Bits 3:0 MBZ. */
    3256 
    3257     RTHCPHYS const HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrStore;
    3258     Assert(HCPhysGuestMsrStore != NIL_RTHCPHYS);
    3259     Assert(!(HCPhysGuestMsrStore & 0xf));                    /* Bits 3:0 MBZ. */
    3260 
    3261     RTHCPHYS const HCPhysHostMsrLoad = pVmcsInfo->HCPhysHostMsrLoad;
    3262     Assert(HCPhysHostMsrLoad != NIL_RTHCPHYS);
    3263     Assert(!(HCPhysHostMsrLoad & 0xf));                      /* Bits 3:0 MBZ. */
    3264 
    3265     int rc = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, HCPhysGuestMsrLoad);   AssertRC(rc);
    3266     rc     = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, HCPhysGuestMsrStore);  AssertRC(rc);
    3267     rc     = VMX_VMCS_WRITE_64(pVCpu, VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,  HCPhysHostMsrLoad);    AssertRC(rc);
    3268     return VINF_SUCCESS;
    3269 }
    3270 
    32712347
    32722348/**
     
    33812457        LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    33822458                    g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap));
    3383         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
     2459        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PIN_EXEC;
    33842460        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    33852461    }
     
    34732549        LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    34742550                    g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap));
    3475         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
     2551        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
    34762552        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    34772553    }
     
    35112587        ||  (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
    35122588    {
    3513         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
     2589        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
    35142590        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    35152591    }
     
    35242600    }
    35252601
     2602#ifdef IN_RING0
    35262603    /* Use TPR shadowing if supported by the CPU. */
    35272604    if (   PDMHasApic(pVM)
     
    35492626        vmxHCSetupVmcsMsrBitmapAddr(pVmcsInfo);
    35502627    }
     2628#endif
    35512629
    35522630    /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
     
    35582636        LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    35592637                    g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap));
    3560         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
     2638        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_PROC_EXEC;
    35612639        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    35622640    }
     
    35802658    else
    35812659    {
    3582         pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
     2660        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_INVALID_UX_COMBO;
    35832661        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    35842662    }
     
    37022780
    37032781/**
    3704  * Sets pfnStartVm to the best suited variant.
    3705  *
    3706  * This must be called whenever anything changes relative to the vmxHCStartVm
    3707  * variant selection:
    3708  *      - pVCpu->hm.s.fLoadSaveGuestXcr0
    3709  *      - HM_WSF_IBPB_ENTRY in pVCpu->hmr0.s.fWorldSwitcher
    3710  *      - HM_WSF_IBPB_EXIT  in pVCpu->hmr0.s.fWorldSwitcher
    3711  *      - Perhaps: CPUMIsGuestFPUStateActive() (windows only)
    3712  *      - Perhaps: CPUMCTX.fXStateMask (windows only)
    3713  *
    3714  * We currently ASSUME that neither HM_WSF_IBPB_ENTRY nor HM_WSF_IBPB_EXIT
    3715  * can be changed at runtime.
    3716  */
    3717 static void vmxHCUpdateStartVmFunction(PVMCPUCC pVCpu)
    3718 {
    3719     static const struct CLANGWORKAROUND { PFNHMVMXSTARTVM pfn; } s_avmxHCStartVmFunctions[] =
    3720     {
    3721         { vmxHCStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
    3722         { vmxHCStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
    3723         { vmxHCStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
    3724         { vmxHCStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
    3725         { vmxHCStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
    3726         { vmxHCStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
    3727         { vmxHCStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
    3728         { vmxHCStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
    3729         { vmxHCStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
    3730         { vmxHCStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
    3731         { vmxHCStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
    3732         { vmxHCStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
    3733         { vmxHCStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
    3734         { vmxHCStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
    3735         { vmxHCStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
    3736         { vmxHCStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
    3737         { vmxHCStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
    3738         { vmxHCStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
    3739         { vmxHCStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
    3740         { vmxHCStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
    3741         { vmxHCStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
    3742         { vmxHCStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
    3743         { vmxHCStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
    3744         { vmxHCStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
    3745         { vmxHCStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
    3746         { vmxHCStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
    3747         { vmxHCStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
    3748         { vmxHCStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
    3749         { vmxHCStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
    3750         { vmxHCStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
    3751         { vmxHCStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
    3752         { vmxHCStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
    3753     };
    3754     uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0                 ?  1 : 0)
    3755                         | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ?  2 : 0)
    3756                         | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_L1D_ENTRY  ?  4 : 0)
    3757                         | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_MDS_ENTRY  ?  8 : 0)
    3758                         | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT  ? 16 : 0);
    3759     PFNHMVMXSTARTVM const pfnStartVm = s_avmxHCStartVmFunctions[idx].pfn;
    3760     if (pVCpu->hmr0.s.vmx.pfnStartVm != pfnStartVm)
    3761         pVCpu->hmr0.s.vmx.pfnStartVm = pfnStartVm;
    3762 }
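[Editor's note] The table above is indexed by a 5-bit mask, one bit per world-switcher feature, so all 32 vmxHCStartVm_* variants are reachable. A worked example with illustrative values: a vCPU that must load/save XCR0 and issue an IBPB on VM-entry, with no L1D/MDS flush and no IBPB on VM-exit, selects slot 3:

    /* Example flag combination -> table slot (flag values are illustrative). */
    uintptr_t const idx = (true  ?  1 : 0)    /* fLoadSaveGuestXcr0 */
                        | (true  ?  2 : 0)    /* HM_WSF_IBPB_ENTRY  */
                        | (false ?  4 : 0)    /* HM_WSF_L1D_ENTRY   */
                        | (false ?  8 : 0)    /* HM_WSF_MDS_ENTRY   */
                        | (false ? 16 : 0);   /* HM_WSF_IBPB_EXIT   */
    /* idx == 3 -> vmxHCStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit */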
    3763 
    3764 
    3765 /**
    3766  * Selector FNHMSVMVMRUN implementation.
    3767  * Selector FNHMVMXSTARTVM implementation.
    3768 static DECLCALLBACK(int) vmxHCStartVmSelector(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume)
    3769 {
    3770     vmxHCUpdateStartVmFunction(pVCpu);
    3771     return pVCpu->hmr0.s.vmx.pfnStartVm(pVmcsInfo, pVCpu, fResume);
    3772 }
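[Editor's note] The selector above presumably serves as the initial pfnStartVm value: the first VM-entry lands here, vmxHCUpdateStartVmFunction() resolves and caches the best-suited variant, and later entries dispatch straight to the cached worker. Sketch only, under that assumption:

    /* Callers always go through the function pointer; the selector runs once. */
    int rc = pVCpu->hmr0.s.vmx.pfnStartVm(pVmcsInfo, pVCpu, fResume); /* selector on first use, cached variant afterwards */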
    3773 
    3774 
    3775 /**
    3776  * Sets up the VMCS for executing a guest (or nested-guest) using hardware-assisted
    3777  * VMX.
    3778  *
    3779  * @returns VBox status code.
    3780  * @param   pVCpu           The cross context virtual CPU structure.
    3781  * @param   pVmcsInfo       The VMCS info. object.
    3782  * @param   fIsNstGstVmcs   Whether this is a nested-guest VMCS.
    3783  */
    3784 static int vmxHCSetupVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
    3785 {
    3786     Assert(pVmcsInfo->pvVmcs);
    3787     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    3788 
    3789     /* Set the CPU specified revision identifier at the beginning of the VMCS structure. */
    3790     *(uint32_t *)pVmcsInfo->pvVmcs = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID);
    3791     const char * const pszVmcs     = fIsNstGstVmcs ? "nested-guest VMCS" : "guest VMCS";
    3792 
    3793     LogFlowFunc(("\n"));
    3794 
    3795     /*
    3796      * Initialize the VMCS using VMCLEAR before loading the VMCS.
    3797      * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
    3798      */
    3799     int rc = vmxHCClearVmcs(pVmcsInfo);
    3800     if (RT_SUCCESS(rc))
    3801     {
    3802         rc = vmxHCLoadVmcs(pVmcsInfo);
    3803         if (RT_SUCCESS(rc))
    3804         {
    3805             /*
    3806              * Initialize the hardware-assisted VMX execution handler for guest and nested-guest VMCS.
    3807              * The host is always 64-bit since we no longer support 32-bit hosts.
    3808              * Currently we have just a single handler for all guest modes as well, see @bugref{6208#c73}.
    3809              */
    3810             if (!fIsNstGstVmcs)
    3811             {
    3812                 rc = vmxHCSetupVmcsPinCtls(pVCpu, pVmcsInfo);
    3813                 if (RT_SUCCESS(rc))
    3814                 {
    3815                     rc = vmxHCSetupVmcsProcCtls(pVCpu, pVmcsInfo);
    3816                     if (RT_SUCCESS(rc))
    3817                     {
    3818                         rc = vmxHCSetupVmcsMiscCtls(pVCpu, pVmcsInfo);
    3819                         if (RT_SUCCESS(rc))
    3820                         {
    3821                             vmxHCSetupVmcsXcptBitmap(pVCpu, pVmcsInfo);
    3822 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3823                             /*
    3824                              * If a shadow VMCS is allocated for the VMCS info. object, initialize the
    3825                              * VMCS revision ID and shadow VMCS indicator bit. Also, clear the VMCS
    3826                              * making it fit for use when VMCS shadowing is later enabled.
    3827                              */
    3828                             if (pVmcsInfo->pvShadowVmcs)
    3829                             {
    3830                                 VMXVMCSREVID VmcsRevId;
    3831                                 VmcsRevId.u = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID);
    3832                                 VmcsRevId.n.fIsShadowVmcs = 1;
    3833                                 *(uint32_t *)pVmcsInfo->pvShadowVmcs = VmcsRevId.u;
    3834                                 rc = vmxHCClearShadowVmcs(pVmcsInfo);
    3835                                 if (RT_SUCCESS(rc))
    3836                                 { /* likely */ }
    3837                                 else
    3838                                     LogRelFunc(("Failed to initialize shadow VMCS. rc=%Rrc\n", rc));
    3839                             }
    3840 #endif
    3841                         }
    3842                         else
    3843                             LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc));
    3844                     }
    3845                     else
    3846                         LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc));
    3847                 }
    3848                 else
    3849                     LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc));
    3850             }
    3851             else
    3852             {
    3853 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    3854                 rc = vmxHCSetupVmcsCtlsNested(pVmcsInfo);
    3855                 if (RT_SUCCESS(rc))
    3856                 { /* likely */ }
    3857                 else
    3858                     LogRelFunc(("Failed to initialize nested-guest VMCS. rc=%Rrc\n", rc));
    3859 #else
    3860                 AssertFailed();
    3861 #endif
    3862             }
    3863         }
    3864         else
    3865             LogRelFunc(("Failed to load the %s. rc=%Rrc\n", pszVmcs, rc));
    3866     }
    3867     else
    3868         LogRelFunc(("Failed to clear the %s. rc=%Rrc\n", pszVmcs, rc));
    3869 
    3870     /* Sync any CPU internal VMCS data back into our VMCS in memory. */
    3871     if (RT_SUCCESS(rc))
    3872     {
    3873         rc = vmxHCClearVmcs(pVmcsInfo);
    3874         if (RT_SUCCESS(rc))
    3875         { /* likely */ }
    3876         else
    3877             LogRelFunc(("Failed to clear the %s post setup. rc=%Rrc\n", pszVmcs, rc));
    3878     }
    3879 
    3880     /*
    3881      * Update the last-error record both for failures and success, so we
    3882      * can propagate the status code back to ring-3 for diagnostics.
    3883      */
    3884     vmxHCUpdateErrorRecord(pVCpu, rc);
    3885     NOREF(pszVmcs);
    3886     return rc;
    3887 }
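[Editor's note] The nested success checks above boil down to a fixed VMCS initialization sequence. An equivalent flattened outline of the guest-VMCS path (error logging and the nested-guest/shadow-VMCS branches omitted; illustrative only):

    int rc = vmxHCClearVmcs(pVmcsInfo);                        /* VMCLEAR: put the VMCS in a defined, inactive state. */
    if (RT_SUCCESS(rc)) rc = vmxHCLoadVmcs(pVmcsInfo);         /* VMPTRLD: make it the current VMCS.                  */
    if (RT_SUCCESS(rc)) rc = vmxHCSetupVmcsPinCtls(pVCpu, pVmcsInfo);
    if (RT_SUCCESS(rc)) rc = vmxHCSetupVmcsProcCtls(pVCpu, pVmcsInfo);
    if (RT_SUCCESS(rc)) rc = vmxHCSetupVmcsMiscCtls(pVCpu, pVmcsInfo);
    if (RT_SUCCESS(rc)) vmxHCSetupVmcsXcptBitmap(pVCpu, pVmcsInfo);
    if (RT_SUCCESS(rc)) rc = vmxHCClearVmcs(pVmcsInfo);        /* VMCLEAR again to sync CPU-internal VMCS data back.  */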
    3888 
    3889 
    3890 /**
    38912782 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
    38922783 * VMCS.
     
    39032794static int vmxHCExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    39042795{
    3905     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
     2796    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
    39062797    {
    39072798        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     
    39702861                Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    39712862                          g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
    3972                 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
     2863                VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_ENTRY;
    39732864                return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    39742865            }
     
    40482939                Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
    40492940                          g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
    4050                 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
     2941                VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_UFC_CTRL_EXIT;
    40512942                return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    40522943            }
     
    40612952        }
    40622953
    4063         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
     2954        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
    40642955    }
    40652956    return VINF_SUCCESS;
    40662957}
     2958#endif /* IN_RING0 */
    40672959
    40682960
     
    40702962 * Sets the TPR threshold in the VMCS.
    40712963 *
     2964 * @param   pVCpu               The cross context virtual CPU structure.
    40722965 * @param   pVmcsInfo           The VMCS info. object.
    40732966 * @param   u32TprThreshold     The TPR threshold (task-priority class only).
    40742967 */
    4075 DECLINLINE(void) vmxHCApicSetTprThreshold(PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
     2968DECLINLINE(void) vmxHCApicSetTprThreshold(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
    40762969{
    40772970    Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK));         /* Bits 31:4 MBZ. */
     
    40932986static void vmxHCExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    40942987{
    4095     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
     2988    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
    40962989    {
    40972990        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
     
    41323025                    }
    41333026
    4134                     vmxHCApicSetTprThreshold(pVmcsInfo, u32TprThreshold);
     3027                    vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
    41353028                }
    41363029            }
    41373030        }
    41383031        /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
    4139         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
     3032        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
    41403033    }
    41413034}
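[Editor's note] vmxHCExportGuestApicTpr() above follows the pattern shared by the vmxHCExportGuestXxx routines in this template: test the HM_CHANGED_XXX bit, write the corresponding VMCS field(s), then clear the bit so the export is skipped until the state is dirtied again. Stripped to its essentials, using the RIP export further below as the model (restatement only, not new code in the changeset):

    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
    {
        int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
        AssertRC(rc);
        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
    }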
     
    42043097}
    42053098
    4206 
     3099#ifdef IN_RING0
    42073100/**
    42083101 * Exports the exception intercepts required for guest execution in the VMCS.
     
    42153108static void vmxHCExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    42163109{
    4217     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
     3110    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
    42183111    {
    42193112        /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
    42203113        if (   !pVmxTransient->fIsNestedGuest
    4221             &&  pVCpu->hm.s.fGIMTrapXcptUD)
     3114            &&  VCPU_2_VMXSTATE(pVCpu).fGIMTrapXcptUD)
    42223115            vmxHCAddXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
    42233116        else
     
    42253118
    42263119        /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
    4227         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
     3120        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
    42283121    }
    42293122}
     
    42393132static void vmxHCExportGuestRip(PVMCPUCC pVCpu)
    42403133{
    4241     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
     3134    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RIP)
    42423135    {
    42433136        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
     
    42463139        AssertRC(rc);
    42473140
    4248         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
     3141        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RIP);
    42493142        Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
    42503143    }
     
    42613154static void vmxHCExportGuestRsp(PVMCPUCC pVCpu)
    42623155{
    4263     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
     3156    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RSP)
    42643157    {
    42653158        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
     
    42683161        AssertRC(rc);
    42693162
    4270         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP);
     3163        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RSP);
    42713164        Log4Func(("rsp=%#RX64\n", pVCpu->cpum.GstCtx.rsp));
    42723165    }
     
    42843177static void vmxHCExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    42853178{
    4286     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
     3179    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
    42873180    {
    42883181        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
     
    43143207        AssertRC(rc);
    43153208
    4316         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
     3209        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
    43173210        Log4Func(("eflags=%#RX32\n", fEFlags.u32));
    43183211    }
     
    44983391static int vmxHCExportGuestHwvirtState(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    44993392{
    4500     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_HWVIRT)
     3393    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_HWVIRT)
    45013394    {
    45023395#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     
    45283421                 * was newly loaded or modified before copying it to the shadow VMCS.
    45293422                 */
    4530                 if (!pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs)
     3423                if (!VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs)
    45313424                {
    45323425                    int rc = vmxHCCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
    45333426                    AssertRCReturn(rc, rc);
    4534                     pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = true;
     3427                    VCPU_2_VMXSTATE(pVCpu).vmx.fCopiedNstGstToShadowVmcs = true;
    45353428                }
    45363429                vmxHCEnableVmcsShadowing(pVmcsInfo);
     
    45423435        NOREF(pVmxTransient);
    45433436#endif
    4544         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT);
     3437        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT);
    45453438    }
    45463439    return VINF_SUCCESS;
     
    45623455static int vmxHCExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    45633456{
    4564     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
     3457    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR0)
    45653458    {
    45663459        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     
    46593552            uXcptBitmap |= RT_BIT(X86_XCPT_PF);
    46603553#endif
    4661             if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv)
     3554            if (VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv)
    46623555                uXcptBitmap |= RT_BIT(X86_XCPT_GP);
    46633556            Assert(pVM->hmr0.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
     
    47153608        }
    47163609
    4717         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
     3610        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR0);
    47183611    }
    47193612
     
    47493642     * Guest CR3.
    47503643     */
    4751     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
     3644    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR3)
    47523645    {
    47533646        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
     
    48403733        }
    48413734
    4842         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);
     3735        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR3);
    48433736    }
    48443737
     
    48473740     * ASSUMES this is done everytime we get in from ring-3! (XCR0)
    48483741     */
    4849     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
     3742    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CR4)
    48503743    {
    48513744        PCPUMCTX     pCtx      = &pVCpu->cpum.GstCtx;
     
    49053798             * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
    49063799             */
    4907             switch (pVCpu->hm.s.enmShadowMode)
     3800            switch (VCPU_2_VMXSTATE(pVCpu).enmShadowMode)
    49083801            {
    49093802                case PGMMODE_REAL:              /* Real-mode. */
     
    49533846        }
    49543847
    4955         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
     3848        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CR4);
    49563849
    49573850        Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
     
    50143907    bool     fInterceptMovDRx = false;
    50153908    uint32_t uProcCtls        = pVmcsInfo->u32ProcCtls;
    5016     if (pVCpu->hm.s.fSingleInstruction)
     3909    if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    50173910    {
    50183911        /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
     
    50253918        {
    50263919            pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
    5027             pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
     3920            VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
    50283921            pVCpu->hmr0.s.fClearTrapFlag = true;
    50293922            fSteppingDB = true;
     
    50683961                Assert(CPUMIsGuestDebugStateActive(pVCpu));
    50693962                Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    5070                 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
     3963                STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatDRxArmed);
    50713964            }
    50723965            Assert(!fInterceptMovDRx);
     
    51184011    if (fSteppingDB)
    51194012    {
    5120         Assert(pVCpu->hm.s.fSingleInstruction);
     4013        Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
    51214014        Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
    51224015
     
    53964289     * Guest Segment registers: CS, SS, DS, ES, FS, GS.
    53974290     */
    5398     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
    5399     {
    5400         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
     4291    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
     4292    {
     4293        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_CS)
    54014294        {
    54024295            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
     
    54054298            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
    54064299            AssertRC(rc);
    5407             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
    5408         }
    5409 
    5410         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
     4300            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_CS);
     4301        }
     4302
     4303        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SS)
    54114304        {
    54124305            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
     
    54154308            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
    54164309            AssertRC(rc);
    5417             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
    5418         }
    5419 
    5420         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
     4310            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SS);
     4311        }
     4312
     4313        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_DS)
    54214314        {
    54224315            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
     
    54254318            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
    54264319            AssertRC(rc);
    5427             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
    5428         }
    5429 
    5430         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
     4320            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_DS);
     4321        }
     4322
     4323        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_ES)
    54314324        {
    54324325            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
     
    54354328            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
    54364329            AssertRC(rc);
    5437             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
    5438         }
    5439 
    5440         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
     4330            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_ES);
     4331        }
     4332
     4333        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_FS)
    54414334        {
    54424335            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
     
    54454338            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
    54464339            AssertRC(rc);
    5447             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
    5448         }
    5449 
    5450         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
     4340            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_FS);
     4341        }
     4342
     4343        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GS)
    54514344        {
    54524345            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
     
    54554348            rc = vmxHCExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
    54564349            AssertRC(rc);
    5457             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
     4350            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GS);
    54584351        }
    54594352
     
    54684361     * Guest TR.
    54694362     */
    5470     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
     4363    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_TR)
    54714364    {
    54724365        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
     
    55294422        rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_TR_BASE,            u64Base);            AssertRC(rc);
    55304423
    5531         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
     4424        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_TR);
    55324425        Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
    55334426    }
     
    55364429     * Guest GDTR.
    55374430     */
    5538     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
     4431    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_GDTR)
    55394432    {
    55404433        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
     
    55464439        Assert(!(pCtx->gdtr.cbGdt & 0xffff0000));          /* Bits 31:16 MBZ. */
    55474440
    5548         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
     4441        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
    55494442        Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
    55504443    }
     
    55534446     * Guest LDTR.
    55544447     */
    5555     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
     4448    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_LDTR)
    55564449    {
    55574450        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
     
    55854478        }
    55864479
    5587         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
     4480        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
    55884481        Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
    55894482    }
     
    55924485     * Guest IDTR.
    55934486     */
    5594     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
     4487    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_IDTR)
    55954488    {
    55964489        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
     
    56024495        Assert(!(pCtx->idtr.cbIdt & 0xffff0000));          /* Bits 31:16 MBZ. */
    56034496
    5604         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
     4497        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
    56054498        Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
    56064499    }
     
    56494542     * those MSRs into the auto-load/store MSR area. Nothing to do here.
    56504543     */
    5651     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
     4544    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
    56524545    {
    56534546        /* No auto-load/store MSRs currently. */
    5654         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     4547        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    56554548    }
    56564549
     
    56584551     * Guest Sysenter MSRs.
    56594552     */
    5660     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
     4553    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
    56614554    {
    56624555        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
    56634556
    5664         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
     4557        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
    56654558        {
    56664559            int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
    56674560            AssertRC(rc);
    5668             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
    5669         }
    5670 
    5671         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
     4561            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
     4562        }
     4563
     4564        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
    56724565        {
    56734566            int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
    56744567            AssertRC(rc);
    5675             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
    5676         }
    5677 
    5678         if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
     4568            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
     4569        }
     4570
     4571        if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
    56794572        {
    56804573            int rc = VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
    56814574            AssertRC(rc);
    5682             ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
     4575            ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
    56834576        }
    56844577    }
     
    56874580     * Guest/host EFER MSR.
    56884581     */
    5689     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
     4582    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
    56904583    {
    56914584        /* Whether we are using the VMCS to swap the EFER MSR must have been
    56924585           determined earlier while exporting VM-entry/VM-exit controls. */
    5693         Assert(!(ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));
     4586        Assert(!(ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));
    56944587        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
    56954588
     
    57394632            vmxHCRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER);
    57404633
    5741         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
     4634        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
    57424635    }
    57434636
     
    57454638     * Other MSRs.
    57464639     */
    5747     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)
     4640    if (ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)
    57484641    {
    57494642        /* Speculation Control (R/W). */
     
    57884681        }
    57894682
    5790         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
     4683        ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
    57914684    }
    57924685
    57934686    return VINF_SUCCESS;
    5794 }
    5795 
    5796 
    5797 /**
    5798  * Wrapper for running the guest code in VT-x.
    5799  *
    5800  * @returns VBox status code, no informational status codes.
    5801  * @param   pVCpu           The cross context virtual CPU structure.
    5802  * @param   pVmxTransient   The VMX-transient structure.
    5803  *
    5804  * @remarks No-long-jump zone!!!
    5805  */
    5806 DECLINLINE(int) vmxHCRunGuest(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
    5807 {
    5808     /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
    5809     pVCpu->cpum.GstCtx.fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
    5810 
    5811     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    5812     bool const   fResumeVM = RT_BOOL(pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_LAUNCHED);
    5813 #ifdef VBOX_WITH_STATISTICS
    5814     if (fResumeVM)
    5815         STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxVmResume);
    5816     else
    5817         STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxVmLaunch);
    5818 #endif
    5819     int rc = pVCpu->hmr0.s.vmx.pfnStartVm(pVmcsInfo, pVCpu, fResumeVM);
    5820     AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));
    5821     return rc;
    5822 }
    5823 
    5824 
    5825 /**
    5826  * Reports world-switch error and dumps some useful debug info.
    5827  *
    5828  * @param   pVCpu           The cross context virtual CPU structure.
    5829  * @param   rcVMRun         The return code from VMLAUNCH/VMRESUME.
    5830  * @param   pVmxTransient   The VMX-transient structure (only
    5831  *                          exitReason updated).
    5832  */
    5833 static void vmxHCReportWorldSwitchError(PVMCPUCC pVCpu, int rcVMRun, PVMXTRANSIENT pVmxTransient)
    5834 {
    5835     Assert(pVCpu);
    5836     Assert(pVmxTransient);
    5837     HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    5838 
    5839     Log4Func(("VM-entry failure: %Rrc\n", rcVMRun));
    5840     switch (rcVMRun)
    5841     {
    5842         case VERR_VMX_INVALID_VMXON_PTR:
    5843             AssertFailed();
    5844             break;
    5845         case VINF_SUCCESS:                  /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
    5846         case VERR_VMX_UNABLE_TO_START_VM:   /* VMLAUNCH/VMRESUME itself failed. */
    5847         {
    5848             int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
    5849             rc    |= VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
    5850             AssertRC(rc);
    5851             vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    5852 
    5853             pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
    5854             /* LastError.idCurrentCpu was already updated in vmxHCPreRunGuestCommitted().
    5855                Cannot do it here as we may have been long preempted. */
    5856 
    5857 #ifdef VBOX_STRICT
    5858                 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    5859                 Log4(("uExitReason        %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
    5860                      pVmxTransient->uExitReason));
    5861                 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));
    5862                 Log4(("InstrError         %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
    5863                 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
    5864                     Log4(("InstrError Desc.  \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
    5865                 else
    5866                     Log4(("InstrError Desc.    Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
    5867                 Log4(("Entered host CPU   %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
    5868                 Log4(("Current host CPU   %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
    5869 
    5870                 static struct
    5871                 {
    5872                     /** Name of the field to log. */
    5873                     const char     *pszName;
    5874                     /** The VMCS field. */
    5875                     uint32_t        uVmcsField;
    5876                     /** Whether host support of this field needs to be checked. */
    5877                     bool            fCheckSupport;
    5878                 } const s_aVmcsFields[] =
    5879                 {
    5880                     { "VMX_VMCS32_CTRL_PIN_EXEC",                 VMX_VMCS32_CTRL_PIN_EXEC,                   false  },
    5881                     { "VMX_VMCS32_CTRL_PROC_EXEC",                VMX_VMCS32_CTRL_PROC_EXEC,                  false  },
    5882                     { "VMX_VMCS32_CTRL_PROC_EXEC2",               VMX_VMCS32_CTRL_PROC_EXEC2,                 true   },
    5883                     { "VMX_VMCS32_CTRL_ENTRY",                    VMX_VMCS32_CTRL_ENTRY,                      false  },
    5884                     { "VMX_VMCS32_CTRL_EXIT",                     VMX_VMCS32_CTRL_EXIT,                       false  },
    5885                     { "VMX_VMCS32_CTRL_CR3_TARGET_COUNT",         VMX_VMCS32_CTRL_CR3_TARGET_COUNT,           false  },
    5886                     { "VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO",  VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,    false  },
    5887                     { "VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE",  VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,    false  },
    5888                     { "VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH",       VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,         false  },
    5889                     { "VMX_VMCS32_CTRL_TPR_THRESHOLD",            VMX_VMCS32_CTRL_TPR_THRESHOLD,              false  },
    5890                     { "VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT",     VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,       false  },
    5891                     { "VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT",      VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,        false  },
    5892                     { "VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT",     VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,       false  },
    5893                     { "VMX_VMCS32_CTRL_EXCEPTION_BITMAP",         VMX_VMCS32_CTRL_EXCEPTION_BITMAP,           false  },
    5894                     { "VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK",     VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,       false  },
    5895                     { "VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH",    VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,      false  },
    5896                     { "VMX_VMCS_CTRL_CR0_MASK",                   VMX_VMCS_CTRL_CR0_MASK,                     false  },
    5897                     { "VMX_VMCS_CTRL_CR0_READ_SHADOW",            VMX_VMCS_CTRL_CR0_READ_SHADOW,              false  },
    5898                     { "VMX_VMCS_CTRL_CR4_MASK",                   VMX_VMCS_CTRL_CR4_MASK,                     false  },
    5899                     { "VMX_VMCS_CTRL_CR4_READ_SHADOW",            VMX_VMCS_CTRL_CR4_READ_SHADOW,              false  },
    5900                     { "VMX_VMCS64_CTRL_EPTP_FULL",                VMX_VMCS64_CTRL_EPTP_FULL,                  true   },
    5901                     { "VMX_VMCS_GUEST_RIP",                       VMX_VMCS_GUEST_RIP,                         false  },
    5902                     { "VMX_VMCS_GUEST_RSP",                       VMX_VMCS_GUEST_RSP,                         false  },
    5903                     { "VMX_VMCS_GUEST_RFLAGS",                    VMX_VMCS_GUEST_RFLAGS,                      false  },
    5904                     { "VMX_VMCS16_VPID",                          VMX_VMCS16_VPID,                            true,  },
    5905                     { "VMX_VMCS_HOST_CR0",                        VMX_VMCS_HOST_CR0,                          false  },
    5906                     { "VMX_VMCS_HOST_CR3",                        VMX_VMCS_HOST_CR3,                          false  },
    5907                     { "VMX_VMCS_HOST_CR4",                        VMX_VMCS_HOST_CR4,                          false  },
    5908                     /* The order of selector fields below are fixed! */
    5909                     { "VMX_VMCS16_HOST_ES_SEL",                   VMX_VMCS16_HOST_ES_SEL,                     false  },
    5910                     { "VMX_VMCS16_HOST_CS_SEL",                   VMX_VMCS16_HOST_CS_SEL,                     false  },
    5911                     { "VMX_VMCS16_HOST_SS_SEL",                   VMX_VMCS16_HOST_SS_SEL,                     false  },
    5912                     { "VMX_VMCS16_HOST_DS_SEL",                   VMX_VMCS16_HOST_DS_SEL,                     false  },
    5913                     { "VMX_VMCS16_HOST_FS_SEL",                   VMX_VMCS16_HOST_FS_SEL,                     false  },
    5914                     { "VMX_VMCS16_HOST_GS_SEL",                   VMX_VMCS16_HOST_GS_SEL,                     false  },
    5915                     { "VMX_VMCS16_HOST_TR_SEL",                   VMX_VMCS16_HOST_TR_SEL,                     false  },
    5916                     /* End of ordered selector fields. */
    5917                     { "VMX_VMCS_HOST_TR_BASE",                    VMX_VMCS_HOST_TR_BASE,                      false  },
    5918                     { "VMX_VMCS_HOST_GDTR_BASE",                  VMX_VMCS_HOST_GDTR_BASE,                    false  },
    5919                     { "VMX_VMCS_HOST_IDTR_BASE",                  VMX_VMCS_HOST_IDTR_BASE,                    false  },
    5920                     { "VMX_VMCS32_HOST_SYSENTER_CS",              VMX_VMCS32_HOST_SYSENTER_CS,                false  },
    5921                     { "VMX_VMCS_HOST_SYSENTER_EIP",               VMX_VMCS_HOST_SYSENTER_EIP,                 false  },
    5922                     { "VMX_VMCS_HOST_SYSENTER_ESP",               VMX_VMCS_HOST_SYSENTER_ESP,                 false  },
    5923                     { "VMX_VMCS_HOST_RSP",                        VMX_VMCS_HOST_RSP,                          false  },
    5924                     { "VMX_VMCS_HOST_RIP",                        VMX_VMCS_HOST_RIP,                          false  }
    5925                 };
    5926 
    5927                 RTGDTR      HostGdtr;
    5928                 ASMGetGDTR(&HostGdtr);
    5929 
    5930                 uint32_t const cVmcsFields = RT_ELEMENTS(s_aVmcsFields);
    5931                 for (uint32_t i = 0; i < cVmcsFields; i++)
    5932                 {
    5933                     uint32_t const uVmcsField = s_aVmcsFields[i].uVmcsField;
    5934 
    5935                     bool fSupported;
    5936                     if (!s_aVmcsFields[i].fCheckSupport)
    5937                         fSupported = true;
    5938                     else
    5939                     {
    5940                         PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    5941                         switch (uVmcsField)
    5942                         {
    5943                             case VMX_VMCS64_CTRL_EPTP_FULL:  fSupported = pVM->hmr0.s.fNestedPaging;    break;
    5944                             case VMX_VMCS16_VPID:            fSupported = pVM->hmr0.s.vmx.fVpid;          break;
    5945                             case VMX_VMCS32_CTRL_PROC_EXEC2:
    5946                                 fSupported = RT_BOOL(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
    5947                                 break;
    5948                             default:
    5949                                 AssertMsgFailedReturnVoid(("Failed to provide VMCS field support for %#RX32\n", uVmcsField));
    5950                         }
    5951                     }
    5952 
    5953                     if (fSupported)
    5954                     {
    5955                         uint8_t const uWidth = RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH);
    5956                         switch (uWidth)
    5957                         {
    5958                             case VMX_VMCSFIELD_WIDTH_16BIT:
    5959                             {
    5960                                 uint16_t u16Val;
    5961                                 rc = VMX_VMCS_READ_16(pVCpu, uVmcsField, &u16Val);
    5962                                 AssertRC(rc);
    5963                                 Log4(("%-40s = %#RX16\n", s_aVmcsFields[i].pszName, u16Val));
    5964 
    5965                                 if (   uVmcsField >= VMX_VMCS16_HOST_ES_SEL
    5966                                     && uVmcsField <= VMX_VMCS16_HOST_TR_SEL)
    5967                                 {
    5968                                     if (u16Val < HostGdtr.cbGdt)
    5969                                     {
    5970                                         /* Order of selectors in s_apszSel is fixed and matches the order in s_aVmcsFields. */
    5971                                         static const char * const s_apszSel[] = { "Host ES", "Host CS", "Host SS", "Host DS",
    5972                                                                                   "Host FS", "Host GS", "Host TR" };
    5973                                         uint8_t const idxSel = RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_INDEX);
    5974                                         Assert(idxSel < RT_ELEMENTS(s_apszSel));
    5975                                         PCX86DESCHC pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u16Val & X86_SEL_MASK));
    5976                                         hmR0DumpDescriptor(pDesc, u16Val, s_apszSel[idxSel]);
    5977                                     }
    5978                                     else
    5979                                         Log4(("  Selector value exceeds GDT limit!\n"));
    5980                                 }
    5981                                 break;
    5982                             }
    5983 
    5984                             case VMX_VMCSFIELD_WIDTH_32BIT:
    5985                             {
    5986                                 uint32_t u32Val;
    5987                                 rc = VMX_VMCS_READ_32(pVCpu, uVmcsField, &u32Val);
    5988                                 AssertRC(rc);
    5989                                 Log4(("%-40s = %#RX32\n", s_aVmcsFields[i].pszName, u32Val));
    5990                                 break;
    5991                             }
    5992 
    5993                             case VMX_VMCSFIELD_WIDTH_64BIT:
    5994                             case VMX_VMCSFIELD_WIDTH_NATURAL:
    5995                             {
    5996                                 uint64_t u64Val;
    5997                                 rc = VMX_VMCS_READ_64(pVCpu, uVmcsField, &u64Val);
    5998                                 AssertRC(rc);
    5999                                 Log4(("%-40s = %#RX64\n", s_aVmcsFields[i].pszName, u64Val));
    6000                                 break;
    6001                             }
    6002                         }
    6003                     }
    6004                 }
    6005 
    6006                 Log4(("MSR_K6_EFER            = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
    6007                 Log4(("MSR_K8_CSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
    6008                 Log4(("MSR_K8_LSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
    6009                 Log4(("MSR_K6_STAR            = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
    6010                 Log4(("MSR_K8_SF_MASK         = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
    6011                 Log4(("MSR_K8_KERNEL_GS_BASE  = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
    6012 #endif /* VBOX_STRICT */
    6013             break;
    6014         }
    6015 
    6016         default:
    6017             /* Impossible */
    6018             AssertMsgFailed(("vmxHCReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
    6019             break;
    6020     }
    60214687}
    60224688
     
    60514717            && TMVirtualSyncIsCurrentDeadlineVersion(pVM, pVCpu->hmr0.s.vmx.uTscDeadlineVersion))
    60524718        {
    6053             STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionReusingDeadline);
     4719            STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatVmxPreemptionReusingDeadline);
    60544720            fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
    60554721            cTicksToDeadline = pVCpu->hmr0.s.vmx.uTscDeadline - SUPReadTsc();
     
    60584724            else
    60594725            {
    6060                 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionReusingDeadlineExpired);
     4726                STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatVmxPreemptionReusingDeadlineExpired);
    60614727                cTicksToDeadline = 0;
    60624728            }
     
    60644730        else
    60654731        {
    6066             STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionRecalcingDeadline);
     4732            STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatVmxPreemptionRecalcingDeadline);
    60674733            cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc,
    60684734                                                                &pVCpu->hmr0.s.vmx.uTscDeadline,
     
    60724738            { /* hopefully */ }
    60734739            else
    6074                 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionRecalcingDeadlineExpired);
     4740                STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatVmxPreemptionRecalcingDeadlineExpired);
    60754741        }
    60764742
     
    60994765        AssertRC(rc);
    61004766#endif
    6101         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
     4767        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatTscParavirt);
    61024768    }
    61034769
     
    61164782    }
    61174783}
     4784#endif /* !IN_RING0 */
    61184785
    61194786
     
    61914858                                        RTGCUINTPTR GCPtrFaultAddress)
    61924859{
    6193     Assert(!pVCpu->hm.s.Event.fPending);
    6194     pVCpu->hm.s.Event.fPending          = true;
    6195     pVCpu->hm.s.Event.u64IntInfo        = u32IntInfo;
    6196     pVCpu->hm.s.Event.u32ErrCode        = u32ErrCode;
    6197     pVCpu->hm.s.Event.cbInstr           = cbInstr;
    6198     pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
     4860    Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
     4861    VCPU_2_VMXSTATE(pVCpu).Event.fPending          = true;
     4862    VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo        = u32IntInfo;
     4863    VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode        = u32ErrCode;
     4864    VCPU_2_VMXSTATE(pVCpu).Event.cbInstr           = cbInstr;
     4865    VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress = GCPtrFaultAddress;
    61994866}
    62004867
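The substitutions in this hunk (pVCpu->hm.s.Event.* becoming VCPU_2_VMXSTATE(pVCpu).Event.*) rely on each component that includes this template defining the resolver macro before inclusion. A minimal sketch of that arrangement follows; the hm.s member matches the accesses replaced above, while the ring-3/NEM member name is an assumption used only for illustration.

    /* Sketch only: each including source file is assumed to define the resolver
       before pulling in the template. */

    /* In the ring-0 HM source file: state lives in pVCpu->hm.s, as in the lines above. */
    #define VCPU_2_VMXSTATE(a_pVCpu)    ((a_pVCpu)->hm.s)
    #include "VMXAllTemplate.cpp.h"

    /* In the ring-3 NEM source file (member name assumed for the sketch): */
    #define VCPU_2_VMXSTATE(a_pVCpu)    ((a_pVCpu)->nem.s)
    #include "VMXAllTemplate.cpp.h"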
     
    63485015
    63495016#ifdef VBOX_STRICT
     5017# ifdef IN_RING0
    63505018    VMMRZCallRing3Disable(pVCpu);
     5019# endif
    63515020    Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
    63525021# ifdef DEBUG_bird
     
    63555024               pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
    63565025# endif
     5026# ifdef IN_RING0
    63575027    VMMRZCallRing3Enable(pVCpu);
     5028# endif
    63585029    NOREF(uAttr);
    63595030#endif
     
    64745145
    64755146        pCtx->rip = u64Val;
    6476         EMR0HistoryUpdatePC(pVCpu, pCtx->rip, false);
     5147        EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
    64775148        pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
    64785149    }
     
    65005171
    65015172        pCtx->rflags.u64 = u64Val;
     5173#ifdef IN_RING0
    65025174        PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
    65035175        if (pVmcsInfoShared->RealMode.fRealOnV86Active)
     
    65065178            pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
    65075179        }
     5180#else
     5181        RT_NOREF(pVmcsInfo);
     5182#endif
    65085183        pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
    65095184    }
     
    65655240{
    65665241    int      rc   = VINF_SUCCESS;
     5242#ifdef IN_RING0
    65675243    PVMCC    pVM  = pVCpu->CTX_SUFF(pVM);
     5244#endif
    65685245    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    65695246    uint32_t u32Val;
     
    65785255     * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
    65795256     */
    6580 #ifdef RT_OS_WINDOWS
     5257# ifdef RT_OS_WINDOWS
    65815258    if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
    65825259        return VERR_HM_IPE_1;
    6583 #endif
    6584 
    6585     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
    6586 
     5260# endif
     5261
     5262    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatImportGuestState, x);
     5263
     5264#ifdef IN_RING0
    65875265    /*
    65885266     * We disable interrupts to make the updating of the state and in particular
     
    65905268     */
    65915269    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
     5270#endif
    65925271
    65935272    fWhat &= pCtx->fExtrn;
     
    66215300                    if (fRealOnV86Active)
    66225301                        pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
    6623                     EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
     5302                    EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
    66245303                }
    66255304                if (fWhat & CPUMCTX_EXTRN_SS)
     
    66785357                if (fWhat & CPUMCTX_EXTRN_TR)
    66795358                {
     5359#ifdef IN_RING0
    66805360                    /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
    66815361                       don't need to import that one. */
    66825362                    if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
     5363#endif
    66835364                        vmxHCImportGuestTr(pVCpu);
    66845365                }
     
    66875368            if (fWhat & CPUMCTX_EXTRN_DR7)
    66885369            {
     5370#ifdef IN_RING0
    66895371                if (!pVCpu->hmr0.s.fUsingHyperDR7)
     5372#endif
    66905373                {
    66915374                    rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
     
    67025385            }
    67035386
     5387#ifdef IN_RING0
    67045388            if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
    67055389            {
     
    67615445                            }
    67625446                            pCtx->fExtrn = 0;
    6763                             pVCpu->hm.s.u32HMError = pMsrs->u32Msr;
     5447                            VCPU_2_VMXSTATE(pVCpu).u32HMError = pMsrs->u32Msr;
    67645448                            ASMSetFlags(fEFlags);
    67655449                            AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
     
    67695453                }
    67705454            }
     5455#endif
    67715456
    67725457            if (fWhat & CPUMCTX_EXTRN_CR_MASK)
     
    68015486                    }
    68025487#endif
     5488#ifdef IN_RING0
    68035489                    VMMRZCallRing3Disable(pVCpu);   /* May call into PGM which has Log statements. */
     5490#endif
    68045491                    CPUMSetGuestCR0(pVCpu, u64Cr0);
     5492#ifdef IN_RING0
    68055493                    VMMRZCallRing3Enable(pVCpu);
     5494#endif
    68065495                }
    68075496
     
    68415530                {
    68425531                    /* CR0.PG bit changes are always intercepted, so it's up to date. */
     5532#ifdef IN_RING0 /* With R3 we always have unrestricted guest support. */
    68435533                    if (   pVM->hmr0.s.vmx.fUnrestrictedGuest
    68445534                        || (   pVM->hmr0.s.fNestedPaging
    68455535                            && CPUMIsGuestPagingEnabledEx(pCtx)))
     5536#endif
    68465537                    {
    68475538                        uint64_t u64Cr3;
     
    69055596        }
    69065597    }
     5598#ifdef IN_RING0
    69075599    else
    69085600        AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
     
    69125604     */
    69135605    ASMSetFlags(fEFlags);
    6914 
    6915     STAM_PROFILE_ADV_STOP(& pVCpu->hm.s.StatImportGuestState, x);
     5606#endif
     5607
     5608    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatImportGuestState, x);
    69165609
    69175610    if (RT_SUCCESS(rc))
     
    69375630     */
    69385631    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
    6939         && VMMRZCallRing3IsEnabled(pVCpu))
     5632#ifdef IN_RING0
     5633        && VMMRZCallRing3IsEnabled(pVCpu)
     5634#endif
     5635        )
    69405636    {
    69415637        Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
     
    69655661 *
    69665662 * @param   pVCpu           The cross context virtual CPU structure.
    6967  * @param   pVmxTransient   The VMX-transient structure.
     5663 * @param   fIsNestedGuest  Flag whether this is for a pending nested-guest event.
    69685664 * @param   fStepping       Whether we are single-stepping the guest using the
    69695665 *                          hypervisor debugger.
     
    69725668 *          is no longer in VMX non-root mode.
    69735669 */
    6974 static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, bool fStepping)
    6975 {
     5670static VBOXSTRICTRC vmxHCCheckForceFlags(PVMCPUCC pVCpu, bool fIsNestedGuest, bool fStepping)
     5671{
     5672#ifdef IN_RING0
    69765673    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     5674#endif
    69775675
    69785676    /*
     
    70125710        || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    70135711    {
    7014         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
     5712        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchHmToR3FF);
    70155713        int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
    70165714        Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
     
    70225720        || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
    70235721    {
    7024         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchVmReq);
     5722        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchVmReq);
    70255723        Log4Func(("Pending VM request forcing us back to ring-3\n"));
    70265724        return VINF_EM_PENDING_REQUEST;
     
    70305728    if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
    70315729    {
    7032         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPgmPoolFlush);
     5730        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchPgmPoolFlush);
    70335731        Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
    70345732        return VINF_PGM_POOL_FLUSH_PENDING;
     
    70385736    if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
    70395737    {
    7040         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchDma);
     5738        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchDma);
    70415739        Log4Func(("Pending DMA request forcing us back to ring-3\n"));
    70425740        return VINF_EM_RAW_TO_R3;
     
    70515749     * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
    70525750     */
    7053     if (pVmxTransient->fIsNestedGuest)
     5751    if (fIsNestedGuest)
    70545752    {
    70555753        /* Pending nested-guest APIC-write. */
     
    70815779    }
    70825780#else
    7083     NOREF(pVmxTransient);
     5781    NOREF(fIsNestedGuest);
    70845782#endif
    70855783
     
    70975795{
    70985796    Assert(TRPMHasTrap(pVCpu));
    7099     Assert(!pVCpu->hm.s.Event.fPending);
     5797    Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    71005798
    71015799    uint8_t     uVector;
     
    71295827static void vmxHCPendingEventToTrpmTrap(PVMCPUCC pVCpu)
    71305828{
    7131     Assert(pVCpu->hm.s.Event.fPending);
     5829    Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    71325830
    71335831    /* If a trap was already pending, we did something wrong! */
    71345832    Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
    71355833
    7136     uint32_t const  u32IntInfo  = pVCpu->hm.s.Event.u64IntInfo;
     5834    uint32_t const  u32IntInfo  = VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo;
    71375835    uint32_t const  uVector     = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
    71385836    TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
     
    71445842
    71455843    if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
    7146         TRPMSetErrorCode(pVCpu, pVCpu->hm.s.Event.u32ErrCode);
     5844        TRPMSetErrorCode(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode);
    71475845
    71485846    if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
    7149         TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
     5847        TRPMSetFaultAddress(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.GCPtrFaultAddress);
    71505848    else
    71515849    {
     
    71645862                              || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
    71655863                          ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
    7166                 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
     5864                TRPMSetInstrLength(pVCpu, VCPU_2_VMXSTATE(pVCpu).Event.cbInstr);
    71675865                break;
    71685866            }
     
    71715869
    71725870    /* We're now done converting the pending event. */
    7173     pVCpu->hm.s.Event.fPending = false;
     5871    VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
    71745872}
    71755873
     
    71795877 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
    71805878 *
     5879 * @param   pVCpu       The cross context virtual CPU structure.
    71815880 * @param   pVmcsInfo   The VMCS info. object.
    71825881 */
    7183 static void vmxHCSetIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
     5882static void vmxHCSetIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    71845883{
    71855884    if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
     
    71985897 * Clears the interrupt-window exiting control in the VMCS.
    71995898 *
     5899 * @param   pVCpu       The cross context virtual CPU structure.
    72005900 * @param   pVmcsInfo   The VMCS info. object.
    72015901 */
    7202 DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
     5902DECLINLINE(void) vmxHCClearIntWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    72035903{
    72045904    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
     
    72155915 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
    72165916 *
     5917 * @param   pVCpu       The cross context virtual CPU structure.
    72175918 * @param   pVmcsInfo   The VMCS info. object.
    72185919 */
    7219 static void vmxHCSetNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
     5920static void vmxHCSetNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    72205921{
    72215922    if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
     
    72355936 * Clears the NMI-window exiting control in the VMCS.
    72365937 *
     5938 * @param   pVCpu       The cross context virtual CPU structure.
    72375939 * @param   pVmcsInfo   The VMCS info. object.
    72385940 */
    7239 DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
     5941DECLINLINE(void) vmxHCClearNmiWindowExitVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    72405942{
    72415943    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
     
    72485950
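The bodies of the four window-exiting helpers above are elided by the hunk context; conceptually they toggle a bit in the primary processor-based VM-execution controls and write the result back to the VMCS. A rough sketch of the set case is shown below, reusing the capability guard that is visible in the hunk; the VMX_VMCS_WRITE_32 call and the VMX_VMCS32_CTRL_PROC_EXEC field name are assumptions here, not the literal body.

    /* Sketch of the set pattern behind vmxHCSetIntWindowExitVmcs (assumed body). */
    static void vmxHCSetIntWindowExitVmcsSketch(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    {
        if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)   /* capability check as in the hunk */
        {
            if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
            {
                /* Request a VM-exit as soon as the guest can accept interrupts. */
                pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
                int rc = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
                AssertRC(rc);
            }
        }
    }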
    72495951
     5952#ifdef IN_RING0
    72505953/**
    72515954 * Does the necessary state syncing before returning to ring-3 for any reason
     
    73196022    pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
    73206023
    7321     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
    7322     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
    7323     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
    7324     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
    7325     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
    7326     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
    7327     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
    7328     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
    7329     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitVmentry);
    7330     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
     6024    STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatEntry);
     6025    STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatImportGuestState);
     6026    STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExportGuestState);
     6027    STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatPreExit);
     6028    STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitHandling);
     6029    STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitIO);
     6030    STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitMovCRx);
     6031    STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitXcptNmi);
     6032    STAM_PROFILE_ADV_SET_STOPPED(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry);
     6033    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchLongJmpToR3);
    73316034
    73326035    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
     
    74466149    if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
    74476150    {
    7448         VMXGetCurrentVmcs(&pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs);
    7449         pVCpu->hm.s.vmx.LastError.u32VmcsRev   = *(uint32_t *)pVmcsInfo->pvVmcs;
    7450         pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
     6151        VMXGetCurrentVmcs(&VCPU_2_VMXSTATE(pVCpu).vmx.LastError.HCPhysCurrentVmcs);
     6152        VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32VmcsRev   = *(uint32_t *)pVmcsInfo->pvVmcs;
     6153        VCPU_2_VMXSTATE(pVCpu).vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
    74516154        /* LastError.idCurrentCpu was updated in vmxHCPreRunGuestCommitted(). */
    74526155    }
     
    74636166     * the event from there (hence place it back in TRPM).
    74646167     */
    7465     if (pVCpu->hm.s.Event.fPending)
     6168    if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
    74666169    {
    74676170        vmxHCPendingEventToTrpmTrap(pVCpu);
    7468         Assert(!pVCpu->hm.s.Event.fPending);
     6171        Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    74696172
    74706173        /* Clear the events from the VMCS. */
     
    75076210    if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    75086211    {
    7509         vmxHCClearIntWindowExitVmcs(pVmcsInfo);
    7510         vmxHCClearNmiWindowExitVmcs(pVmcsInfo);
     6212        vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
     6213        vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
    75116214    }
    75126215
     
    75216224    int rc = vmxHCLeaveSession(pVCpu);
    75226225    AssertRCReturn(rc, rc);
    7523     STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
     6226    STAM_COUNTER_DEC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchLongJmpToR3);
    75246227
    75256228    /* Thread-context hooks are unregistered at this point!!! */
     
    75416244
    75426245    /* Update the exit-to-ring 3 reason. */
    7543     pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
     6246    VCPU_2_VMXSTATE(pVCpu).rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
    75446247
    75456248    /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
     
    75486251    {
    75496252        Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL));
    7550         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    7551     }
    7552 
    7553     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
     6253        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
     6254    }
     6255
     6256    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchExitToR3);
    75546257    VMMRZCallRing3Enable(pVCpu);
    75556258    return rc;
     
    75816284    return rc;
    75826285}
    7583 
     6286#endif /* !IN_RING0 */
    75846287
    75856288/**
     
    76016304 *                          VM-entry).
    76026305 */
    7603 static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PCHMEVENT pEvent, bool fStepping,
     6306static VBOXSTRICTRC vmxHCInjectEventVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, PCHMEVENT pEvent, bool fStepping,
    76046307                                           uint32_t *pfIntrState)
    76056308{
     
    76566359        Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI          || uVector == X86_XCPT_NMI);
    76576360        Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
    7658         STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedXcpts[uVector]);
     6361        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatInjectedXcpts[uVector]);
    76596362    }
    76606363    else
    7661         STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
     6364        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
    76626365
    76636366    /*
     
    76716374    if (CPUMIsGuestInRealModeEx(pCtx))     /* CR0.PE bit changes are always intercepted, so it's up to date. */
    76726375    {
     6376#ifdef IN_RING0
    76736377        if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
     6378#endif
    76746379        {
    76756380            /*
     
    76816386            u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
    76826387        }
     6388#ifdef IN_RING0
    76836389        else
    76846390        {
     
    76896395
    76906396            /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
    7691             PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    76926397            int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
    76936398                                                              | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
     
    77136418                    RT_ZERO(EventXcptDf);
    77146419                    EventXcptDf.u64IntInfo = uXcptDfInfo;
    7715                     return vmxHCInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptDf, fStepping, pfIntrState);
     6420                    return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptDf, fStepping, pfIntrState);
    77166421                }
    77176422
     
    77296434                RT_ZERO(EventXcptGp);
    77306435                EventXcptGp.u64IntInfo = uXcptGpInfo;
    7731                 return vmxHCInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptGp, fStepping, pfIntrState);
     6436                return vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &EventXcptGp, fStepping, pfIntrState);
    77326437            }
    77336438
     
    77716476                    pCtx->cr2 = GCPtrFault;
    77726477
    7773                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS  | HM_CHANGED_GUEST_CR2
     6478                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CS  | HM_CHANGED_GUEST_CR2
    77746479                                                         | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
    77756480                                                         | HM_CHANGED_GUEST_RSP);
     
    77946499                 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
    77956500                 */
    7796                 pVCpu->hm.s.Event.fPending = false;
     6501                VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;
    77976502
    77986503                /*
     
    78006505                 * we should set fInterceptEvents here.
    78016506                 */
    7802                 Assert(!pVmxTransient->fIsNestedGuest);
     6507                Assert(!fIsNestedGuest);
    78036508
    78046509                /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
     
    78106515            return rcStrict;
    78116516        }
     6517#else
     6518        RT_NOREF(pVmcsInfo);
     6519#endif
    78126520    }
    78136521
     
    78486556 * @returns Strict VBox status code (i.e. informational status codes too).
    78496557 * @param   pVCpu           The cross context virtual CPU structure.
    7850  * @param   pVmxTransient   The VMX-transient structure.
     6558 * @param   pVmcsInfo       The VMCS information structure.
     6559 * @param   fIsNestedGuest  Flag whether the evaluation happens for a nested guest.
    78516560 * @param   pfIntrState     Where to store the VT-x guest-interruptibility state.
    78526561 */
    7853 static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t *pfIntrState)
     6562static VBOXSTRICTRC vmxHCEvaluatePendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t *pfIntrState)
    78546563{
    78556564    Assert(pfIntrState);
     
    78666575     * An event that's already pending has already performed all necessary checks.
    78676576     */
    7868     PVMXVMCSINFO pVmcsInfo      = pVmxTransient->pVmcsInfo;
    7869     bool const   fIsNestedGuest = pVmxTransient->fIsNestedGuest;
    7870     if (   !pVCpu->hm.s.Event.fPending
     6577    if (   !VCPU_2_VMXSTATE(pVCpu).Event.fPending
    78716578        && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    78726579    {
     
    78776584         * NMIs take priority over external interrupts.
    78786585         */
     6586#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    78796587        PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     6588#endif
    78806589        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
    78816590        {
     
    79026611            }
    79036612            else if (!fIsNestedGuest)
    7904                 vmxHCSetNmiWindowExitVmcs(pVmcsInfo);
     6613                vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
    79056614        }
    79066615
     
    79116620         */
    79126621        if (    VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    7913             && !pVCpu->hm.s.fSingleInstruction)
     6622            && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    79146623        {
    79156624            Assert(!DBGFIsStepping(pVCpu));
     
    79556664                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
    79566665                {
    7957                     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
     6666                    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchTprMaskedIrq);
    79586667
    79596668                    if (   !fIsNestedGuest
    79606669                        && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
    7961                         vmxHCApicSetTprThreshold(pVmcsInfo, u8Interrupt >> 4);
     6670                        vmxHCApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
    79626671                    /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
    79636672
     
    79696678                }
    79706679                else
    7971                     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
     6680                    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchGuestIrq);
    79726681
    79736682                /* We've injected the interrupt or taken necessary action, bail. */
     
    79756684            }
    79766685            if (!fIsNestedGuest)
    7977                 vmxHCSetIntWindowExitVmcs(pVmcsInfo);
     6686                vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
    79786687        }
    79796688    }
     
    79866695         */
    79876696        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
    7988             vmxHCSetNmiWindowExitVmcs(pVmcsInfo);
     6697            vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
    79896698        else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    7990                  && !pVCpu->hm.s.fSingleInstruction)
    7991             vmxHCSetIntWindowExitVmcs(pVmcsInfo);
     6699                 && !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
     6700            vmxHCSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
    79926701    }
    79936702    /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
     
    80036712 * @returns Strict VBox status code (i.e. informational status codes too).
    80046713 * @param   pVCpu           The cross context virtual CPU structure.
    8005  * @param   pVmxTransient   The VMX-transient structure.
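          * @param   pVmcsInfo       The VMCS info. object.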
     6714 * @param   fIsNestedGuest  Flag whether the event injection happens for a nested guest.
    80066715 * @param   fIntrState      The VT-x guest-interruptibility state.
    80076716 * @param   fStepping       Whether we are single-stepping the guest using the
     
    80106719 *                          directly.
    80116720 */
    8012 static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t fIntrState, bool fStepping)
     6721static VBOXSTRICTRC vmxHCInjectPendingEvent(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNestedGuest, uint32_t fIntrState, bool fStepping)
    80136722{
    80146723    HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
     6724#ifdef IN_RING0
    80156725    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     6726#endif
    80166727
    80176728#ifdef VBOX_STRICT
     
    80356746
    80366747    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    8037     if (pVCpu->hm.s.Event.fPending)
     6748    if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
    80386749    {
    80396750        /*
     
    80446755         * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
    80456756         */
    8046         uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
     6757        uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo);
    80476758#ifdef VBOX_STRICT
    80486759        if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
     
    80596770        }
    80606771#endif
    8061         Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
     6772        Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
    80626773              uIntType));
    80636774
     
    80686779         * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
    80696780         */
    8070         rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmxTransient, &pVCpu->hm.s.Event, fStepping, &fIntrState);
     6781        rcStrict = vmxHCInjectEventVmcs(pVCpu, pVmcsInfo, fIsNestedGuest, &VCPU_2_VMXSTATE(pVCpu).Event, fStepping, &fIntrState);
    80716782        AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
    80726783
    80736784        if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
    8074             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
     6785            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectInterrupt);
    80756786        else
    8076             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
     6787            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectXcpt);
    80776788    }
    80786789
     
    80826793     */
    80836794    if (   (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
    8084         && !pVmxTransient->fIsNestedGuest)
     6795        && !fIsNestedGuest)
    80856796    {
    80866797        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
    80876798
    8088         if (!pVCpu->hm.s.fSingleInstruction)
     6799        if (!VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    80896800        {
    80906801            /*
     
    81056816             * we use MTF, so just make sure it's called before executing guest-code.
    81066817             */
    8107             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
     6818            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
    81086819        }
    81096820    }
     
    81306841}
    81316842
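Both event helpers now take the VMCS info object and the nested-guest flag in place of the transient structure. The driving sequence from the pre-run path would then look roughly as sketched below (error handling abridged; compare the ring-0 caller deleted later in this file):

    /* Sketch of the evaluate/inject sequence with the new prototypes. */
    uint32_t fIntrState = 0;
    VBOXSTRICTRC rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest, &fIntrState);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest, fIntrState, fStepping);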
    8132 
     6843#ifdef IN_RING0
    81336844/**
    81346845 * Exports the guest state into the VMCS guest-state area.
     
    81586869    LogFlowFunc(("pVCpu=%p\n", pVCpu));
    81596870
    8160     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
     6871    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExportGuestState, x);
    81616872
    81626873    /*
     
    82096920
    82106921    /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
    8211     ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(  (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
     6922    ASMAtomicUoAndU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, ~(  (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
    82126923                                                  |  HM_CHANGED_GUEST_CR2
    82136924                                                  | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
     
    82226933                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
    82236934
    8224     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
     6935    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExportGuestState, x);
    82256936    return rc;
    82266937}
     
    82406951    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    82416952
    8242     if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
     6953    if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
    82436954    {
    82446955        int rc = vmxHCExportSharedDebugState(pVCpu, pVmxTransient);
    82456956        AssertRC(rc);
    8246         pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
     6957        VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
    82476958
    82486959        /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
    8249         if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
     6960        if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
    82506961            vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    82516962    }
    82526963
    8253     if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
     6964    if (VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
    82546965    {
    82556966        vmxHCLazyLoadGuestMsrs(pVCpu);
    8256         pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
    8257     }
    8258 
    8259     AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
    8260               ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
     6967        VCPU_2_VMXSTATE(pVCpu).fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
     6968    }
     6969
     6970    AssertMsg(!(VCPU_2_VMXSTATE(pVCpu).fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
     6971              ("fCtxChanged=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).fCtxChanged));
    82616972}
    82626973
     
    82816992
    82826993#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
    8283     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     6994    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    82846995#endif
    82856996
     
    82917002    uint64_t const fCtxMask     = HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE;
    82927003    uint64_t const fMinimalMask = HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT;
    8293     uint64_t const fCtxChanged  = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
     7004    uint64_t const fCtxChanged  = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged);
    82947005
    82957006    /* If only RIP/RSP/RFLAGS/HWVIRT changed, export only those (quicker, happens more often).*/
     
    83017012        vmxHCExportGuestRflags(pVCpu, pVmxTransient);
    83027013        rcStrict = vmxHCExportGuestHwvirtState(pVCpu, pVmxTransient);
    8303         STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
     7014        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExportMinimal);
    83047015    }
    83057016    /* If anything else also changed, go through the full export routine and export as required. */
     
    83167027            return rcStrict;
    83177028        }
    8318         STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
     7029        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExportFull);
    83197030    }
    83207031    /* Nothing changed, nothing to load here. */
     
    83247035#ifdef VBOX_STRICT
    83257036    /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
    8326     uint64_t const fCtxChangedCur = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
     7037    uint64_t const fCtxChangedCur = ASMAtomicUoReadU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged);
    83277038    AssertMsg(!(fCtxChangedCur & fCtxMask), ("fCtxChangedCur=%#RX64\n", fCtxChangedCur));
    83287039#endif
    83297040    return rcStrict;
    83307041}
     7042#endif /* !IN_RING0 */
    83317043
    83327044
     
    83527064                                            } while (0)
    83537065
    8354     PVMCC    pVM    = pVCpu->CTX_SUFF(pVM);
    83557066    PCPUMCTX pCtx   = &pVCpu->cpum.GstCtx;
    83567067    uint32_t uError = VMX_IGS_ERROR;
    83577068    uint32_t u32IntrState = 0;
     7069#ifdef IN_RING0
     7070    PVMCC    pVM    = pVCpu->CTX_SUFF(pVM);
    83587071    bool const fUnrestrictedGuest = pVM->hmr0.s.vmx.fUnrestrictedGuest;
     7072#else
     7073    bool const fUnrestrictedGuest = true;
     7074#endif
    83597075    do
    83607076    {
     
    85237239        }
    85247240
     7241#ifdef IN_RING0
    85257242        /*
    85267243         * EFER MSR.
     
    85437260                              VMX_IGS_EFER_LMA_LME_MISMATCH);
    85447261        }
     7262#endif
    85457263
    85467264        /*
     
    88447562        }
    88457563
     7564#ifdef IN_RING0
    88467565        /* VMCS link pointer. */
    88477566        rc = VMX_VMCS_READ_64(pVCpu, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
     
    88847603            HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
    88857604        }
     7605#endif
    88867606
    88877607        /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
     
    88907610    } while (0);
    88917611
    8892     pVCpu->hm.s.u32HMError = uError;
    8893     pVCpu->hm.s.vmx.LastError.u32GuestIntrState = u32IntrState;
     7612    VCPU_2_VMXSTATE(pVCpu).u32HMError = uError;
     7613    VCPU_2_VMXSTATE(pVCpu).vmx.LastError.u32GuestIntrState = u32IntrState;
    88947614    return uError;
    88957615
     
    88997619
    89007620
     7621#ifdef IN_RING0
    89017622/**
    89027623 * Map the APIC-access page for virtualizing APIC accesses.
     
    89297650
    89307651    /* Update the per-VCPU cache of the APIC base MSR. */
    8931     pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase;
     7652    VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase = u64MsrApicBase;
    89327653    return VINF_SUCCESS;
    89337654}
     
    89907711    if (fDispatched)
    89917712    {
    8992         STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
     7713        STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitHostNmiInGC);
    89937714        return VINF_SUCCESS;
    89947715    }
     
    89997720     * (to the target CPU) without dispatching the host NMI above.
    90007721     */
    9001     STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGCIpi);
     7722    STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitHostNmiInGCIpi);
    90027723    return RTMpOnSpecific(idCpu, &hmR0DispatchHostNmi, NULL /* pvUser1 */,  NULL /* pvUser2 */);
    90037724}
     
    93418062     */
    93428063    if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    9343         pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = true;
     8064        VCPU_2_VMXSTATE(pVCpu).vmx.fSwitchedNstGstFlushTlb = true;
    93448065
    93458066    /*
     
    93588079
    93598080/**
    9360  * Does the preparations before executing guest code in VT-x.
    9361  *
    9362  * This may cause longjmps to ring-3 and may even result in rescheduling to the
    9363  * recompiler/IEM. We must be cautious what we do here regarding committing
    9364  * guest-state information into the VMCS assuming we assuredly execute the
    9365  * guest in VT-x mode.
    9366  *
    9367  * If we fall back to the recompiler/IEM after updating the VMCS and clearing
    9368  * the common-state (TRPM/forceflags), we must undo those changes so that the
    9369  * recompiler/IEM can (and should) use them when it resumes guest execution.
    9370  * Otherwise such operations must be done when we can no longer exit to ring-3.
    9371  *
    9372  * @returns Strict VBox status code (i.e. informational status codes too).
    9373  * @retval  VINF_SUCCESS if we can proceed with running the guest, interrupts
    9374  *          have been disabled.
    9375  * @retval  VINF_VMX_VMEXIT if a nested-guest VM-exit occurs (e.g., while evaluating
    9376  *          pending events).
    9377  * @retval  VINF_EM_RESET if a triple-fault occurs while injecting a
    9378  *          double-fault into the guest.
    9379  * @retval  VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
    9380  *          dispatched directly.
    9381  * @retval  VINF_* scheduling changes, we have to go back to ring-3.
    9382  *
    9383  * @param   pVCpu           The cross context virtual CPU structure.
    9384  * @param   pVmxTransient   The VMX-transient structure.
    9385  * @param   fStepping       Whether we are single-stepping the guest in the
    9386  *                          hypervisor debugger. Makes us ignore some of the reasons
    9387  *                          for returning to ring-3, and return VINF_EM_DBG_STEPPED
    9388  *                          if event dispatching took place.
    9389  */
    9390 static VBOXSTRICTRC vmxHCPreRunGuest(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping)
    9391 {
    9392     Assert(VMMRZCallRing3IsEnabled(pVCpu));
    9393 
    9394     Log4Func(("fIsNested=%RTbool fStepping=%RTbool\n", pVmxTransient->fIsNestedGuest, fStepping));
    9395 
    9396 #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
    9397     if (pVmxTransient->fIsNestedGuest)
    9398     {
    9399         RT_NOREF2(pVCpu, fStepping);
    9400         Log2Func(("Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
    9401         return VINF_EM_RESCHEDULE_REM;
    9402     }
    9403 #endif
    9404 
    9405     /*
    9406      * Check and process force flag actions, some of which might require us to go back to ring-3.
    9407      */
    9408     VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, pVmxTransient, fStepping);
    9409     if (rcStrict == VINF_SUCCESS)
    9410     {
    9411         /* FFs don't get set all the time. */
    9412 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    9413         if (   pVmxTransient->fIsNestedGuest
    9414             && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    9415         {
    9416             STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
    9417             return VINF_VMX_VMEXIT;
    9418         }
    9419 #endif
    9420     }
    9421     else
    9422         return rcStrict;
    9423 
    9424     /*
    9425      * Virtualize memory-mapped accesses to the physical APIC (may take locks).
    9426      */
    9427     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    9428     if (   !pVCpu->hm.s.vmx.u64GstMsrApicBase
    9429         && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
    9430         && PDMHasApic(pVM))
    9431     {
    9432         int rc = vmxHCMapHCApicAccessPage(pVCpu);
    9433         AssertRCReturn(rc, rc);
    9434     }
    9435 
    9436 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    9437     /*
    9438      * Merge guest VMCS controls with the nested-guest VMCS controls.
    9439      *
    9440      * Even if we have not executed the guest prior to this (e.g. when resuming from a
    9441      * saved state), we should be okay with merging controls as we initialize the
    9442      * guest VMCS controls as part of VM setup phase.
    9443      */
    9444     if (   pVmxTransient->fIsNestedGuest
    9445         && !pVCpu->hm.s.vmx.fMergedNstGstCtls)
    9446     {
    9447         int rc = vmxHCMergeVmcsNested(pVCpu);
    9448         AssertRCReturn(rc, rc);
    9449         pVCpu->hm.s.vmx.fMergedNstGstCtls = true;
    9450     }
    9451 #endif
    9452 
    9453     /*
    9454      * Evaluate events to be injected into the guest.
    9455      *
    9456      * Events in TRPM can be injected without inspecting the guest state.
    9457      * If any new events (interrupts/NMI) are pending currently, we try to set up the
    9458      * guest to cause a VM-exit the next time they are ready to receive the event.
    9459      */
    9460     if (TRPMHasTrap(pVCpu))
    9461         vmxHCTrpmTrapToPendingEvent(pVCpu);
    9462 
    9463     uint32_t fIntrState;
    9464     rcStrict = vmxHCEvaluatePendingEvent(pVCpu, pVmxTransient, &fIntrState);
    9465 
    9466 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    9467     /*
    9468      * While evaluating pending events if something failed (unlikely) or if we were
    9469      * preparing to run a nested-guest but performed a nested-guest VM-exit, we should bail.
    9470      */
    9471     if (rcStrict != VINF_SUCCESS)
    9472         return rcStrict;
    9473     if (   pVmxTransient->fIsNestedGuest
    9474         && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    9475     {
    9476         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
    9477         return VINF_VMX_VMEXIT;
    9478     }
    9479 #else
    9480     Assert(rcStrict == VINF_SUCCESS);
    9481 #endif
    9482 
    9483     /*
    9484      * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
    9485      * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
    9486      * also result in triple-faulting the VM.
    9487      *
    9488      * With nested-guests, the above does not apply since unrestricted guest execution is a
    9489      * requirement. Regardless, we do this here to avoid duplicating code elsewhere.
    9490      */
    9491     rcStrict = vmxHCInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping);
    9492     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    9493     { /* likely */ }
    9494     else
    9495     {
    9496         AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
    9497                   ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    9498         return rcStrict;
    9499     }
    9500 
    9501     /*
    9502      * A longjump might result in importing CR3 even for VM-exits that don't necessarily
    9503      * import CR3 themselves. We will need to update them here, as even as late as the above
    9504      * vmxHCInjectPendingEvent() call may lazily import guest-CPU state on demand causing
    9505      * the below force flags to be set.
    9506      */
    9507     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    9508     {
    9509         Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
    9510         int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
    9511         AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
    9512                         ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
    9513         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    9514     }
    9515 
    9516 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    9517     /* Paranoia. */
    9518     Assert(!pVmxTransient->fIsNestedGuest || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
    9519 #endif
    9520 
    9521     /*
    9522      * No longjmps to ring-3 from this point on!!!
    9523      * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
    9524      * This also disables flushing of the R0-logger instance (if any).
    9525      */
    9526     VMMRZCallRing3Disable(pVCpu);
    9527 
    9528     /*
    9529      * Export the guest state bits.
    9530      *
    9531      * We cannot perform longjmps while loading the guest state because we do not preserve the
    9532      * host/guest state (although the VMCS will be preserved) across longjmps which can cause
    9533      * CPU migration.
    9534      *
    9535      * If we are injecting events to a real-on-v86 mode guest, we would have updated RIP and some segment
    9536      * registers. Hence, exporting of the guest state needs to be done -after- injection of events.
    9537      */
    9538     rcStrict = vmxHCExportGuestStateOptimal(pVCpu, pVmxTransient);
    9539     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    9540     { /* likely */ }
    9541     else
    9542     {
    9543         VMMRZCallRing3Enable(pVCpu);
    9544         return rcStrict;
    9545     }
    9546 
    9547     /*
    9548      * We disable interrupts so that we don't miss any interrupts that would flag preemption
    9549      * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
    9550      * preemption disabled for a while.  Since this is purely to aid the
    9551      * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
    9552      * disable interrupt on NT.
    9553      *
    9554      * We need to check for force-flags that could've possible been altered since we last
    9555      * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
    9556      * see @bugref{6398}).
    9557      *
    9558      * We also check a couple of other force-flags as a last opportunity to get the EMT back
    9559      * to ring-3 before executing guest code.
    9560      */
    9561     pVmxTransient->fEFlags = ASMIntDisableFlags();
    9562 
    9563     if (   (   !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
    9564             && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    9565         || (   fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
    9566             && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
    9567     {
    9568         if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
    9569         {
    9570 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    9571             /*
    9572              * If we are executing a nested-guest make sure that we should intercept subsequent
    9573              * events. The one we are injecting might be part of VM-entry. This is mainly to keep
    9574              * the VM-exit instruction emulation happy.
    9575              */
    9576             if (pVmxTransient->fIsNestedGuest)
    9577                 CPUMSetGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx, true);
    9578 #endif
    9579 
    9580             /*
    9581              * We've injected any pending events. This is really the point of no return (to ring-3).
    9582              *
    9583              * Note! The caller expects to continue with interrupts & longjmps disabled on successful
    9584              *       returns from this function, so do -not- enable them here.
    9585              */
    9586             pVCpu->hm.s.Event.fPending = false;
    9587             return VINF_SUCCESS;
    9588         }
    9589 
    9590         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
    9591         rcStrict = VINF_EM_RAW_INTERRUPT;
    9592     }
    9593     else
    9594     {
    9595         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
    9596         rcStrict = VINF_EM_RAW_TO_R3;
    9597     }
    9598 
    9599     ASMSetFlags(pVmxTransient->fEFlags);
    9600     VMMRZCallRing3Enable(pVCpu);
    9601 
    9602     return rcStrict;
    9603 }
    9604 
    9605 
    9606 /**
    9607  * Final preparations before executing guest code using hardware-assisted VMX.
    9608  *
    9609  * We can no longer get preempted to a different host CPU and there are no returns
    9610  * to ring-3. We ignore any errors that may happen from this point (e.g. VMWRITE
    9611  * failures), this function is not intended to fail sans unrecoverable hardware
    9612  * errors.
    9613  *
    9614  * @param   pVCpu           The cross context virtual CPU structure.
    9615  * @param   pVmxTransient   The VMX-transient structure.
    9616  *
    9617  * @remarks Called with preemption disabled.
    9618  * @remarks No-long-jump zone!!!
    9619  */
    9620 static void vmxHCPreRunGuestCommitted(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    9621 {
    9622     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    9623     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    9624     Assert(!pVCpu->hm.s.Event.fPending);
    9625 
    9626     /*
    9627      * Indicate start of guest execution and where poking EMT out of guest-context is recognized.
    9628      */
    9629     VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    9630     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    9631 
    9632     PVMCC         pVM          = pVCpu->CTX_SUFF(pVM);
    9633     PVMXVMCSINFO  pVmcsInfo    = pVmxTransient->pVmcsInfo;
    9634     PHMPHYSCPU    pHostCpu     = hmR0GetCurrentCpu();
    9635     RTCPUID const idCurrentCpu = pHostCpu->idCpu;
    9636 
    9637     if (!CPUMIsGuestFPUStateActive(pVCpu))
    9638     {
    9639         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
    9640         if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)
    9641             pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT;
    9642         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
    9643         STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
    9644     }
    9645 
    9646     /*
    9647      * Re-export the host state bits as we may've been preempted (only happens when
    9648      * thread-context hooks are used or when the VM start function changes) or if
    9649      * the host CR0 is modified while loading the guest FPU state above.
    9650      *
    9651      * The 64-on-32 switcher saves the (64-bit) host state into the VMCS and if we
    9652      * changed the switcher back to 32-bit, we *must* save the 32-bit host state here,
    9653      * see @bugref{8432}.
    9654      *
    9655      * This may also happen when switching to/from a nested-guest VMCS without leaving
    9656      * ring-0.
    9657      */
    9658     if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
    9659     {
    9660         vmxHCExportHostState(pVCpu);
    9661         STAM_COUNTER_INC(&pVCpu->hm.s.StatExportHostState);
    9662     }
    9663     Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT));
    9664 
    9665     /*
    9666      * Export the state shared between host and guest (FPU, debug, lazy MSRs).
    9667      */
    9668     if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
    9669         vmxHCExportSharedState(pVCpu, pVmxTransient);
    9670     AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
    9671 
    9672     /*
    9673      * Store status of the shared guest/host debug state at the time of VM-entry.
    9674      */
    9675     pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
    9676     pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
    9677 
    9678     /*
    9679      * Always cache the TPR-shadow if the virtual-APIC page exists, thereby skipping
    9680      * more than one conditional check. The post-run side of our code shall determine
    9681  * if it needs to sync the virtual APIC TPR with the TPR-shadow.
    9682      */
    9683     if (pVmcsInfo->pbVirtApic)
    9684         pVmxTransient->u8GuestTpr = pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR];
    9685 
    9686     /*
    9687      * Update the host MSRs values in the VM-exit MSR-load area.
    9688      */
    9689     if (!pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs)
    9690     {
    9691         if (pVmcsInfo->cExitMsrLoad > 0)
    9692             vmxHCUpdateAutoLoadHostMsrs(pVCpu, pVmcsInfo);
    9693         pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = true;
    9694     }
    9695 
    9696     /*
    9697      * Evaluate if we need to intercept guest RDTSC/P accesses. Set up the
    9698      * VMX-preemption timer based on the next virtual sync clock deadline.
    9699      */
    9700     if (   !pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer
    9701         || idCurrentCpu != pVCpu->hmr0.s.idLastCpu)
    9702     {
    9703         vmxHCUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient, idCurrentCpu);
    9704         pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true;
    9705     }
    9706 
    9707     /* Record statistics of how often we use TSC offsetting as opposed to intercepting RDTSC/P. */
    9708     bool const fIsRdtscIntercepted = RT_BOOL(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT);
    9709     if (!fIsRdtscIntercepted)
    9710         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
    9711     else
    9712         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
    9713 
    9714     ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);  /* Used for TLB flushing, set this across the world switch. */
    9715     vmxHCFlushTaggedTlb(pHostCpu, pVCpu, pVmcsInfo);          /* Invalidate the appropriate guest entries from the TLB. */
    9716     Assert(idCurrentCpu == pVCpu->hmr0.s.idLastCpu);
    9717     pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu;      /* Record the error reporting info. with the current host CPU. */
    9718     pVmcsInfo->idHostCpuState = idCurrentCpu;                   /* Record the CPU for which the host-state has been exported. */
    9719     pVmcsInfo->idHostCpuExec  = idCurrentCpu;                   /* Record the CPU on which we shall execute. */
    9720 
    9721     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
    9722 
    9723     TMNotifyStartOfExecution(pVM, pVCpu);                       /* Notify TM to resume its clocks when TSC is tied to execution,
    9724                                                                    as we're about to start executing the guest. */
    9725 
    9726     /*
    9727      * Load the guest TSC_AUX MSR when we are not intercepting RDTSCP.
    9728      *
    9729      * This is done this late as updating the TSC offsetting/preemption timer above
    9730      * figures out if we can skip intercepting RDTSCP by calculating the number of
    9731      * host CPU ticks till the next virtual sync deadline (for the dynamic case).
    9732      */
    9733     if (   (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)
    9734         && !fIsRdtscIntercepted)
    9735     {
    9736         vmxHCImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_TSC_AUX);
    9737 
    9738         /* NB: Because we call vmxHCAddAutoLoadStoreMsr with fUpdateHostMsr=true,
    9739            it's safe even after vmxHCUpdateAutoLoadHostMsrs has already been done. */
    9740         int rc = vmxHCAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu),
    9741                                             true /* fSetReadWrite */, true /* fUpdateHostMsr */);
    9742         AssertRC(rc);
    9743         Assert(!pVmxTransient->fRemoveTscAuxMsr);
    9744         pVmxTransient->fRemoveTscAuxMsr = true;
    9745     }
    9746 
    9747 #ifdef VBOX_STRICT
    9748     Assert(pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs);
    9749     vmxHCCheckAutoLoadStoreMsrs(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
    9750     vmxHCCheckHostEferMsr(pVmcsInfo);
    9751     AssertRC(vmxHCCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest));
    9752 #endif
    9753 
    9754 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
    9755     /** @todo r=ramshankar: We can now probably use iemVmxVmentryCheckGuestState here.
    9756      *        Add a PVMXMSRS parameter to it, so that IEM can look at the host MSRs,
    9757      *        see @bugref{9180#c54}. */
    9758     uint32_t const uInvalidReason = vmxHCCheckGuestState(pVCpu, pVmcsInfo);
    9759     if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
    9760         Log4(("vmxHCCheckGuestState returned %#x\n", uInvalidReason));
    9761 #endif
    9762 }
    9763 
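An illustrative aside on the TPR caching done above: the byte at the TPR offset of the virtual-APIC page is cached before VM-entry so the post-run side only has to compare a single byte before touching the APIC device. A minimal standalone sketch of that cache-and-compare pattern, using invented stand-in types rather than the real VirtualBox structures:

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_XAPIC_OFF_TPR 0x80  /* TPR offset within the 4K virtual-APIC page */

    /* Invented, trimmed-down stand-ins for the real structures. */
    typedef struct TOYVMCSINFO  { uint8_t *pbVirtApic; } TOYVMCSINFO;
    typedef struct TOYTRANSIENT { uint8_t  u8GuestTpr; } TOYTRANSIENT;

    /* Before VM-entry: remember the TPR currently in the virtual-APIC page. */
    static void toyCacheTprBeforeEntry(TOYTRANSIENT *pTransient, const TOYVMCSINFO *pVmcsInfo)
    {
        if (pVmcsInfo->pbVirtApic)
            pTransient->u8GuestTpr = pVmcsInfo->pbVirtApic[TOY_XAPIC_OFF_TPR];
    }

    /* After VM-exit: push the TPR to the APIC model only if the guest changed it;
       returns true when the caller would need to mark the TPR as changed. */
    static bool toySyncTprAfterExit(const TOYTRANSIENT *pTransient, const TOYVMCSINFO *pVmcsInfo,
                                    void (*pfnSetTpr)(uint8_t bTpr))
    {
        if (   pVmcsInfo->pbVirtApic
            && pTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[TOY_XAPIC_OFF_TPR])
        {
            pfnSetTpr(pVmcsInfo->pbVirtApic[TOY_XAPIC_OFF_TPR]);
            return true;
        }
        return false;
    }

The post-run counterpart of this pattern appears further down, where the TPR shadow is synced back via APICSetTpr when the values differ.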
    9764 
    9765 /**
    9766  * First C routine invoked after running guest code using hardware-assisted VMX.
    9767  *
    9768  * @param   pVCpu           The cross context virtual CPU structure.
    9769  * @param   pVmxTransient   The VMX-transient structure.
    9770  * @param   rcVMRun         Return code of VMLAUNCH/VMRESUME.
    9771  *
    9772  * @remarks Called with interrupts disabled, and returns with interrupts enabled!
    9773  *
    9774  * @remarks No-long-jump zone!!! This function will however re-enable longjmps
    9775  *          unconditionally when it is safe to do so.
    9776  */
    9777 static void vmxHCPostRunGuest(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
    9778 {
    9779     ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
    9780     ASMAtomicIncU32(&pVCpu->hmr0.s.cWorldSwitchExits);          /* Initialized in vmR3CreateUVM(): used for EMT poking. */
    9781     pVCpu->hm.s.fCtxChanged            = 0;                     /* Exits/longjmps to ring-3 requires saving the guest state. */
    9782     pVmxTransient->fVmcsFieldsRead     = 0;                     /* Transient fields need to be read from the VMCS. */
    9783     pVmxTransient->fVectoringPF        = false;                 /* Vectoring page-fault needs to be determined later. */
    9784     pVmxTransient->fVectoringDoublePF  = false;                 /* Vectoring double page-fault needs to be determined later. */
    9785 
    9786     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    9787     if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
    9788     {
    9789         uint64_t uGstTsc;
    9790         if (!pVmxTransient->fIsNestedGuest)
    9791             uGstTsc = pVCpu->hmr0.s.uTscExit + pVmcsInfo->u64TscOffset;
    9792         else
    9793         {
    9794             uint64_t const uNstGstTsc = pVCpu->hmr0.s.uTscExit + pVmcsInfo->u64TscOffset;
    9795             uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc);
    9796         }
    9797         TMCpuTickSetLastSeen(pVCpu, uGstTsc);                           /* Update TM with the guest TSC. */
    9798     }
    9799 
    9800     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
    9801     TMNotifyEndOfExecution(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->hmr0.s.uTscExit); /* Notify TM that the guest is no longer running. */
    9802     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    9803 
    9804     pVCpu->hmr0.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED;   /* Some host state messed up by VMX needs restoring. */
    9805     pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;          /* Use VMRESUME instead of VMLAUNCH in the next run. */
    9806 #ifdef VBOX_STRICT
    9807     vmxHCCheckHostEferMsr(pVmcsInfo);                                 /* Verify that the host EFER MSR wasn't modified. */
    9808 #endif
    9809     Assert(!ASMIntAreEnabled());
    9810     ASMSetFlags(pVmxTransient->fEFlags);                                /* Enable interrupts. */
    9811     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    9812 
    9813 #ifdef HMVMX_ALWAYS_CLEAN_TRANSIENT
    9814     /*
    9815      * Clean all the VMCS fields in the transient structure before reading
    9816      * anything from the VMCS.
    9817      */
    9818     pVmxTransient->uExitReason            = 0;
    9819     pVmxTransient->uExitIntErrorCode      = 0;
    9820     pVmxTransient->uExitQual              = 0;
    9821     pVmxTransient->uGuestLinearAddr       = 0;
    9822     pVmxTransient->uExitIntInfo           = 0;
    9823     pVmxTransient->cbExitInstr            = 0;
    9824     pVmxTransient->ExitInstrInfo.u        = 0;
    9825     pVmxTransient->uEntryIntInfo          = 0;
    9826     pVmxTransient->uEntryXcptErrorCode    = 0;
    9827     pVmxTransient->cbEntryInstr           = 0;
    9828     pVmxTransient->uIdtVectoringInfo      = 0;
    9829     pVmxTransient->uIdtVectoringErrorCode = 0;
    9830 #endif
    9831 
    9832     /*
    9833      * Save the basic VM-exit reason and check if the VM-entry failed.
    9834      * See Intel spec. 24.9.1 "Basic VM-exit Information".
    9835      */
    9836     uint32_t uExitReason;
    9837     int rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
    9838     AssertRC(rc);
    9839     pVmxTransient->uExitReason    = VMX_EXIT_REASON_BASIC(uExitReason);
    9840     pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
    9841 
    9842     /*
    9843      * Log the VM-exit before logging anything else as otherwise it might be a
    9844      * tad confusing what happens before and after the world-switch.
    9845      */
    9846     HMVMX_LOG_EXIT(pVCpu, uExitReason);
    9847 
    9848     /*
    9849      * Remove the TSC_AUX MSR from the auto-load/store MSR area and reset any MSR
    9850      * bitmap permissions, if it was added before VM-entry.
    9851      */
    9852     if (pVmxTransient->fRemoveTscAuxMsr)
    9853     {
    9854         vmxHCRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX);
    9855         pVmxTransient->fRemoveTscAuxMsr = false;
    9856     }
    9857 
    9858     /*
    9859      * Check if VMLAUNCH/VMRESUME succeeded.
    9860      * If this failed, we cause a guru meditation and cease further execution.
    9861      *
    9862      * However, if we are executing a nested-guest we might fail if we use the
    9863          * fast path rather than fully emulating the VMLAUNCH/VMRESUME instruction in IEM.
    9864      */
    9865     if (RT_LIKELY(rcVMRun == VINF_SUCCESS))
    9866     {
    9867         /*
    9868          * Update the VM-exit history array here even if the VM-entry failed due to:
    9869          *   - Invalid guest state.
    9870          *   - MSR loading.
    9871          *   - Machine-check event.
    9872          *
    9873          * In any of the above cases we will still have a "valid" VM-exit reason
    9874          * despite @a fVMEntryFailed being true.
    9875          *
    9876          * See Intel spec. 26.7 "VM-Entry failures during or after loading guest state".
    9877          *
    9878          * Note! We don't have CS or RIP at this point.  Will probably address that later
    9879          *       by amending the history entry added here.
    9880          */
    9881         EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
    9882                          UINT64_MAX, pVCpu->hmr0.s.uTscExit);
    9883 
    9884         if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
    9885         {
    9886             VMMRZCallRing3Enable(pVCpu);
    9887             Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    9888 
    9889 #ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
    9890             vmxHCReadAllRoFieldsVmcs(pVCpu, pVmxTransient);
    9891 #endif
    9892 
    9893             /*
    9894              * Always import the guest-interruptibility state, as we need it when evaluating
    9895              * whether to inject events on re-entry.
    9896              *
    9897              * We don't import CR0 (when unrestricted guest execution is unavailable) despite
    9898              * checking for real-mode while exporting the state because all bits that cause
    9899              * mode changes wrt CR0 are intercepted.
    9900              */
    9901             uint64_t const fImportMask = CPUMCTX_EXTRN_HM_VMX_INT_STATE
    9902 #if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
    9903                                        | HMVMX_CPUMCTX_EXTRN_ALL
    9904 #elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
    9905                                        | CPUMCTX_EXTRN_RFLAGS
    9906 #endif
    9907                                        ;
    9908             rc = vmxHCImportGuestState(pVCpu, pVmcsInfo, fImportMask);
    9909             AssertRC(rc);
    9910 
    9911             /*
    9912              * Sync the TPR shadow with our APIC state.
    9913              */
    9914             if (   !pVmxTransient->fIsNestedGuest
    9915                 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
    9916             {
    9917                 Assert(pVmcsInfo->pbVirtApic);
    9918                 if (pVmxTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR])
    9919                 {
    9920                     rc = APICSetTpr(pVCpu, pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]);
    9921                     AssertRC(rc);
    9922                     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
    9923                 }
    9924             }
    9925 
    9926             Assert(VMMRZCallRing3IsEnabled(pVCpu));
    9927             Assert(   pVmxTransient->fWasGuestDebugStateActive == false
    9928                    || pVmxTransient->fWasHyperDebugStateActive == false);
    9929             return;
    9930         }
    9931     }
    9932 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    9933     else if (pVmxTransient->fIsNestedGuest)
    9934         AssertMsgFailed(("VMLAUNCH/VMRESUME failed but shouldn't happen when VMLAUNCH/VMRESUME was emulated in IEM!\n"));
    9935 #endif
    9936     else
    9937         Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
    9938 
    9939     VMMRZCallRing3Enable(pVCpu);
    9940 }
    9941 
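The TSC handling in vmxHCPostRunGuest boils down to a small piece of arithmetic: the guest TSC is the host TSC sampled at VM-exit plus the offset programmed in the VMCS, minus the nested-guest offset when a nested guest was running. A standalone sketch with hypothetical field names (the real code pulls these values from the VMCS info, the transient structure and CPUM):

    #include <stdint.h>

    /* Hypothetical inputs collected around the world switch. */
    typedef struct TOYTSCCTX
    {
        uint64_t uHostTscAtExit;    /* host TSC sampled right after VMLAUNCH/VMRESUME returned */
        uint64_t uTscOffset;        /* TSC offset currently programmed in the VMCS */
        uint64_t uNstGstTscOffset;  /* additional offset a nested hypervisor asked for, if any */
        int      fNestedGuest;      /* non-zero when a nested guest was executing */
    } TOYTSCCTX;

    /* Reconstruct the TSC value the outer guest last saw so the timer code can be told about it. */
    static uint64_t toyGuestTscAtExit(const TOYTSCCTX *pCtx)
    {
        uint64_t uGstTsc = pCtx->uHostTscAtExit + pCtx->uTscOffset;
        if (pCtx->fNestedGuest)
            uGstTsc -= pCtx->uNstGstTscOffset;  /* roughly what removing the nested-guest offset amounts to */
        return uGstTsc;
    }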
    9942 
    9943 /**
    99448081 * Runs the guest code using hardware-assisted VMX the normal way.
    99458082 *
     
    99868123        Assert(!HMR0SuspendPending());
    99878124        HMVMX_ASSERT_CPU_SAFE(pVCpu);
    9988         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     8125        STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x);
    99898126
    99908127        /*
     
    100118148        else
    100128149        {
    10013             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
     8150            STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, x);
    100148151            vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    100158152            return rcRun;
     
    100208157         */
    100218158        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    10022         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
    10023         STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    10024         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
     8159        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitAll);
     8160        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
     8161        STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, &VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x);
    100258162        HMVMX_START_EXIT_DISPATCH_PROF();
    100268163
     
    100358172        rcStrict = vmxHCHandleExit(pVCpu, &VmxTransient);
    100368173#endif
    10037         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
     8174        STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x);
    100388175        if (rcStrict == VINF_SUCCESS)
    100398176        {
    100408177            if (++(*pcLoops) <= cMaxResumeLoops)
    100418178                continue;
    10042             STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     8179            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchMaxResumeLoops);
    100438180            rcStrict = VINF_EM_RAW_INTERRUPT;
    100448181        }
     
    100468183    }
    100478184
    10048     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     8185    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x);
    100498186    return rcStrict;
    100508187}
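The run loop above, like the nested and debug variants that follow, reaches the per-vCPU statistics and flags through VCPU_2_VMXSTATE(pVCpu) rather than spelling out pVCpu->hm.s, so the same template body can be compiled for more than one backend. A toy, self-contained illustration of how such a context-selecting macro can work; the structure layout and the nem.s member here are assumptions made up for the example and are not the definition used in this file:

    #include <stdio.h>

    /* Invented stand-ins: two per-context state blocks hanging off the vCPU. */
    typedef struct TOYHMSTATE  { unsigned cExitsAll; } TOYHMSTATE;
    typedef struct TOYNEMSTATE { unsigned cExitsAll; } TOYNEMSTATE;
    typedef struct TOYVMCPU
    {
        struct { TOYHMSTATE  s; } hm;
        struct { TOYNEMSTATE s; } nem;
    } TOYVMCPU;

    /* Resolve to the state block of whichever component compiled this template. */
    #ifdef IN_RING0
    # define TOY_VCPU_2_VMXSTATE(a_pVCpu)   ((a_pVCpu)->hm.s)   /* ring-0 HM build */
    #else
    # define TOY_VCPU_2_VMXSTATE(a_pVCpu)   ((a_pVCpu)->nem.s)  /* ring-3 style build (assumed member name) */
    #endif

    int main(void)
    {
        TOYVMCPU VCpu = { { { 0 } }, { { 0 } } };
        TOY_VCPU_2_VMXSTATE(&VCpu).cExitsAll++;   /* identical template code either way */
        printf("exits=%u\n", TOY_VCPU_2_VMXSTATE(&VCpu).cExitsAll);
        return 0;
    }

Because the selection happens at preprocessing time, there is no runtime cost compared with naming the member directly.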
     
    100988235        Assert(!HMR0SuspendPending());
    100998236        HMVMX_ASSERT_CPU_SAFE(pVCpu);
    10100         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     8237        STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x);
    101018238
    101028239        /*
     
    101238260        else
    101248261        {
    10125             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
     8262            STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, x);
    101268263            vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    101278264            return rcRun;
     
    101328269         */
    101338270        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    10134         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
    10135         STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitAll);
    10136         STAM_COUNTER_INC(&pVCpu->hm.s.aStatNestedExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    10137         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
     8271        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitAll);
     8272        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatNestedExitAll);
     8273        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatNestedExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
     8274        STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, &VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x);
    101388275        HMVMX_START_EXIT_DISPATCH_PROF();
    101398276
     
    101448281         */
    101458282        rcStrict = vmxHCHandleExitNested(pVCpu, &VmxTransient);
    10146         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
     8283        STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x);
    101478284        if (rcStrict == VINF_SUCCESS)
    101488285        {
    101498286            if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    101508287            {
    10151                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
     8288                STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchNstGstVmexit);
    101528289                rcStrict = VINF_VMX_VMEXIT;
    101538290            }
     
    101568293                if (++(*pcLoops) <= cMaxResumeLoops)
    101578294                    continue;
    10158                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     8295                STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchMaxResumeLoops);
    101598296                rcStrict = VINF_EM_RAW_INTERRUPT;
    101608297            }
     
    101658302    }
    101668303
    10167     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     8304    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x);
    101688305    return rcStrict;
    101698306}
     
    105608697        {
    105618698            pDbgState->fClearCr0Mask = false;
    10562             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
     8699            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0);
    105638700        }
    105648701        if (pDbgState->fClearCr4Mask)
    105658702        {
    105668703            pDbgState->fClearCr4Mask = false;
    10567             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
     8704            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4);
    105688705        }
    105698706    }
     
    110859222    }
    110869223
     9224#ifdef IN_RING0 /* NMIs should never reach R3. */
    110879225    /*
    110889226     * Check for host NMI, just to get that out of the way.
     
    110979235            return vmxHCExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
    110989236    }
     9237#endif
    110999238
    111009239    /*
    111019240     * Check for single stepping event if we're stepping.
    111029241     */
    11103     if (pVCpu->hm.s.fSingleInstruction)
     9242    if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction)
    111049243    {
    111059244        switch (uExitReason)
     
    112349373
    112359374    /* Set HMCPU indicators.  */
    11236     bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
    11237     pVCpu->hm.s.fSingleInstruction     = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
     9375    bool const fSavedSingleInstruction = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
     9376    VCPU_2_VMXSTATE(pVCpu).fSingleInstruction     = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction || DBGFIsStepping(pVCpu);
    112389377    pVCpu->hmr0.s.fDebugWantRdTscExit    = false;
    112399378    pVCpu->hmr0.s.fUsingDebugLoop        = true;
     
    112529391        Assert(!HMR0SuspendPending());
    112539392        HMVMX_ASSERT_CPU_SAFE(pVCpu);
    11254         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    11255         bool fStepping = pVCpu->hm.s.fSingleInstruction;
     9393        STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x);
     9394        bool fStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
    112569395
    112579396        /* Set up VM-execution controls the next two can respond to. */
     
    112879426        else
    112889427        {
    11289             STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
     9428            STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, x);
    112909429            vmxHCReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
    112919430            return rcRun;
     
    112949433        /* Profile the VM-exit. */
    112959434        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    11296         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
    11297         STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
    11298         STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
     9435        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitAll);
     9436        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
     9437        STAM_PROFILE_ADV_STOP_START(&VCPU_2_VMXSTATE(pVCpu).StatPreExit, &VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x);
    112999438        HMVMX_START_EXIT_DISPATCH_PROF();
    113009439
     
    113059444         */
    113069445        rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
    11307         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
     9446        STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitHandling, x);
    113089447        if (rcStrict != VINF_SUCCESS)
    113099448            break;
    113109449        if (++(*pcLoops) > cMaxResumeLoops)
    113119450        {
    11312             STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
     9451            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchMaxResumeLoops);
    113139452            rcStrict = VINF_EM_RAW_INTERRUPT;
    113149453            break;
     
    113299468                break;
    113309469            }
    11331             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
     9470            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
    113329471        }
    113339472
     
    113609499    pVCpu->hmr0.s.fUsingDebugLoop     = false;
    113619500    pVCpu->hmr0.s.fDebugWantRdTscExit = false;
    11362     pVCpu->hm.s.fSingleInstruction  = fSavedSingleInstruction;
    11363 
    11364     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
     9501    VCPU_2_VMXSTATE(pVCpu).fSingleInstruction  = fSavedSingleInstruction;
     9502
     9503    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatEntry, x);
    113659504    return rcStrict;
    113669505}
    11367 
     9506#endif
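All of the run loops in this template share the same back-off: as long as the exit handler returns VINF_SUCCESS the guest is re-entered, but once *pcLoops exceeds cMaxResumeLoops the loop returns VINF_EM_RAW_INTERRUPT so the outer execution loop gets a turn. A compact sketch of that pattern, with toy status codes standing in for the real ones:

    #include <stdint.h>

    #define TOY_VINF_SUCCESS           0
    #define TOY_VINF_EM_RAW_INTERRUPT  1   /* toy stand-in for "hand control back to the outer loop" */

    /* One iteration: enter the guest, handle the VM-exit, return a toy status code. */
    typedef int (*TOYRUNONCEFN)(void *pvUser);

    /* Keep re-entering the guest until a handler asks for something else or the cap is hit. */
    static int toyRunLoop(TOYRUNONCEFN pfnRunOnce, void *pvUser, uint32_t *pcLoops, uint32_t cMaxResumeLoops)
    {
        for (;;)
        {
            int rc = pfnRunOnce(pvUser);
            if (rc != TOY_VINF_SUCCESS)
                return rc;                          /* exit handler wants work done elsewhere */
            if (++(*pcLoops) > cMaxResumeLoops)
                return TOY_VINF_EM_RAW_INTERRUPT;   /* cap reached: give the outer loop a turn */
        }
    }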
    113689507
    113699508/** @} */
     
    113879526            VBOXSTRICTRC rcStrict = a_CallExpr; \
    113889527            if (a_fSave != 0) \
    11389                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
     9528                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST); \
    113909529            return rcStrict; \
    113919530        } while (0)
     
    116219760#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
    116229761    do { \
    11623         (a_pVCpu)->hm.s.u32HMError = (a_HmError); \
     9762        VCPU_2_VMXSTATE((a_pVCpu)).u32HMError = (a_HmError); \
    116249763        return VERR_VMX_UNEXPECTED_EXIT; \
    116259764    } while (0)
    116269765
    116279766#ifdef VBOX_STRICT
     9767# ifdef IN_RING0
    116289768/* Is there some generic IPRT define for this that are not in Runtime/internal/\* ?? */
    116299769# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
     
    116519791        HMVMX_STOP_EXIT_DISPATCH_PROF(); \
    116529792    } while (0)
     9793# else
     9794# define HMVMX_ASSERT_PREEMPT_CPUID_VAR()   do { } while (0)
     9795# define HMVMX_ASSERT_PREEMPT_CPUID()       do { } while (0)
     9796# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
     9797    do { \
     9798        AssertPtr((a_pVCpu)); \
     9799        AssertPtr((a_pVmxTransient)); \
     9800        Assert((a_pVmxTransient)->fVMEntryFailed == false); \
     9801        Assert((a_pVmxTransient)->pVmcsInfo); \
     9802        Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
     9803        HMVMX_STOP_EXIT_DISPATCH_PROF(); \
     9804    } while (0)
     9805# endif
    116539806
    116549807# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
     
    117339886    /* Advance the RIP. */
    117349887    pVCpu->cpum.GstCtx.rip += cbInstr;
    11735     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
     9888    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
    117369889
    117379890    /* Update interrupt inhibition. */
     
    117849937static VBOXSTRICTRC vmxHCCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    117859938{
    11786     Assert(!pVCpu->hm.s.Event.fPending);
     9939    Assert(!VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    117879940    HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
    117889941    if (   pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
     
    1188410037
    1188510038                /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see vmxHCExitXcptPF(). */
    11886                 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflect);
     10039                STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectReflect);
    1188710040                vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
    1188810041                                       u32ErrCode, pVCpu->cpum.GstCtx.cr2);
    1188910042
    11890                 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
    11891                           pVCpu->hm.s.Event.u32ErrCode));
     10043                Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
     10044                          VCPU_2_VMXSTATE(pVCpu).Event.u32ErrCode));
    1189210045                Assert(rcStrict == VINF_SUCCESS);
    1189310046                break;
     
    1190710060                {
    1190810061                    pVmxTransient->fVectoringDoublePF = true;
    11909                     Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
     10062                    Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
    1191010063                          pVCpu->cpum.GstCtx.cr2));
    1191110064                    rcStrict = VINF_SUCCESS;
     
    1191310066                else
    1191410067                {
    11915                     STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectConvertDF);
     10068                    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectConvertDF);
    1191610069                    vmxHCSetPendingXcptDF(pVCpu);
    11917                     Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
     10070                    Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", VCPU_2_VMXSTATE(pVCpu).Event.u64IntInfo,
    1191810071                              uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
    1191910072                    rcStrict = VINF_HM_DOUBLE_FAULT;
     
    1225610409              || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1225710410
    12258     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
     10411    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    1225910412    if (rcStrict == VINF_IEM_RAISED_XCPT)
    1226010413    {
    12261         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     10414        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1226210415        rcStrict = VINF_SUCCESS;
    1226310416    }
    1226410417
    12265     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
     10418    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitLmsw);
    1226610419    Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1226710420    return rcStrict;
     
    1228110434              || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1228210435
    12283     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
     10436    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    1228410437    if (rcStrict == VINF_IEM_RAISED_XCPT)
    1228510438    {
    12286         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     10439        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1228710440        rcStrict = VINF_SUCCESS;
    1228810441    }
    1228910442
    12290     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
     10443    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitClts);
    1229110444    Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1229210445    return rcStrict;
     
    1231010463
    1231110464    if (iGReg == X86_GREG_xSP)
    12312         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
     10465        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
    1231310466    else
    12314         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     10467        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1231510468#ifdef VBOX_WITH_STATISTICS
    1231610469    switch (iCrReg)
    1231710470    {
    12318         case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
    12319         case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
    12320         case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
    12321         case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
    12322         case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
     10471        case 0: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR0Read); break;
     10472        case 2: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR2Read); break;
     10473        case 3: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR3Read); break;
     10474        case 4: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR4Read); break;
     10475        case 8: STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR8Read); break;
    1232310476    }
    1232410477#endif
     
    1234310496    {
    1234410497        case 0:
    12345             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
     10498            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
    1234610499                                                     | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
    12347             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
     10500            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR0Write);
    1234810501            Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
    1234910502            break;
    1235010503
    1235110504        case 2:
    12352             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
     10505            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR2Write);
    1235310506            /* Nothing to do here, CR2 it's not part of the VMCS. */
    1235410507            break;
    1235510508
    1235610509        case 3:
    12357             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
    12358             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
     10510            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
     10511            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR3Write);
    1235910512            Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
    1236010513            break;
    1236110514
    1236210515        case 4:
    12363             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
    12364             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
     10516            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
     10517            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR4Write);
     10518#ifdef IN_RING0
    1236510519            Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
    1236610520                      pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
     10521#else
     10522            Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr4));
     10523#endif
    1236710524            break;
    1236810525
    1236910526        case 8:
    12370             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
     10527            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
    1237110528                             HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
    12372             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
     10529            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitCR8Write);
    1237310530            break;
    1237410531
     
    1238010537    if (rcStrict == VINF_IEM_RAISED_XCPT)
    1238110538    {
    12382         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     10539        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1238310540        rcStrict = VINF_SUCCESS;
    1238410541    }
     
    1239510552{
    1239610553    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     10554    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
     10555
     10556#ifdef IN_RING0
    1239710557    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    12398     vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    12399 
    1240010558    if (!pVM->hmr0.s.fNestedPaging)
    1240110559    { /* likely */ }
    1240210560    else
    12403     {
    12404 #if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
     10561#endif
     10562    {
     10563#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF) && defined(IN_RING0)
    1240510564        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
    1240610565#endif
    12407         pVCpu->hm.s.Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */
     10566        VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */
    1240810567        if (!pVmxTransient->fVectoringDoublePF)
    1240910568        {
     
    1241810577            Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
    1241910578        }
    12420         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
     10579        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestPF);
    1242110580        return VINF_SUCCESS;
    1242210581    }
     
    1242810587    if (pVmxTransient->fVectoringPF)
    1242910588    {
    12430         Assert(pVCpu->hm.s.Event.fPending);
     10589        Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    1243110590        return VINF_EM_RAW_INJECT_TRPM_EVENT;
    1243210591    }
     
    1244910608         * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
    1245010609         */
    12451         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     10610        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    1245210611        TRPMResetTrap(pVCpu);
    12453         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
     10612        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitShadowPF);
    1245410613        return rc;
    1245510614    }
     
    1246210621            uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
    1246310622            TRPMResetTrap(pVCpu);
    12464             pVCpu->hm.s.Event.fPending = false;                 /* In case it's a contributory #PF. */
     10623            VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;                 /* In case it's a contributory #PF. */
    1246510624            vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
    1246610625                                   uGstErrorCode, pVmxTransient->uExitQual);
     
    1247010629            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
    1247110630            TRPMResetTrap(pVCpu);
    12472             pVCpu->hm.s.Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
     10631            VCPU_2_VMXSTATE(pVCpu).Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
    1247310632            vmxHCSetPendingXcptDF(pVCpu);
    1247410633            Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
    1247510634        }
    1247610635
    12477         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
     10636        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestPF);
    1247810637        return VINF_SUCCESS;
    1247910638    }
    1248010639
    1248110640    TRPMResetTrap(pVCpu);
    12482     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
     10641    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitShadowPFEM);
    1248310642    return rc;
    1248410643}
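The #PF handler above ends in one of three ways: an already-recorded vectoring event is left for TRPM to re-inject, a #PF that hit while another #PF was being delivered is escalated to #DF, or the page fault itself is forwarded with the updated error code and CR2. A stripped-down sketch of just that classification, with toy types in place of the TRPM/PGM plumbing:

    #include <stdbool.h>

    typedef enum TOYPFACTION
    {
        TOY_LET_TRPM_REINJECT,  /* a vectoring #PF was already recorded; let TRPM deliver it */
        TOY_INJECT_DF,          /* #PF hit while delivering another #PF: raise a double fault */
        TOY_INJECT_PF           /* plain guest #PF: forward it with the new error code / CR2 */
    } TOYPFACTION;

    /* Mirrors the decision points of the handler above, minus the PGM/TRPM plumbing. */
    static TOYPFACTION toyClassifyGuestPageFault(bool fVectoringPF, bool fVectoringDoublePF)
    {
        if (fVectoringPF)
            return TOY_LET_TRPM_REINJECT;
        if (fVectoringDoublePF)
            return TOY_INJECT_DF;
        return TOY_INJECT_PF;
    }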
     
    1249310652{
    1249410653    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    12495     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
     10654    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestMF);
    1249610655
    1249710656    int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
     
    1252510684{
    1252610685    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    12527     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
     10686    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestBP);
    1252810687
    1252910688    int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     
    1257410733         * Check for debug/trace events and import state accordingly.
    1257510734         */
    12576         STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestACSplitLock);
    12577         PVMCC pVM = pVCpu->pVMR0;
     10735        STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestACSplitLock);
     10736        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    1257810737        if (   !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
    12579             && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED())
     10738#ifdef IN_RING0
     10739            && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
     10740#endif
     10741            )
    1258010742        {
    1258110743            if (pVM->cCpus == 1)
     
    1261910781            if (rcStrict == VINF_SUCCESS)
    1262010782#if 0 /** @todo r=bird: This is potentially wrong.  Might have to just do a whole state sync above and mark everything changed to be safe... */
    12621                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
     10783                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
    1262210784                                   HM_CHANGED_GUEST_RIP
    1262310785                                 | HM_CHANGED_GUEST_RFLAGS
     
    1262610788                                 | HM_CHANGED_GUEST_SS);
    1262710789#else
    12628                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     10790                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    1262910791#endif
    1263010792            else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1263110793            {
    12632                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     10794                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1263310795                rcStrict = VINF_SUCCESS;
    1263410796            }
     
    1264010802    }
    1264110803
    12642     STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC);
     10804    STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestAC);
    1264310805    Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    1264410806              pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
     
    1265910821{
    1266010822    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    12661     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
     10823    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestDB);
    1266210824
    1266310825    /*
     
    1267510837    if (!pVmxTransient->fIsNestedGuest)
    1267610838    {
    12677         rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
     10839        rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
    1267810840
    1267910841        /*
     
    1268510847            && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
    1268610848        {
    12687             Assert(pVCpu->hm.s.fSingleInstruction);
     10849            Assert(VCPU_2_VMXSTATE(pVCpu).fSingleInstruction);
    1268810850            rc = VINF_EM_RAW_GUEST_TRAP;
    1268910851        }
     
    1269910861         * See Intel spec. 27.1 "Architectural State before a VM-Exit".
    1270010862         */
     10863#ifdef IN_RING0
    1270110864        VMMRZCallRing3Disable(pVCpu);
    1270210865        HM_DISABLE_PREEMPT(pVCpu);
     
    1270910872        HM_RESTORE_PREEMPT();
    1271010873        VMMRZCallRing3Enable(pVCpu);
     10874#else
     10875        /** @todo */
     10876#endif
    1271110877
    1271210878        rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
     
    1282610992{
    1282710993    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    12828     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
     10994    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestGP);
    1282910995
    1283010996    PCPUMCTX            pCtx            = &pVCpu->cpum.GstCtx;
    1283110997    PVMXVMCSINFO        pVmcsInfo       = pVmxTransient->pVmcsInfo;
     10998#ifdef IN_RING0
    1283210999    PVMXVMCSINFOSHARED  pVmcsInfoShared = pVmcsInfo->pShared;
    1283311000    if (pVmcsInfoShared->RealMode.fRealOnV86Active)
    1283411001    { /* likely */ }
    1283511002    else
     11003#endif
    1283611004    {
    1283711005#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    12838         Assert(pVCpu->hmr0.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
     11006# ifdef IN_RING0
     11007        Assert(pVCpu->hmr0.s.fUsingDebugLoop || VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
     11008# else
     11009        Assert(/*pVCpu->hmr0.s.fUsingDebugLoop ||*/ VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
     11010# endif
    1283911011#endif
    1284011012        /*
     
    1284811020
    1284911021        if (    pVmxTransient->fIsNestedGuest
    12850             || !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
     11022            || !VCPU_2_VMXSTATE(pVCpu).fTrapXcptGpForLovelyMesaDrv
    1285111023            || !vmxHCIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
    1285211024            vmxHCSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
     
    1285711029    }
    1285811030
     11031#ifdef IN_RING0
    1285911032    Assert(CPUMIsGuestInRealModeEx(pCtx));
    1286011033    Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
     
    1287411047             */
    1287511048            pVmcsInfoShared->RealMode.fRealOnV86Active = false;
    12876             if (HMCanExecuteVmxGuest(pVCpu->pVMR0, pVCpu, pCtx))
     11049            if (HMCanExecuteVmxGuest(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx))
    1287711050            {
    1287811051                Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
    12879                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     11052                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    1288011053            }
    1288111054            else
     
    1288611059        }
    1288711060        else
    12888             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     11061            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    1288911062    }
    1289011063    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1289111064    {
    1289211065        rcStrict = VINF_SUCCESS;
    12893         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     11066        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1289411067    }
    1289511068    return VBOXSTRICTRC_VAL(rcStrict);
     11069#endif
    1289611070}
    1289711071
     
    1291111085
    1291211086#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     11087# ifdef IN_RING0
    1291311088    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1291411089    AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
     
    1291611091               VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
    1291711092    NOREF(pVmcsInfo);
     11093# endif
    1291811094#endif
    1291911095
     
    1292711103    int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    1292811104    AssertRCReturn(rc, rc);
    12929     Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip));
     11105    Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    1293011106#endif
    1293111107
     
    1293311109    switch (uVector)
    1293411110    {
    12935         case X86_XCPT_DE:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);     break;
    12936         case X86_XCPT_DB:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);     break;
    12937         case X86_XCPT_BP:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);     break;
    12938         case X86_XCPT_OF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestOF);     break;
    12939         case X86_XCPT_BR:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBR);     break;
    12940         case X86_XCPT_UD:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);     break;
    12941         case X86_XCPT_NM:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestOF);     break;
    12942         case X86_XCPT_DF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDF);     break;
    12943         case X86_XCPT_TS:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);     break;
    12944         case X86_XCPT_NP:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);     break;
    12945         case X86_XCPT_SS:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);     break;
    12946         case X86_XCPT_GP:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);     break;
    12947         case X86_XCPT_PF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);     break;
    12948         case X86_XCPT_MF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);     break;
    12949         case X86_XCPT_AC:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC);     break;
    12950         case X86_XCPT_XF:   STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);     break;
     11111        case X86_XCPT_DE:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestDE);     break;
     11112        case X86_XCPT_DB:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestDB);     break;
     11113        case X86_XCPT_BP:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestBP);     break;
     11114        case X86_XCPT_OF:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestOF);     break;
     11115        case X86_XCPT_BR:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestBR);     break;
     11116        case X86_XCPT_UD:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestUD);     break;
     11117        case X86_XCPT_NM:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestOF);     break;
     11118        case X86_XCPT_DF:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestDF);     break;
     11119        case X86_XCPT_TS:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestTS);     break;
     11120        case X86_XCPT_NP:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestNP);     break;
     11121        case X86_XCPT_SS:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestSS);     break;
     11122        case X86_XCPT_GP:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestGP);     break;
     11123        case X86_XCPT_PF:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestPF);     break;
     11124        case X86_XCPT_MF:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestMF);     break;
     11125        case X86_XCPT_AC:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestAC);     break;
     11126        case X86_XCPT_XF:   STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestXF);     break;
    1295111127        default:
    12952             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
     11128            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitGuestXcpUnk);
    1295311129            break;
    1295411130    }
     
    1299311169        Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
    1299411170        uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    12995         if (   !pVCpu->hm.s.Event.fPending
     11171        if (   !VCPU_2_VMXSTATE(pVCpu).Event.fPending
    1299611172            || uVector == X86_XCPT_PF)
    1299711173        {
     
    1301211188    else if (rcStrict == VINF_HM_DOUBLE_FAULT)
    1301311189    {
    13014         Assert(pVCpu->hm.s.Event.fPending);
     11190        Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
    1301511191        rcStrict = VINF_SUCCESS;
    1301611192    }
     
    1303411210{
    1303511211    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    13036     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
     11212    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitExtInt);
     11213
     11214#ifdef IN_RING0
    1303711215    /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
    1303811216    if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
    1303911217        return VINF_SUCCESS;
    1304011218    return VINF_EM_RAW_INTERRUPT;
     11219#else
     11220    return VINF_SUCCESS;
     11221#endif
    1304111222}
    1304211223
     
    1304911230{
    1305011231    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    13051     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
     11232    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitXcptNmi, y3);
    1305211233
    1305311234    vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
     
    1306511246    switch (uExitIntType)
    1306611247    {
     11248#ifdef IN_RING0 /* NMIs should never reach R3. */
    1306711249        /*
    1306811250         * Host physical NMIs:
     
    1307911261            break;
    1308011262        }
     11263#endif
    1308111264
    1308211265        /*
     
    1310611289        default:
    1310711290        {
    13108             pVCpu->hm.s.u32HMError = pVmxTransient->uExitIntInfo;
     11291            VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
    1310911292            rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
    1311011293            AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
     
    1311311296    }
    1311411297
    13115     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
     11298    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitXcptNmi, y3);
    1311611299    return rcStrict;
    1311711300}
     
    1312711310    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
    1312811311    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    13129     vmxHCClearIntWindowExitVmcs(pVmcsInfo);
     11312    vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);
    1313011313
    1313111314    /* Evaluate and deliver pending events and resume guest execution. */
    13132     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
     11315    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitIntWindow);
    1313311316    return VINF_SUCCESS;
    1313411317}
     
    1317011353
    1317111354    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs, it is now ready */
    13172     vmxHCClearNmiWindowExitVmcs(pVmcsInfo);
     11355    vmxHCClearNmiWindowExitVmcs(pVCpu, pVmcsInfo);
    1317311356
    1317411357    /* Evaluate and deliver pending events and resume guest execution. */
     
    1322411407        rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
    1322511408        if (rcStrict == VINF_SUCCESS)
    13226             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     11409            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1322711410        else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1322811411        {
    13229             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     11412            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1323011413            rcStrict = VINF_SUCCESS;
    1323111414        }
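The VINF_SUCCESS / VINF_IEM_RAISED_XCPT bookkeeping above recurs in most exit handlers in this file: success marks RIP and RFLAGS as changed, while a raised exception marks HM_CHANGED_RAISED_XCPT_MASK and folds the status into VINF_SUCCESS. A hypothetical helper capturing that pattern is sketched below purely for readability; the name vmxHCFoldIemStatus is invented here and no such helper is introduced by this changeset.

    /* Hypothetical helper -- not part of this changeset. */
    DECLINLINE(VBOXSTRICTRC) vmxHCFoldIemStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    {
        if (rcStrict == VINF_SUCCESS)
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
        else if (rcStrict == VINF_IEM_RAISED_XCPT)
        {
            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
            rcStrict = VINF_SUCCESS;
        }
        return rcStrict;
    }

A caller would then collapse the pattern to, e.g., rcStrict = vmxHCFoldIemStatus(pVCpu, IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr));.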
     
    1324311426
    1324411427        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    13245         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     11428        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    1324611429
    1324711430        Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
     
    1329111474        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
    1329211475            pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    13293         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     11476        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1329411477    }
    1329511478    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1329611479    {
    13297         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     11480        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1329811481        rcStrict = VINF_SUCCESS;
    1329911482    }
     
    1332111504        if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
    1332211505            pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
    13323         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     11506        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1332411507    }
    1332511508    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1332611509    {
    13327         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     11510        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1332811511        rcStrict = VINF_SUCCESS;
    1332911512    }
     
    1341011593{
    1341111594    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     11595#ifdef IN_RING0
    1341211596    Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
     11597#endif
    1341311598
    1341411599    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     
    1342111606
    1342211607    if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
    13423         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     11608        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1342411609    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1342511610    {
    13426         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     11611        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1342711612        rcStrict = VINF_SUCCESS;
    1342811613    }
     
    1344811633    VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
    1344911634    if (rcStrict == VINF_SUCCESS)
    13450         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     11635        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1345111636    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1345211637    {
    13453         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     11638        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1345411639        rcStrict = VINF_SUCCESS;
    1345511640    }
     
    1347411659    if (RT_SUCCESS(rcStrict))
    1347511660    {
    13476         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     11661        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1347711662        if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
    1347811663            rcStrict = VINF_SUCCESS;
     
    1351111696
    1351211697    if (rc != VINF_SUCCESS)
    13513         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
     11698        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchHltToR3);
    1351411699    return rc;
    1351511700}
     
    1354211727    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    1354311728    bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
    13544     STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
     11729    STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitPreemptTimer);
    1354511730    return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
    1354611731}
     
    1355411739    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    1355511740
     11741#ifdef IN_RING0
    1355611742    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    1355711743    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     
    1356011746
    1356111747    VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
    13562     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
     11748    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
    1356311749                                                                                : HM_CHANGED_RAISED_XCPT_MASK);
    1356411750
     
    1357211758
    1357311759    return rcStrict;
     11760#else
     11761    return VERR_EM_INTERPRETER;
     11762#endif
    1357411763}
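The #ifdef IN_RING0 / #else split above, with the non-ring-0 build returning VERR_EM_INTERPRETER, is the general shape used in this file for handlers whose ring-3 path is not filled in yet. For comparison only, a hypothetical ring-3 variant is sketched below under the assumption that the decoded-XSETBV path of IEM is equally usable there; the CPUMCTX_EXTRN_* import mask the ring-0 path uses is not shown in this excerpt and is deliberately left out.

    /* Hypothetical ring-3 fallback -- not part of this changeset. */
    vmxHCReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    /* ...import whatever guest state IEM needs here (mask omitted, see note above)... */
    VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
                                                                                           : HM_CHANGED_RAISED_XCPT_MASK);
    return rcStrict;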
    1357511764
     
    1360711796                                                  GCPtrDesc, uType);
    1360811797    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    13609         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     11798        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1361011799    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1361111800    {
    13612         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     11801        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1361311802        rcStrict = VINF_SUCCESS;
    1361411803    }
     
    1365911848    rc = VMX_VMCS_READ_NW(pVCpu, VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val);             AssertRC(rc);
    1366011849    Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW              %#RX64\n", u64Val));
     11850# ifdef IN_RING0
    1366111851    if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
    1366211852    {
     
    1366411854        Log4(("VMX_VMCS64_CTRL_EPTP_FULL                  %#RX64\n", u64Val));
    1366511855    }
     11856
    1366611857    hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
     11858# endif
    1366711859#endif
    1366811860
     
    1378411976    Log4Func(("ecx=%#RX32\n", idMsr));
    1378511977
    13786 #ifdef VBOX_STRICT
     11978#if defined(VBOX_STRICT) && defined(IN_RING0)
    1378711979    Assert(!pVmxTransient->fIsNestedGuest);
    1378811980    if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     
    1380812000
    1380912001    VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
    13810     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
     12002    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitRdmsr);
    1381112003    if (rcStrict == VINF_SUCCESS)
    13812         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     12004        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1381312005    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1381412006    {
    13815         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     12007        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1381612008        rcStrict = VINF_SUCCESS;
    1381712009    }
     
    1385812050
    1385912051    VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
    13860     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
     12052    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitWrmsr);
    1386112053
    1386212054    if (rcStrict == VINF_SUCCESS)
    1386312055    {
    13864         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     12056        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1386512057
    1386612058        /* If this is an X2APIC WRMSR access, update the APIC state as well. */
     
    1387412066             * sure APIC state is saved from the VMCS before IEM changes it.
    1387512067             */
    13876             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
     12068            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
    1387712069        }
    1387812070        else if (idMsr == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
     
    1388512077             * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
    1388612078             */
    13887             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
     12079            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
    1388812080        }
    1388912081
     
    1389312085            switch (idMsr)
    1389412086            {
    13895                 case MSR_IA32_SYSENTER_CS:  ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
    13896                 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
    13897                 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
    13898                 case MSR_K8_FS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS);               break;
    13899                 case MSR_K8_GS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS);               break;
     12087                case MSR_IA32_SYSENTER_CS:  ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
     12088                case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
     12089                case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
     12090                case MSR_K8_FS_BASE:        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_FS);               break;
     12091                case MSR_K8_GS_BASE:        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_GS);               break;
    1390012092                case MSR_K6_EFER:           /* Nothing to do, already handled above. */                                    break;
    1390112093                default:
    1390212094                {
     12095#ifdef IN_RING0
    1390312096                    if (vmxHCIsLazyGuestMsr(pVCpu, idMsr))
    13904                         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
     12097                        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
    1390512098                    else if (vmxHCIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
    13906                         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     12099                        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     12100#else
     12101                    AssertMsgFailed(("TODO\n"));
     12102#endif
    1390712103                    break;
    1390812104                }
    1390912105            }
    1391012106        }
    13911 #ifdef VBOX_STRICT
     12107#if defined(VBOX_STRICT) && defined(IN_RING0)
    1391212108        else
    1391312109        {
     
    1395712153    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1395812154    {
    13959         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     12155        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1396012156        rcStrict = VINF_SUCCESS;
    1396112157    }
     
    1400012196     * entry so we can just continue execution here.
    1400112197     */
    14002     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
     12198    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitTprBelowThreshold);
    1400312199    return VINF_SUCCESS;
    1400412200}
     
    1401712213{
    1401812214    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14019     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
     12215    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitMovCRx, y2);
    1402012216
    1402112217    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     
    1404412240
    1404512241            HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
     12242#ifdef IN_RING0
    1404612243            uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
     12244#endif
    1404712245            uint8_t const  iGReg   = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
    1404812246            uint8_t const  iCrReg  = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
     
    1405412252             *   - We are executing in the VM debug loop.
    1405512253             */
     12254#ifdef IN_RING0
    1405612255            Assert(   iCrReg != 3
    1405712256                   || !pVM->hmr0.s.fNestedPaging
    1405812257                   || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
    1405912258                   || pVCpu->hmr0.s.fUsingDebugLoop);
     12259#else
     12260            Assert(   iCrReg != 3
     12261                   || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
     12262#endif
    1406012263
    1406112264            /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
     
    1406712270                      || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1406812271
     12272#ifdef IN_RING0
    1406912273            /*
    1407012274             * This is a kludge for handling switches back to real mode when we try to use
     
    1408912293                rcStrict = VINF_EM_RESCHEDULE_REM;
    1409012294            }
     12295#endif
     12296
    1409112297            break;
    1409212298        }
     
    1410612312             *   - We are executing in the VM debug loop.
    1410712313             */
     12314#ifdef IN_RING0
    1410812315            Assert(   iCrReg != 3
    1410912316                   || !pVM->hmr0.s.fNestedPaging
    1411012317                   || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
    1411112318                   || pVCpu->hmr0.s.fLeaveDone);
     12319#else
     12320            Assert(   iCrReg != 3
     12321                   || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx));
     12322#endif
    1411212323
    1411312324            /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
     
    1415612367    }
    1415712368
    14158     Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
     12369    Assert((VCPU_2_VMXSTATE(pVCpu).fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
    1415912370                                   == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
    1416012371    Assert(rcStrict != VINF_IEM_RAISED_XCPT);
    1416112372
    14162     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
     12373    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitMovCRx, y2);
    1416312374    NOREF(pVM);
    1416412375    return rcStrict;
     
    1417312384{
    1417412385    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14175     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
     12386    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitIO, y1);
    1417612387
    1417712388    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     
    1419012401    bool     const fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
    1419112402    bool     const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
    14192     bool     const fDbgStepping = pVCpu->hm.s.fSingleInstruction;
     12403    bool     const fDbgStepping = VCPU_2_VMXSTATE(pVCpu).fSingleInstruction;
    1419312404    AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
    1419412405
     
    1425312464                rcStrict = IEMExecOne(pVCpu);
    1425412465
    14255             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
     12466            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
    1425612467            fUpdateRipAlready = true;
    1425712468        }
     
    1426712478            {
    1426812479                rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
    14269                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
     12480                STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitIOWrite);
     12481#ifdef IN_RING0
    1427012482                if (    rcStrict == VINF_IOM_R3_IOPORT_WRITE
    1427112483                    && !pCtx->eflags.Bits.u1TF)
    1427212484                    rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
     12485#endif
    1427312486            }
    1427412487            else
     
    1428112494                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
    1428212495                }
     12496#ifdef IN_RING0
    1428312497                if (    rcStrict == VINF_IOM_R3_IOPORT_READ
    1428412498                    && !pCtx->eflags.Bits.u1TF)
    1428512499                    rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
    14286                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
     12500#endif
     12501                STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitIORead);
    1428712502            }
    1428812503        }
     
    1429312508            {
    1429412509                vmxHCAdvanceGuestRipBy(pVCpu, cbInstr);
    14295                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
     12510                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
    1429612511            }
    1429712512
     
    1430312518             */
    1430412519            if (fIOString)
    14305                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
     12520                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
    1430612521
    1430712522            /*
     
    1432112536                            || DBGFBpIsHwIoArmed(pVM)))
    1432212537            {
    14323                 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
    14324 
     12538                STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatDRxIoCheck);
     12539
     12540#ifdef IN_RING0
    1432512541                /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
    1432612542                VMMRZCallRing3Disable(pVCpu);
     
    1433612552                        ASMSetDR6(pCtx->dr[6]);
    1433712553                    if (pCtx->dr[7] != uDr7)
    14338                         pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
     12554                        VCPU_2_VMXSTATE(pVCpu).fCtxChanged |= HM_CHANGED_GUEST_DR7;
    1433912555
    1434012556                    vmxHCSetPendingXcptDB(pVCpu);
     
    1434912565                HM_RESTORE_PREEMPT();
    1435012566                VMMRZCallRing3Enable(pVCpu);
     12567#else
     12568                /** @todo */
     12569#endif
    1435112570            }
    1435212571        }
     
    1437412593        }
    1437512594#endif
    14376         STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
     12595        STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitIO, y1);
    1437712596    }
    1437812597    else
     
    1438312602        int rc2 = vmxHCImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    1438412603        AssertRCReturn(rc2, rc2);
    14385         STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
    14386                          : fIOWrite ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
     12604        STAM_COUNTER_INC(!fIOString ? fIOWrite ? &VCPU_2_VMXSTATE(pVCpu).StatExitIOWrite : &VCPU_2_VMXSTATE(pVCpu).StatExitIORead
     12605                         : fIOWrite ? &VCPU_2_VMXSTATE(pVCpu).StatExitIOStringWrite : &VCPU_2_VMXSTATE(pVCpu).StatExitIOStringRead);
    1438712606        Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
    1438812607              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
     
    1439112610
    1439212611        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    14393         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     12612        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    1439412613
    1439512614        Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
     
    1443812657            Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
    1443912658                      VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
    14440             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
     12659            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitTaskSwitch);
    1444112660            return VINF_EM_RAW_INJECT_TRPM_EVENT;
    1444212661        }
     
    1444412663
    1444512664    /* Fall back to the interpreter to emulate the task-switch. */
    14446     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
     12665    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitTaskSwitch);
    1444712666    return VERR_EM_INTERPRETER;
    1444812667}
     
    1447012689{
    1447112690    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    14472     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
     12691    STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitApicAccess);
    1447312692
    1447412693    vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);
     
    1448512704    {
    1448612705        /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
    14487         if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
    14488         {
    14489             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
     12706        if (RT_UNLIKELY(VCPU_2_VMXSTATE(pVCpu).Event.fPending))
     12707        {
     12708            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectInterpret);
    1449012709            return VINF_EM_RAW_INJECT_TRPM_EVENT;
    1449112710        }
     
    1450712726    switch (uAccessType)
    1450812727    {
     12728#ifdef IN_RING0
    1450912729        case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
    1451012730        case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
     
    1451412734                      ("vmxHCExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
    1451512735
    14516             RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64GstMsrApicBase;    /* Always up-to-date, as it is not part of the VMCS. */
     12736            RTGCPHYS GCPhys = VCPU_2_VMXSTATE(pVCpu).vmx.u64GstMsrApicBase;    /* Always up-to-date, as it is not part of the VMCS. */
    1451712737            GCPhys &= PAGE_BASE_GC_MASK;
    1451812738            GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
     
    1452712747                || rcStrict == VERR_PAGE_NOT_PRESENT)
    1452812748            {
    14529                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
     12749                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
    1453012750                                                         | HM_CHANGED_GUEST_APIC_TPR);
    1453112751                rcStrict = VINF_SUCCESS;
     
    1453312753            break;
    1453412754        }
     12755#else
     12756        /** @todo */
     12757#endif
    1453512758
    1453612759        default:
     
    1454312766
    1454412767    if (rcStrict != VINF_SUCCESS)
    14545         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
     12768        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatSwitchApicAccessToR3);
    1454612769    return rcStrict;
    1454712770}
     
    1457112794        }
    1457212795
    14573         if (   !pVCpu->hm.s.fSingleInstruction
     12796        if (   !VCPU_2_VMXSTATE(pVCpu).fSingleInstruction
    1457412797            && !pVmxTransient->fWasHyperDebugStateActive)
    1457512798        {
     
    1458212805            AssertRC(rc);
    1458312806
     12807#ifdef IN_RING0
    1458412808            /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
    1458512809            VMMRZCallRing3Disable(pVCpu);
     
    1459212816            HM_RESTORE_PREEMPT();
    1459312817            VMMRZCallRing3Enable(pVCpu);
     12818#else
     12819            /** @todo */
     12820#endif
    1459412821
    1459512822#ifdef VBOX_WITH_STATISTICS
    1459612823            vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
    1459712824            if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    14598                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
     12825                STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitDRxWrite);
    1459912826            else
    14600                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
     12827                STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitDRxRead);
    1460112828#endif
    14602             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
     12829            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatDRxContextSwitch);
    1460312830            return VINF_SUCCESS;
    1460412831        }
     
    1462312850                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
    1462412851        if (RT_SUCCESS(rc))
    14625             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
    14626         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
     12852            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_DR7);
     12853        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitDRxWrite);
    1462712854    }
    1462812855    else
     
    1463112858                                VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
    1463212859                                VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
    14633         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
     12860        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitDRxRead);
    1463412861    }
    1463512862
     
    1465212879{
    1465312880    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     12881
     12882#ifdef IN_RING0
    1465412883    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
    1465512884
     
    1467112900         * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
    1467212901         */
    14673         if (!pVCpu->hm.s.Event.fPending)
     12902        if (!VCPU_2_VMXSTATE(pVCpu).Event.fPending)
    1467412903        { /* likely */ }
    1467512904        else
    1467612905        {
    14677             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
     12906            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectInterpret);
    1467812907#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1467912908            /** @todo NSTVMX: Think about how this should be handled. */
     
    1472912958        {
    1473012959            /* Successfully handled MMIO operation. */
    14731             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
     12960            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
    1473212961                                                     | HM_CHANGED_GUEST_APIC_TPR);
    1473312962            rcStrict = VINF_SUCCESS;
     
    1474312972
    1474412973        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    14745         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     12974        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    1474612975
    1474712976        Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
     
    1475012979    }
    1475112980    return rcStrict;
     12981#else
     12982    AssertFailed();
     12983    return VERR_VMX_IPE_3; /* Should never happen with Apple HV in R3. */
     12984#endif
    1475212985}
    1475312986
     
    1476012993{
    1476112994    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     12995#ifdef IN_RING0
    1476212996    Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
    1476312997
     
    1477913013         * we shall resolve the nested #PF and re-inject the original event.
    1478013014         */
    14781         if (pVCpu->hm.s.Event.fPending)
    14782             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflectNPF);
     13015        if (VCPU_2_VMXSTATE(pVCpu).Event.fPending)
     13016            STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatInjectReflectNPF);
    1478313017    }
    1478413018    else
     
    1480513039        uErrorCode |= X86_TRAP_PF_P;
    1480613040
    14807     PVMCC    pVM  = pVCpu->CTX_SUFF(pVM);
    1480813041    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1480913042    Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
     13043
     13044    PVMCC    pVM  = pVCpu->CTX_SUFF(pVM);
    1481013045
    1481113046    /*
     
    1482213057    {
    1482313058        /* Successfully synced our nested page tables. */
    14824         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
    14825         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
     13059        STAM_COUNTER_INC(&VCPU_2_VMXSTATE(pVCpu).StatExitReasonNpf);
     13060        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
    1482613061        return VINF_SUCCESS;
    1482713062    }
     13063#else
     13064    PVM pVM = pVCpu->CTX_SUFF(pVM);
     13065    uint64_t const uHostTsc = ASMReadTSC(); RT_NOREF(uHostTsc);
     13066    vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);
     13067    vmxHCReadGuestPhysicalAddrVmcs(pVCpu, pVmxTransient);
     13068    vmxHCImportGuestRip(pVCpu);
     13069    vmxHCImportGuestSegReg(pVCpu, X86_SREG_CS);
     13070
     13071    /*
     13072     * Ask PGM for information about the given GCPhys.  We need to check if we're
     13073     * out of sync first.
     13074     */
     13075    NEMHCDARWINHMACPCCSTATE State = { RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE), false, false };
     13076    PGMPHYSNEMPAGEINFO      Info;
     13077    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pVmxTransient->uGuestPhysicalAddr, State.fWriteAccess, &Info,
     13078                                       nemR3DarwinHandleMemoryAccessPageCheckerCallback, &State);
     13079    if (RT_SUCCESS(rc))
     13080    {
     13081        if (Info.fNemProt & (  RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
     13082                             ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
     13083        {
     13084            if (State.fCanResume)
     13085            {
     13086                Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting\n",
     13087                      pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
     13088                      pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
     13089                      Info.fHasHandlers ? " handlers" : "", Info.fZeroPage    ? " zero-pg" : "",
     13090                      State.fDidSomething ? "" : " no-change"));
     13091                EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_NEM, NEMEXITTYPE_MEMORY_ACCESS),
     13092                                 pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
     13093                return VINF_SUCCESS;
     13094            }
     13095        }
     13096
     13097        Log4(("MemExit/%u: %04x:%08RX64: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating\n",
     13098              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
     13099              pVmxTransient->uGuestPhysicalAddr, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
     13100              Info.fHasHandlers ? " handlers" : "", Info.fZeroPage    ? " zero-pg" : "",
     13101              State.fDidSomething ? "" : " no-change"));
     13102    }
     13103    else
     13104        Log4(("MemExit/%u: %04x:%08RX64: %RGp rc=%Rrc%s; emulating\n",
     13105              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
     13106              pVmxTransient->uGuestPhysicalAddr, rc, State.fDidSomething ? " modified-backing" : ""));
     13107
     13108    /*
     13109     * Emulate the memory access, either access handler or special memory.
     13110     */
     13111    PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
     13112                                              RT_BOOL(pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
     13113                                            ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
     13114                                            : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
     13115                                            pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.rip, uHostTsc);
     13116#if 0
     13117    rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu,   CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS
     13118                                                | NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
     13119    AssertRCReturn(rc, rc);
     13120#endif
     13121    VBOXSTRICTRC rcStrict;
     13122    if (!pExitRec)
     13123    {
     13124        rcStrict = IEMExecOne(pVCpu);
     13125        /** @todo do we need to do anything wrt debugging here?   */
     13126    }
     13127    else
     13128    {
     13129        /* Frequent access or probing. */
     13130        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
     13131        Log4(("MemExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
     13132              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
     13133              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
     13134    }
     13135#endif
    1482813136
    1482913137    Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     
    1486013168    VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
    1486113169    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    14862         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
     13170        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
    1486313171    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1486413172    {
    14865         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13173        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1486613174        rcStrict = VINF_SUCCESS;
    1486713175    }
     
    1488513193    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    1488613194
    14887     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
     13195    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry, z);
    1488813196    VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
    14889     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
     13197    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry, z);
    1489013198    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1489113199    {
    14892         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     13200        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    1489313201        if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    1489413202            rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
     
    1492613234    VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
    1492713235    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    14928         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
     13236        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
    1492913237    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1493013238    {
    14931         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13239        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1493213240        rcStrict = VINF_SUCCESS;
    1493313241    }
     
    1496313271    VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
    1496413272    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    14965         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     13273        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1496613274    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1496713275    {
    14968         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13276        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1496913277        rcStrict = VINF_SUCCESS;
    1497013278    }
     
    1500613314    VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
    1500713315    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    15008         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     13316        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1500913317    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1501013318    {
    15011         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13319        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1501213320        rcStrict = VINF_SUCCESS;
    1501313321    }
     
    1503113339    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
    1503213340
    15033     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
     13341    STAM_PROFILE_ADV_START(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry, z);
    1503413342    VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
    15035     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
     13343    STAM_PROFILE_ADV_STOP(&VCPU_2_VMXSTATE(pVCpu).StatExitVmentry, z);
    1503613344    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1503713345    {
    15038         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     13346        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
    1503913347        if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
    1504013348            rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
     
    1507813386    VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
    1507913387    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    15080         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
     13388        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
    1508113389    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1508213390    {
    15083         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13391        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1508413392        rcStrict = VINF_SUCCESS;
    1508513393    }
     
    1510513413    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
    1510613414    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    15107         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
     13415        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
    1510813416    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1510913417    {
    15110         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13418        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1511113419        rcStrict = VINF_SUCCESS;
    1511213420    }
     
    1514213450    VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
    1514313451    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    15144         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
     13452        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
    1514513453    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1514613454    {
    15147         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13455        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1514813456        rcStrict = VINF_SUCCESS;
    1514913457    }
     
    1517813486    VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
    1517913487    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    15180         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     13488        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1518113489    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    1518213490    {
    15183         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13491        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1518413492        rcStrict = VINF_SUCCESS;
    1518513493    }
     
    1529513603        default:
    1529613604        {
    15297             pVCpu->hm.s.u32HMError = pVmxTransient->uExitIntInfo;
     13605            VCPU_2_VMXSTATE(pVCpu).u32HMError = pVmxTransient->uExitIntInfo;
    1529813606            return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
    1529913607        }
     
    1563413942    if (rcStrict == VINF_IEM_RAISED_XCPT)
    1563513943    {
    15636         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     13944        ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    1563713945        rcStrict = VINF_SUCCESS;
    1563813946    }