VirtualBox

Changeset 76482 in vbox for trunk/src/VBox/VMM


Timestamp: Dec 26, 2018 3:49:56 AM
Author: vboxsync
Message:
VMM/HM: Made the vmx/svm VCPU state a union, which saves some space now that the SVM bits have grown with nested-SVM; plus other cleanup.
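The saving comes from overlapping the VT-x and AMD-V per-VCPU state: a VM only ever runs on one of the two, so the two sub-structures can share storage. A minimal sketch of the idea, with illustrative member names rather than the actual HMInternal.h layout:

    /* Sketch only: overlapping mutually exclusive backend state in a union
     * shrinks the per-VCPU structure by roughly the size of the smaller
     * member.  All names here are illustrative. */
    typedef struct HMCPUSKETCH
    {
        union /* VT-x and AMD-V state are never live at the same time. */
        {
            struct { uint32_t u32ProcCtls;  /* ... more VT-x fields ...  */ } vmx;
            struct { uint64_t u64MsrpmPhys; /* ... more AMD-V fields ... */ } svm;
        } u;
        /* ... fields common to both backends follow ... */
    } HMCPUSKETCH;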

Location: trunk/src/VBox/VMM
Files: 8 edited

  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

r76469 → r76482

@@ -83 +83 @@
 {
     /** Per CPU globals. */
-    HMGLOBALCPUINFO                 aCpuInfo[RTCPUSET_MAX_CPUS];
+    HMPHYSCPU                       aCpuInfo[RTCPUSET_MAX_CPUS];

     /** @name Ring-0 method table for AMD-V and VT-x specific operations.
      * @{ */
-    DECLR0CALLBACKMEMBER(int,  pfnEnterSession, (PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu));
+    DECLR0CALLBACKMEMBER(int,  pfnEnterSession, (PVMCPU pVCpu));
     DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit));
     DECLR0CALLBACKMEMBER(int,  pfnExportHostState, (PVMCPU pVCpu));
     DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPU pVCpu));
-    DECLR0CALLBACKMEMBER(int,  pfnEnableCpu, (PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
+    DECLR0CALLBACKMEMBER(int,  pfnEnableCpu, (PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
                                               bool fEnabledByHost, PCSUPHWVIRTMSRS pHwvirtMsrs));
-    DECLR0CALLBACKMEMBER(int,  pfnDisableCpu, (PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
+    DECLR0CALLBACKMEMBER(int,  pfnDisableCpu, (void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
     DECLR0CALLBACKMEMBER(int,  pfnInitVM, (PVM pVM));
     DECLR0CALLBACKMEMBER(int,  pfnTermVM, (PVM pVM));

@@ -227 +227 @@
  * @{ */

-static DECLCALLBACK(int) hmR0DummyEnter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
-{
-    RT_NOREF2(pVCpu, pHostCpu);
+static DECLCALLBACK(int) hmR0DummyEnter(PVMCPU pVCpu)
+{
+    RT_NOREF1(pVCpu);
     return VINF_SUCCESS;
 }

@@ -238 +238 @@
 }

-static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
+static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
                                             bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs)
 {

@@ -245 +245 @@
 }

-static DECLCALLBACK(int) hmR0DummyDisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
-{
-    RT_NOREF3(pHostCpu, pvCpuPage, HCPhysCpuPage);
+static DECLCALLBACK(int) hmR0DummyDisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+{
+    RT_NOREF2(pvCpuPage, HCPhysCpuPage);
     return VINF_SUCCESS;
 }

@@ -789 +789 @@
 static int hmR0EnableCpu(PVM pVM, RTCPUID idCpu)
 {
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    PHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu];

     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */

@@ -975 +975 @@
 static int hmR0DisableCpu(RTCPUID idCpu)
 {
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    PHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu];

     Assert(!g_HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);

@@ -992 +992 @@
     if (pHostCpu->fConfigured)
     {
-        rc = g_HmR0.pfnDisableCpu(pHostCpu, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj);
+        rc = g_HmR0.pfnDisableCpu(pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj);
         AssertRCReturn(rc, rc);

@@ -1323 +1323 @@
     int              rc       = VINF_SUCCESS;
     RTCPUID const    idCpu    = RTMpCpuId();
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    PHMPHYSCPU       pHostCpu = &g_HmR0.aCpuInfo[idCpu];
     AssertPtr(pHostCpu);
     
@@ -1358 +1358 @@
     /* Load the bare minimum state required for entering HM. */
     int rc = hmR0EnterCpu(pVCpu);
-    AssertRCReturn(rc, rc);
+    if (RT_SUCCESS(rc))
+    {
+        if (g_HmR0.hwvirt.u.vmx.fSupported)
+        {
+            Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
+                                           == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
+        }
+        else
+        {
+            Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
+                                           == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
+        }

 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-    AssertReturn(!VMMR0ThreadCtxHookIsEnabled(pVCpu), VERR_HM_IPE_5);
-    bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
+        AssertReturn(!VMMR0ThreadCtxHookIsEnabled(pVCpu), VERR_HM_IPE_5);
+        bool const fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
 #endif

-    RTCPUID const    idCpu    = RTMpCpuId();
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
-    Assert(pHostCpu);
-    if (g_HmR0.hwvirt.u.vmx.fSupported)
-    {
-        Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
-                                       == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
-    }
-    else
-    {
-        Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
-                                       == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
-    }
-
-    rc = g_HmR0.pfnEnterSession(pVCpu, pHostCpu);
-    AssertMsgRCReturn(rc, ("rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
-
-    /* Exports the host-state as we may be resuming code after a longjmp and quite
-       possibly now be scheduled on a different CPU. */
-    rc = g_HmR0.pfnExportHostState(pVCpu);
-    AssertMsgRCReturn(rc, ("rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
+        /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
+        rc = g_HmR0.pfnEnterSession(pVCpu);
+        AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID, rc);
+
+        /* Exports the host-state as we may be resuming code after a longjmp and quite
+           possibly now be scheduled on a different CPU. */
+        rc = g_HmR0.pfnExportHostState(pVCpu);
+        AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID, rc);

 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-    if (fStartedSet)
-        PGMRZDynMapReleaseAutoSet(pVCpu);
+        if (fStartedSet)
+            PGMRZDynMapReleaseAutoSet(pVCpu);
 #endif
-
-    /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
-    if (RT_FAILURE(rc))
-        pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
+    }
     return rc;
 }
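The reworked entry path above also folds the old trailing if (RT_FAILURE(rc)) fixup into IPRT's statement-carrying assertion macro. Roughly, and leaving out the strict-build assertion message, AssertMsgRCReturnStmt(rc, Msg, Stmt, rcRet) behaves like this sketch:

    /* Approximate behaviour of AssertMsgRCReturnStmt(); see iprt/assert.h for
     * the real definition (strict builds also raise the assertion message). */
    if (RT_FAILURE(rc))
    {
        pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID; /* the Stmt argument */
        return rc;                              /* the rcRet argument */
    }

so the CPU-ownership bookkeeping is now undone on every failure exit without a separate cleanup block at the end of the function.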
     
@@ -1413 +1408 @@
     VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_HM_WRONG_CPU);

-    RTCPUID const    idCpu    = RTMpCpuId();
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    RTCPUID const idCpu    = RTMpCpuId();
+    PCHMPHYSCPU   pHostCpu = &g_HmR0.aCpuInfo[idCpu];

     if (   !g_HmR0.fGlobalInit

@@ -1470 +1465 @@
     if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     {
-        PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[RTMpCpuId()];
+        PCHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[RTMpCpuId()];
         Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
         Assert(pHostCpu->fConfigured);

@@ -1609 +1604 @@
  * @returns The cpu structure pointer.
  */
-VMMR0_INT_DECL(PHMGLOBALCPUINFO) hmR0GetCurrentCpu(void)
+VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

@@ -1692 +1687 @@

     /* Ok, disable VT-x. */
-    PHMGLOBALCPUINFO pHostCpu = hmR0GetCurrentCpu();
+    PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
     AssertReturn(   pHostCpu
                  && pHostCpu->hMemObj != NIL_RTR0MEMOBJ

@@ -1700 +1695 @@

     *pfVTxDisabled = true;
-    return VMXR0DisableCpu(pHostCpu, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj);
+    return VMXR0DisableCpu(pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj);
 }

@@ -1726 +1721 @@
         Assert(g_HmR0.fGlobalInit);

-        PHMGLOBALCPUINFO pHostCpu = hmR0GetCurrentCpu();
+        PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
         AssertReturnVoid(   pHostCpu
                          && pHostCpu->hMemObj != NIL_RTR0MEMOBJ
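All of the pfn* members touched above belong to a small ring-0 method table that HMR0 fills in once, at init time, with either the VT-x backend, the AMD-V backend, or the dummy stubs; that is why the pHostCpu parameter had to be dropped from all three implementations together. A hedged sketch of that selection (the function names are from this changeset, but the surrounding flags and control flow are illustrative, not the actual HMR0.cpp init code):

    /* Illustrative only: the real selection logic lives in HMR0.cpp. */
    if (fSupportsVmx)                           /* hypothetical flag */
    {
        g_HmR0.pfnEnterSession = VMXR0Enter;
        g_HmR0.pfnEnableCpu    = VMXR0EnableCpu;
        g_HmR0.pfnDisableCpu   = VMXR0DisableCpu;
    }
    else if (fSupportsSvm)                      /* hypothetical flag */
    {
        g_HmR0.pfnEnterSession = SVMR0Enter;
        g_HmR0.pfnEnableCpu    = SVMR0EnableCpu;
        g_HmR0.pfnDisableCpu   = SVMR0DisableCpu;
    }
    else
    {
        g_HmR0.pfnEnterSession = hmR0DummyEnter;
        g_HmR0.pfnEnableCpu    = hmR0DummyEnableCpu;
        g_HmR0.pfnDisableCpu   = hmR0DummyDisableCpu;
    }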
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

r76464 → r76482

@@ -519 +519 @@
  *
  * @returns VBox status code.
- * @param   pHostCpu        Pointer to the CPU info struct.
+ * @param   pHostCpu        The HM physical-CPU structure.
  * @param   pVM             The cross context VM structure. Can be
  *                          NULL after a resume!

@@ -528 +528 @@
  *                          unused).
  */
-VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+VMMR0DECL(int) SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
                               PCSUPHWVIRTMSRS pHwvirtMsrs)
 {

@@ -590 +590 @@
  *
  * @returns VBox status code.
- * @param   pHostCpu        Pointer to the CPU info struct.
  * @param   pvCpuPage       Pointer to the global CPU page.
  * @param   HCPhysCpuPage   Physical address of the global CPU page.
  */
-VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+VMMR0DECL(int) SVMR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

@@ -600 +599 @@
                  && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
-    RT_NOREF(pHostCpu);

     /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */

@@ -1192 +1190 @@
  * Flushes the appropriate tagged-TLB entries.
  *
+ * @param   pHostCpu    The HM physical-CPU structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pVmcb       Pointer to the VM control block.
- * @param   pHostCpu    Pointer to the HM host-CPU info.
- */
-static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu)
+ */
+static void hmR0SvmFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PSVMVMCB pVmcb)
 {
     /*

@@ -2321 +2319 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pHostCpu    Pointer to the CPU info struct.
- */
-VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
+ */
+VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu)
 {
     AssertPtr(pVCpu);
     Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    RT_NOREF(pHostCpu);

     LogFlowFunc(("pVCpu=%p\n", pVCpu));

@@ -2522 +2518 @@
  * whether the nested-guest is intercepting it or not.
  *
- * @param   pHostCpu        Pointer to the physical CPU HM info. struct.
- * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pHostCpu    The HM physical-CPU structure.
+ * @param   pVCpu       The cross context virtual CPU structure.
  *
  * @remarks No-long-jmp zone!!!
  */
-DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu)
+DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
 {
     uint64_t const *pu64GstMsrpm    = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap;

@@ -4558 +4554 @@
     AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));

-    PHMGLOBALCPUINFO pHostCpu         = hmR0GetCurrentCpu();
-    RTCPUID const    idHostCpu        = pHostCpu->idCpu;
-    bool const       fMigratedHostCpu = idHostCpu != pVCpu->hm.s.idLastCpu;
+    PHMPHYSCPU    pHostCpu         = hmR0GetCurrentCpu();
+    RTCPUID const idHostCpu        = pHostCpu->idCpu;
+    bool const    fMigratedHostCpu = idHostCpu != pVCpu->hm.s.idLastCpu;

     /* Setup TSC offsetting. */

@@ -4606 +4602 @@
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
     /* Flush the appropriate tagged-TLB entries. */
-    hmR0SvmFlushTaggedTlb(pVCpu, pVmcb, pHostCpu);
+    hmR0SvmFlushTaggedTlb(pHostCpu, pVCpu, pVmcb);
     Assert(pVCpu->hm.s.idLastCpu == idHostCpu);
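hmR0SvmMergeMsrpmNested, whose signature changes above, walks the guest and nested-guest MSR permission bitmaps as uint64_t words (note the pu64GstMsrpm cast in the hunk). The merge idea as a self-contained, hedged sketch; the buffer names and word count are illustrative, not the exact SVM MSRPM layout:

    #include <stdint.h>
    #include <stddef.h>

    /* An MSR access must be intercepted if either the outer VMM's bitmap or
     * the nested hypervisor's bitmap intercepts it, hence the bitwise OR,
     * taken 64 bits at a time. */
    static void mergeMsrpmSketch(uint64_t *pu64Dst, const uint64_t *pu64VmmMsrpm,
                                 const uint64_t *pu64NstGstMsrpm, size_t cQWords)
    {
        for (size_t i = 0; i < cQWords; i++)
            pu64Dst[i] = pu64VmmMsrpm[i] | pu64NstGstMsrpm[i];
    }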
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h

r76464 → r76482

@@ -36 +36 @@
 VMMR0DECL(int)          SVMR0GlobalInit(void);
 VMMR0DECL(void)         SVMR0GlobalTerm(void);
-VMMR0DECL(int)          SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu);
+VMMR0DECL(int)          SVMR0Enter(PVMCPU pVCpu);
 VMMR0DECL(void)         SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
-VMMR0DECL(int)          SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
+VMMR0DECL(int)          SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
                                        bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
-VMMR0DECL(int)          SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
+VMMR0DECL(int)          SVMR0DisableCpu(void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 VMMR0DECL(int)          SVMR0InitVM(PVM pVM);
 VMMR0DECL(int)          SVMR0TermVM(PVM pVM);
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

r76477 → r76482

@@ -1119 +1119 @@
  *
  * @returns VBox status code.
- * @param   pHostCpu        Pointer to the global CPU info struct.
+ * @param   pHostCpu        The HM physical-CPU structure.
  * @param   pVM             The cross context VM structure.  Can be
  *                          NULL after a host resume operation.

@@ -1130 +1130 @@
  * @param   pHwvirtMsrs     Pointer to the hardware-virtualization MSRs.
  */
-VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
                               PCSUPHWVIRTMSRS pHwvirtMsrs)
 {

@@ -1169 +1169 @@
  *
  * @returns VBox status code.
- * @param   pHostCpu        Pointer to the global CPU info struct.
  * @param   pvCpuPage       Pointer to the VMXON region.
  * @param   HCPhysCpuPage   Physical address of the VMXON region.

@@ -1176 +1175 @@
  *          similar was used to enable VT-x on the host.
  */
-VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
-{
-    RT_NOREF3(pHostCpu, pvCpuPage, HCPhysCpuPage);
+VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+{
+    RT_NOREF2(pvCpuPage, HCPhysCpuPage);

     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

@@ -1411 +1410 @@
 static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
 {
-    PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
-    uint32_t    cMsrs     = pVCpu->hm.s.vmx.cMsrs;
+    PVMXAUTOMSR    pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
+    uint32_t const cMsrs     = pVCpu->hm.s.vmx.cMsrs;

     for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)

@@ -1435 +1434 @@
     PVMXAUTOMSR pHostMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
     PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
-    uint32_t    cMsrs    = pVCpu->hm.s.vmx.cMsrs;
+    uint32_t const cMsrs  = pVCpu->hm.s.vmx.cMsrs;

     for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)

@@ -1894 +1893 @@
  * case where neither EPT nor VPID is supported by the CPU.
  *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pCpu            Pointer to the global HM struct.
+ * @param   pHostCpu    The HM physical-CPU structure.
+ * @param   pVCpu       The cross context virtual CPU structure.
  *
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbNone(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+static void hmR0VmxFlushTaggedTlbNone(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
 {
     AssertPtr(pVCpu);
-    AssertPtr(pCpu);
+    AssertPtr(pHostCpu);

     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);

-    Assert(pCpu->idCpu != NIL_RTCPUID);
-    pVCpu->hm.s.idLastCpu           = pCpu->idCpu;
-    pVCpu->hm.s.cTlbFlushes         = pCpu->cTlbFlushes;
+    Assert(pHostCpu->idCpu != NIL_RTCPUID);
+    pVCpu->hm.s.idLastCpu           = pHostCpu->idCpu;
+    pVCpu->hm.s.cTlbFlushes         = pHostCpu->cTlbFlushes;
     pVCpu->hm.s.fForceTLBFlush      = false;
     return;

@@ -1917 +1916 @@
  * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
  *
- * @param    pVCpu          The cross context virtual CPU structure.
- * @param    pCpu           Pointer to the global HM CPU struct.
+ * @param   pHostCpu    The HM physical-CPU structure.
+ * @param   pVCpu       The cross context virtual CPU structure.
  *
  * @remarks  All references to "ASID" in this function pertains to "VPID" in Intel's

@@ -1926 +1925 @@
  * @remarks  Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbBoth(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
 {
 #ifdef VBOX_WITH_STATISTICS

@@ -1940 +1939 @@
 #endif

-    AssertPtr(pCpu);
     AssertPtr(pVCpu);
-    Assert(pCpu->idCpu != NIL_RTCPUID);
+    AssertPtr(pHostCpu);
+    Assert(pHostCpu->idCpu != NIL_RTCPUID);

     PVM pVM = pVCpu->CTX_SUFF(pVM);

@@ -1955 +1954 @@
      * cannot reuse the current ASID anymore.
      */
-    if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
-        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
-    {
-        ++pCpu->uCurrentAsid;
-        if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
-        {
-            pCpu->uCurrentAsid = 1;              /* Wraparound to 1; host uses 0. */
-            pCpu->cTlbFlushes++;                 /* All VCPUs that run on this host CPU must use a new VPID. */
-            pCpu->fFlushAsidBeforeUse = true;    /* All VCPUs that run on this host CPU must flush their new VPID before use. */
-        }
-
-        pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
-        pVCpu->hm.s.idLastCpu    = pCpu->idCpu;
-        pVCpu->hm.s.cTlbFlushes  = pCpu->cTlbFlushes;
+    if (   pVCpu->hm.s.idLastCpu   != pHostCpu->idCpu
+        || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
+    {
+        ++pHostCpu->uCurrentAsid;
+        if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
+        {
+            pHostCpu->uCurrentAsid = 1;            /* Wraparound to 1; host uses 0. */
+            pHostCpu->cTlbFlushes++;               /* All VCPUs that run on this host CPU must use a new VPID. */
+            pHostCpu->fFlushAsidBeforeUse = true;  /* All VCPUs that run on this host CPU must flush their new VPID before use. */
+        }
+
+        pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
+        pVCpu->hm.s.idLastCpu    = pHostCpu->idCpu;
+        pVCpu->hm.s.cTlbFlushes  = pHostCpu->cTlbFlushes;

         /*

@@ -1998 +1997 @@
     HMVMX_UPDATE_FLUSH_SKIPPED_STAT();

-    Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
-    Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
-    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
-              ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
-    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
-              ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
-               pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
+    Assert(pVCpu->hm.s.idLastCpu == pHostCpu->idCpu);
+    Assert(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes);
+    AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
+              ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
+    AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
+               pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
     AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
-              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
+              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

     /* Update VMCS with the VPID. */

@@ -2019 +2018 @@
  * Flushes the tagged-TLB entries for EPT CPUs as necessary.
  *
- * @returns VBox status code.
+ * @param   pHostCpu    The HM physical-CPU structure.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCpu        Pointer to the global HM CPU struct.
  *
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbEpt(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
 {
     AssertPtr(pVCpu);
-    AssertPtr(pCpu);
-    Assert(pCpu->idCpu != NIL_RTCPUID);
+    AssertPtr(pHostCpu);
+    Assert(pHostCpu->idCpu != NIL_RTCPUID);
     AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
     AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));

@@ -2037 +2035 @@
      * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
      */
-    if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
-        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
+    if (   pVCpu->hm.s.idLastCpu   != pHostCpu->idCpu
+        || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
     {
         pVCpu->hm.s.fForceTLBFlush = true;

@@ -2051 +2049 @@
     }

-    pVCpu->hm.s.idLastCpu   = pCpu->idCpu;
-    pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
+    pVCpu->hm.s.idLastCpu   = pHostCpu->idCpu;
+    pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;

     if (pVCpu->hm.s.fForceTLBFlush)

@@ -2065 +2063 @@
  * Flushes the tagged-TLB entries for VPID CPUs as necessary.
  *
- * @returns VBox status code.
+ * @param   pHostCpu    The HM physical-CPU structure.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCpu        Pointer to the global HM CPU struct.
  *
  * @remarks Called with interrupts disabled.
  */
-static void hmR0VmxFlushTaggedTlbVpid(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+static void hmR0VmxFlushTaggedTlbVpid(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
 {
     AssertPtr(pVCpu);
-    AssertPtr(pCpu);
-    Assert(pCpu->idCpu != NIL_RTCPUID);
+    AssertPtr(pHostCpu);
+    Assert(pHostCpu->idCpu != NIL_RTCPUID);
     AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked without VPID."));
     AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging"));

@@ -2085 +2082 @@
      * cannot reuse the current ASID anymore.
      */
-    if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
-        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
+    if (   pVCpu->hm.s.idLastCpu   != pHostCpu->idCpu
+        || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
     {
         pVCpu->hm.s.fForceTLBFlush = true;

@@ -2098 +2095 @@
          * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
          * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
-         * fExplicitFlush = true here and change the pCpu->fFlushAsidBeforeUse check below to
+         * fExplicitFlush = true here and change the pHostCpu->fFlushAsidBeforeUse check below to
          * include fExplicitFlush's too) - an obscure corner case.
          */

@@ -2106 +2103 @@

     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
+    pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
     if (pVCpu->hm.s.fForceTLBFlush)
     {
-        ++pCpu->uCurrentAsid;
-        if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
-        {
-            pCpu->uCurrentAsid        = 1;       /* Wraparound to 1; host uses 0 */
-            pCpu->cTlbFlushes++;                 /* All VCPUs that run on this host CPU must use a new VPID. */
-            pCpu->fFlushAsidBeforeUse = true;    /* All VCPUs that run on this host CPU must flush their new VPID before use. */
+        ++pHostCpu->uCurrentAsid;
+        if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
+        {
+            pHostCpu->uCurrentAsid        = 1;     /* Wraparound to 1; host uses 0 */
+            pHostCpu->cTlbFlushes++;               /* All VCPUs that run on this host CPU must use a new VPID. */
+            pHostCpu->fFlushAsidBeforeUse = true;  /* All VCPUs that run on this host CPU must flush their new VPID before use. */
         }

         pVCpu->hm.s.fForceTLBFlush = false;
-        pVCpu->hm.s.cTlbFlushes    = pCpu->cTlbFlushes;
-        pVCpu->hm.s.uCurrentAsid   = pCpu->uCurrentAsid;
-        if (pCpu->fFlushAsidBeforeUse)
+        pVCpu->hm.s.cTlbFlushes    = pHostCpu->cTlbFlushes;
+        pVCpu->hm.s.uCurrentAsid   = pHostCpu->uCurrentAsid;
+        if (pHostCpu->fFlushAsidBeforeUse)
         {
             if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)

@@ -2127 +2124 @@
             {
                 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
-                pCpu->fFlushAsidBeforeUse = false;
+                pHostCpu->fFlushAsidBeforeUse = false;
             }
             else

@@ -2137 +2134 @@
     }

-    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
-              ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
-    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
-              ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
-               pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
+    AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
+              ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
+    AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
+               pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
     AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
-              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
+              ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

     int rc  = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);

@@ -2153 +2150 @@
  * Flushes the guest TLB entry based on CPU capabilities.
  *
- * @param   pVCpu     The cross context virtual CPU structure.
- * @param   pCpu      Pointer to the global HM CPU struct.
- */
-DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
+ * @param   pHostCpu    The HM physical-CPU structure.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ *
+ * @remarks Called with interrupts disabled.
+ */
+DECLINLINE(void) hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
 {
 #ifdef HMVMX_ALWAYS_FLUSH_TLB

@@ -2164 +2163 @@
     switch (pVM->hm.s.vmx.enmTlbFlushType)
     {
-        case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVCpu, pCpu); break;
-        case VMXTLBFLUSHTYPE_EPT:      hmR0VmxFlushTaggedTlbEpt(pVCpu, pCpu);  break;
-        case VMXTLBFLUSHTYPE_VPID:     hmR0VmxFlushTaggedTlbVpid(pVCpu, pCpu); break;
-        case VMXTLBFLUSHTYPE_NONE:     hmR0VmxFlushTaggedTlbNone(pVCpu, pCpu); break;
+        case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu); break;
+        case VMXTLBFLUSHTYPE_EPT:      hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu);  break;
+        case VMXTLBFLUSHTYPE_VPID:     hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break;
+        case VMXTLBFLUSHTYPE_NONE:     hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break;
         default:
             AssertMsgFailed(("Invalid flush-tag function identifier\n"));
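Every flavor above implements the same per-host-CPU ASID/VPID bookkeeping: each physical CPU hands out ascending ASIDs (0 stays reserved for the host), and on wraparound it bumps cTlbFlushes so that every VCPU subsequently scheduled there takes a fresh ASID and a full flush. Condensed from the hunks above (just the allocation step, not a drop-in function):

    /* Condensed sketch of the allocation step in hmR0VmxFlushTaggedTlbBoth/Vpid. */
    ++pHostCpu->uCurrentAsid;
    if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
    {
        pHostCpu->uCurrentAsid        = 1;    /* Wrap to 1; ASID 0 is the host's. */
        pHostCpu->cTlbFlushes++;              /* Invalidates every VCPU's cached ASID. */
        pHostCpu->fFlushAsidBeforeUse = true; /* New ASIDs must be flushed before first use. */
    }
    pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid; /* The VCPU now mirrors the host CPU...   */
    pVCpu->hm.s.idLastCpu    = pHostCpu->idCpu;
    pVCpu->hm.s.cTlbFlushes  = pHostCpu->cTlbFlushes;  /* ...so the mismatch test fires on the next migration. */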
     
@@ -2821 +2820 @@
                                     hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);

-        pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
+        pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;

         hmR0VmxUpdateErrorRecord(pVCpu, rc);

@@ -4982 +4981 @@
      * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
      */
-    bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
+    bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
     /** @todo Add stats for resume vs launch. */
     PVM pVM = pVCpu->CTX_SUFF(pVM);

@@ -5284 +5283 @@
 #endif

-    PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
-    RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
+    PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
+    RTHCPHYS HCPhysCpuPage = pHostCpu->HCPhysMemObj;

     /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
     VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
-    pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
+    pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;

     /* Leave VMX Root Mode. */

@@ -5323 +5322 @@
     rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     AssertRC(rc2);
-    pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
+    pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
     Assert(!(ASMGetFlags() & X86_EFL_IF));
     ASMSetFlags(fOldEFlags);

@@ -5345 +5344 @@
     NOREF(fResume);

-    PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
-    RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
+    PCHMPHYSCPU    pHostCpu      = hmR0GetCurrentCpu();
+    RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;

 #ifdef VBOX_WITH_CRASHDUMP_MAGIC

@@ -7211 +7210 @@
      *  context.
      */
-    if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
+    if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE)
     {
         int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
         AssertRCReturn(rc, rc);

-        pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
+        pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
         Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
     }
-    Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
+    Assert(!(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
     NOREF(idCpu);

@@ -7413 +7412 @@
         pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
         VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
-        if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
+        if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE)
         {
             VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
-            pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
+            pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR;
         }

@@ -8027 +8026 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pHostCpu    Pointer to the global CPU info struct.
- */
-VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
+ */
+VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu)
 {
     AssertPtr(pVCpu);
     Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    RT_NOREF(pHostCpu);

     LogFlowFunc(("pVCpu=%p\n", pVCpu));

@@ -8053 +8050 @@
      * Load the VCPU's VMCS as the current (and active) one.
      */
-    Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
+    Assert(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR);
     int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
-    if (RT_FAILURE(rc))
-        return rc;
-
-    pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
-    pVCpu->hm.s.fLeaveDone = false;
-    Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
-
-    return VINF_SUCCESS;
+    if (RT_SUCCESS(rc))
+    {
+        pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
+        pVCpu->hm.s.fLeaveDone = false;
+        Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
+    }
+    return rc;
 }

@@ -8131 +8127 @@

             /* Load the active VMCS as the current one. */
-            if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
+            if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR)
             {
                 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
                 AssertRC(rc); NOREF(rc);
-                pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
+                pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE;
                 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
             }

@@ -8693 +8689 @@
         pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR];

-    PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
-    RTCPUID  idCurrentCpu = pCpu->idCpu;
+    PHMPHYSCPU pHostCpu     = hmR0GetCurrentCpu();
+    RTCPUID    idCurrentCpu = pHostCpu->idCpu;
     if (   pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
         || idCurrentCpu != pVCpu->hm.s.idLastCpu)

@@ -8703 +8699 @@

     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
-    hmR0VmxFlushTaggedTlb(pVCpu, pCpu);                         /* Invalidate the appropriate guest entries from the TLB. */
+    hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu);                     /* Invalidate the appropriate guest entries from the TLB. */
     Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
     pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu;      /* Update the error reporting info. with the current host CPU. */

@@ -8800 +8796 @@
 #endif
 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
-    /* The 64-on-32 switcher maintains uVmcsState on its own and we need to leave it alone here. */
+    /* The 64-on-32 switcher maintains fVmcsState on its own and we need to leave it alone here. */
     if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
-        pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;      /* Use VMRESUME instead of VMLAUNCH in the next run. */
+        pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;      /* Use VMRESUME instead of VMLAUNCH in the next run. */
 #else
-    pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;          /* Use VMRESUME instead of VMLAUNCH in the next run. */
+    pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;          /* Use VMRESUME instead of VMLAUNCH in the next run. */
 #endif
 #ifdef VBOX_STRICT

@@ -9274 +9270 @@
         } else do { } while (0)

-    SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH,         VMX_EXIT_TASK_SWITCH);      /* unconditional */
-    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION,   VMX_EXIT_EPT_VIOLATION);    /* unconditional */
-    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG,   VMX_EXIT_EPT_MISCONFIG);    /* unconditional (unless #VE) */
-    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS,    VMX_EXIT_APIC_ACCESS);      /* feature dependent, nothing to enable here */
-    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE,     VMX_EXIT_APIC_WRITE);       /* feature dependent, nothing to enable here */
-
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID,              VMX_EXIT_CPUID);            /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH,         VMX_EXIT_TASK_SWITCH);   /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION,   VMX_EXIT_EPT_VIOLATION); /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG,   VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
+    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS,    VMX_EXIT_APIC_ACCESS);   /* feature dependent, nothing to enable here */
+    SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE,     VMX_EXIT_APIC_WRITE);    /* feature dependent, nothing to enable here */
+
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID,              VMX_EXIT_CPUID);         /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID,              VMX_EXIT_CPUID);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC,             VMX_EXIT_GETSEC);           /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC,             VMX_EXIT_GETSEC);        /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC,             VMX_EXIT_GETSEC);
     SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT,               VMX_EXIT_HLT,      VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT,               VMX_EXIT_HLT);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD,               VMX_EXIT_INVD);             /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD,               VMX_EXIT_INVD);          /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD,               VMX_EXIT_INVD);
     SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG,             VMX_EXIT_INVLPG,   VMX_PROC_CTLS_INVLPG_EXIT);

@@ -9294 +9290 @@
     SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC,              VMX_EXIT_RDTSC,    VMX_PROC_CTLS_RDTSC_EXIT);
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC,              VMX_EXIT_RDTSC);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM,                VMX_EXIT_RSM);              /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM,                VMX_EXIT_RSM);           /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM,                VMX_EXIT_RSM);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL,           VMX_EXIT_VMCALL);           /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL,           VMX_EXIT_VMCALL);        /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL,           VMX_EXIT_VMCALL);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);          /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);       /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR,        VMX_EXIT_VMCLEAR);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);         /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);      /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH,       VMX_EXIT_VMLAUNCH);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);          /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);       /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD,        VMX_EXIT_VMPTRLD);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST,        VMX_EXIT_VMPTRST);          /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST,        VMX_EXIT_VMPTRST);       /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST,        VMX_EXIT_VMPTRST);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD,         VMX_EXIT_VMREAD);           /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD,         VMX_EXIT_VMREAD);        /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD,         VMX_EXIT_VMREAD);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME,       VMX_EXIT_VMRESUME);         /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME,       VMX_EXIT_VMRESUME);      /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME,       VMX_EXIT_VMRESUME);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE,        VMX_EXIT_VMWRITE);          /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE,        VMX_EXIT_VMWRITE);       /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE,        VMX_EXIT_VMWRITE);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF,         VMX_EXIT_VMXOFF);           /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF,         VMX_EXIT_VMXOFF);        /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF,         VMX_EXIT_VMXOFF);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON,          VMX_EXIT_VMXON);            /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON,          VMX_EXIT_VMXON);         /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON,          VMX_EXIT_VMXON);

@@ -9400 +9396 @@
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR,                VMX_EXIT_LDTR_TR_ACCESS);

-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT,         VMX_EXIT_INVEPT);           /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT,         VMX_EXIT_INVEPT);        /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT,         VMX_EXIT_INVEPT);
     SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP,             VMX_EXIT_RDTSCP,   VMX_PROC_CTLS_RDTSC_EXIT);
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP,             VMX_EXIT_RDTSCP);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID,        VMX_EXIT_INVVPID);          /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID,        VMX_EXIT_INVVPID);       /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID,        VMX_EXIT_INVVPID);
     SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD,             VMX_EXIT_WBINVD,   VMX_PROC_CTLS2_WBINVD_EXIT);
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD,             VMX_EXIT_WBINVD);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV,             VMX_EXIT_XSETBV);           /* unconditional */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV,             VMX_EXIT_XSETBV);        /* unconditional */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV,             VMX_EXIT_XSETBV);
     SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND,             VMX_EXIT_RDRAND,   VMX_PROC_CTLS2_RDRAND_EXIT);

@@ -9414 +9410 @@
     SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID,        VMX_EXIT_INVPCID,  VMX_PROC_CTLS_INVLPG_EXIT);
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID,        VMX_EXIT_INVPCID);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC,         VMX_EXIT_VMFUNC);           /* unconditional for the current setup */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC,         VMX_EXIT_VMFUNC);        /* unconditional for the current setup */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC,         VMX_EXIT_VMFUNC);
     SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED,             VMX_EXIT_RDSEED,   VMX_PROC_CTLS2_RDSEED_EXIT);
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED,             VMX_EXIT_RDSEED);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES,             VMX_EXIT_XSAVES);           /* unconditional (enabled by host, guest cfg) */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES,             VMX_EXIT_XSAVES);        /* unconditional (enabled by host, guest cfg) */
     SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES,              VMX_EXIT_XSAVES);
-    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS,            VMX_EXIT_XRSTORS);          /* unconditional (enabled by host, guest cfg) */
+    SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS,            VMX_EXIT_XRSTORS);       /* unconditional (enabled by host, guest cfg) */
     SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS,            VMX_EXIT_XRSTORS);

@@ -11967 +11963 @@
     }
     else
-        AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));

     return rcStrict;

@@ -12110 +12106 @@
     }
     else
-        AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));

     return rcStrict;

@@ -13232 +13228 @@
         if (   !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
             || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
-            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
-                                   pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
+            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
+                                   pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
         else
             rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
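The uVmcsState to fVmcsState rename throughout this file reflects that the field is a set of flags, not a scalar state: CLEAR/ACTIVE track VMCLEAR/VMPTRLD, and LAUNCHED records whether VMLAUNCH has run so the next world switch can use VMRESUME. A hedged sketch of how the flags drive that choice (the bit assignments shown are illustrative, not quoted from the header):

    /* Illustrative bit assignments; the real HMVMX_VMCS_STATE_* values are
     * defined in HMVMXR0 and may differ. */
    #define HMVMX_VMCS_STATE_CLEAR      RT_BIT(0)   /* VMCLEAR'ed.                  */
    #define HMVMX_VMCS_STATE_ACTIVE     RT_BIT(1)   /* VMPTRLD'ed on this CPU.      */
    #define HMVMX_VMCS_STATE_LAUNCHED   RT_BIT(2)   /* VMLAUNCH done at least once. */

    /* As in the fResumeVM hunk above: pick VMRESUME once LAUNCHED is set. */
    bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED);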
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h

r76464 → r76482

@@ -29 +29 @@
 #ifdef IN_RING0

-VMMR0DECL(int)          VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu);
+VMMR0DECL(int)          VMXR0Enter(PVMCPU pVCpu);
 VMMR0DECL(void)         VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
-VMMR0DECL(int)          VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
+VMMR0DECL(int)          VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
                                        bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
-VMMR0DECL(int)          VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
+VMMR0DECL(int)          VMXR0DisableCpu(void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 VMMR0DECL(int)          VMXR0GlobalInit(void);
 VMMR0DECL(void)         VMXR0GlobalTerm(void);
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

r76477 → r76482

@@ -3092 +3092 @@
                 LogRel(("HM: CPU[%u] Exit reason          %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));

-                if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
-                    || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
+                if (   pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
+                    || pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
                 {
                     LogRel(("HM: CPU[%u] Entered Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
                     LogRel(("HM: CPU[%u] Current Host Cpu     %u\n",  i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
                 }
-                else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
+                else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
                 {
                     LogRel(("HM: CPU[%u] PinCtls          %#RX32\n", i, pVCpu->hm.s.vmx.u32PinCtls));
  • trunk/src/VBox/VMM/include/HMInternal.h

r76477 → r76482

@@ -241 +241 @@

 /**
- * Global per-cpu information. (host)
- */
-typedef struct HMGLOBALCPUINFO
+ * HM physical (host) CPU information.
+ */
+typedef struct HMPHYSCPU
 {
     /** The CPU ID. */

@@ -284 +284 @@
     } n;
 #endif
-} HMGLOBALCPUINFO;
-/** Pointer to the per-cpu global information. */
-typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO;
-
-typedef enum
-{
-    HMPENDINGIO_INVALID = 0,
-    HMPENDINGIO_PORT_READ,
-    /* not implemented: HMPENDINGIO_STRING_READ, */
-    /* not implemented: HMPENDINGIO_STRING_WRITE, */
-    /** The usual 32-bit paranoia. */
-    HMPENDINGIO_32BIT_HACK   = 0x7fffffff
-} HMPENDINGIO;
-
-
+} HMPHYSCPU;
+/** Pointer to HMPHYSCPU struct. */
+typedef HMPHYSCPU *PHMPHYSCPU;
+/** Pointer to a const HMPHYSCPU struct. */
+typedef const HMPHYSCPU *PCHMPHYSCPU;
+
+/**
+ * TPR-instruction type.
+ */
 typedef enum
 {

@@ -311 +305 @@
 } HMTPRINSTR;

+/**
+ * TPR patch information.
+ */
 typedef struct
 {

@@ -384 +381 @@
 /** Pointer to switcher function. */
 typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
+
+/** @def HM_UNION_NM
+ * For compilers (like DTrace) that does not grok nameless unions, we have a
+ * little hack to make them palatable.
+ */
+/** @def HM_STRUCT_NM
+ * For compilers (like DTrace) that does not grok nameless structs (it is
+ * non-standard C++), we have a little hack to make them palatable.
+ */
+#ifdef VBOX_FOR_DTRACE_LIB
+# define HM_UNION_NM(a_Nm)  a_Nm
+# define HM_STRUCT_NM(a_Nm) a_Nm
+#elif defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS)
+# define HM_UNION_NM(a_Nm)  a_Nm
+# define HM_STRUCT_NM(a_Nm) a_Nm
+#else
+# define HM_UNION_NM(a_Nm)
+# define HM_STRUCT_NM(a_Nm)
+#endif

 /**
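The HM_UNION_NM/HM_STRUCT_NM hack introduced above keeps the union anonymous for normal C++ builds while giving it a name for DTrace and for builds with IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS. A hedged usage sketch (member names illustrative; this is the typical IPRT-style pattern for such name macros, not a quote from the header):

    /* With VBOX_FOR_DTRACE_LIB the macro expands to its argument, naming the
     * union "u"; otherwise it expands to nothing and the union is anonymous. */
    union
    {
        struct { uint32_t u32VmxThing; } vmx;
        struct { uint32_t u32SvmThing; } svm;
    } HM_UNION_NM(u);

    /* Code that must compile both ways spells the access path through the
     * macro, e.g.:  pVCpu->hm.s.HM_UNION_NM(u.)vmx.u32VmxThing  */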
     
@@ -720 +736 @@
     uint64_t                    u64HostTscAux;

-    struct
+    union /* no tag! */
     {
-        /** Ring 0 handlers for VT-x. */
-        PFNHMVMXSTARTVM             pfnStartVM;
-#if HC_ARCH_BITS == 32
-        uint32_t                    u32Alignment0;
-#endif
-        /** Current pin-based VM-execution controls. */
-        uint32_t                    u32PinCtls;
-        /** Current processor-based VM-execution controls. */
-        uint32_t                    u32ProcCtls;
-        /** Current secondary processor-based VM-execution controls. */
-        uint32_t                    u32ProcCtls2;
-        /** Current VM-entry controls. */
-        uint32_t                    u32EntryCtls;
-        /** Current VM-exit controls. */
-        uint32_t                    u32ExitCtls;
-
-        /** Current CR0 mask. */
-        uint32_t                    u32Cr0Mask;
-        /** Current CR4 mask. */
-        uint32_t                    u32Cr4Mask;
-        /** Current exception bitmap. */
-        uint32_t                    u32XcptBitmap;
-        /** The updated-guest-state mask. */
-        uint32_t                    au32Alignment0[2];
-
-        /** Physical address of the VM control structure (VMCS). */
-        RTHCPHYS                    HCPhysVmcs;
-        /** R0 memory object for the VM control structure (VMCS). */
-        RTR0MEMOBJ                  hMemObjVmcs;
-        /** Virtual address of the VM control structure (VMCS). */
-        R0PTRTYPE(void *)           pvVmcs;
-
-        /** Physical address of the virtual APIC page for TPR caching. */
-        RTHCPHYS                    HCPhysVirtApic;
-        /** Padding. */
-        R0PTRTYPE(void *)           pvAlignment0;
-        /** Virtual address of the virtual APIC page for TPR caching. */
-        R0PTRTYPE(uint8_t *)        pbVirtApic;
-
-        /** Physical address of the MSR bitmap. */
-        RTHCPHYS                    HCPhysMsrBitmap;
-        /** R0 memory object for the MSR bitmap. */
-        RTR0MEMOBJ                  hMemObjMsrBitmap;
-        /** Virtual address of the MSR bitmap. */
-        R0PTRTYPE(void *)           pvMsrBitmap;
-
-        /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
-         *  for guest MSRs). */
-        RTHCPHYS                    HCPhysGuestMsr;
-        /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
-         *  (used for guest MSRs). */
-        RTR0MEMOBJ                  hMemObjGuestMsr;
-        /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
-         *  for guest MSRs). */
-        R0PTRTYPE(void *)           pvGuestMsr;
-
-        /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
-        RTHCPHYS                    HCPhysHostMsr;
-        /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
-        RTR0MEMOBJ                  hMemObjHostMsr;
-        /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
-        R0PTRTYPE(void *)           pvHostMsr;
-
-        /** Current EPTP. */
-        RTHCPHYS                    HCPhysEPTP;
-
-        /** Number of guest/host MSR pairs in the auto-load/store area. */
-        uint32_t                    cMsrs;
-        /** Whether the host MSR values are up-to-date in the auto-load/store area. */
-        bool                        fUpdatedHostMsrs;
-        uint8_t                     u8Alignment0[3];
-
-        /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
-        uint64_t                    u64HostLStarMsr;
    798         /** Host STAR MSR value to restore lazily while leaving VT-x. */
    799         uint64_t                    u64HostStarMsr;
    800         /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
    801         uint64_t                    u64HostSFMaskMsr;
    802         /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
    803         uint64_t                    u64HostKernelGSBaseMsr;
    804         /** A mask of which MSRs have been swapped and need restoration. */
    805         uint32_t                    fLazyMsrs;
    806         uint32_t                    u32Alignment2;
    807 
    808         /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
    809         uint64_t                    u64MsrApicBase;
    810         /** Last use TSC offset value. (cached) */
    811         uint64_t                    u64TscOffset;
    812 
    813         /** VMCS cache. */
    814         VMCSCACHE                   VMCSCache;
    815 
    816         /** Real-mode emulation state. */
     740        /** VT-x data.   */
    817741        struct
    818742        {
    819             X86DESCATTR             AttrCS;
    820             X86DESCATTR             AttrDS;
    821             X86DESCATTR             AttrES;
    822             X86DESCATTR             AttrFS;
    823             X86DESCATTR             AttrGS;
    824             X86DESCATTR             AttrSS;
    825             X86EFLAGS               Eflags;
    826             bool                    fRealOnV86Active;
    827         } RealMode;
    828 
    829         /** VT-x error-reporting (mainly for ring-3 propagation). */
     743            /** Ring 0 handlers for VT-x. */
     744            PFNHMVMXSTARTVM             pfnStartVM;
     745#if HC_ARCH_BITS == 32
     746            uint32_t                    u32Alignment0;
     747#endif
     748
     749            /** Current pin-based VM-execution controls. */
     750            uint32_t                    u32PinCtls;
     751            /** Current processor-based VM-execution controls. */
     752            uint32_t                    u32ProcCtls;
     753            /** Current secondary processor-based VM-execution controls. */
     754            uint32_t                    u32ProcCtls2;
     755            /** Current VM-entry controls. */
     756            uint32_t                    u32EntryCtls;
     757            /** Current VM-exit controls. */
     758            uint32_t                    u32ExitCtls;
     759            /** Current CR0 mask. */
     760            uint32_t                    u32Cr0Mask;
     761            /** Current CR4 mask. */
     762            uint32_t                    u32Cr4Mask;
     763            /** Current exception bitmap. */
     764            uint32_t                    u32XcptBitmap;
     765
     766            /** Physical address of the VM control structure (VMCS). */
     767            RTHCPHYS                    HCPhysVmcs;
     768            /** R0 memory object for the VM control structure (VMCS). */
     769            RTR0MEMOBJ                  hMemObjVmcs;
     770            /** Virtual address of the VM control structure (VMCS). */
     771            R0PTRTYPE(void *)           pvVmcs;
     772
     773            /** Physical address of the current EPTP. */
     774            RTHCPHYS                    HCPhysEPTP;
     775            /** Physical address of the virtual APIC page for TPR caching. */
     776            RTHCPHYS                    HCPhysVirtApic;
     777            /** Virtual address of the virtual APIC page for TPR caching. */
     778            R0PTRTYPE(uint8_t *)        pbVirtApic;
     779
     780            /** Physical address of the MSR bitmap. */
     781            RTHCPHYS                    HCPhysMsrBitmap;
     782            /** R0 memory object for the MSR bitmap. */
     783            RTR0MEMOBJ                  hMemObjMsrBitmap;
     784            /** Virtual address of the MSR bitmap. */
     785            R0PTRTYPE(void *)           pvMsrBitmap;
     786
     787            /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
     788             *  for guest MSRs). */
     789            RTHCPHYS                    HCPhysGuestMsr;
     790            /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
     791             *  (used for guest MSRs). */
     792            RTR0MEMOBJ                  hMemObjGuestMsr;
     793            /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used
     794             *  for guest MSRs). */
     795            R0PTRTYPE(void *)           pvGuestMsr;
     796
     797            /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
     798            RTHCPHYS                    HCPhysHostMsr;
     799            /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
     800            RTR0MEMOBJ                  hMemObjHostMsr;
     801            /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
     802            R0PTRTYPE(void *)           pvHostMsr;
     803
     804            /** Number of guest/host MSR pairs in the auto-load/store area. */
     805            uint32_t                    cMsrs;
     806            /** Whether the host MSR values are up-to-date in the auto-load/store area. */
     807            bool                        fUpdatedHostMsrs;
     808            uint8_t                     au8Alignment0[3];
     809
     810            /** Host LSTAR MSR value to restore lazily while leaving VT-x. */
     811            uint64_t                    u64HostLStarMsr;
     812            /** Host STAR MSR value to restore lazily while leaving VT-x. */
     813            uint64_t                    u64HostStarMsr;
     814            /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */
     815            uint64_t                    u64HostSFMaskMsr;
     816            /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */
     817            uint64_t                    u64HostKernelGSBaseMsr;
     818            /** A mask of which MSRs have been swapped and need restoration. */
     819            uint32_t                    fLazyMsrs;
     820            uint32_t                    u32Alignment1;
     821
     822            /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
     823            uint64_t                    u64MsrApicBase;
     824            /** Last use TSC offset value. (cached) */
     825            uint64_t                    u64TscOffset;
     826
     827            /** VMCS cache. */
     828            VMCSCACHE                   VMCSCache;
     829
     830            /** Real-mode emulation state. */
     831            struct
     832            {
     833                X86DESCATTR             AttrCS;
     834                X86DESCATTR             AttrDS;
     835                X86DESCATTR             AttrES;
     836                X86DESCATTR             AttrFS;
     837                X86DESCATTR             AttrGS;
     838                X86DESCATTR             AttrSS;
     839                X86EFLAGS               Eflags;
     840                bool                    fRealOnV86Active;
     841            } RealMode;
     842
     843            /** VT-x error-reporting (mainly for ring-3 propagation). */
     844            struct
     845            {
     846                uint64_t                u64VmcsPhys;
     847                uint32_t                u32VmcsRev;
     848                uint32_t                u32InstrError;
     849                uint32_t                u32ExitReason;
     850                uint32_t                u32Alignment0;
     851                RTCPUID                 idEnteredCpu;
     852                RTCPUID                 idCurrentCpu;
     853            } LastError;
     854
     855            /** Current state of the VMCS. */
     856            uint32_t                    fVmcsState;
     857            /** Which host-state bits to restore before being preempted. */
     858            uint32_t                    fRestoreHostFlags;
     859            /** The host-state restoration structure. */
     860            VMXRESTOREHOST              RestoreHost;
     861
     862            /** Set if guest was executing in real mode (extra checks). */
     863            bool                        fWasInRealMode;
     864            /** Set if guest switched to 64-bit mode on a 32-bit host. */
     865            bool                        fSwitchedTo64on32;
     866            /** Padding. */
     867            uint8_t                     au8Alignment1[6];
     868        } vmx;
     869
     870        /** SVM data. */
    830871        struct
    831872        {
    832             uint64_t                u64VmcsPhys;
    833             uint32_t                u32VmcsRev;
    834             uint32_t                u32InstrError;
    835             uint32_t                u32ExitReason;
    836             RTCPUID                 idEnteredCpu;
    837             RTCPUID                 idCurrentCpu;
    838             uint32_t                u32Alignment0;
    839         } LastError;
    840 
    841         /** Current state of the VMCS. */
    842         uint32_t                    uVmcsState;
    843         /** Which host-state bits to restore before being preempted. */
    844         uint32_t                    fRestoreHostFlags;
    845         /** The host-state restoration structure. */
    846         VMXRESTOREHOST              RestoreHost;
    847 
    848         /** Set if guest was executing in real mode (extra checks). */
    849         bool                        fWasInRealMode;
    850         /** Set if guest switched to 64-bit mode on a 32-bit host. */
    851         bool                        fSwitchedTo64on32;
    852 
    853         uint8_t                     u8Alignment1[6];
    854     } vmx;
    855 
    856     struct
    857     {
    858         /** Ring 0 handlers for VT-x. */
    859         PFNHMSVMVMRUN               pfnVMRun;
     873            /** Ring 0 handlers for VT-x. */
     874            PFNHMSVMVMRUN               pfnVMRun;
    860875#if HC_ARCH_BITS == 32
    861         uint32_t                    u32Alignment0;
    862 #endif
    863 
    864         /** Physical address of the host VMCB which holds additional host-state. */
    865         RTHCPHYS                    HCPhysVmcbHost;
    866         /** R0 memory object for the host VMCB which holds additional host-state. */
    867         RTR0MEMOBJ                  hMemObjVmcbHost;
    868         /** Padding. */
    869         R0PTRTYPE(void *)           pvPadding;
    870 
    871         /** Physical address of the guest VMCB. */
    872         RTHCPHYS                    HCPhysVmcb;
    873         /** R0 memory object for the guest VMCB. */
    874         RTR0MEMOBJ                  hMemObjVmcb;
    875         /** Pointer to the guest VMCB. */
    876         R0PTRTYPE(PSVMVMCB)         pVmcb;
    877 
    878         /** Physical address of the MSR bitmap (8 KB). */
    879         RTHCPHYS                    HCPhysMsrBitmap;
    880         /** R0 memory object for the MSR bitmap (8 KB). */
    881         RTR0MEMOBJ                  hMemObjMsrBitmap;
    882         /** Pointer to the MSR bitmap. */
    883         R0PTRTYPE(void *)           pvMsrBitmap;
    884 
    885         /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
    886          *  we should check if the VTPR changed on every VM-exit. */
    887         bool                        fSyncVTpr;
    888         uint8_t                     u8Alignment0[7];
    889 
    890         /** Cache of the nested-guest's VMCB fields that we modify in order to run the
    891          *  nested-guest using AMD-V. This will be restored on \#VMEXIT. */
    892         SVMNESTEDVMCBCACHE          NstGstVmcbCache;
    893     } svm;
     876            uint32_t                    u32Alignment0;
     877#endif
     878
     879            /** Physical address of the host VMCB which holds additional host-state. */
     880            RTHCPHYS                    HCPhysVmcbHost;
     881            /** R0 memory object for the host VMCB which holds additional host-state. */
     882            RTR0MEMOBJ                  hMemObjVmcbHost;
     883            /** Padding. */
     884            R0PTRTYPE(void *)           pvPadding;
     885
     886            /** Physical address of the guest VMCB. */
     887            RTHCPHYS                    HCPhysVmcb;
     888            /** R0 memory object for the guest VMCB. */
     889            RTR0MEMOBJ                  hMemObjVmcb;
     890            /** Pointer to the guest VMCB. */
     891            R0PTRTYPE(PSVMVMCB)         pVmcb;
     892
     893            /** Physical address of the MSR bitmap (8 KB). */
     894            RTHCPHYS                    HCPhysMsrBitmap;
     895            /** R0 memory object for the MSR bitmap (8 KB). */
     896            RTR0MEMOBJ                  hMemObjMsrBitmap;
     897            /** Pointer to the MSR bitmap. */
     898            R0PTRTYPE(void *)           pvMsrBitmap;
     899
     900            /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
     901             *  we should check if the VTPR changed on every VM-exit. */
     902            bool                        fSyncVTpr;
     903            uint8_t                     au8Alignment0[7];
     904
     905            /** Cache of the nested-guest's VMCB fields that we modify in order to run the
     906             *  nested-guest using AMD-V. This will be restored on \#VMEXIT. */
     907            SVMNESTEDVMCBCACHE          NstGstVmcbCache;
     908        } svm;
     909    } HM_UNION_NM(u);
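Only one of VT-x or AMD-V is ever active for a given VM, so overlaying the two per-VCPU state blocks in a union means the structure pays for the larger block rather than the sum of both. A minimal sketch with hypothetical sizes and abbreviated members, not the real layout:

    /* Sketch only: a union's size is its largest member, not the sum. */
    #include <cstdint>
    typedef union VCPUHWSTATESKETCH
    {
        struct { uint64_t au64Fields[64]; } vmx;   /* stand-in for the VT-x block  */
        struct { uint64_t au64Fields[32]; } svm;   /* stand-in for the AMD-V block */
    } VCPUHWSTATESKETCH;
    static_assert(sizeof(VCPUHWSTATESKETCH) == 64 * sizeof(uint64_t),
                  "union size is the max of its members, not their sum");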
    894910
    895911    /** Event injection state. */
     
    10671083AssertCompileMemberAlignment(HMCPU, cWorldSwitchExits, 4);
    10681084AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8);
    1069 AssertCompileMemberAlignment(HMCPU, vmx, 8);
    1070 AssertCompileMemberAlignment(HMCPU, svm, 8);
     1085AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx, 8);
     1086AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) svm, 8);
    10711087AssertCompileMemberAlignment(HMCPU, Event, 8);
    10721088
    10731089#ifdef IN_RING0
    1074 VMMR0_INT_DECL(PHMGLOBALCPUINFO) hmR0GetCurrentCpu(void);
     1090VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void);
    10751091VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPU pVCpu);
    10761092
     
    10811097
    10821098# ifdef VBOX_WITH_KERNEL_USING_XMM
    1083 DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
    1084 DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
     1099DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu,
     1100                                   PFNHMVMXSTARTVM pfnStartVM);
     1101DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
     1102                               PFNHMSVMVMRUN pfnVMRun);
    10851103# endif
    10861104#endif /* IN_RING0 */
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r73097 r76482  
    431431    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVmcs, 8);
    432432    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.LastError, 8);
     433    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.RestoreHost, 8);
    433434    CHECK_MEMBER_ALIGNMENT(HMCPU, svm, 8);
    434435    CHECK_MEMBER_ALIGNMENT(HMCPU, svm.pfnVMRun, 8);
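The CHECK_MEMBER_ALIGNMENT lines in this testcase pin down offsets such as the newly added vmx.RestoreHost one. A check of this shape can be expressed with offsetof; the following is a sketch under assumed semantics, not the actual tstVMStructSize macro:

    /* Hypothetical re-creation of an offsetof-based alignment check. */
    #include <cstddef>
    #define CHECK_MEMBER_ALIGNMENT_SKETCH(a_Type, a_Member, a_cbAlign) \
        static_assert(offsetof(a_Type, a_Member) % (a_cbAlign) == 0, \
                      "member '" #a_Member "' of '" #a_Type "' is misaligned")

    /* Usage mirroring the hunk above: */
    /* CHECK_MEMBER_ALIGNMENT_SKETCH(HMCPU, vmx.RestoreHost, 8); */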