VirtualBox

Changeset 47989 in vbox


Timestamp:
Aug 22, 2013 1:56:52 PM
Author:
vboxsync
Message:

VMM/VMMR0: Preemption hooks implemented and enabled for Solaris and Linux hosts.

Location:
trunk
Files:
8 edited

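The hunks below thread a single mechanism through eight files: VMMR0EntryFast() registers a per-EMT thread-context hook, vmmR0ThreadCtxCallback() keeps the VCPU-to-host-CPU mapping current and forwards the event to HMR0ThreadCtxCallback(), and the VT-x/AMD-V callbacks flush or re-load hardware state when the EMT is preempted or resumed. A minimal standalone sketch of that callback shape follows; every name in it is an illustrative stand-in, not the real IPRT/VMM API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the real VBox types (hypothetical). */
    typedef enum { CTX_EVENT_PREEMPTING, CTX_EVENT_RESUMED } CTXEVENT;
    typedef struct VCPU
    {
        unsigned idHostCpu;        /* host CPU the EMT currently runs on   */
        bool     fHwContextLoaded; /* VT-x/AMD-V state loaded on that CPU? */
    } VCPU;

    static unsigned currentHostCpu(void) { return 0; /* stand-in for RTMpCpuId() */ }

    /* Shape of the callback added in VMMR0.cpp: fix up the VCPU <-> host CPU
       mapping, then let HM save or restore the hardware virtualization state. */
    static void threadCtxCallback(CTXEVENT enmEvent, void *pvUser)
    {
        VCPU *pVCpu = (VCPU *)pvUser;
        switch (enmEvent)
        {
            case CTX_EVENT_PREEMPTING:
                pVCpu->idHostCpu        = ~0u;   /* no host CPU associated any more    */
                pVCpu->fHwContextLoaded = false; /* HM flushes the VMCS/VMCB to memory */
                break;
            case CTX_EVENT_RESUMED:
                pVCpu->idHostCpu        = currentHostCpu();
                pVCpu->fHwContextLoaded = true;  /* HM re-activates state on the new CPU */
                break;
        }
    }

    int main(void)
    {
        VCPU VCpu = { currentHostCpu(), true };
        /* The host scheduler would invoke the hook; simulate one preempt/resume cycle. */
        threadCtxCallback(CTX_EVENT_PREEMPTING, &VCpu);
        threadCtxCallback(CTX_EVENT_RESUMED,    &VCpu);
        printf("EMT on host CPU %u, hw context loaded: %d\n",
               VCpu.idHostCpu, (int)VCpu.fHwContextLoaded);
        return 0;
    }
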
  • trunk/include/VBox/vmm/vm.h

    r47671 r47989  
    651651# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
    652652#elif defined(IN_RING0)
    653 # define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
     653# define VMCPU_ASSERT_EMT(pVCpu)            AssertMsg(VMCPU_IS_EMT(pVCpu), \
     654                                                      ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%u\n", \
     655                                                      RTThreadNativeSelf(), (pVCpu) ? (pVCpu)->hNativeThreadR0 : 0, \
     656                                                      (pVCpu) ? (pVCpu)->idCpu : 0))
    654657#else
    655658# define VMCPU_ASSERT_EMT(pVCpu) \
  • trunk/include/VBox/vmm/vmm.h

    r47760 r47989  
    498498VMMR0DECL(int)       VMMR0TermVM(PVM pVM, PGVM pGVM);
    499499VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu);
     500VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu);
    500501VMMR0DECL(int)       VMMR0ThreadCtxHooksCreate(PVMCPU pVCpu);
    501502VMMR0DECL(void)      VMMR0ThreadCtxHooksRelease(PVMCPU pVCpu);
     
    506507
    507508# ifdef LOG_ENABLED
    508 VMMR0DECL(void)     VMMR0LogFlushDisable(PVMCPU pVCpu);
    509 VMMR0DECL(void)     VMMR0LogFlushEnable(PVMCPU pVCpu);
    510 VMMR0DECL(bool)     VMMR0IsLogFlushDisabled(PVMCPU pVCpu);
     509VMMR0DECL(void)      VMMR0LogFlushDisable(PVMCPU pVCpu);
     510VMMR0DECL(void)      VMMR0LogFlushEnable(PVMCPU pVCpu);
     511VMMR0DECL(bool)      VMMR0IsLogFlushDisabled(PVMCPU pVCpu);
    511512# else
    512 #  define           VMMR0LogFlushDisable(pVCpu)     do { } while(0)
    513 #  define           VMMR0LogFlushEnable(pVCpu)      do { } while(0)
    514 #  define           VMMR0IsLogFlushDisabled(pVCpu)  (true)
     513#  define            VMMR0LogFlushDisable(pVCpu)     do { } while(0)
     514#  define            VMMR0LogFlushEnable(pVCpu)      do { } while(0)
     515#  define            VMMR0IsLogFlushDisabled(pVCpu)  (true)
    515516# endif /* LOG_ENABLED */
    516517#endif /* IN_RING0 */
  • trunk/src/VBox/VMM/VMMAll/VMMAll.cpp

    r46861 r47989  
    253253        return &pVM->aCpus[0];
    254254
    255     /* Search first by host cpu id (most common case)
     255    /*
     256     * Search first by host cpu id (most common case)
    256257     * and then by native thread id (page fusion case).
    257258     */
    258 
    259     /* RTMpCpuId had better be cheap. */
    260     RTCPUID idHostCpu = RTMpCpuId();
    261 
    262     /** @todo optimize for large number of VCPUs when that becomes more common. */
    263     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    264     {
    265         PVMCPU pVCpu = &pVM->aCpus[idCpu];
    266 
    267         if (pVCpu->idHostCpu == idHostCpu)
    268             return pVCpu;
     259    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     260    {
     261        /** @todo r=ramshankar: This doesn't buy us anything in terms of performance
     262         *        leaving it here for hysterical raisins and as a reference if we
     263         *        implemented a hashing approach in the future. */
     264
     265        /* RTMpCpuId had better be cheap. */
     266        RTCPUID idHostCpu = RTMpCpuId();
     267
     268        /** @todo optimize for large number of VCPUs when that becomes more common. */
     269        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     270        {
     271            PVMCPU pVCpu = &pVM->aCpus[idCpu];
     272
     273            if (pVCpu->idHostCpu == idHostCpu)
     274                return pVCpu;
     275        }
    269276    }
    270277
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r47959 r47989  
    475475                    }
    476476
    477                     /* Enter VMX Root Mode */
     477                    /*
     478                     * The only way of checking if we're in VMX root mode or not is to try and enter it.
     479                     * There is no instruction or control bit that tells us if we're in VMX root mode.
     480                     * Therefore, try and enter VMX root mode here.
     481                     */
    478482                    rc = VMXEnable(HCPhysScratchPage);
    479483                    if (RT_SUCCESS(rc))
     
    989993    {
    990994        Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
    991         g_HvmR0.aCpuInfo[i].fConfigured = false;
    992         g_HvmR0.aCpuInfo[i].cTlbFlushes = 0;
     995        g_HvmR0.aCpuInfo[i].fConfigured  = false;
     996        g_HvmR0.aCpuInfo[i].cTlbFlushes  = 0;
     997        g_HvmR0.aCpuInfo[i].uCurrentAsid = 0;
    993998    }
    994999
     
    10121017         * We're doing the job ourselves.
    10131018         */
    1014         /* Allocate one page per cpu for the global vt-x and amd-v pages */
     1019        /* Allocate one page per cpu for the global VT-x and AMD-V pages */
    10151020        for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
    10161021        {
     
    10301035    }
    10311036
    1032     if (RT_SUCCESS(rc) && g_HvmR0.fGlobalInit)
     1037    if (   RT_SUCCESS(rc)
     1038        && g_HvmR0.fGlobalInit)
    10331039    {
    10341040        /* First time, so initialize each cpu/core. */
     
    14111417
    14121418    /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
    1413     if (   !pCpu->fConfigured
    1414         || !g_HvmR0.fGlobalInit)
    1415     {
     1419    if (!pCpu->fConfigured)
    14161420        hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
    1417     }
    14181421
    14191422    /* Reload host-context (back from ring-3/migrated CPUs), reload guest CR0 (for FPU bits). */
     
    14551458
    14561459    int rc  = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
    1457     AssertRC(rc);
     1460    AssertMsgRC(rc, ("pfnEnterSession failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
    14581461
    14591462    /* Load the host as we may be resuming code after a longjmp and quite
    14601463       possibly be scheduled on a different CPU. */
    14611464    rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
    1462     AssertRC(rc);
     1465    AssertMsgRC(rc, ("pfnSaveHostState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
    14631466
    14641467    /** @todo This is not needed to be done here anymore, can fix/optimize later. */
    14651468    rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
    1466     AssertRC(rc);
     1469    AssertMsgRC(rc, ("pfnLoadGuestState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
    14671470
    14681471#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     
    14891492    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    14901493
    1491     if (!g_HvmR0.fGlobalInit)
    1492     {
    1493         RTCPUID idCpu = RTMpCpuId();
     1494    RTCPUID          idCpu = RTMpCpuId();
     1495    PHMGLOBALCPUINFO pCpu  = &g_HvmR0.aCpuInfo[idCpu];
     1496
     1497    if (   !g_HvmR0.fGlobalInit
     1498        && pCpu->fConfigured)
     1499    {
    14941500        int rc = hmR0DisableCpu(idCpu);
    14951501        AssertRCReturn(rc, rc);
     1502        Assert(!pCpu->fConfigured);
    14961503    }
    14971504
    14981505    /* Reset these to force a TLB flush for the next entry. */
    14991506    pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
     1507    pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
    15001508    pVCpu->hm.s.uCurrentAsid = 0;
    15011509    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
     
    15201528    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
    15211529
    1522     PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);
     1530    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    15231531    AssertPtr(pCtx);
    15241532
     1533    bool fDisabledPreempt = false;
     1534    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     1535    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     1536    {
     1537        Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
     1538        RTThreadPreemptDisable(&PreemptState);
     1539        fDisabledPreempt = true;
     1540    }
     1541
    15251542    int rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
    15261543
    1527     /*
    1528      * When thread-context hooks are not used, leave HM context and if necessary disable HM on the CPU.
    1529      * When thread-context hooks -are- used, this work would be done in the VT-x and AMD-V thread-context callback.
    1530      */
    15311544    if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
    15321545    {
    1533         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1534         RTCPUID idCpu = RTMpCpuId();
    1535 
    15361546        /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
    15371547           and ring-3 calls when thread-context hooks are not supported. */
     1548        RTCPUID idCpu = RTMpCpuId();
    15381549        AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
    15391550                      || RT_FAILURE_NP(rc), ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
    15401551                      rc = VERR_HM_WRONG_CPU_1);
    1541 
     1552    }
     1553
     1554    /* Leave HM context, takes care of local init (term). */
     1555    if (RT_SUCCESS(rc))
     1556    {
    15421557        rc = HMR0LeaveEx(pVCpu);
    15431558        AssertRCReturn(rc, rc);
    15441559    }
     1560
     1561    /* Deregister hook now that we've left HM context before re-enabling preemption. */
     1562    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
     1563     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
     1564    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     1565        VMMR0ThreadCtxHooksDeregister(pVCpu);
     1566
     1567    if (fDisabledPreempt)
     1568        RTThreadPreemptRestore(&PreemptState);
    15451569
    15461570    /* Guest FPU and debug state shouldn't be active now, it's likely that we're going back to ring-3. */
     
    15481572    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    15491573
    1550     pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
    15511574    return rc;
    15521575}
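With the hooks in place, HMR0Leave() above can now be reached with preemption still enabled, so it brackets the leave path with an only-if-needed preemption disable and restores exactly what it disabled. A compile-only sketch of that bracket; the RTThreadPreempt* calls and NIL_RTTHREAD are the ring-0 IPRT APIs used in the hunk, while the helper itself is hypothetical.

    #include <iprt/types.h>
    #include <iprt/thread.h>

    /* Hypothetical helper showing the bracket used in HMR0Leave(): disable
       preemption only if the caller left it enabled (thread-context hook path)
       and restore only what was disabled here. */
    static void doWorkPreemptSafe(void (*pfnWork)(void *), void *pvUser)
    {
        RTTHREADPREEMPTSTATE PreemptState     = RTTHREADPREEMPTSTATE_INITIALIZER;
        bool                 fDisabledPreempt = false;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            RTThreadPreemptDisable(&PreemptState);
            fDisabledPreempt = true;
        }

        pfnWork(pvUser);   /* e.g. pfnLeaveSession() plus hook deregistration */

        if (fDisabledPreempt)
            RTThreadPreemptRestore(&PreemptState);
    }
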
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r47844 r47989  
    119119    } while (0)
    120120
     121/** Assert that preemption is disabled or covered by thread-context hooks. */
     122#define HMSVM_ASSERT_PREEMPT_SAFE()           Assert(   VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
     123                                                     || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     124
     125/** Assert that we haven't migrated CPUs when thread-context hooks are not
     126 *  used. */
     127#define HMSVM_ASSERT_CPU_SAFE()               AssertMsg(   VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
     128                                                        || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
     129                                                        ("Illegal migration! Entered on CPU %u Current %u\n", \
     130                                                        pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
    121131
    122132/** Exception bitmap mask for all contributory exceptions.
     
    15711581VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    15721582{
    1573     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    15741583    NOREF(pVM);
    15751584    NOREF(pVCpu);
     
    15951604        {
    15961605            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1606            Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
     1607            VMCPU_ASSERT_EMT(pVCpu);
    15971608
    15981609            PVM         pVM  = pVCpu->CTX_SUFF(pVM);
     
    16111622        case RTTHREADCTXEVENT_RESUMED:
    16121623        {
    1613             /* Disable preemption, we don't want to be migrated to another CPU while re-initializing AMD-V state. */
    1614             RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    1615             RTThreadPreemptDisable(&PreemptState);
    1616 
    1617             /* Initialize the bare minimum state required for HM. This takes care of
    1618                initializing AMD-V if necessary (onlined CPUs, local init etc.) */
     1624            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1625            Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
     1626            VMCPU_ASSERT_EMT(pVCpu);
     1627
     1628            VMMRZCallRing3Disable(pVCpu);                        /* No longjmps (log-flush, locks) in this fragile context. */
     1629
     1630            /*
     1631             * Initialize the bare minimum state required for HM. This takes care of
     1632             * initializing AMD-V if necessary (onlined CPUs, local init etc.)
     1633             */
    16191634            HMR0EnterEx(pVCpu);
    16201635            Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0));
    16211636
    16221637            pVCpu->hm.s.fLeaveDone = false;
    1623             RTThreadPreemptRestore(&PreemptState);
     1638            VMMRZCallRing3Enable(pVCpu);                        /* Restore longjmp state. */
    16241639            break;
    16251640        }
     
    19111926         * Restore host debug registers if necessary and resync on next R0 reentry.
    19121927         */
    1913     #ifdef VBOX_STRICT
     1928#ifdef VBOX_STRICT
    19141929        if (CPUMIsHyperDebugStateActive(pVCpu))
    19151930        {
     
    19181933            Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
    19191934        }
    1920     #endif
     1935#endif
    19211936        if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
    19221937            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     
    19761991    Assert(pvUser);
    19771992    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    1978     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1993    HMSVM_ASSERT_PREEMPT_SAFE();
    19791994
    19801995    VMMRZCallRing3Disable(pVCpu);
     
    20062021    Assert(pVCpu);
    20072022    Assert(pCtx);
    2008     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     2023    HMSVM_ASSERT_PREEMPT_SAFE();
    20092024
    20102025    if (RT_UNLIKELY(rcExit == VERR_SVM_INVALID_GUEST_STATE))
     
    24502465static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
    24512466{
    2452     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     2467    HMSVM_ASSERT_PREEMPT_SAFE();
    24532468    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    24542469
     
    29312946{
    29322947    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    2933     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     2948    HMSVM_ASSERT_PREEMPT_SAFE();
    29342949
    29352950    SVMTRANSIENT SvmTransient;
     
    29422957    {
    29432958        Assert(!HMR0SuspendPending());
    2944         AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
    2945                   ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
    2946                   (unsigned)RTMpCpuId(), cLoops));
     2959        HMSVM_ASSERT_CPU_SAFE();
    29472960
    29482961        /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
     
    32523265        AssertPtr(pSvmTransient); \
    32533266        Assert(ASMIntAreEnabled()); \
    3254         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
     3267        HMSVM_ASSERT_PREEMPT_SAFE(); \
    32553268        HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
    32563269        Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
    3257         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
     3270        HMSVM_ASSERT_PREEMPT_SAFE(); \
    32583271        if (VMMR0IsLogFlushDisabled(pVCpu)) \
    32593272            HMSVM_ASSERT_PREEMPT_CPUID(); \
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r47844 r47989  
    6464
    6565/** Determine which tagged-TLB flush handler to use. */
    66 #define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID          0
    67 #define HMVMX_FLUSH_TAGGED_TLB_EPT               1
    68 #define HMVMX_FLUSH_TAGGED_TLB_VPID              2
    69 #define HMVMX_FLUSH_TAGGED_TLB_NONE              3
     66#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID           0
     67#define HMVMX_FLUSH_TAGGED_TLB_EPT                1
     68#define HMVMX_FLUSH_TAGGED_TLB_VPID               2
     69#define HMVMX_FLUSH_TAGGED_TLB_NONE               3
    7070
    7171/** @name Updated-guest-state flags.
     
    125125/** @} */
    126126
     127/** @name
     128 * States of the VMCS.
     129 *
     130 * This does not reflect all possible VMCS states but currently only those
     131 * needed for maintaining the VMCS consistently even when thread-context hooks
     132 * are used. Maybe later this can be extended (i.e. Nested Virtualization).
     133 */
     134#define HMVMX_VMCS_STATE_CLEAR                         RT_BIT(0)
     135#define HMVMX_VMCS_STATE_ACTIVE                        RT_BIT(1)
     136#define HMVMX_VMCS_STATE_LAUNCHED                      RT_BIT(2)
     137/** @} */
     138
    127139/**
    128140 * Exception bitmap mask for real-mode guests (real-on-v86).
     
    161173#endif
    162174
     175/** Assert that preemption is disabled or covered by thread-context hooks. */
     176#define HMVMX_ASSERT_PREEMPT_SAFE()       Assert(   VMMR0ThreadCtxHooksAreRegistered(pVCpu)   \
     177                                                 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     178
     179/** Assert that we haven't migrated CPUs when thread-context hooks are not
     180 *  used. */
     181#define HMVMX_ASSERT_CPU_SAFE()           AssertMsg(   VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
     182                                                    || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
     183                                                    ("Illegal migration! Entered on CPU %u Current %u\n", \
     184                                                    pVCpu->hm.s.idEnteredCpu, RTMpCpuId())); \
    163185
    164186/*******************************************************************************
     
    208230            uint32_t    u6Reserved0 : 6;
    209231            /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
    210             uint32_t    u3AddrSize : 3;
     232            uint32_t    u3AddrSize  : 3;
    211233            uint32_t    u5Reserved1 : 5;
    212234            /** The segment register (X86_SREG_XXX). */
    213             uint32_t    iSegReg : 3;
    214             uint32_t    uReserved2 : 14;
     235            uint32_t    iSegReg     : 3;
     236            uint32_t    uReserved2  : 14;
    215237        } StrIo;
    216238    }               ExitInstrInfo;
     
    21492171                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    21502172
     2173        pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
     2174
    21512175        hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
    21522176    }
     
    28312855        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
    28322856        AssertRCReturn(rc, rc);
    2833         Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
    28342857        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
     2858        Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#x\n", pMixedCtx->rip, pVCpu->hm.s.fContextUseFlags));
    28352859    }
    28362860    return rc;
     
    28562880        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
    28572881        AssertRCReturn(rc, rc);
     2882        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
    28582883        Log4(("Load: VMX_VMCS_GUEST_RSP=%#RX64\n", pMixedCtx->rsp));
    2859         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
    28602884    }
    28612885    return rc;
     
    29022926        AssertRCReturn(rc, rc);
    29032927
     2928        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
    29042929        Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", Eflags.u32));
    2905         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
    29062930    }
    29072931    return rc;
     
    40474071     * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
    40484072     */
     4073    const bool fResumeVM = !!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
     4074    /** @todo Add stats for resume vs launch. */
    40494075#ifdef VBOX_WITH_KERNEL_USING_XMM
    4050     return HMR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
     4076    return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
    40514077#else
    4052     return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
     4078    return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
    40534079#endif
    40544080}
     
    40714097    Assert(pCtx);
    40724098    Assert(pVmxTransient);
    4073     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     4099    HMVMX_ASSERT_PREEMPT_SAFE();
    40744100
    40754101    Log4(("VM-entry failure: %Rrc\n", rcVMRun));
     
    40864112            rc    |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    40874113            AssertRC(rc);
     4114
     4115            pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
     4116            /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
     4117               Cannot do it here as we may have been long preempted. */
    40884118
    40894119#ifdef VBOX_STRICT
     
    60456075    if (!pVCpu->hm.s.fLeaveDone)
    60466076    {
     6077        Log4Func(("HostCpuId=%u\n", RTMpCpuId()));
     6078
    60476079        /* Save the guest state if necessary. */
    60486080        if (pVCpu->hm.s.vmx.fUpdatedGuestState != HMVMX_UPDATED_GUEST_ALL)
     
    60626094
    60636095        /* Restore host debug registers if necessary and resync on next R0 reentry. */
    6064     #ifdef VBOX_STRICT
     6096#ifdef VBOX_STRICT
    60656097        if (CPUMIsHyperDebugStateActive(pVCpu))
    60666098            Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
    6067     #endif
     6099#endif
    60686100        if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
    60696101            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     
    60896121        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    60906122
     6123        /** @todo This kinda defeats the purpose of having preemption hooks.
     6124         *  The problem is, deregistering the hooks should be moved to a place that
     6125         *  lasts until the EMT is about to be destroyed not everytime while leaving HM
     6126         *  context.
     6127         */
     6128        if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
     6129        {
     6130            int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     6131            AssertRC(rc);
     6132            pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
     6133        }
     6134
    60916135        pVCpu->hm.s.fLeaveDone = true;
    60926136    }
     
    61366180    Assert(pVCpu);
    61376181    Assert(pMixedCtx);
    6138     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     6182    HMVMX_ASSERT_PREEMPT_SAFE();
    61396183
    61406184    if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
     
    61966240
    61976241    /* On our way back from ring-3 the following needs to be done. */
    6198     /** @todo This can change with preemption hooks. */
    61996242    if (rcExit == VINF_EM_RAW_INTERRUPT)
    62006243        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
     
    62256268    Assert(pvUser);
    62266269    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    6227     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     6270    HMVMX_ASSERT_PREEMPT_SAFE();
    62286271
    62296272    VMMRZCallRing3Disable(pVCpu);
     
    67876830#endif
    67886831
    6789     /* Load the active VMCS as the current one. */
     6832    /*
     6833     * The VMCS state here will not be reliable because we deregister the hook in VMMR0EntryFast()
     6834     * on the way out. If we had got a preempt/resume callback -after- hmR0VmxLeave() but before
     6835     * deregistering the hook, the VMCS state will be ACTIVE. Once deregistered we no longer get
     6836     * notifications and lose track. Following that if we get rescheduled to another host CPU, the
     6837     * VMCS state says ACTIVE even though it really is not.
     6838     *
     6839     * Load the VCPU's VMCS as the current (and active) one.
     6840     */
    67906841    int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    67916842    if (RT_FAILURE(rc))
    67926843        return rc;
    6793 
    6794     /** @todo this will change with preemption hooks where can VMRESUME as long
    6795      *        as we're no preempted. */
    6796     pVCpu->hm.s.fResumeVM  = false;
     6844    pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
     6845
    67976846    pVCpu->hm.s.fLeaveDone = false;
    67986847    return VINF_SUCCESS;
     
    68146863        case RTTHREADCTXEVENT_PREEMPTING:
    68156864        {
     6865            /** @todo Stats. */
    68166866            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    6817             Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));     /* Paranoia. */
    6818 
    6819             PVM         pVM       = pVCpu->CTX_SUFF(pVM);
    6820             PCPUMCTX    pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
    6821             VMMRZCallRing3Disable(pVCpu);                        /* No longjmps (log-flush, locks) in this fragile context. */
    6822             hmR0VmxLeave(pVM, pVCpu, pMixedCtx);                 /* Save the guest-state, restore host-state (FPU, debug etc.). */
    6823 
    6824             int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);   /* Flush VMCS CPU state to VMCS region in memory. */
    6825             AssertRC(rc); NOREF(rc);
    6826 
    6827             rc = HMR0LeaveEx(pVCpu);                             /* Leave HM context, takes care of local init (term). */
    6828             AssertRC(rc); NOREF(rc);
    6829 
    6830             VMMRZCallRing3Enable(pVCpu);                         /* Restore longjmp state. */
     6867            Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
     6868            VMCPU_ASSERT_EMT(pVCpu);
     6869
     6870            PVM      pVM       = pVCpu->CTX_SUFF(pVM);
     6871            PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
     6872
     6873            /* No longjmps (logger flushes, locks) in this fragile context. */
     6874            VMMRZCallRing3Disable(pVCpu);
     6875            Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
     6876
     6877            /* Save the guest-state, restore host-state (FPU, debug etc.). */
     6878            hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
     6879
     6880            /* Flush VMCS CPU state to the VMCS region in memory. */
     6881            int rc;
     6882            if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
     6883            {
     6884                rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     6885                AssertRC(rc);
     6886                pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
     6887            }
     6888
     6889            /* Leave HM context, takes care of local init (term). */
     6890            rc = HMR0LeaveEx(pVCpu);
     6891            AssertRC(rc);
     6892
     6893            /* Restore longjmp state. */
     6894            VMMRZCallRing3Enable(pVCpu);
     6895            NOREF(rc);
    68316896            break;
    68326897        }
     
    68346899        case RTTHREADCTXEVENT_RESUMED:
    68356900        {
    6836             /* Disable preemption, we don't want to be migrated to another CPU while re-initializing VT-x state. */
    6837             RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    6838             RTThreadPreemptDisable(&PreemptState);
     6901            /** @todo Stats. */
     6902            Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     6903            Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
     6904            VMCPU_ASSERT_EMT(pVCpu);
     6905
     6906            /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
     6907            VMMRZCallRing3Disable(pVCpu);
     6908            Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
    68396909
    68406910            /* Initialize the bare minimum state required for HM. This takes care of
     
    68446914
    68456915            /* Load the active VMCS as the current one. */
    6846             int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    6847             AssertRC(rc);
    6848 
    6849             pVCpu->hm.s.fResumeVM  = false;
     6916            if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
     6917            {
     6918                int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
     6919                AssertRC(rc); NOREF(rc);
     6920                pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
     6921                Log4Func(("Activated: HostCpuId=%u\n", RTMpCpuId()));
     6922            }
    68506923            pVCpu->hm.s.fLeaveDone = false;
    6851 
    6852             /* Restore preemption, migrating to another CPU should be fine now. */
    6853             RTThreadPreemptRestore(&PreemptState);
     6924            VMMRZCallRing3Enable(pVCpu);
    68546925            break;
    68556926        }
     
    68716942VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    68726943{
    6873     AssertPtr(pVCpu);
     6944    NOREF(pVCpu);
    68746945    NOREF(pVM);
    68756946    NOREF(pCtx);
    68766947
    6877     if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
    6878     {
    6879         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    6880 
    6881         /*
    6882          * Sync the current VMCS (writes back internal data back into the VMCS region in memory)
    6883          * and mark the VMCS launch-state as "clear".
    6884          */
    6885         int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
    6886         return rc;
    6887     }
    6888 
    6889     /* With thread-context hooks, nothing to do here. It's taken care of in VMXR0ThreadCtxCallback(). */
     6948    /* Everything is taken care of in hmR0VmxLeave() and VMXR0ThreadCtxCallback()'s preempt event. */
    68906949    return VINF_SUCCESS;
    68916950}
     
    69046963 * @remarks No-long-jump zone!!!
    69056964 */
     6965static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
     6966{
     6967    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     6968
     6969    if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
     6970        return VINF_SUCCESS;
     6971
     6972    int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
     6973    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     6974
     6975    rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
     6976    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     6977
     6978    rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
     6979    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     6980
     6981    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
     6982    return rc;
     6983}
     6984
     6985
     6986/**
     6987 * Saves the host state in the VMCS host-state.
     6988 *
     6989 * @returns VBox status code.
     6990 * @param   pVM         Pointer to the VM.
     6991 * @param   pVCpu       Pointer to the VMCPU.
     6992 *
     6993 * @remarks No-long-jump zone!!!
     6994 */
    69066995VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
    69076996{
    69086997    AssertPtr(pVM);
    69096998    AssertPtr(pVCpu);
    6910     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    69116999
    69127000    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
    6913 
    6914     /* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
    6915     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
    6916         return VINF_SUCCESS;
    6917 
    6918     int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
    6919     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    6920 
    6921     rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
    6922     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    6923 
    6924     rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
    6925     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    6926 
    6927     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
    6928     return rc;
     7001    return hmR0VmxSaveHostState(pVM, pVCpu);
    69297002}
    69307003
     
    69537026    AssertPtr(pVCpu);
    69547027    AssertPtr(pMixedCtx);
    6955     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     7028    HMVMX_ASSERT_PREEMPT_SAFE();
    69567029
    69577030#ifdef LOG_ENABLED
    69587031    /** @todo r=ramshankar: I'm not able to use VMMRZCallRing3Disable() here,
    6959      *        probably not initialized yet? Anyway this will do for now. */
     7032     *        probably not initialized yet? Anyway this will do for now.
     7033     *
     7034     *  Update: Should be possible once VMXR0LoadGuestState() is removed as an
     7035     *  interface and disable ring-3 calls when thread-context hooks are not
     7036     *  available. */
    69607037    bool fCallerDisabledLogFlush = VMMR0IsLogFlushDisabled(pVCpu);
    69617038    VMMR0LogFlushDisable(pVCpu);
     
    70147091    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
    70157092
    7016     AssertMsg(!pVCpu->hm.s.fContextUseFlags,
     7093    AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST),
    70177094             ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p idCpu=%RU32 fContextUseFlags=%#RX32\n",
    70187095              pVM, pVCpu, pVCpu->idCpu, pVCpu->hm.s.fContextUseFlags));
     
    70457122    /*
    70467123     * Avoid reloading the guest state on longjmp reentrants and do it lazily just before executing the guest.
    7047      * This only helps when we get rescheduled more than once to a different host CPU on a longjmp trip before
    7048      * finally executing guest code.
     7124     * When thread-context hooks are not used: This only helps when we get rescheduled more than once to a
     7125     * different host CPU on a longjmp trip before finally executing guest code.
     7126     *
     7127     * When thread-context hooks are used: We avoid loading the guest state here for the above reason plus
     7128     * we can avoid doing it while preemption is disabled (which it is here).
    70497129     */
    70507130    return VINF_SUCCESS;
     7131}
     7132
     7133/**
     7134 * Wrapper for loading the guest-state bits in the inner VT-x execution loop.
     7135 *
     7136 * @param   pVM             Pointer to the VM.
     7137 * @param   pVCpu           Pointer to the VMCPU.
     7138 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     7139 *                          out-of-sync. Make sure to update the required fields
     7140 *                          before using them.
     7141 */
     7142DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     7143{
     7144    Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
     7145#ifdef HMVMX_SYNC_FULL_GUEST_STATE
     7146    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
     7147#endif
     7148
     7149    if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
     7150    {
     7151        int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
     7152        AssertRC(rc);
     7153        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
     7154    }
     7155    else if (pVCpu->hm.s.fContextUseFlags)
     7156    {
     7157        int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
     7158        AssertRC(rc);
     7159        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
     7160    }
     7161
     7162    /* When thread-context hooks are available, we could be preempted which means re-updating Guest.CR0
     7163       (shared FPU state) and debug controls (shared debug state). This is done in hmR0VmxPreRunGuestCommitted() */
     7164#ifdef VBOX_STRICT
     7165    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     7166    {
     7167        AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
     7168                  ||  (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST) == HM_CHANGED_GUEST_CR0
     7169                  ||  (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST) == (HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_DEBUG),
     7170                     ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
     7171    }
     7172    else
     7173    {
     7174        AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST), ("fContextUseFlags=%#x\n",
     7175                                                                           pVCpu->hm.s.fContextUseFlags));
     7176    }
     7177#endif
     7178
     7179#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
     7180    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
     7181    if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
     7182        Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
     7183#endif
    70517184}
    70527185
     
    71147247    }
    71157248#endif /* !IEM_VERIFICATION_MODE_FULL */
     7249
     7250    /*
     7251     * When thread-context hooks are used, load the required guest-state bits
     7252     * here before we go ahead and disable interrupts.
     7253     */
     7254    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     7255        hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
    71167256
    71177257#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     
    71727312#endif
    71737313
    7174     /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
     7314    /*
     7315     * Load the host state bits as we may've been preempted
     7316     * (only happens when thread-context hooks are used).
     7317     */
     7318    int rc = VINF_SUCCESS;
     7319    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
     7320    {
     7321        Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
     7322        rc = hmR0VmxSaveHostState(pVM, pVCpu);
     7323        AssertRC(rc);
     7324    }
    71757325    Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
    7176     Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
    7177 #ifdef HMVMX_SYNC_FULL_GUEST_STATE
    7178     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
    7179 #endif
    7180     int rc = VINF_SUCCESS;
    7181     if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
    7182     {
    7183         rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
    7184         STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
    7185     }
    7186     else if (pVCpu->hm.s.fContextUseFlags)
    7187     {
    7188         rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
    7189         STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
    7190     }
    7191     AssertRC(rc);
    7192     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
    7193 
    7194 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
    7195     uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
    7196     if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
    7197         Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
    7198 #endif
    7199 
    7200     /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
     7326
     7327    /*
     7328     * When thread-context hooks are not used we need to load the required
     7329     * guest state bits here i.e. when we can no longer be preempted.
     7330     */
     7331    if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     7332        hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
     7333    else
     7334    {
     7335        /*
     7336         * If we got preempted previously while loading the guest state, the guest FPU and debug
     7337         * state need to be re-updated because we share them with the host state.
     7338         */
     7339        if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     7340        {
     7341            if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     7342                hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
     7343            if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
     7344                hmR0VmxLoadGuestDebugState(pVCpu, pMixedCtx);
     7345        }
     7346        else
     7347        {
     7348            /*
     7349             * If we are injecting events real-on-v86 mode guest then we potentially have to update
     7350             * RIP and other registers. Just reload the state here if we're in real-on-v86 mode.
     7351             */
     7352            hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
     7353        }
     7354    }
     7355    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
     7356
     7357    /*
     7358     * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
     7359     */
    72017360    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    72027361        pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
     
    72117370    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB-shootdowns, set this across the world switch. */
    72127371    hmR0VmxFlushTaggedTlb(pVCpu);                               /* Invalidate the appropriate guest entries from the TLB. */
    7213     Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
     7372
     7373    RTCPUID idCurrentCpu = HMR0GetCurrentCpu()->idCpu;
     7374    Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
     7375    pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu;      /* Update the error reporting info. with the current host CPU. */
    72147376
    72157377    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
    72167378
    72177379    TMNotifyStartOfExecution(pVCpu);                            /* Finally, notify TM to resume its clocks as we're about
    7218                                                                     to start executing. */
     7380                                                                   to start executing. */
    72197381
    72207382#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     
    72807442
    72817443    ASMSetFlags(pVmxTransient->uEflags);                        /* Enable interrupts. */
    7282     pVCpu->hm.s.fResumeVM = true;                               /* Use VMRESUME instead of VMLAUNCH in the next run. */
     7444    pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;    /* Use VMRESUME instead of VMLAUNCH in the next run. */
    72837445
    72847446    /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
     
    73487510    {
    73497511        Assert(!HMR0SuspendPending());
    7350         AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
    7351                   ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
    7352                   (unsigned)RTMpCpuId(), cLoops));
     7512        HMVMX_ASSERT_CPU_SAFE();
    73537513
    73547514        /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
     
    74317591    {
    74327592        Assert(!HMR0SuspendPending());
    7433         AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
    7434                   ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
    7435                   (unsigned)RTMpCpuId(), cLoops));
     7593        HMVMX_ASSERT_CPU_SAFE();
    74367594
    74377595        /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
     
    75197677{
    75207678    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    7521     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     7679    HMVMX_ASSERT_PREEMPT_SAFE();
    75227680
    75237681    int rc;
     
    76287786                Assert(pVmxTransient->fVMEntryFailed == false); \
    76297787                Assert(ASMIntAreEnabled()); \
    7630                 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
     7788                HMVMX_ASSERT_PREEMPT_SAFE(); \
    76317789                HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
    76327790                Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
    7633                 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
     7791                HMVMX_ASSERT_PREEMPT_SAFE(); \
    76347792                if (VMMR0IsLogFlushDisabled(pVCpu)) \
    76357793                    HMVMX_ASSERT_PREEMPT_CPUID(); \
     
    93359493        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    93369494        fUpdateRipAlready = true;
    9337 
    93389495#else
    93399496        PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
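The per-VCPU fResumeVM flag is replaced above by a small VMCS state word because, with preemption hooks, the VMCS may be cleared and re-activated behind the execution loop's back: VMXClearVmcs() resets the hardware launch state, so the next run must use VMLAUNCH rather than VMRESUME. A minimal standalone model of those transitions; the bit values mirror the new HMVMX_VMCS_STATE_* defines, everything else is illustrative.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the new HMVMX_VMCS_STATE_* bits (RT_BIT(0..2) in the hunk). */
    #define VMCS_STATE_CLEAR     UINT32_C(0x1)
    #define VMCS_STATE_ACTIVE    UINT32_C(0x2)
    #define VMCS_STATE_LAUNCHED  UINT32_C(0x4)

    static uint32_t g_uVmcsState = VMCS_STATE_CLEAR;

    static void vmcsActivate(void)   /* VMXR0Enter / resume hook: VMXActivateVmcs() */
    {
        if (g_uVmcsState & VMCS_STATE_CLEAR)
            g_uVmcsState = VMCS_STATE_ACTIVE;
    }

    static void vmcsClear(void)      /* preempt hook / leave: VMXClearVmcs() */
    {
        if (g_uVmcsState & VMCS_STATE_ACTIVE)
            g_uVmcsState = VMCS_STATE_CLEAR;   /* launch state is reset too */
    }

    static void runGuest(void)       /* decides VMLAUNCH vs VMRESUME */
    {
        assert(g_uVmcsState & VMCS_STATE_ACTIVE);
        printf("%s\n", (g_uVmcsState & VMCS_STATE_LAUNCHED) ? "VMRESUME" : "VMLAUNCH");
        g_uVmcsState |= VMCS_STATE_LAUNCHED;
    }

    int main(void)
    {
        vmcsActivate();   /* enter HM: CLEAR -> ACTIVE                        */
        runGuest();       /* first run: VMLAUNCH, state gains LAUNCHED        */
        runGuest();       /* later runs on the same CPU: VMRESUME             */
        vmcsClear();      /* preempted: VMCS flushed to memory, back to CLEAR */
        vmcsActivate();   /* resumed, possibly on another CPU: re-activated   */
        runGuest();       /* launch state was reset by the clear -> VMLAUNCH  */
        return 0;
    }
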
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r47844 r47989  
    468468 * Registers the thread-context hook for this VCPU.
    469469 *
     470 * @returns VBox status code.
    470471 * @param   pVCpu           Pointer to the VMCPU.
    471472 * @param   pfnThreadHook   Pointer to the thread-context callback.
    472  * @returns VBox status code.
    473473 *
    474474 * @thread EMT.
     
    476476VMMR0DECL(int) VMMR0ThreadCtxHooksRegister(PVMCPU pVCpu, PFNRTTHREADCTXHOOK pfnThreadHook)
    477477{
     478    VMCPU_ASSERT_EMT(pVCpu);
    478479    return RTThreadCtxHooksRegister(pVCpu->vmm.s.hR0ThreadCtx, pfnThreadHook, pVCpu);
    479480}
     
    485486 * @returns VBox status code.
    486487 * @param   pVCpu       Pointer to the VMCPU.
     488 *
    487489 * @thread EMT.
    488490 */
     
    517519{
    518520    return RTThreadCtxHooksAreRegistered(pVCpu->vmm.s.hR0ThreadCtx);
     521}
     522
     523
     524/**
     525 * VMM ring-0 thread-context callback.
     526 *
     527 * This does common HM state updating and calls the HM-specific thread-context
     528 * callback.
     529 *
     530 * @param   enmEvent    The thread-context event.
     531 * @param   pvUser      Opaque pointer to the VMCPU.
     532 */
     533static void vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
     534{
     535    PVMCPU pVCpu = (PVMCPU)pvUser;
     536
     537    switch (enmEvent)
     538    {
     539        case RTTHREADCTXEVENT_RESUMED:
     540        {
     541            /** @todo Linux may call us with preemption enabled (really!) but technically we
     542             * cannot get preempted here, otherwise we end up in an infinite recursion
     543             * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook... ad
     544             * infinitum). Let's just disable preemption for now...
     545             */
     546            bool fPreemptDisabled = false;
     547            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     548            if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     549            {
     550                RTThreadPreemptDisable(&PreemptState);
     551                fPreemptDisabled = true;
     552            }
     553
     554            /* We need to update the VCPU <-> host CPU mapping. */
     555            RTCPUID idHostCpu = RTMpCpuId();
     556            ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
     557
     558            /* Invoke the HM-specific thread-context callback. */
     559            HMR0ThreadCtxCallback(enmEvent, pvUser);
     560
     561            /* Restore preemption. */
     562            if (fPreemptDisabled)
     563                RTThreadPreemptRestore(&PreemptState);
     564            break;
     565        }
     566
     567        case RTTHREADCTXEVENT_PREEMPTING:
     568            /*
     569             * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
     570             * have the same host CPU associated with it.
     571             */
     572            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
     573            /* fallthru, no break! */
     574        default:
     575            /* Invoke the HM-specific thread-context callback. */
     576            HMR0ThreadCtxCallback(enmEvent, pvUser);
     577            break;
     578    }
     579
    519580}
    520581
     
    838899            RTCCUINTREG uFlags = ASMIntDisableFlags();
    839900#endif
     901            /* Update the VCPU <-> host CPU mapping before doing anything else. */
    840902            ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
    841903            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
    842904                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
    843 
    844905#ifdef LOG_ENABLED
    845906            if (pVCpu->idCpu > 0)
     
    855916            }
    856917#endif
    857             int rc;
     918
     919            int  rc;
     920            bool fPreemptRestored = false;
    858921            if (!HMR0SuspendPending())
    859922            {
    860                 /** @todo VMMR0ThreadCtxHooks support. */
     923                /* Register thread-context hooks if required. */
     924#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     925                if (    VMMR0ThreadCtxHooksAreCreated(pVCpu)
     926                    && !VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     927                {
     928                    rc = VMMR0ThreadCtxHooksRegister(pVCpu, vmmR0ThreadCtxCallback);
     929                    AssertRC(rc);
     930                }
     931#endif
     932
     933                /* Enter HM context. */
    861934                rc = HMR0Enter(pVM, pVCpu);
     935
     936                /* When preemption hooks are in place, enable preemption now that we're in HM context. */
     937                if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     938                {
     939                    fPreemptRestored = true;
     940                    RTThreadPreemptRestore(&PreemptState);
     941                }
     942
    862943                if (RT_SUCCESS(rc))
    863944                {
    864945                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    865946
    866                     rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
     947                    /* Setup the longjmp machinery and execute guest code. */
     948                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
     949
     950                    /* Leave HM context. This deregisters thread-context hooks if any. */
    867951                    int rc2 = HMR0Leave(pVM, pVCpu);
    868952                    AssertRC(rc2);
     
    888972            pVCpu->vmm.s.iLastGZRc = rc;
    889973
     974            /* Clear the VCPU <-> host CPU mapping as we've left HM context. */
    890975            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
     976
    891977#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    892             RTThreadPreemptRestore(&PreemptState);
     978            if (!fPreemptRestored)
     979                RTThreadPreemptRestore(&PreemptState);
    893980#elif !defined(RT_OS_WINDOWS)
    894981            ASMSetFlags(uFlags);
     
    14871574 *
    14881575 * @returns @c true / @c false
    1489  * @param   pVCpu           The caller's cross context virtual CPU structure.
     1576 * @param   pVCpu           Pointer to the VMCPU.
    14901577 * @thread  EMT
    14911578 * @sa      VMMIsLongJumpArmed
     
    15001587        && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
    15011588#endif
     1589}
     1590
     1591
     1592/**
     1593 * Checks whether we've done a ring-3 long jump.
     1594 *
     1595 * @returns @c true / @c false
     1596 * @param   pVCpu       Pointer to the VMCPU.
     1597 * @thread EMT
     1598 */
     1599VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
     1600{
     1601    return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
    15021602}
    15031603
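The VMMR0EntryFast() hunk above is where the hooks pay off: preemption is disabled only long enough to publish the VCPU-to-host-CPU mapping, register the hook and enter HM context, after which preemption is restored early and the hook tracks any migration; the final restore is skipped when it has already happened. A standalone sketch of that ordering, with hypothetical stand-ins for the VBox calls.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the VBox/IPRT calls used in VMMR0EntryFast(). */
    static void preemptDisable(void)  { puts("preempt disabled"); }
    static void preemptRestore(void)  { puts("preempt restored"); }
    static bool hooksRegister(void)   { puts("thread-ctx hook registered"); return true; }
    static void hooksDeregister(void) { puts("thread-ctx hook deregistered"); }
    static int  hmEnter(void)         { puts("HM context entered"); return 0; }
    static int  runGuestLoop(void)    { puts("guest code executed"); return 0; }
    static void hmLeave(void)         { puts("HM context left"); hooksDeregister(); }

    /* Ordering introduced by this changeset: once the hook is registered and HM
       context is entered, the guest loop can run preemptible because the hook
       saves and restores the hardware state across any migration. */
    static int entryFastSketch(void)
    {
        preemptDisable();
        bool fHooked = hooksRegister();
        int  rc      = hmEnter();

        bool fPreemptRestored = false;
        if (fHooked)                    /* the hook covers us from here on */
        {
            preemptRestore();
            fPreemptRestored = true;
        }

        if (rc == 0)
        {
            rc = runGuestLoop();
            hmLeave();                  /* also deregisters the hook */
        }

        if (!fPreemptRestored)          /* non-hook path: restore at the very end */
            preemptRestore();
        return rc;
    }

    int main(void) { return entryFastSketch(); }
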
  • trunk/src/VBox/VMM/include/HMInternal.h

    r47771 r47989  
    8888#define HM_CHANGED_GUEST_SEGMENT_REGS            RT_BIT(7)
    8989#define HM_CHANGED_GUEST_DEBUG                   RT_BIT(8)
    90 # define HM_CHANGED_GUEST_RIP                    RT_BIT(9)
    91 # define HM_CHANGED_GUEST_RSP                    RT_BIT(10)
    92 # define HM_CHANGED_GUEST_RFLAGS                 RT_BIT(11)
    93 # define HM_CHANGED_GUEST_CR2                    RT_BIT(12)
    94 # define HM_CHANGED_GUEST_SYSENTER_CS_MSR        RT_BIT(13)
    95 # define HM_CHANGED_GUEST_SYSENTER_EIP_MSR       RT_BIT(14)
    96 # define HM_CHANGED_GUEST_SYSENTER_ESP_MSR       RT_BIT(15)
     90#define HM_CHANGED_GUEST_RIP                     RT_BIT(9)
     91#define HM_CHANGED_GUEST_RSP                     RT_BIT(10)
     92#define HM_CHANGED_GUEST_RFLAGS                  RT_BIT(11)
     93#define HM_CHANGED_GUEST_CR2                     RT_BIT(12)
     94#define HM_CHANGED_GUEST_SYSENTER_CS_MSR         RT_BIT(13)
     95#define HM_CHANGED_GUEST_SYSENTER_EIP_MSR        RT_BIT(14)
     96#define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
    9797/* VT-x specific state. */
    98 # define HM_CHANGED_VMX_GUEST_AUTO_MSRS          RT_BIT(16)
    99 # define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE     RT_BIT(17)
    100 # define HM_CHANGED_VMX_GUEST_APIC_STATE         RT_BIT(18)
    101 # define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(19)
    102 # define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(20)
     98#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(16)
     99#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(17)
     100#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(18)
     101#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(19)
     102#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(20)
    103103/* AMD-V specific state. */
    104 # define HM_CHANGED_SVM_GUEST_EFER_MSR           RT_BIT(16)
    105 # define HM_CHANGED_SVM_GUEST_APIC_STATE         RT_BIT(17)
    106 # define HM_CHANGED_SVM_RESERVED1                RT_BIT(18)
    107 # define HM_CHANGED_SVM_RESERVED2                RT_BIT(19)
    108 # define HM_CHANGED_SVM_RESERVED3                RT_BIT(20)
    109 
    110 # define HM_CHANGED_ALL_GUEST                   (  HM_CHANGED_GUEST_CR0                \
    111                                                  | HM_CHANGED_GUEST_CR3                \
    112                                                  | HM_CHANGED_GUEST_CR4                \
    113                                                  | HM_CHANGED_GUEST_GDTR               \
    114                                                  | HM_CHANGED_GUEST_IDTR               \
    115                                                  | HM_CHANGED_GUEST_LDTR               \
    116                                                  | HM_CHANGED_GUEST_TR                 \
    117                                                  | HM_CHANGED_GUEST_SEGMENT_REGS       \
    118                                                  | HM_CHANGED_GUEST_DEBUG              \
    119                                                  | HM_CHANGED_GUEST_RIP                \
    120                                                  | HM_CHANGED_GUEST_RSP                \
    121                                                  | HM_CHANGED_GUEST_RFLAGS             \
    122                                                  | HM_CHANGED_GUEST_CR2                \
    123                                                  | HM_CHANGED_GUEST_SYSENTER_CS_MSR    \
    124                                                  | HM_CHANGED_GUEST_SYSENTER_EIP_MSR   \
    125                                                  | HM_CHANGED_GUEST_SYSENTER_ESP_MSR   \
    126                                                  | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
    127                                                  | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
    128                                                  | HM_CHANGED_VMX_GUEST_APIC_STATE     \
    129                                                  | HM_CHANGED_VMX_ENTRY_CTLS           \
    130                                                  | HM_CHANGED_VMX_EXIT_CTLS)
    131 
    132 #define HM_CHANGED_HOST_CONTEXT                 RT_BIT(21)
     104#define HM_CHANGED_SVM_GUEST_EFER_MSR            RT_BIT(16)
     105#define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(17)
     106#define HM_CHANGED_SVM_RESERVED1                 RT_BIT(18)
     107#define HM_CHANGED_SVM_RESERVED2                 RT_BIT(19)
     108#define HM_CHANGED_SVM_RESERVED3                 RT_BIT(20)
     109
     110#define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0                \
     111                                                  | HM_CHANGED_GUEST_CR3                \
     112                                                  | HM_CHANGED_GUEST_CR4                \
     113                                                  | HM_CHANGED_GUEST_GDTR               \
     114                                                  | HM_CHANGED_GUEST_IDTR               \
     115                                                  | HM_CHANGED_GUEST_LDTR               \
     116                                                  | HM_CHANGED_GUEST_TR                 \
     117                                                  | HM_CHANGED_GUEST_SEGMENT_REGS       \
     118                                                  | HM_CHANGED_GUEST_DEBUG              \
     119                                                  | HM_CHANGED_GUEST_RIP                \
     120                                                  | HM_CHANGED_GUEST_RSP                \
     121                                                  | HM_CHANGED_GUEST_RFLAGS             \
     122                                                  | HM_CHANGED_GUEST_CR2                \
     123                                                  | HM_CHANGED_GUEST_SYSENTER_CS_MSR    \
     124                                                  | HM_CHANGED_GUEST_SYSENTER_EIP_MSR   \
     125                                                  | HM_CHANGED_GUEST_SYSENTER_ESP_MSR   \
     126                                                  | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \
     127                                                  | HM_CHANGED_VMX_GUEST_ACTIVITY_STATE \
     128                                                  | HM_CHANGED_VMX_GUEST_APIC_STATE     \
     129                                                  | HM_CHANGED_VMX_ENTRY_CTLS           \
     130                                                  | HM_CHANGED_VMX_EXIT_CTLS)
     131
     132#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(21)
    133133/** @} */
    134134
     
    514514typedef struct HMCPU
    515515{
    516     /** Set if we don't have to flush the TLB on VM entry. */
    517     bool                        fResumeVM;
    518516    /** Set if we need to flush the TLB during the world switch. */
    519517    bool                        fForceTLBFlush;
     
    528526    /** Whether we've completed the inner HM leave function. */
    529527    bool                        fLeaveDone;
    530     uint8_t                     abAlignment[1];
     528    uint8_t                     abAlignment[2];
    531529
    532530    /** World switch exit counter. */
     
    652650        } LastError;
    653651
     652        /** State of the VMCS. */
     653        uint32_t                    uVmcsState;
    654654        /** Which host-state bits to restore before being preempted. */
    655655        uint32_t                    fRestoreHostFlags;