VirtualBox

Changeset 48037 in vbox


Timestamp: Aug 23, 2013 6:11:36 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 88372
Message: VMM/HM: Preemption hooks, work in progress. Hopefully I didn't break the non-hook case.
Location: trunk
Files: 5 edited

Legend:

Unmodified lines are shown as plain context, removed lines are prefixed with "-", and added lines are prefixed with "+".
  • trunk/include/VBox/vmm/hm.h

    r47760 → r48037

@@ -224 +224 @@
 VMMR0_INT_DECL(int)             HMR0Enter(PVM pVM, PVMCPU pVCpu);
 VMMR0_INT_DECL(int)             HMR0Leave(PVM pVM, PVMCPU pVCpu);
-VMMR0_INT_DECL(void)            HMR0EnterEx(PVMCPU pVCpu);
-VMMR0_INT_DECL(int)             HMR0LeaveEx(PVMCPU pVCpu);
+VMMR0_INT_DECL(int)             HMR0EnterCpu(PVMCPU pVCpu);
+VMMR0_INT_DECL(int)             HMR0LeaveCpu(PVMCPU pVCpu);
 VMMR0_INT_DECL(void)            HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser);
 VMMR0_INT_DECL(bool)            HMR0SuspendPending(void);
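
The hm.h change above renames the bare per-CPU enter/leave pair and makes both return a status code. As a rough illustration of how that pair is meant to be driven from a ring-0 thread-context hook (the actual dispatching lives in HMR0ThreadCtxCallback()), here is a minimal sketch; the callback name and the RTTHREADCTXEVENT enumerator names below are assumptions for illustration, not taken from this changeset.

    /* Minimal sketch only: pairing the new per-CPU helpers from a thread-context hook.
     * The function name and the RTTHREADCTXEVENT enumerators are assumed for illustration;
     * only HMR0EnterCpu()/HMR0LeaveCpu() come from the header above. */
    static DECLCALLBACK(void) hmR0SketchThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
    {
        PVMCPU pVCpu = (PVMCPU)pvUser;          /* The hook is registered per EMT, so pvUser is the VCPU. */
        switch (enmEvent)
        {
            case RTTHREADCTXEVENT_PREEMPTING:   /* Being scheduled out: leave HM context on this CPU. */
                HMR0LeaveCpu(pVCpu);
                break;

            case RTTHREADCTXEVENT_RESUMED:      /* Scheduled back in, possibly on another CPU: re-enter. */
                HMR0EnterCpu(pVCpu);
                break;

            default:
                break;
        }
    }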
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r47989 → r48037

@@ -1402 +1402 @@
 
 /**
- * Initializes the bare minimum state required for entering HM context.
- *
+ * Turns on HM on the CPU if necessary and initializes the bare minimum state
+ * required for entering HM context.
+ *
+ * @returns VBox status code.
  * @param   pvCpu       Pointer to the VMCPU.
  *
  * @remarks No-long-jump zone!!!
  */
-VMMR0_INT_DECL(void) HMR0EnterEx(PVMCPU pVCpu)
+VMMR0_INT_DECL(int) HMR0EnterCpu(PVMCPU pVCpu)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
+    int              rc    = VINF_SUCCESS;
     RTCPUID          idCpu = RTMpCpuId();
     PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];

@@ -1418 +1421 @@
     /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
     if (!pCpu->fConfigured)
-        hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
-
-    /* Reload host-context (back from ring-3/migrated CPUs), reload guest CR0 (for FPU bits). */
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0;
+        rc = hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
+
+    /* Reload host-context (back from ring-3/migrated CPUs), reload host context & shared bits. */
+    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE;
     pVCpu->hm.s.idEnteredCpu = idCpu;
+    return rc;
 }
 

@@ -1437 +1441 @@
 VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
 {
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-
     /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
     AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
     /* Load the bare minimum state required for entering HM. */
-    HMR0EnterEx(pVCpu);
+    int rc = HMR0EnterCpu(pVCpu);
+    AssertRCReturn(rc, rc);
 
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE

@@ -1455 +1459 @@
     Assert(pCpu);
     Assert(pCtx);
-    Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0));
-
-    int rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
-    AssertMsgRC(rc, ("pfnEnterSession failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
+    Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
+
+    rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
+    AssertMsgRCReturn(rc, ("pfnEnterSession failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
 
     /* Load the host as we may be resuming code after a longjmp and quite
        possibly be scheduled on a different CPU. */
-    rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
-    AssertMsgRC(rc, ("pfnSaveHostState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
+    rc = g_HvmR0.pfnSaveHostState(pVM, pVCpu);
+    AssertMsgRCReturn(rc, ("pfnSaveHostState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
 
     /** @todo This is not needed to be done here anymore, can fix/optimize later. */
-    rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
-    AssertMsgRC(rc, ("pfnLoadGuestState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
+    rc = g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
+    AssertMsgRCReturn(rc, ("pfnLoadGuestState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
 
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE

@@ -1483 +1487 @@
 
 /**
- * Deinitializes the bare minimum state used for HM context.
+ * Deinitializes the bare minimum state used for HM context and if necessary
+ * disable HM on the CPU.
  *
  * @returns VBox status code.
  * @param   pVCpu       Pointer to the VMCPU.
  */
-VMMR0_INT_DECL(int) HMR0LeaveEx(PVMCPU pVCpu)
+VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPU pVCpu)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

@@ -1509 +1514 @@
     VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
 
+    /* Clear the VCPU <-> host CPU mapping as we've left HM context. */
+    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
+
     return VINF_SUCCESS;
 }

@@ -1525 +1533 @@
 VMMR0_INT_DECL(int) HMR0Leave(PVM pVM, PVMCPU pVCpu)
 {
-    /** @todo r=bird: This can't be entirely right? */
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
-
-    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-    AssertPtr(pCtx);
-
-    bool fDisabledPreempt = false;
-    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
-    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
-    {
-        Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
-        RTThreadPreemptDisable(&PreemptState);
-        fDisabledPreempt = true;
-    }
-
-    int rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
-
-    if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
-    {
-        /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
-           and ring-3 calls when thread-context hooks are not supported. */
-        RTCPUID idCpu = RTMpCpuId();
-        AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
-                      || RT_FAILURE_NP(rc), ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
-                      rc = VERR_HM_WRONG_CPU_1);
-    }
-
-    /* Leave HM context, takes care of local init (term). */
-    if (RT_SUCCESS(rc))
-    {
-        rc = HMR0LeaveEx(pVCpu);
-        AssertRCReturn(rc, rc);
-    }
-
-    /* Deregister hook now that we've left HM context before re-enabling preemption. */
-    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
-     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
-    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
-        VMMR0ThreadCtxHooksDeregister(pVCpu);
-
-    if (fDisabledPreempt)
-        RTThreadPreemptRestore(&PreemptState);
-
-    /* Guest FPU and debug state shouldn't be active now, it's likely that we're going back to ring-3. */
-    Assert(!CPUMIsGuestFPUStateActive(pVCpu));
-    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-
-    return rc;
+    /* Nothing to do currently. Taken care of HMR0LeaveCpu() and in hmR0VmxLeaveSession() and hmR0SvmLeaveSession(). */
+    /** @todo refactor later to more common code. */
+    return VINF_SUCCESS;
 }
 
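
A pattern that recurs throughout the new leave paths above is the preemption guard: disable preemption with RTThreadPreemptDisable() only if it is currently enabled, do the work that must stay on one host CPU (for instance the HMR0LeaveCpu() call), and restore only what was actually disabled. A condensed sketch of that guard follows; the wrapper function itself is hypothetical, illustrative only, and not code from this changeset.

    /* Illustrative sketch of the preemption guard used around the HM leave paths;
     * the wrapper function itself is hypothetical. */
    static void hmR0SketchLeaveWithPreemptGuard(PVMCPU pVCpu)
    {
        RTTHREADPREEMPTSTATE PreemptState     = RTTHREADPREEMPTSTATE_INITIALIZER;
        bool                 fDisabledPreempt = false;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            RTThreadPreemptDisable(&PreemptState);  /* Pin ourselves to the current host CPU. */
            fDisabledPreempt = true;
        }

        int rc = HMR0LeaveCpu(pVCpu);               /* Work that must not be rescheduled half-way. */
        AssertRC(rc); NOREF(rc);

        if (fDisabledPreempt)                       /* Only restore what we disabled ourselves. */
            RTThreadPreemptRestore(&PreemptState);
    }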
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r48019 → r48037

@@ -1611 +1611 @@
             VMMRZCallRing3Disable(pVCpu);                        /* No longjmps (log-flush, locks) in this fragile context. */
 
-            hmR0SvmLeave(pVM, pVCpu, pCtx);
-
-            int rc = HMR0LeaveEx(pVCpu);                         /* Leave HM context, takes care of local init (term). */
+            if (!pVCpu->hm.s.fLeaveDone)
+            {
+                hmR0SvmLeave(pVM, pVCpu, pCtx);
+                pVCpu->hm.s.fLeaveDone = true;
+            }
+
+            int rc = HMR0LeaveCpu(pVCpu);                         /* Leave HM context, takes care of local init (term). */
             AssertRC(rc); NOREF(rc);
 

@@ -1632 +1636 @@
              * initializing AMD-V if necessary (onlined CPUs, local init etc.)
              */
-            HMR0EnterEx(pVCpu);
-            Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0));
+            int rc = HMR0EnterCpu(pVCpu);
+            AssertRC(rc); NOREF(rc);
+            Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
 
             pVCpu->hm.s.fLeaveDone = false;

@@ -1718 +1723 @@
                                       | HM_CHANGED_SVM_RESERVED3);
 
-    AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST),
-             ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
-              pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
+    /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
+    AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
+              || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
+               ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
+                pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
 
     Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss, pCtx->rsp));

@@ -1899 +1906 @@
 static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
+    /* Restore host FPU state if necessary and resync on next R0 reentry .*/
+    if (CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
+        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+    }
+
+    /*
+     * Restore host debug registers if necessary and resync on next R0 reentry.
+     */
+#ifdef VBOX_STRICT
+    if (CPUMIsHyperDebugStateActive(pVCpu))
+    {
+        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+        Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
+        Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
+    }
+#endif
+    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+
+    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
+
+    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+}
+
+
+DECLINLINE(void) hmR0SvmLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+
     /* Avoid repeating this work when thread-context hooks are used and we had been preempted before
-       which would've done this work from the SVMR0ThreadCtxCallback(). */
+       which would've done this work from the VMXR0ThreadCtxCallback(). */
     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     bool fPreemptDisabled = false;

@@ -1915 +1962 @@
     if (!pVCpu->hm.s.fLeaveDone)
     {
-        /* Restore host FPU state if necessary and resync on next R0 reentry .*/
-        if (CPUMIsGuestFPUStateActive(pVCpu))
-        {
-            CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
-            Assert(!CPUMIsGuestFPUStateActive(pVCpu));
-            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
-        }
-
-        /*
-         * Restore host debug registers if necessary and resync on next R0 reentry.
-         */
-#ifdef VBOX_STRICT
-        if (CPUMIsHyperDebugStateActive(pVCpu))
-        {
-            PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-            Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
-            Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
-        }
-#endif
-        if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
-            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
-
-        Assert(!CPUMIsHyperDebugStateActive(pVCpu));
-        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
-
-        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
-
+        hmR0SvmLeave(pVM, pVCpu, pCtx);
         pVCpu->hm.s.fLeaveDone = true;
     }
+
+    /* Deregister hook now that we've left HM context before re-enabling preemption. */
+    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
+     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
+    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+        VMMR0ThreadCtxHooksDeregister(pVCpu);
+
+    /* Leave HM context. This takes care of local init (term). */
+    int rc = HMR0LeaveCpu(pVCpu);
+    AssertRC(rc); NOREF(rc);
 
     /* Restore preemption if we previous disabled it ourselves. */

@@ -1968 +1993 @@
 static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    hmR0SvmLeave(pVM, pVCpu, pCtx);
+    hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
 }
 

@@ -2041 +2066 @@
 
     /* Sync. the necessary state for going back to ring-3. */
-    hmR0SvmLeave(pVM, pVCpu, pCtx);
+    hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
     STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
 
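
The SVM hunks above split the old monolithic leave path in two: hmR0SvmLeave() does the expensive save/restore work (FPU, debug registers, statistics), while hmR0SvmLeaveSession() wraps it with the fLeaveDone flag so the work runs exactly once, whether the trigger was the preemption hook or a longjmp back to ring-3, before deregistering the hook and calling HMR0LeaveCpu(). A condensed sketch of that guard with the heavy lifting elided; this is structural illustration only, not verbatim code from the changeset.

    /* Condensed, illustrative sketch of the once-only leave pattern; bodies elided. */
    static void hmR0SvmLeaveSessionSketch(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        /* If the thread-context hook already ran hmR0SvmLeave(), don't repeat the work. */
        if (!pVCpu->hm.s.fLeaveDone)
        {
            hmR0SvmLeave(pVM, pVCpu, pCtx);     /* FPU/debug restore, stats, VMCPU state switch. */
            pVCpu->hm.s.fLeaveDone = true;
        }

        /* Deregister the hook while still pinned to this CPU, then do the per-CPU leave. */
        if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
            VMMR0ThreadCtxHooksDeregister(pVCpu);

        int rc = HMR0LeaveCpu(pVCpu);
        AssertRC(rc); NOREF(rc);
    }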
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r48029 → r48037

@@ -2963 +2963 @@
 
 /**
- * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
- * in the VMCS.
+ * Loads the guest CR0 control register into the guest-state area in the VMCS.
+ * CR0 is partially shared with the host and we have to consider the FPU bits.
  *
  * @returns VBox status code.

@@ -2975 +2975 @@
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    int rc  = VINF_SUCCESS;
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-
+static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
     /*
      * Guest CR0.
      * Guest FPU.
      */
+    int rc = VINF_SUCCESS;
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     {
-        Assert(!(pCtx->cr0 >> 32));
-        uint32_t u32GuestCR0 = pCtx->cr0;
+        Assert(!(pMixedCtx->cr0 >> 32));
+        uint32_t u32GuestCR0 = pMixedCtx->cr0;
+        PVM      pVM         = pVCpu->CTX_SUFF(pVM);
 
         /* The guest's view (read access) of its CR0 is unblemished. */

@@ -2998 +2997 @@
         if (pVM->hm.s.fNestedPaging)
         {
-            if (CPUMIsGuestPagingEnabledEx(pCtx))
+            if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
             {
                 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */

@@ -3043 +3042 @@
         /* Catch floating point exceptions if we need to report them to the guest in a different way. */
         bool fInterceptMF = false;
-        if (!(pCtx->cr0 & X86_CR0_NE))
+        if (!(pMixedCtx->cr0 & X86_CR0_NE))
             fInterceptMF = true;
 

@@ -3138 +3137 @@
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     }
+    return rc;
+}
+
+
+/**
+ * Loads the guest control registers (CR3, CR4) into the guest-state area
+ * in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc  = VINF_SUCCESS;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 
     /*

@@ -3173 +3193 @@
 
             if (   pVM->hm.s.vmx.fUnrestrictedGuest
-                || CPUMIsGuestPagingEnabledEx(pCtx))
+                || CPUMIsGuestPagingEnabledEx(pMixedCtx))
             {
                 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
-                if (CPUMIsGuestInPAEModeEx(pCtx))
+                if (CPUMIsGuestInPAEModeEx(pMixedCtx))
                 {
                     rc  = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);                         AssertRCReturn(rc, rc);

@@ -3187 +3207 @@
                 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
                    have Unrestricted Execution to handle the guest when it's not using paging. */
-                GCPhysGuestCR3 = pCtx->cr3;
+                GCPhysGuestCR3 = pMixedCtx->cr3;
             }
             else

@@ -3228 +3248 @@
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
     {
-        Assert(!(pCtx->cr4 >> 32));
-        uint32_t u32GuestCR4 = pCtx->cr4;
+        Assert(!(pMixedCtx->cr4 >> 32));
+        uint32_t u32GuestCR4 = pMixedCtx->cr4;
 
         /* The guest's view of its CR4 is unblemished. */

@@ -3251 +3271 @@
         if (pVM->hm.s.fNestedPaging)
         {
-            if (   !CPUMIsGuestPagingEnabledEx(pCtx)
+            if (   !CPUMIsGuestPagingEnabledEx(pMixedCtx)
                 && !pVM->hm.s.vmx.fUnrestrictedGuest)
             {

@@ -3327 +3347 @@
 * This also sets up whether #DB and MOV DRx accesses cause VM exits.
 *
+ * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
+ *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.

@@ -3335 +3357 @@
 * @remarks No-long-jump zone!!!
 */
-static int hmR0VmxLoadGuestDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))

@@ -3457 +3479 @@
 * Strict function to validate segment registers.
 *
- * @remarks Requires CR0.
+ * @remarks ASSUMES CR0 is up to date.
 */
 static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)

@@ -3488 +3510 @@
         Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
         Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
-        Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
         if (   !(pCtx->cr0 & X86_CR0_PE)
             || pCtx->cs.Attr.n.u4Type == 3)

@@ -3689 +3710 @@
 *                      before using them.
 *
- * @remarks Requires CR0 (strict builds validation).
+ * @remarks ASSUMES CR0 is up to date (strict builds validation).
 * @remarks No-long-jump zone!!!
 */

@@ -6071 +6092 @@
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
+    RTCPUID idCpu = RTMpCpuId();
+    Log4Func(("HostCpuId=%u\n", idCpu));
+
+    /* Save the guest state if necessary. */
+    if (pVCpu->hm.s.vmx.fUpdatedGuestState != HMVMX_UPDATED_GUEST_ALL)
+    {
+        int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
+        AssertRC(rc);
+        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
+    }
+
+    /* Restore host FPU state if necessary and resync on next R0 reentry .*/
+    if (CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
+        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+    }
+
+    /* Restore host debug registers if necessary and resync on next R0 reentry. */
+#ifdef VBOX_STRICT
+    if (CPUMIsHyperDebugStateActive(pVCpu))
+        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
+#endif
+    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+
+    /* Restore host-state bits that VT-x only restores partially. */
+    if (pVCpu->hm.s.vmx.fRestoreHostFlags)
+    {
+        Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
+        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
+        pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+    }
+
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
+
+    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+
+    /** @todo This kinda defeats the purpose of having preemption hooks.
+     *  The problem is, deregistering the hooks should be moved to a place that
+     *  lasts until the EMT is about to be destroyed not everytime while leaving HM
+     *  context.
+     */
+    if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
+    {
+        int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
+        AssertRC(rc);
+        pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
+        Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
+    }
+
+    pVCpu->hm.s.vmx.uVmcsState &= ~HMVMX_VMCS_STATE_LAUNCHED;
+    NOREF(idCpu);
+}
+
+
+DECLINLINE(void) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+
     /* Avoid repeating this work when thread-context hooks are used and we had been preempted before
        which would've done this work from the VMXR0ThreadCtxCallback(). */

@@ -6082 +6173 @@
     }
 
-    RTCPUID idCpu = RTMpCpuId();
-    Log4Func(("HostCpuId=%u\n", idCpu));
-
     if (!pVCpu->hm.s.fLeaveDone)
     {
-        Log4Func(("Leaving: HostCpuId=%u\n", idCpu));
-
-        /* Save the guest state if necessary. */
-        if (pVCpu->hm.s.vmx.fUpdatedGuestState != HMVMX_UPDATED_GUEST_ALL)
-        {
-            int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
-            AssertRC(rc);
-            Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
-        }
-
-        /* Restore host FPU state if necessary and resync on next R0 reentry .*/
-        if (CPUMIsGuestFPUStateActive(pVCpu))
-        {
-            CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
-            Assert(!CPUMIsGuestFPUStateActive(pVCpu));
-            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
-        }
-
-        /* Restore host debug registers if necessary and resync on next R0 reentry. */
-#ifdef VBOX_STRICT
-        if (CPUMIsHyperDebugStateActive(pVCpu))
-            Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
-#endif
-        if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
-            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
-        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-        Assert(!CPUMIsHyperDebugStateActive(pVCpu));
-
-        /* Restore host-state bits that VT-x only restores partially. */
-        if (pVCpu->hm.s.vmx.fRestoreHostFlags)
-        {
-            Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
-            VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
-            pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
-        }
-
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
-
-        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
-
-        /** @todo This kinda defeats the purpose of having preemption hooks.
-         *  The problem is, deregistering the hooks should be moved to a place that
-         *  lasts until the EMT is about to be destroyed not everytime while leaving HM
-         *  context.
-         */
-        if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
-        {
-            int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
-            AssertRC(rc);
-            pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
-            Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
-        }
-
-        pVCpu->hm.s.vmx.uVmcsState &= ~HMVMX_VMCS_STATE_LAUNCHED;
+        hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
         pVCpu->hm.s.fLeaveDone = true;
     }
 
-    NOREF(idCpu);
+    /* Deregister hook now that we've left HM context before re-enabling preemption. */
+    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
+     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
+    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+        VMMR0ThreadCtxHooksDeregister(pVCpu);
+
+    /* Leave HM context. This takes care of local init (term). */
+    int rc = HMR0LeaveCpu(pVCpu);
+    AssertRC(rc); NOREF(rc);
 
     /* Restore preemption if we previous disabled it ourselves. */

@@ -6172 +6208 @@
 DECLINLINE(void) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-    hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
+    hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
 }
 

@@ -6225 +6261 @@
 
     /* Save guest state and restore host state bits. */
-    hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
+    hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
     STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
 

@@ -6895 +6931 @@
 
             /* Save the guest-state, restore host-state (FPU, debug etc.). */
-            hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
+            if (!pVCpu->hm.s.fLeaveDone)
+            {
+                hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
+                pVCpu->hm.s.fLeaveDone = true;
+            }
 
             /* Leave HM context, takes care of local init (term). */
-            int rc = HMR0LeaveEx(pVCpu);
-            AssertRC(rc);
+            int rc = HMR0LeaveCpu(pVCpu);
+            AssertRC(rc); NOREF(rc);
 
             /* Restore longjmp state. */
             VMMRZCallRing3Enable(pVCpu);
-            NOREF(rc);
             break;
         }

@@ -6920 +6959 @@
             /* Initialize the bare minimum state required for HM. This takes care of
                initializing VT-x if necessary (onlined CPUs, local init etc.) */
-            HMR0EnterEx(pVCpu);
-            Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0));
+            int rc = HMR0EnterCpu(pVCpu);
+            AssertRC(rc);
+            Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
 
             /* Load the active VMCS as the current one. */
             if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
             {
-                int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
+                rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
                 AssertRC(rc); NOREF(rc);
                 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;

@@ -7075 +7115 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    /* Must be done after CR0 is loaded (strict builds require CR0 for segment register validation checks). */
+    rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
+    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+    /* Assumes CR0 is up-to-date (strict builds require CR0 for segment register validation checks). */
     rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    rc = hmR0VmxLoadGuestDebugState(pVCpu, pMixedCtx);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugState: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
     rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

@@ -7091 +7128 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    /* Must be done after hmR0VmxLoadGuestDebugState() as it may have updated eflags.TF for debugging purposes. */
+    /*
+     * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
+     * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
+     */
     rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

@@ -7109 +7149 @@
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;
+}
+
+
+/**
+ * Loads the state shared between the host and guest into the VMCS.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
+    {
+        int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
+        AssertRC(rc);
+    }
+
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
+    {
+        int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
+        AssertRC(rc);
+
+        /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
+        if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
+        {
+            rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
+            AssertRC(rc);
+        }
+    }
+
+    AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#x\n",
+                                                                                     pVCpu->hm.s.fContextUseFlags));
 }
 

@@ -7137 +7215 @@
 }
 
-/**
- * Wrapper for loading the guest-state bits in the inner VT-x execution loop.
+
+/**
+ * Worker for loading the guest-state bits in the inner VT-x execution loop.
  *
  * @param   pVM             Pointer to the VM.

@@ -7166 +7245 @@
     }
 
-#ifdef VBOX_STRICT
-    /* When thread-context hooks are available, we could be preempted which means re-updating Guest.CR0
-       (shared FPU state) and debug controls (shared debug state). This is done in hmR0VmxPreRunGuestCommitted() */
-    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
-    {
-        AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
-                  ||  (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST) == HM_CHANGED_GUEST_CR0
-                  ||  (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST) == (HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_DEBUG),
-                     ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
-    }
-    else
-    {
-        AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST), ("fContextUseFlags=%#x\n",
-                                                                           pVCpu->hm.s.fContextUseFlags));
-    }
-#endif
+    /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
+    AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
+              || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
+              ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
 
 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE

@@ -7256 +7323 @@
     /*
      * When thread-context hooks are used, load the required guest-state bits
-     * here before we go ahead and disable interrupts.
+     * here before we go ahead and disable interrupts. We can handle getting preempted
+     * while loading the guest state.
      */
     if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))

@@ -7311 +7379 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
-
 #ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */

@@ -7317 +7384 @@
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
 #endif
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
     /*

@@ -7340 +7408 @@
     {
         /*
-         * If we got preempted previously while loading the guest state, the guest FPU and debug
-         * state need to be re-updated because we share them with the host state.
+         * If we are injecting events real-on-v86 mode guest then we potentially have to update
+         * RIP and other registers, i.e. hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent().
+         * Just reload the state here if we're in real-on-v86 mode.
          */
-        if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-        {
-            if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
-                hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
-            if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
-                hmR0VmxLoadGuestDebugState(pVCpu, pMixedCtx);
-        }
-        else
-        {
-            /*
-             * If we are injecting events real-on-v86 mode guest then we potentially have to update
-             * RIP and other registers. Just reload the state here if we're in real-on-v86 mode.
-             */
+        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
             hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
-        }
-    }
+    }
+
+    /* Load the state shared between host and guest (FPU, debug). */
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
+        hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
+
 
     /*
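
The VT-x hunks above introduce the same split on the load side: the bulk of the guest state may now be loaded while preemption is still enabled (getting preempted merely re-flags the affected bits), whereas the bits shared with the host, CR0/FPU and the debug registers, are loaded by hmR0VmxLoadSharedState() only after preemption has been disabled, right before committing to guest execution. The following schematic of that ordering uses only names visible in the hunks above; the surrounding run-loop is elided and the wrapper function is hypothetical, illustration only.

    /* Schematic only: ordering of guest-state vs. shared-state loading in the run loop. */
    static void hmR0VmxSketchLoadForRun(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    {
        /* 1. With thread-context hooks, most guest state can be loaded while preemption
         *    is still enabled; getting preempted merely sets the HM_CHANGED_* bits again. */
        if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
            hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);

        /* 2. ... preemption is disabled here in the real code path ... */

        /* 3. Only now load the host/guest shared bits (CR0/FPU, debug), since the host
         *    may have touched them every time this thread was scheduled out and back in. */
        if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
            hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
        AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
    }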
  • trunk/src/VBox/VMM/include/HMInternal.h

    r47990 → r48037

@@ -131 +131 @@
 
 #define HM_CHANGED_HOST_CONTEXT                  RT_BIT(21)
+
+/* Bits shared between host and guest. */
+#define HM_CHANGED_HOST_GUEST_SHARED_STATE       (  HM_CHANGED_GUEST_CR0                \
+                                                  | HM_CHANGED_GUEST_DEBUG)
 /** @} */
 