VirtualBox

Ignore:
Timestamp:
Aug 23, 2013 6:11:36 PM (11 years ago)
Author:
vboxsync
Message:

VMM/HM: Preemption hooks, work in progress. Hopefully I didn't break the non-hook case.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r48019 r48037  
    16111611            VMMRZCallRing3Disable(pVCpu);                        /* No longjmps (log-flush, locks) in this fragile context. */
    16121612
    1613             hmR0SvmLeave(pVM, pVCpu, pCtx);
    1614 
    1615             int rc = HMR0LeaveEx(pVCpu);                         /* Leave HM context, takes care of local init (term). */
     1613            if (!pVCpu->hm.s.fLeaveDone)
     1614            {
     1615                hmR0SvmLeave(pVM, pVCpu, pCtx);
     1616                pVCpu->hm.s.fLeaveDone = true;
     1617            }
     1618
     1619            int rc = HMR0LeaveCpu(pVCpu);                         /* Leave HM context, takes care of local init (term). */
    16161620            AssertRC(rc); NOREF(rc);
    16171621
     
    16321636             * initializing AMD-V if necessary (onlined CPUs, local init etc.)
    16331637             */
    1634             HMR0EnterEx(pVCpu);
    1635             Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0));
     1638            int rc = HMR0EnterCpu(pVCpu);
     1639            AssertRC(rc); NOREF(rc);
     1640            Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
    16361641
    16371642            pVCpu->hm.s.fLeaveDone = false;
     
    17181723                                      | HM_CHANGED_SVM_RESERVED3);
    17191724
    1720     AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST),
    1721              ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
    1722               pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
     1725    /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
     1726    AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
     1727              || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
     1728               ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
     1729                pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
    17231730
    17241731    Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss, pCtx->rsp));
     
    18991906static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    19001907{
     1908    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    19011909    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    19021910    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    19031911
     1912    /* Restore host FPU state if necessary and resync on next R0 reentry. */
     1913    if (CPUMIsGuestFPUStateActive(pVCpu))
     1914    {
     1915        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
     1916        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
     1917        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     1918    }
     1919
     1920    /*
     1921     * Restore host debug registers if necessary and resync on next R0 reentry.
     1922     */
     1923#ifdef VBOX_STRICT
     1924    if (CPUMIsHyperDebugStateActive(pVCpu))
     1925    {
     1926        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     1927        Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
     1928        Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
     1929    }
     1930#endif
     1931    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
     1932        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     1933
     1934    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     1935    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     1936
     1937    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
     1938    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
     1939    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
     1940    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
     1941    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
     1942
     1943    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
     1944}
     1945
     1946
     1947DECLINLINE(void) hmR0SvmLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     1948{
     1949    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     1950
    19041951    /* Avoid repeating this work when thread-context hooks are used and we had been preempted before
    1905        which would've done this work from the SVMR0ThreadCtxCallback(). */
     1952       which would've done this work from the SVMR0ThreadCtxCallback(). */
    19061953    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    19071954    bool fPreemptDisabled = false;
     
    19151962    if (!pVCpu->hm.s.fLeaveDone)
    19161963    {
    1917         /* Restore host FPU state if necessary and resync on next R0 reentry .*/
    1918         if (CPUMIsGuestFPUStateActive(pVCpu))
    1919         {
    1920             CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
    1921             Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    1922             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    1923         }
    1924 
    1925         /*
    1926          * Restore host debug registers if necessary and resync on next R0 reentry.
    1927          */
    1928 #ifdef VBOX_STRICT
    1929         if (CPUMIsHyperDebugStateActive(pVCpu))
    1930         {
    1931             PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    1932             Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
    1933             Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
    1934         }
    1935 #endif
    1936         if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
    1937             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    1938 
    1939         Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    1940         Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    1941 
    1942         STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
    1943         STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
    1944         STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
    1945         STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
    1946         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
    1947 
    1948         VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
    1949 
     1964        hmR0SvmLeave(pVM, pVCpu, pCtx);
    19501965        pVCpu->hm.s.fLeaveDone = true;
    19511966    }
     1967
     1968    /* Deregister hook now that we've left HM context before re-enabling preemption. */
     1969    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
     1970     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
     1971    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
     1972        VMMR0ThreadCtxHooksDeregister(pVCpu);
     1973
     1974    /* Leave HM context. This takes care of local init (term). */
     1975    int rc = HMR0LeaveCpu(pVCpu);
     1976    AssertRC(rc); NOREF(rc);
    19521977
    19531978    /* Restore preemption if we previous disabled it ourselves. */
     
    19681993static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    19691994{
    1970     hmR0SvmLeave(pVM, pVCpu, pCtx);
     1995    hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
    19711996}
    19721997
     
    20412066
    20422067    /* Sync. the necessary state for going back to ring-3. */
    2043     hmR0SvmLeave(pVM, pVCpu, pCtx);
     2068    hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
    20442069    STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
    20452070
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette