Changeset 48037 in vbox for trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
Timestamp: Aug 23, 2013 6:11:36 PM
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r48019)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r48037)
@@ -1611,7 +1611,11 @@
             VMMRZCallRing3Disable(pVCpu);         /* No longjmps (log-flush, locks) in this fragile context. */
 
-            hmR0SvmLeave(pVM, pVCpu, pCtx);
-
-            int rc = HMR0LeaveEx(pVCpu);          /* Leave HM context, takes care of local init (term). */
+            if (!pVCpu->hm.s.fLeaveDone)
+            {
+                hmR0SvmLeave(pVM, pVCpu, pCtx);
+                pVCpu->hm.s.fLeaveDone = true;
+            }
+
+            int rc = HMR0LeaveCpu(pVCpu);         /* Leave HM context, takes care of local init (term). */
             AssertRC(rc); NOREF(rc);
 
@@ -1632,6 +1636,7 @@
              * initializing AMD-V if necessary (onlined CPUs, local init etc.)
              */
-            HMR0EnterEx(pVCpu);
-            Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0));
+            int rc = HMR0EnterCpu(pVCpu);
+            AssertRC(rc); NOREF(rc);
+            Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
 
             pVCpu->hm.s.fLeaveDone = false;
@@ -1718,7 +1723,9 @@
                                       | HM_CHANGED_SVM_RESERVED3);
 
-    AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST),
-              ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
-               pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
+    /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
+    AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
+              || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
+              ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
+               pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
 
     Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss, pCtx->rsp));
@@ -1899,9 +1906,49 @@
 static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
+    /* Restore host FPU state if necessary and resync on next R0 reentry .*/
+    if (CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
+        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+    }
+
+    /*
+     * Restore host debug registers if necessary and resync on next R0 reentry.
+     */
+#ifdef VBOX_STRICT
+    if (CPUMIsHyperDebugStateActive(pVCpu))
+    {
+        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+        Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
+        Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
+    }
+#endif
+    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+
+    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
+
+    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+}
+
+
+DECLINLINE(void) hmR0SvmLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+
     /* Avoid repeating this work when thread-context hooks are used and we had been preempted before
-       which would've done this work from the SVMR0ThreadCtxCallback(). */
+       which would've done this work from the VMXR0ThreadCtxCallback(). */
     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     bool                 fPreemptDisabled = false;
@@ -1915,39 +1962,17 @@
     if (!pVCpu->hm.s.fLeaveDone)
     {
-        /* Restore host FPU state if necessary and resync on next R0 reentry .*/
-        if (CPUMIsGuestFPUStateActive(pVCpu))
-        {
-            CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
-            Assert(!CPUMIsGuestFPUStateActive(pVCpu));
-            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
-        }
-
-        /*
-         * Restore host debug registers if necessary and resync on next R0 reentry.
-         */
-#ifdef VBOX_STRICT
-        if (CPUMIsHyperDebugStateActive(pVCpu))
-        {
-            PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-            Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
-            Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
-        }
-#endif
-        if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
-            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
-
-        Assert(!CPUMIsHyperDebugStateActive(pVCpu));
-        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
-        STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
-
-        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
-
+        hmR0SvmLeave(pVM, pVCpu, pCtx);
         pVCpu->hm.s.fLeaveDone = true;
     }
+
+    /* Deregister hook now that we've left HM context before re-enabling preemption. */
+    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
+     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
+    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+        VMMR0ThreadCtxHooksDeregister(pVCpu);
+
+    /* Leave HM context. This takes care of local init (term). */
+    int rc = HMR0LeaveCpu(pVCpu);
+    AssertRC(rc); NOREF(rc);
 
     /* Restore preemption if we previous disabled it ourselves. */
@@ -1968,5 +1993,5 @@
 static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    hmR0SvmLeave(pVM, pVCpu, pCtx);
+    hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
 }
 
@@ -2041,5 +2066,5 @@
 
     /* Sync. the necessary state for going back to ring-3. */
-    hmR0SvmLeave(pVM, pVCpu, pCtx);
+    hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
     STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
 
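The core of this change is making the ring-0 "leave" work idempotent: the heavy host-state restore now lives only in hmR0SvmLeave(), and both callers (the thread-context preemption callback and the new hmR0SvmLeaveSession() used on the longjmp/exit-to-ring-3 paths) guard it with pVCpu->hm.s.fLeaveDone so it runs at most once per session. Below is a minimal standalone sketch of that guard pattern; the names (VCpuState, restoreHostState, leaveOnce) are hypothetical stand-ins for illustration, not VirtualBox APIs.

/* Sketch of the fLeaveDone guard introduced by this changeset: whichever exit
 * path runs first does the host-state restore, later callers become no-ops.
 * All identifiers here are made up for the example. */
#include <stdbool.h>
#include <stdio.h>

typedef struct VCpuState
{
    bool fLeaveDone;    /* Set once the host state has been restored. */
} VCpuState;

/* Stand-in for hmR0SvmLeave(): restore host FPU/debug state, stop counters. */
static void restoreHostState(VCpuState *pVCpu)
{
    (void)pVCpu;
    printf("restoring host FPU/debug state\n");
}

/* Stand-in for the guarded leave used by both exit paths. */
static void leaveOnce(VCpuState *pVCpu)
{
    if (!pVCpu->fLeaveDone)
    {
        restoreHostState(pVCpu);
        pVCpu->fLeaveDone = true;   /* Subsequent callers skip the work. */
    }
}

int main(void)
{
    VCpuState vcpu = { false };
    leaveOnce(&vcpu);   /* e.g. preemption callback fires first: does the work */
    leaveOnce(&vcpu);   /* e.g. longjmp-to-ring-3 follows: already done, no-op */
    return 0;
}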