Changeset 48037 in vbox
Timestamp:                  Aug 23, 2013, 6:11:36 PM
svn:sync-xref-src-repo-rev: 88372
Location:                   trunk
Files:                      5 edited
trunk/include/VBox/vmm/hm.h (r47760 → r48037)

@@ lines 224-229 @@
 VMMR0_INT_DECL(int)  HMR0Enter(PVM pVM, PVMCPU pVCpu);
 VMMR0_INT_DECL(int)  HMR0Leave(PVM pVM, PVMCPU pVCpu);
-VMMR0_INT_DECL(void) HMR0EnterEx(PVMCPU pVCpu);
-VMMR0_INT_DECL(int)  HMR0LeaveEx(PVMCPU pVCpu);
+VMMR0_INT_DECL(int)  HMR0EnterCpu(PVMCPU pVCpu);
+VMMR0_INT_DECL(int)  HMR0LeaveCpu(PVMCPU pVCpu);
 VMMR0_INT_DECL(void) HMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser);
 VMMR0_INT_DECL(bool) HMR0SuspendPending(void);
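For orientation, here is a minimal sketch (not part of the changeset) of how a ring-0 caller would pair the renamed per-CPU functions. The wrapper function itself is hypothetical; the preemption-disabled requirement follows from the assertions in HMR0.cpp below.

/* Hypothetical ring-0 caller, illustrating the renamed per-CPU enter/leave API only. */
static int exampleUseHmOnThisCpu(PVMCPU pVCpu)
{
    /* Both functions assert that preemption is disabled on the calling thread. */
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    int rc = HMR0EnterCpu(pVCpu);          /* Enables VT-x/AMD-V on this CPU if it isn't configured yet. */
    if (RT_SUCCESS(rc))
    {
        /* ... work that requires HM context on the current CPU ... */
        int rc2 = HMR0LeaveCpu(pVCpu);     /* Per-CPU cleanup; clears the VCPU <-> host CPU mapping. */
        AssertRC(rc2);
    }

    RTThreadPreemptRestore(&PreemptState);
    return rc;
}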
trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r47989 → r48037)

@@ HMR0EnterCpu (formerly HMR0EnterEx), around line 1404 @@
 /**
- * Initializes the bare minimum state required for entering HM context.
- *
+ * Turns on HM on the CPU if necessary and initializes the bare minimum state
+ * required for entering HM context.
+ *
+ * @returns VBox status code.
  * @param   pvCpu       Pointer to the VMCPU.
  *
  * @remarks No-long-jump zone!!!
  */
-VMMR0_INT_DECL(void) HMR0EnterEx(PVMCPU pVCpu)
+VMMR0_INT_DECL(int) HMR0EnterCpu(PVMCPU pVCpu)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
+    int rc = VINF_SUCCESS;
     RTCPUID idCpu = RTMpCpuId();
     PHMGLOBALCPUINFO pCpu = &g_HvmR0.aCpuInfo[idCpu];
…
     /* Enable VT-x or AMD-V if local init is required, or enable if it's a freshly onlined CPU. */
     if (!pCpu->fConfigured)
-        hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
-
-    /* Reload host-context (back from ring-3/migrated CPUs), reload guest CR0 (for FPU bits). */
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0;
+        rc = hmR0EnableCpu(pVCpu->CTX_SUFF(pVM), idCpu);
+
+    /* Reload host-context (back from ring-3/migrated CPUs), reload host context & shared bits. */
+    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE;
     pVCpu->hm.s.idEnteredCpu = idCpu;
+    return rc;
 }

@@ HMR0Enter, around line 1441 @@
 VMMR0_INT_DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
 {
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-
     /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
     AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
     /* Load the bare minimum state required for entering HM. */
-    HMR0EnterEx(pVCpu);
+    int rc = HMR0EnterCpu(pVCpu);
+    AssertRCReturn(rc, rc);
 
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
…
     Assert(pCpu);
     Assert(pCtx);
-    Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0));
-
-    int rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
-    AssertMsgRC(rc, ("pfnEnterSession failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
+    Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
+
+    rc = g_HvmR0.pfnEnterSession(pVM, pVCpu, pCpu);
+    AssertMsgRCReturn(rc, ("pfnEnterSession failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
 
     /* Load the host as we may be resuming code after a longjmp and quite
        possibly be scheduled on a different CPU. */
-    rc |= g_HvmR0.pfnSaveHostState(pVM, pVCpu);
-    AssertMsgRC(rc, ("pfnSaveHostState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
+    rc = g_HvmR0.pfnSaveHostState(pVM, pVCpu);
+    AssertMsgRCReturn(rc, ("pfnSaveHostState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
 
     /** @todo This is not needed to be done here anymore, can fix/optimize later. */
-    rc |= g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
-    AssertMsgRC(rc, ("pfnLoadGuestState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu));
+    rc = g_HvmR0.pfnLoadGuestState(pVM, pVCpu, pCtx);
+    AssertMsgRCReturn(rc, ("pfnLoadGuestState failed. rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
 
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
…

@@ HMR0LeaveCpu (formerly HMR0LeaveEx), around line 1489 @@
 /**
- * Deinitializes the bare minimum state used for HM context.
+ * Deinitializes the bare minimum state used for HM context and if necessary
+ * disable HM on the CPU.
  *
  * @returns VBox status code.
  * @param   pVCpu       Pointer to the VMCPU.
  */
-VMMR0_INT_DECL(int) HMR0LeaveEx(PVMCPU pVCpu)
+VMMR0_INT_DECL(int) HMR0LeaveCpu(PVMCPU pVCpu)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
         VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
 
+    /* Clear the VCPU <-> host CPU mapping as we've left HM context. */
+    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
+
     return VINF_SUCCESS;
 }

@@ HMR0Leave, around line 1533 @@
 VMMR0_INT_DECL(int) HMR0Leave(PVM pVM, PVMCPU pVCpu)
 {
-    /** @todo r=bird: This can't be entirely right? */
-    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
-
-    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-    AssertPtr(pCtx);
-
-    bool fDisabledPreempt = false;
-    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
-    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
-    {
-        Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
-        RTThreadPreemptDisable(&PreemptState);
-        fDisabledPreempt = true;
-    }
-
-    int rc = g_HvmR0.pfnLeaveSession(pVM, pVCpu, pCtx);
-
-    if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
-    {
-        /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
-           and ring-3 calls when thread-context hooks are not supported. */
-        RTCPUID idCpu = RTMpCpuId();
-        AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
-                      || RT_FAILURE_NP(rc), ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
-                      rc = VERR_HM_WRONG_CPU_1);
-    }
-
-    /* Leave HM context, takes care of local init (term). */
-    if (RT_SUCCESS(rc))
-    {
-        rc = HMR0LeaveEx(pVCpu);
-        AssertRCReturn(rc, rc);
-    }
-
-    /* Deregister hook now that we've left HM context before re-enabling preemption. */
-    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
-     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
-    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
-        VMMR0ThreadCtxHooksDeregister(pVCpu);
-
-    if (fDisabledPreempt)
-        RTThreadPreemptRestore(&PreemptState);
-
-    /* Guest FPU and debug state shouldn't be active now, it's likely that we're going back to ring-3. */
-    Assert(!CPUMIsGuestFPUStateActive(pVCpu));
-    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
-
-    return rc;
+    /* Nothing to do currently. Taken care of HMR0LeaveCpu() and in hmR0VmxLeaveSession() and hmR0SvmLeaveSession(). */
+    /** @todo refactor later to more common code. */
+    return VINF_SUCCESS;
 }
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r48019 → r48037)

@@ thread-context callback, preempted path, around line 1613 @@
     VMMRZCallRing3Disable(pVCpu);       /* No longjmps (log-flush, locks) in this fragile context. */
 
-    hmR0SvmLeave(pVM, pVCpu, pCtx);
-
-    int rc = HMR0LeaveEx(pVCpu);        /* Leave HM context, takes care of local init (term). */
+    if (!pVCpu->hm.s.fLeaveDone)
+    {
+        hmR0SvmLeave(pVM, pVCpu, pCtx);
+        pVCpu->hm.s.fLeaveDone = true;
+    }
+
+    int rc = HMR0LeaveCpu(pVCpu);       /* Leave HM context, takes care of local init (term). */
     AssertRC(rc); NOREF(rc);
…

@@ thread-context callback, resumed path, around line 1638 @@
      * initializing AMD-V if necessary (onlined CPUs, local init etc.)
      */
-    HMR0EnterEx(pVCpu);
-    Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0));
+    int rc = HMR0EnterCpu(pVCpu);
+    AssertRC(rc); NOREF(rc);
+    Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
 
     pVCpu->hm.s.fLeaveDone = false;
…

@@ guest-state loaded assertion, around line 1725 @@
                                     | HM_CHANGED_SVM_RESERVED3);
 
-    AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST),
-              ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
-               pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
+    /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
+    AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
+              || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
+              ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
+               pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
 
     Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss, pCtx->rsp));
…

@@ hmR0SvmLeave and new hmR0SvmLeaveSession, around line 1906 @@
 static void hmR0SvmLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
+    /* Restore host FPU state if necessary and resync on next R0 reentry .*/
+    if (CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
+        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+    }
+
+    /*
+     * Restore host debug registers if necessary and resync on next R0 reentry.
+     */
+#ifdef VBOX_STRICT
+    if (CPUMIsHyperDebugStateActive(pVCpu))
+    {
+        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+        Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
+        Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
+    }
+#endif
+    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+
+    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
+
+    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+}
+
+
+DECLINLINE(void) hmR0SvmLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+
     /* Avoid repeating this work when thread-context hooks are used and we had been preempted before
-       which would've done this work from the SVMR0ThreadCtxCallback(). */
+       which would've done this work from the VMXR0ThreadCtxCallback(). */
     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     bool fPreemptDisabled = false;
…
     if (!pVCpu->hm.s.fLeaveDone)
     {
-        [the FPU/debug restore, statistics and VMCPU_CMPXCHG_STATE block shown above, previously inline here]
-
+        hmR0SvmLeave(pVM, pVCpu, pCtx);
         pVCpu->hm.s.fLeaveDone = true;
     }
+
+    /* Deregister hook now that we've left HM context before re-enabling preemption. */
+    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
+     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
+    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+        VMMR0ThreadCtxHooksDeregister(pVCpu);
+
+    /* Leave HM context. This takes care of local init (term). */
+    int rc = HMR0LeaveCpu(pVCpu);
+    AssertRC(rc); NOREF(rc);
 
     /* Restore preemption if we previous disabled it ourselves. */
…

@@ hmR0SvmLongJmpToRing3 and the ring-3 sync path, around lines 1995 and 2068 @@
 static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    hmR0SvmLeave(pVM, pVCpu, pCtx);
+    hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
 }
…
     /* Sync. the necessary state for going back to ring-3. */
-    hmR0SvmLeave(pVM, pVCpu, pCtx);
+    hmR0SvmLeaveSession(pVM, pVCpu, pCtx);
     STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
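The fLeaveDone flag introduced above is what keeps the preemption callback and the longjmp/exit-to-ring-3 path from saving the shared FPU/debug state twice. A condensed sketch of the guard, assembled from the hunks above (the surrounding callback and session plumbing is elided):

    /* Both the preemption path of the thread-context callback and hmR0SvmLeaveSession() funnel
       through this guard, so whichever runs first does the work and the other becomes a no-op. */
    if (!pVCpu->hm.s.fLeaveDone)
    {
        hmR0SvmLeave(pVM, pVCpu, pCtx);        /* Saves guest FPU/DRx, marks CR0/DEBUG for reload. */
        pVCpu->hm.s.fLeaveDone = true;
    }
    int rc = HMR0LeaveCpu(pVCpu);              /* Per-CPU HM cleanup (local init/term). */
    AssertRC(rc); NOREF(rc);

    /* The matching reset happens on the resume/enter side, right after HMR0EnterCpu(). */
    pVCpu->hm.s.fLeaveDone = false;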
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r48029 → r48037)

@@ hmR0VmxLoadSharedCR0 (formerly part of hmR0VmxLoadGuestControlRegs), around line 2965 @@
 /**
- * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
- * in the VMCS.
+ * Loads the guest CR0 control register into the guest-state area in the VMCS.
+ * CR0 is partially shared with the host and we have to consider the FPU bits.
  *
…
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    int rc = VINF_SUCCESS;
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-
+static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
     /*
      * Guest CR0.
      * Guest FPU.
      */
+    int rc = VINF_SUCCESS;
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     {
-        Assert(!(pCtx->cr0 >> 32));
-        uint32_t u32GuestCR0 = pCtx->cr0;
+        Assert(!(pMixedCtx->cr0 >> 32));
+        uint32_t u32GuestCR0 = pMixedCtx->cr0;
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
 
         /* The guest's view (read access) of its CR0 is unblemished. */
…
         if (pVM->hm.s.fNestedPaging)
         {
-            if (CPUMIsGuestPagingEnabledEx(pCtx))
+            if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
             {
                 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
…
         /* Catch floating point exceptions if we need to report them to the guest in a different way. */
         bool fInterceptMF = false;
-        if (!(pCtx->cr0 & X86_CR0_NE))
+        if (!(pMixedCtx->cr0 & X86_CR0_NE))
             fInterceptMF = true;
…
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     }
+    return rc;
+}
+
+
+/**
+ * Loads the guest control registers (CR3, CR4) into the guest-state area
+ * in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    int rc = VINF_SUCCESS;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
…
 [The remaining references to pCtx inside the CR3 and CR4 blocks are renamed to pMixedCtx
  (CPUMIsGuestPagingEnabledEx, CPUMIsGuestInPAEModeEx, GCPhysGuestCR3 = pMixedCtx->cr3, the CR4
  assertions and checks); the logic itself is unchanged.]
…

@@ hmR0VmxLoadSharedDebugState (formerly hmR0VmxLoadGuestDebugState), around line 3349 @@
  * This also sets up whether #DB and MOV DRx accesses cause VM exits.
  *
+ * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
+ *
  * @returns VBox status code.
…
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxLoadGuestDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
…

@@ hmR0VmxValidateSegmentRegs, around line 3481 @@
  * Strict function to validate segment registers.
  *
- * @remarks Requires CR0.
+ * @remarks ASSUMES CR0 is up to date.
  */
 static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
…
         Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
         Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
-        Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
         if (   !(pCtx->cr0 & X86_CR0_PE)
             || pCtx->cs.Attr.n.u4Type == 3)
…

@@ hmR0VmxLoadGuestSegmentRegs doc comment, around line 3712 @@
  *                      before using them.
  *
- * @remarks Requires CR0 (strict builds validation).
+ * @remarks ASSUMES CR0 is up to date (strict builds validation).
  * @remarks No-long-jump zone!!!
  */

@@ hmR0VmxLeave and new hmR0VmxLeaveSession, around line 6092 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
+    RTCPUID idCpu = RTMpCpuId();
+    Log4Func(("HostCpuId=%u\n", idCpu));
+
+    /* Save the guest state if necessary. */
+    if (pVCpu->hm.s.vmx.fUpdatedGuestState != HMVMX_UPDATED_GUEST_ALL)
+    {
+        int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
+        AssertRC(rc);
+        Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
+    }
+
+    /* Restore host FPU state if necessary and resync on next R0 reentry .*/
+    if (CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
+        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+    }
+
+    /* Restore host debug registers if necessary and resync on next R0 reentry. */
+#ifdef VBOX_STRICT
+    if (CPUMIsHyperDebugStateActive(pVCpu))
+        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
+#endif
+    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+
+    /* Restore host-state bits that VT-x only restores partially. */
+    if (pVCpu->hm.s.vmx.fRestoreHostFlags)
+    {
+        Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
+        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
+        pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+    }
+
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
+    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
+
+    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+
+    /** @todo This kinda defeats the purpose of having preemption hooks.
+     *        The problem is, deregistering the hooks should be moved to a place that
+     *        lasts until the EMT is about to be destroyed not everytime while leaving HM
+     *        context.
+     */
+    if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
+    {
+        int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
+        AssertRC(rc);
+        pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
+        Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
+    }
+
+    pVCpu->hm.s.vmx.uVmcsState &= ~HMVMX_VMCS_STATE_LAUNCHED;
+    NOREF(idCpu);
+}
+
+
+DECLINLINE(void) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+
     /* Avoid repeating this work when thread-context hooks are used and we had been preempted before
        which would've done this work from the VMXR0ThreadCtxCallback(). */
…
     }
 
-    RTCPUID idCpu = RTMpCpuId();
-    Log4Func(("HostCpuId=%u\n", idCpu));
-
     if (!pVCpu->hm.s.fLeaveDone)
     {
-        Log4Func(("Leaving: HostCpuId=%u\n", idCpu));
-
-        [the save-guest-state, FPU/debug restore, host-state restore, statistics and VMCLEAR block shown
-         above in hmR0VmxLeave(), previously executed inline here]
-
+        hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
         pVCpu->hm.s.fLeaveDone = true;
     }
 
-    NOREF(idCpu);
+    /* Deregister hook now that we've left HM context before re-enabling preemption. */
+    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
+     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
+    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+        VMMR0ThreadCtxHooksDeregister(pVCpu);
+
+    /* Leave HM context. This takes care of local init (term). */
+    int rc = HMR0LeaveCpu(pVCpu);
+    AssertRC(rc); NOREF(rc);
 
     /* Restore preemption if we previous disabled it ourselves. */
…

@@ hmR0VmxLongJmpToRing3 and the ring-3 sync path, around lines 6210 and 6263 @@
 DECLINLINE(void) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-    hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
+    hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
 }
…
     /* Save guest state and restore host state bits. */
-    hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
+    hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
     STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
…

@@ VMXR0ThreadCtxCallback: preempted and resumed paths, around lines 6932 and 6960 @@
     /* Save the guest-state, restore host-state (FPU, debug etc.). */
-    hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
+    if (!pVCpu->hm.s.fLeaveDone)
+    {
+        hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
+        pVCpu->hm.s.fLeaveDone = true;
+    }
 
     /* Leave HM context, takes care of local init (term). */
-    int rc = HMR0LeaveEx(pVCpu);
-    AssertRC(rc);
+    int rc = HMR0LeaveCpu(pVCpu);
+    AssertRC(rc); NOREF(rc);
 
     /* Restore longjmp state. */
     VMMRZCallRing3Enable(pVCpu);
-    NOREF(rc);
     break;
 }
…
     /* Initialize the bare minimum state required for HM. This takes care of
        initializing VT-x if necessary (onlined CPUs, local init etc.) */
-    HMR0EnterEx(pVCpu);
-    Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_GUEST_CR0));
+    int rc = HMR0EnterCpu(pVCpu);
+    AssertRC(rc);
+    Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
 
     /* Load the active VMCS as the current one. */
     if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
     {
-        int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
+        rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
         AssertRC(rc); NOREF(rc);
         pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
…

@@ hmR0VmxLoadGuestState, around lines 7117-7135 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    /* Must be done after CR0 is loaded (strict builds require CR0 for segment register validation checks). */
+    rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
+    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+    /* Assumes CR0 is up-to-date (strict builds require CR0 for segment register validation checks). */
     rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    rc = hmR0VmxLoadGuestDebugState(pVCpu, pMixedCtx);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugState: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
     rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
…
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
-    /* Must be done after hmR0VmxLoadGuestDebugState() as it may have updated eflags.TF for debugging purposes. */
+    /*
+     * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
+     * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
+     */
     rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
…

@@ new hmR0VmxLoadSharedState, around line 7155 @@
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;
 }
+
+
+/**
+ * Loads the state shared between the host and guest into the VMCS.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
+    {
+        int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
+        AssertRC(rc);
+    }
+
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
+    {
+        int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
+        AssertRC(rc);
+
+        /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
+        if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
+        {
+            rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
+            AssertRC(rc);
+        }
+    }
+
+    AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#x\n",
+              pVCpu->hm.s.fContextUseFlags));
+}

@@ hmR0VmxLoadGuestStateOptimal, around lines 7219 and 7247 @@
 /**
- * Wrapper for loading the guest-state bits in the inner VT-x execution loop.
+ * Worker for loading the guest-state bits in the inner VT-x execution loop.
  *
  * @param   pVM         Pointer to the VM.
…
     }
 
-#ifdef VBOX_STRICT
-    /* When thread-context hooks are available, we could be preempted which means re-updating Guest.CR0
-       (shared FPU state) and debug controls (shared debug state). This is done in hmR0VmxPreRunGuestCommitted() */
-    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
-    {
-        AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
-                  || (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST) == HM_CHANGED_GUEST_CR0
-                  || (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST) == (HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_DEBUG),
-                  ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
-    }
-    else
-    {
-        AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST), ("fContextUseFlags=%#x\n",
-                  pVCpu->hm.s.fContextUseFlags));
-    }
-#endif
+    /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
+    AssertMsg(   !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
+              || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
+              ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
 
 #ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
…

@@ hmR0VmxPreRunGuest and hmR0VmxPreRunGuestCommitted, around lines 7324-7421 @@
     /*
      * When thread-context hooks are used, load the required guest-state bits
-     * here before we go ahead and disable interrupts.
+     * here before we go ahead and disable interrupts. We can handle getting preempted
+     * while loading the guest state.
      */
     if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
…
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     Assert(VMMR0IsLogFlushDisabled(pVCpu));
-
 #ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
…
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
 #endif
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
…
     {
         /*
-         * If we got preempted previously while loading the guest state, the guest FPU and debug
-         * state need to be re-updated because we share them with the host state.
+         * If we are injecting events real-on-v86 mode guest then we potentially have to update
+         * RIP and other registers, i.e. hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent().
+         * Just reload the state here if we're in real-on-v86 mode.
          */
-        if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
-        {
-            if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
-                hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
-            if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
-                hmR0VmxLoadGuestDebugState(pVCpu, pMixedCtx);
-        }
-        else
-        {
-            /*
-             * If we are injecting events real-on-v86 mode guest then we potentially have to update
-             * RIP and other registers. Just reload the state here if we're in real-on-v86 mode.
-             */
+        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
             hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
-        }
     }
+
+    /* Load the state shared between host and guest (FPU, debug). */
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
+        hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
     AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
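Putting the VT-x pieces together, the preemption hook now roughly behaves as below. This is a sketch assembled from the hunks above, not verbatim source; the RTTHREADCTXEVENT case names are assumptions, since the switch statement of VMXR0ThreadCtxCallback() lies outside the changed hunks.

    /* Sketch of the VT-x thread-context callback after this change (event names assumed). */
    switch (enmEvent)
    {
        case RTTHREADCTXEVENT_PREEMPTING:              /* assumed event name */
            VMMRZCallRing3Disable(pVCpu);              /* No longjmps in this fragile context. */
            if (!pVCpu->hm.s.fLeaveDone)
            {
                hmR0VmxLeave(pVM, pVCpu, pMixedCtx);   /* Save guest state, restore FPU/debug/host bits, VMCLEAR. */
                pVCpu->hm.s.fLeaveDone = true;
            }
            HMR0LeaveCpu(pVCpu);                       /* Per-CPU HM cleanup (local init/term). */
            VMMRZCallRing3Enable(pVCpu);
            break;

        case RTTHREADCTXEVENT_RESUMED:                 /* assumed event name */
            HMR0EnterCpu(pVCpu);                       /* Re-enable VT-x if needed; marks host + shared state dirty. */
            if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
            {
                VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
                pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
            }
            pVCpu->hm.s.fLeaveDone = false;
            break;

        default:
            break;
    }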
trunk/src/VBox/VMM/include/HMInternal.h (r47990 → r48037)

@@ lines 131-138 @@
 
 #define HM_CHANGED_HOST_CONTEXT                  RT_BIT(21)
+
+/* Bits shared between host and guest. */
+#define HM_CHANGED_HOST_GUEST_SHARED_STATE       (  HM_CHANGED_GUEST_CR0   \
+                                                  | HM_CHANGED_GUEST_DEBUG)
 /** @} */
 
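Because the new define is simply the OR of the two shared bits, setting it (as HMR0EnterCpu() now does) marks both CR0 and the debug state for reload, and the per-bit loaders clear them individually. A minimal illustration, using only the flag manipulation visible in this changeset:

    /* After HMR0EnterCpu(): the host context plus both shared bits are dirty. */
    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE;

    /* In the VT-x pre-run path: reload shared state only while any shared bit remains set. */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
        hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);   /* clears HM_CHANGED_GUEST_CR0 and HM_CHANGED_GUEST_DEBUG */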