Changeset 47989 in vbox
Timestamp: Aug 22, 2013, 1:56:52 PM (11 years ago)
Location: trunk
Files: 8 edited
trunk/include/VBox/vmm/vm.h
(r47671 → r47989)

The ring-0 variant of the VMCPU_ASSERT_EMT() macro is upgraded from a bare Assert() to an AssertMsg() that reports the offending native thread, the expected EMT thread and the virtual CPU id:

     # define VMCPU_ASSERT_EMT(pVCpu)  Assert(VMCPU_IS_EMT(pVCpu))
     #elif defined(IN_RING0)
    -# define VMCPU_ASSERT_EMT(pVCpu)  Assert(VMCPU_IS_EMT(pVCpu))
    +# define VMCPU_ASSERT_EMT(pVCpu)  AssertMsg(VMCPU_IS_EMT(pVCpu), \
    +    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%u\n", \
    +     RTThreadNativeSelf(), (pVCpu) ? (pVCpu)->hNativeThreadR0 : 0, \
    +     (pVCpu) ? (pVCpu)->idCpu : 0))
     #else
     # define VMCPU_ASSERT_EMT(pVCpu) \
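To see what the richer assertion buys in practice, here is a minimal standalone C sketch of the same idea. ASSERT_MSG, currentThreadId() and the VCPU struct are hypothetical stand-ins for IPRT's AssertMsg, RTThreadNativeSelf() and the real VMCPU; they are not VirtualBox code.

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for IPRT's AssertMsg: print the formatted
       context first, then trip the ordinary assertion. */
    #define ASSERT_MSG(expr, ...) \
        do { \
            if (!(expr)) \
            { \
                fprintf(stderr, __VA_ARGS__); \
                assert(expr); \
            } \
        } while (0)

    typedef struct VCPU
    {
        uint32_t  idCpu;         /* Virtual CPU index. */
        uintptr_t hNativeThread; /* Native thread that owns this VCPU (the EMT). */
    } VCPU;

    static uintptr_t currentThreadId(void) { return 0x1234; } /* Placeholder. */

    static void assertOnEmt(const VCPU *pVCpu)
    {
        uintptr_t hSelf = currentThreadId();
        ASSERT_MSG(hSelf == pVCpu->hNativeThread,
                   "Not emulation thread! Thread=%#zx ThreadEMT=%#zx idCpu=%u\n",
                   (size_t)hSelf, (size_t)pVCpu->hNativeThread, (unsigned)pVCpu->idCpu);
    }

    int main(void)
    {
        VCPU vcpu = { 0, 0x1234 };
        assertOnEmt(&vcpu); /* Passes: the "current" thread matches the EMT. */
        return 0;
    }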
trunk/include/VBox/vmm/vmm.h
(r47760 → r47989)

A new ring-0 internal API is declared next to VMMR0IsLongJumpArmed():

     VMMR0DECL(int)       VMMR0TermVM(PVM pVM, PGVM pGVM);
     VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu);
    +VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu);
     VMMR0DECL(int)       VMMR0ThreadCtxHooksCreate(PVMCPU pVCpu);
     VMMR0DECL(void)      VMMR0ThreadCtxHooksRelease(PVMCPU pVCpu);

The VMMR0LogFlushDisable()/VMMR0LogFlushEnable()/VMMR0IsLogFlushDisabled() declarations and their non-LOG_ENABLED stubs are only re-indented (whitespace change).
trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
(r46861 → r47989)

In the VCPU lookup (search first by host CPU id, the most common case, then by native thread id for the page-fusion case), the host-CPU-id scan is now only performed while preemption is disabled. The comment becomes a block comment, and a note from ramshankar is added saying the scan buys nothing in terms of performance and is kept "for hysterical raisins" and as a reference should a hashing approach be implemented later:

    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        /* RTMpCpuId had better be cheap. */
        RTCPUID idHostCpu = RTMpCpuId();

        /** @todo optimize for large number of VCPUs when that becomes more common. */
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            if (pVCpu->idHostCpu == idHostCpu)
                return pVCpu;
        }
    }
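A minimal sketch of the lookup order described by that comment: host CPU id first when it can be trusted, native thread id as the fallback. All types and the fPreemptDisabled parameter are simplified stand-ins, not the real VMMGetCpu() interface.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct VCpu
    {
        uint32_t  idHostCpu;     /* Host CPU the VCPU last ran on (volatile). */
        uintptr_t hNativeThread; /* EMT native thread handle. */
    } VCpu;

    typedef struct Vm
    {
        uint32_t cCpus;
        VCpu     aCpus[8];
    } Vm;

    static VCpu *vmGetCpu(Vm *pVm, uint32_t idHostCpu, uintptr_t hSelf, bool fPreemptDisabled)
    {
        /* The host-CPU-id match is only trustworthy while we cannot be migrated. */
        if (fPreemptDisabled)
            for (uint32_t i = 0; i < pVm->cCpus; i++)
                if (pVm->aCpus[i].idHostCpu == idHostCpu)
                    return &pVm->aCpus[i];

        /* Fall back to the native thread id (always valid for an EMT). */
        for (uint32_t i = 0; i < pVm->cCpus; i++)
            if (pVm->aCpus[i].hNativeThread == hSelf)
                return &pVm->aCpus[i];

        return NULL;
    }

    int main(void)
    {
        Vm vm = { 2, { { 7, 0x100 }, { 3, 0x200 } } };
        VCpu *p = vmGetCpu(&vm, 3 /*idHostCpu*/, 0x100 /*hSelf*/, true /*fPreemptDisabled*/);
        return p == &vm.aCpus[1] ? 0 : 1;
    }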
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
(r47959 → r47989)

- The comment above the VMXEnable() probe is expanded: there is no instruction or control bit that reports whether the CPU is already in VMX root mode, so the only way to check is to try to enter it.
- The per-CPU info reset loop now also clears g_HvmR0.aCpuInfo[i].uCurrentAsid alongside fConfigured and cTlbFlushes.
- Cosmetics: "global vt-x and amd-v pages" becomes "global VT-x and AMD-V pages", and the RT_SUCCESS(rc) && g_HvmR0.fGlobalInit test is split across two lines.
- Enabling VT-x/AMD-V on the way in is now gated only on !pCpu->fConfigured; the extra !g_HvmR0.fGlobalInit condition (and the braces) are dropped.
- The bare AssertRC() calls after pfnEnterSession, pfnSaveHostState and pfnLoadGuestState become AssertMsgRC() calls that include the return code, pVCpu and the host CPU id in the message.
- The per-CPU disable path looks up the CPU info first and calls hmR0DisableCpu() only when !g_HvmR0.fGlobalInit && pCpu->fConfigured, asserting !pCpu->fConfigured afterwards; the "force a TLB flush for the next entry" reset now also clears pVCpu->hm.s.idEnteredCpu in addition to idLastCpu and uCurrentAsid.
- The leave path (the function calling pfnLeaveSession) is reworked for thread-context hooks: if preemption is still enabled (which implies the hooks are registered) it is disabled around the leave; the "wrong CPU" ownership assertion is only performed when hooks are not registered; HMR0LeaveEx() runs only on success; the thread-context hooks are deregistered before preemption is restored, with a @todo noting that deregistering here forces a VMCLEAR on every longjmp/exit-to-ring-3, which is inefficient; and the trailing idEnteredCpu = NIL_RTCPUID reset is removed.
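The leave rework relies on a small but easy-to-get-wrong pattern: disable preemption only if it is currently enabled, remember that fact, and restore it on the way out. A standalone sketch, with hypothetical stand-ins for the RTThreadPreemptIsEnabled/Disable/Restore primitives:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct PreemptState { int saved; } PreemptState;

    static bool preemptIsEnabled(void)           { return true; }   /* Stand-in. */
    static void preemptDisable(PreemptState *pS) { pS->saved = 1; } /* Stand-in. */
    static void preemptRestore(PreemptState *pS) { (void)pS; }      /* Stand-in. */

    static int leaveHmContext(void)
    {
        bool         fDisabledPreempt = false;
        PreemptState State = { 0 };

        /* With thread-context hooks the caller may arrive with preemption enabled;
           disable it for the critical part and remember that we did so. */
        if (preemptIsEnabled())
        {
            preemptDisable(&State);
            fDisabledPreempt = true;
        }

        /* ... leave the HM session, deregister hooks, etc. ... */
        int rc = 0;

        if (fDisabledPreempt)
            preemptRestore(&State);
        return rc;
    }

    int main(void)
    {
        printf("leave rc=%d\n", leaveHmContext());
        return 0;
    }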
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r47844 → r47989)

- Two new assertion macros: HMSVM_ASSERT_PREEMPT_SAFE() asserts that either the thread-context hooks are registered or preemption is disabled, and HMSVM_ASSERT_CPU_SAFE() asserts (with an "Illegal migration!" message) that the VCPU has not changed host CPUs when thread-context hooks are not used.
- SVMR0Leave() drops its preemption-disabled assertion.
- The thread-context RESUMED handler now asserts that preemption is disabled, the hooks are registered and the caller is the EMT, and it brackets the HMR0EnterEx() re-initialization with VMMRZCallRing3Disable()/VMMRZCallRing3Enable() (no longjmps such as log flushes or locks in this fragile context) instead of manually disabling and restoring preemption; it still clears fLeaveDone.
- The bare Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)) checks in the longjmp callback, the exit-to-ring-3 path, the world-switch error report, the run loop and the VM-exit handler prologue macro are replaced with HMSVM_ASSERT_PREEMPT_SAFE(), and the "Illegal migration!" AssertMsg in the run loop becomes HMSVM_ASSERT_CPU_SAFE().
- The VBOX_STRICT hyper-debug-state checks around the host debug-register restore are re-indented only.
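The new "preempt safe" notion boils down to one condition. A tiny sketch, with hypothetical stand-ins for VMMR0ThreadCtxHooksAreRegistered() and RTThreadPreemptIsEnabled():

    #include <assert.h>
    #include <stdbool.h>

    static bool threadCtxHooksAreRegistered(void) { return false; } /* Stand-in. */
    static bool preemptIsEnabled(void)            { return false; } /* Stand-in. */

    /* A code path is "preempt safe" if we either get told about being scheduled
       out/in (hooks registered) or simply cannot be preempted at all. */
    #define ASSERT_PREEMPT_SAFE() \
        assert(threadCtxHooksAreRegistered() || !preemptIsEnabled())

    int main(void)
    {
        /* Hooks are not registered here, but preemption is disabled, so this holds. */
        ASSERT_PREEMPT_SAFE();
        return 0;
    }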
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r47844 → r47989)

The bulk of the changeset: VT-x now tracks the VMCS launch state explicitly so that it stays consistent when thread-context (preemption) hooks are in use.

- New VMCS-state bit defines HMVMX_VMCS_STATE_CLEAR, HMVMX_VMCS_STATE_ACTIVE and HMVMX_VMCS_STATE_LAUNCHED (RT_BIT(0)..RT_BIT(2)); per the new comment they cover only what is needed to keep the VMCS consistent with thread-context hooks and may later be extended (e.g. for nested virtualization). A new per-VCPU uVmcsState field replaces the old fResumeVM flag: activating the VMCS sets ACTIVE, a successful VM-entry ORs in LAUNCHED (so the next entry uses VMRESUME instead of VMLAUNCH), and VMXClearVmcs() puts it back to CLEAR.
- New HMVMX_ASSERT_PREEMPT_SAFE() (hooks registered or preemption disabled) and HMVMX_ASSERT_CPU_SAFE() (no host-CPU migration when hooks are not used) macros replace the bare preemption and "Illegal migration!" assertions in the VM-entry-failure handler, guest-state loading, exit-to-ring-3, the longjmp callback, both run loops and the VM-exit handler prologue macro.
- The VMCS is marked CLEAR right after the initial VMCLEAR during VMCS setup.
- hmR0VmxLoadGuestRip() and the RSP/RFLAGS loaders clear their HM_CHANGED_* bits before logging; the RIP log line now also dumps fContextUseFlags.
- On VM-entry failure, LastError.idEnteredCpu is recorded; LastError.idCurrentCpu is instead updated in hmR0VmxPreRunGuestCommitted(), since the failure path may run long after a preemption.
- hmR0VmxLeave() logs the host CPU id and, if the VMCS is ACTIVE, VMCLEARs it and marks it CLEAR before setting fLeaveDone; a @todo notes that deregistering the hooks on every HM-context leave (rather than only when the EMT is about to be destroyed) partly defeats the purpose of preemption hooks.
- The enter path activates the VCPU's VMCS and sets ACTIVE, with a comment explaining that the recorded state can become stale once the hook is deregistered on the way out of VMMR0EntryFast().
- VMXR0ThreadCtxCallback() is reworked: on PREEMPTING it disables ring-3 calls, saves the guest state and restores the host state via hmR0VmxLeave(), VMCLEARs the VMCS if ACTIVE, leaves HM context via HMR0LeaveEx() and re-enables ring-3 calls; on RESUMED it disables ring-3 calls, re-initializes via HMR0EnterEx(), re-activates the VMCS if it is CLEAR and resets fLeaveDone. Both events replace the previous manual RTThreadPreemptDisable()/Restore() bracketing and carry a @todo to add statistics.
- VMXR0Leave() becomes a no-op; the VMCLEAR is handled by hmR0VmxLeave() and the PREEMPTING event.
- Host-state saving moves into a new static hmR0VmxSaveHostState(); VMXR0SaveHostState() now just calls it.
- A new hmR0VmxLoadGuestStateOptimal() wrapper loads either only the guest RIP or the full guest state depending on fContextUseFlags (counted by StatLoadMinimal/StatLoadFull) and, under VBOX_STRICT, checks that at most the shared CR0/debug bits remain dirty when thread-context hooks are registered; it also hosts the HMVMX_ALWAYS_CHECK_GUEST_STATE call to hmR0VmxCheckGuestState().
- The pre-run preparation loads the guest state before interrupts are disabled when thread-context hooks are used; hmR0VmxPreRunGuestCommitted() re-saves the host state if HM_CHANGED_HOST_CONTEXT is set (we may have been preempted), loads the guest state at this late point only when hooks are not used, and with hooks re-loads just the shared FPU (CR0) and debug state, or the full state for real-on-v86 guests where event injection may have modified RIP and other registers. It also caches the current host CPU id into LastError.idCurrentCpu and, after a successful entry, sets the LAUNCHED bit instead of fResumeVM = true.
- hmR0VmxLoadGuestState() now asserts that no HM_CHANGED_ALL_GUEST bits remain after loading (instead of requiring fContextUseFlags to be completely zero), and its @todo about VMMRZCallRing3Disable() gains an update note referring to the eventual removal of VMXR0LoadGuestState() as an interface.
- The exit-to-ring-3 path drops the stale "@todo This can change with preemption hooks" comment.
- Assorted whitespace-only re-alignments (the HMVMX_FLUSH_TAGGED_TLB_* defines, the ExitInstrInfo bit-fields) and a dropped blank line in the I/O-instruction exit handler.
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
(r47844 → r47989)

- Doxygen touch-ups for the thread-context hook register/release functions (parameter ordering, @thread notes), and VMMR0ThreadCtxHooksRegister() now asserts VMCPU_ASSERT_EMT(pVCpu).
- A new static vmmR0ThreadCtxCallback() acts as the common ring-0 thread-context callback: on RESUMED it disables preemption if necessary (Linux may invoke the hook with preemption enabled; disabling avoids an infinite preempt-hook/resume-hook recursion), updates the VCPU <-> host CPU mapping with ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId()), calls the HM-specific HMR0ThreadCtxCallback() and restores preemption; on PREEMPTING it writes NIL_RTCPUID to pVCpu->idHostCpu (several VCPUs must not appear to share a host CPU, see VMMGetCpu() as used by VMCPU_ASSERT_EMT()) and falls through to invoke the HM callback.
- In the HM run path of VMMR0EntryFast(): the VCPU <-> host CPU mapping is updated before anything else; under VBOX_WITH_VMMR0_DISABLE_PREEMPTION, vmmR0ThreadCtxCallback is registered if the hooks have been created but not yet registered; HM context is entered; if hooks are registered, preemption is restored already at this point (tracked in fPreemptRestored); the longjmp machinery is set up and the guest is executed; HMR0Leave() deregisters the hooks; the mapping is cleared again; and preemption is only restored at the end if it was not restored earlier.
- VMMR0IsLongJumpArmed(): parameter description reworded.
- New VMMR0IsInRing3LongJump() returns pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call, i.e. whether we have done a ring-3 long jump.
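A simplified sketch of the dispatcher pattern vmmR0ThreadCtxCallback() introduces: keep the VCPU <-> host CPU mapping in sync with scheduling, then hand the event to HM. The types, the CPU-id query and the HM callback are stand-ins, and the preemption handling of the real code is omitted.

    #include <stdint.h>
    #include <stdio.h>

    typedef enum { CTXEVENT_PREEMPTING, CTXEVENT_RESUMED } CtxEvent;

    typedef struct VCpu { uint32_t idHostCpu; } VCpu;

    #define NIL_CPUID  UINT32_MAX

    static uint32_t mpCpuId(void) { return 2; }  /* Stand-in for RTMpCpuId(). */

    static void hmThreadCtxCallback(CtxEvent enmEvent, VCpu *pVCpu)
    {   /* Stand-in for the HM-specific handler. */
        printf("HM callback: event=%d idHostCpu=%u\n", (int)enmEvent, (unsigned)pVCpu->idHostCpu);
    }

    static void vmmThreadCtxCallback(CtxEvent enmEvent, void *pvUser)
    {
        VCpu *pVCpu = (VCpu *)pvUser;
        switch (enmEvent)
        {
            case CTXEVENT_RESUMED:
                /* Rescheduled onto a (possibly different) host CPU: refresh the
                   mapping before anything relies on it. */
                pVCpu->idHostCpu = mpCpuId();
                break;

            case CTXEVENT_PREEMPTING:
                /* Being scheduled out: no host CPU is associated with this VCPU. */
                pVCpu->idHostCpu = NIL_CPUID;
                break;

            default:
                break;
        }

        /* In every case, forward the event to the HM-specific handler. */
        hmThreadCtxCallback(enmEvent, pVCpu);
    }

    int main(void)
    {
        VCpu VCpu0 = { NIL_CPUID };
        vmmThreadCtxCallback(CTXEVENT_RESUMED, &VCpu0);
        vmmThreadCtxCallback(CTXEVENT_PREEMPTING, &VCpu0);
        return 0;
    }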
trunk/src/VBox/VMM/include/HMInternal.h
(r47771 → r47989)

- The HM_CHANGED_GUEST_*, HM_CHANGED_VMX_*, HM_CHANGED_SVM_*, HM_CHANGED_ALL_GUEST and HM_CHANGED_HOST_CONTEXT defines are re-aligned (whitespace only).
- In HMCPU, the per-VCPU "bool fResumeVM" flag (commented "Set if we don't have to flush the TLB on VM entry") is removed and the alignment padding grows from 1 to 2 bytes; its role is taken over by the new "uint32_t uVmcsState" field ("State of the VMCS") in the VT-x sub-structure, placed before fRestoreHostFlags.
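The HM_CHANGED_* values being re-aligned here are dirty flags: one bit per piece of guest state that may need re-syncing into the VMCS/VMCB, with HM_CHANGED_ALL_GUEST as the union. A small sketch of the scheme with made-up bit positions, including the "only guest bits must be clean after loading" check that the VT-x code now uses:

    #include <stdint.h>
    #include <stdio.h>

    #define CHANGED_GUEST_RIP     (1u << 0)
    #define CHANGED_GUEST_RSP     (1u << 1)
    #define CHANGED_GUEST_RFLAGS  (1u << 2)
    #define CHANGED_HOST_CONTEXT  (1u << 31)

    #define CHANGED_ALL_GUEST     (CHANGED_GUEST_RIP | CHANGED_GUEST_RSP | CHANGED_GUEST_RFLAGS)

    static void loadGuestState(uint32_t *pfFlags)
    {
        /* Each loader writes its fields and clears the matching dirty bit. */
        if (*pfFlags & CHANGED_GUEST_RIP)    { /* write RIP */    *pfFlags &= ~CHANGED_GUEST_RIP; }
        if (*pfFlags & CHANGED_GUEST_RSP)    { /* write RSP */    *pfFlags &= ~CHANGED_GUEST_RSP; }
        if (*pfFlags & CHANGED_GUEST_RFLAGS) { /* write RFLAGS */ *pfFlags &= ~CHANGED_GUEST_RFLAGS; }
    }

    int main(void)
    {
        uint32_t fFlags = CHANGED_ALL_GUEST | CHANGED_HOST_CONTEXT;

        loadGuestState(&fFlags);

        /* After loading, no guest bits may remain; host bits (e.g. CHANGED_HOST_CONTEXT)
           are allowed to survive and are handled separately. */
        printf("guest bits left: %#x\n", (unsigned)(fFlags & CHANGED_ALL_GUEST));
        return 0;
    }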