Changeset 54339 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Feb 20, 2015 6:10:12 PM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp (r53836)
+++ trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp (r54339)
@@ -998,11 +998,11 @@
  * @param   pVCpu        Pointer to the cross context CPU structure of the
  *                       calling EMT.
- * @param   idHostCpu    The ID of the current host CPU.
+ * @param   iHostCpuSet  The CPU set index of the current host CPU.
  */
-VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, RTCPUID idHostCpu)
+VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, uint32_t iHostCpuSet)
 {
-    int idxCpu = RTMpCpuIdToSetIndex(idHostCpu);
-    pVCpu->cpum.s.pvApicBase = g_aLApics[idxCpu].pv;
-    pVCpu->cpum.s.fX2Apic    = g_aLApics[idxCpu].fX2Apic;
+    Assert(iHostCpuSet <= RT_ELEMENTS(g_aLApics));
+    pVCpu->cpum.s.pvApicBase = g_aLApics[iHostCpuSet].pv;
+    pVCpu->cpum.s.fX2Apic    = g_aLApics[iHostCpuSet].fX2Apic;
 //    Log6(("CPUMR0SetLApic: pvApicBase=%p fX2Apic=%d\n", g_aLApics[idxCpu].pv, g_aLApics[idxCpu].fX2Apic));
 }
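The practical effect of this hunk is that callers now pass a CPU set index rather than an RTCPUID, and the set-index lookup moves out of CPUMR0SetLApic. A minimal sketch of the adjusted call pattern, mirroring how VMMR0.cpp invokes the function after this change (pVCpu is assumed to be in scope at the call site):

    RTCPUID  idHostCpu   = RTMpCpuId();                    /* id of the host CPU we are running on */
    uint32_t iHostCpuSet = RTMpCpuIdToSetIndex(idHostCpu); /* set index, used to address g_aLApics[] */
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    CPUMR0SetLApic(pVCpu, iHostCpuSet);                    /* previously: CPUMR0SetLApic(pVCpu, idHostCpu) */
#endif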
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r54256)
+++ trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r54339)
@@ -566,7 +566,14 @@
 
             /* We need to update the VCPU <-> host CPU mapping. */
-            RTCPUID idHostCpu = RTMpCpuId();
-            pVCpu->iHostCpuSet = RTMpCpuIdToSetIndex(idHostCpu);
+            RTCPUID  idHostCpu   = RTMpCpuId();
+            uint32_t iHostCpuSet = RTMpCpuIdToSetIndex(idHostCpu);
+            pVCpu->iHostCpuSet = iHostCpuSet;
             ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
+
+            /* In the very unlikely event that the GIP delta for the CPU we're
+               rescheduled needs calculating, try force a return to ring-3.
+               We unfortunately cannot do the measurements right here. */
+            if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
+                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
 
             /* Invoke the HM-specific thread-context callback. */
@@ -862,59 +869,109 @@
 #endif
 
-            /* Disable preemption and update the periodic preemption timer. */
-            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
-            RTThreadPreemptDisable(&PreemptState);
-            RTCPUID idHostCpu = RTMpCpuId();
-#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-            CPUMR0SetLApic(pVCpu, idHostCpu);
-#endif
-            pVCpu->iHostCpuSet = RTMpCpuIdToSetIndex(idHostCpu);
-            ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
-            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
-                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
-
-            /* We might need to disable VT-x if the active switcher turns off paging. */
-            bool fVTxDisabled;
-            int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
-            if (RT_SUCCESS(rc))
-            {
-                RTCCUINTREG uFlags = ASMIntDisableFlags();
-
-                for (;;)
-                {
-                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
-                    TMNotifyStartOfExecution(pVCpu);
-
-                    rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
-                    pVCpu->vmm.s.iLastGZRc = rc;
-
-                    TMNotifyEndOfExecution(pVCpu);
-                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
-
-                    if (rc != VINF_VMM_CALL_TRACER)
-                        break;
-                    SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
-                }
-
-                /* Re-enable VT-x if previously turned off. */
-                HMR0LeaveSwitcher(pVM, fVTxDisabled);
-
-                if (    rc == VINF_EM_RAW_INTERRUPT
-                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
-                    TRPMR0DispatchHostInterrupt(pVM);
-
-                ASMSetFlags(uFlags);
-
-                VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
-#ifdef VBOX_WITH_STATISTICS
-                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
-                vmmR0RecordRC(pVM, pVCpu, rc);
-#endif
-            }
-            else
-                pVCpu->vmm.s.iLastGZRc = rc;
-            pVCpu->iHostCpuSet = UINT32_MAX;
-            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
-            RTThreadPreemptRestore(&PreemptState);
+            /*
+             * Disable preemption.
+             */
+            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+            RTThreadPreemptDisable(&PreemptState);
+
+            /*
+             * Get the host CPU identifiers, make sure they are valid and that
+             * we've got a TSC delta for the CPU.
+             */
+            RTCPUID  idHostCpu   = RTMpCpuId();
+            uint32_t iHostCpuSet = RTMpCpuIdToSetIndex(idHostCpu);
+            if (RT_LIKELY(   iHostCpuSet < RTCPUSET_MAX_CPUS
+                          && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
+            {
+                /*
+                 * Commit the CPU identifiers and update the periodict preemption timer if it's active.
+                 */
+#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+                CPUMR0SetLApic(pVCpu, iHostCpuSet);
+#endif
+                pVCpu->iHostCpuSet = iHostCpuSet;
+                ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
+
+                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
+                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
+
+                /*
+                 * We might need to disable VT-x if the active switcher turns off paging.
+                 */
+                bool fVTxDisabled;
+                int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
+                if (RT_SUCCESS(rc))
+                {
+                    /*
+                     * Disable interrupts and run raw-mode code.  The loop is for efficiently
+                     * dispatching tracepoints that fired in raw-mode context.
+                     */
+                    RTCCUINTREG uFlags = ASMIntDisableFlags();
+
+                    for (;;)
+                    {
+                        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+                        TMNotifyStartOfExecution(pVCpu);
+
+                        rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
+                        pVCpu->vmm.s.iLastGZRc = rc;
+
+                        TMNotifyEndOfExecution(pVCpu);
+                        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+
+                        if (rc != VINF_VMM_CALL_TRACER)
+                            break;
+                        SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
+                    }
+
+                    /*
+                     * Re-enable VT-x before we dispatch any pending host interrupts and
+                     * re-enables interrupts.
+                     */
+                    HMR0LeaveSwitcher(pVM, fVTxDisabled);
+
+                    if (    rc == VINF_EM_RAW_INTERRUPT
+                        ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
+                        TRPMR0DispatchHostInterrupt(pVM);
+
+                    ASMSetFlags(uFlags);
+
+                    /* Fire dtrace probe and collect statistics. */
+                    VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
+#ifdef VBOX_WITH_STATISTICS
+                    STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
+                    vmmR0RecordRC(pVM, pVCpu, rc);
+#endif
+                }
+                else
+                    pVCpu->vmm.s.iLastGZRc = rc;
+
+                /*
+                 * Invalidate the host CPU identifiers as we restore preemption.
+                 */
+                pVCpu->iHostCpuSet = UINT32_MAX;
+                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
+
+                RTThreadPreemptRestore(&PreemptState);
+            }
+            /*
+             * Invalid CPU set index or TSC delta in need of measuring.
+             */
+            else
+            {
+                RTThreadPreemptRestore(&PreemptState);
+                if (iHostCpuSet < RTCPUSET_MAX_CPUS)
+                {
+                    int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
+                                                            2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
+                                                            0 /*default cTries*/);
+                    if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
+                        pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
+                    else
+                        pVCpu->vmm.s.iLastGZRc = rc;
+                }
+                else
+                    pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
+            }
             break;
         }
@@ -925,97 +982,148 @@
         case VMMR0_DO_HM_RUN:
         {
-            Assert(!VMMR0ThreadCtxHooksAreRegistered(pVCpu));
-            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
-            RTThreadPreemptDisable(&PreemptState);
-
-            /* Update the VCPU <-> host CPU mapping before doing anything else. */
-            RTCPUID idHostCpu = RTMpCpuId();
-            pVCpu->iHostCpuSet = RTMpCpuIdToSetIndex(idHostCpu);
-            ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
-
-            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
-                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
-#ifdef LOG_ENABLED
-            if (pVCpu->idCpu > 0)
-            {
-                /* Lazy registration of ring 0 loggers. */
-                PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
-                if (   pR0Logger
-                    && !pR0Logger->fRegistered)
-                {
-                    RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
-                    pR0Logger->fRegistered = true;
-                }
-            }
-#endif
-
-            int rc;
-            bool fPreemptRestored = false;
-            if (!HMR0SuspendPending())
-            {
-                /* Register thread-context hooks if required. */
-                if (   VMMR0ThreadCtxHooksAreCreated(pVCpu)
-                    && !VMMR0ThreadCtxHooksAreRegistered(pVCpu))
-                {
-                    rc = VMMR0ThreadCtxHooksRegister(pVCpu, vmmR0ThreadCtxCallback);
-                    AssertRC(rc);
-                }
-
-                /* Enter HM context. */
-                rc = HMR0Enter(pVM, pVCpu);
-                if (RT_SUCCESS(rc))
-                {
-                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
-
-                    /* When preemption hooks are in place, enable preemption now that we're in HM context. */
-                    if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
-                    {
-                        fPreemptRestored = true;
-                        RTThreadPreemptRestore(&PreemptState);
-                    }
-
-                    /* Setup the longjmp machinery and execute guest code. */
-                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
-
-                    /* Manual assert as normal assertions are going to crash in this case. */
-                    if (RT_UNLIKELY(   VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
-                                    && RT_SUCCESS_NP(rc)  && rc != VINF_VMM_CALL_HOST ))
-                    {
-                        pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
-                        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
-                                    "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
-                        rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
-                    }
-                    else if (RT_UNLIKELY(VMMR0ThreadCtxHooksAreRegistered(pVCpu)))
-                    {
-                        pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
-                        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
-                                    "Thread-context hooks still registered! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
-                        rc = VERR_INVALID_STATE;
-                    }
-
-                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
-                }
-                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
-            }
-            else
-            {
-                /* System is about to go into suspend mode; go back to ring 3. */
-                rc = VINF_EM_RAW_INTERRUPT;
-            }
-            pVCpu->vmm.s.iLastGZRc = rc;
-
-            /* Clear the VCPU <-> host CPU mapping as we've left HM context. */
-            pVCpu->iHostCpuSet = UINT32_MAX;
-            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
-
-            if (!fPreemptRestored)
-                RTThreadPreemptRestore(&PreemptState);
-
-            VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
-#ifdef VBOX_WITH_STATISTICS
-            vmmR0RecordRC(pVM, pVCpu, rc);
-#endif
-            /* No special action required for external interrupts, just return. */
+            /*
+             * Disable preemption.
+             */
+            Assert(!VMMR0ThreadCtxHooksAreRegistered(pVCpu));
+            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
+            RTThreadPreemptDisable(&PreemptState);
+
+            /*
+             * Get the host CPU identifiers, make sure they are valid and that
+             * we've got a TSC delta for the CPU.
+             */
+            RTCPUID  idHostCpu   = RTMpCpuId();
+            uint32_t iHostCpuSet = RTMpCpuIdToSetIndex(idHostCpu);
+            if (RT_LIKELY(   iHostCpuSet < RTCPUSET_MAX_CPUS
+                          && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
+            {
+                pVCpu->iHostCpuSet = iHostCpuSet;
+                ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
+
+                /*
+                 * Update the periodict preemption timer if it's active.
+                 */
+                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
+                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
+
+#ifdef LOG_ENABLED
+                /*
+                 * Ugly: Lazy registration of ring 0 loggers.
+                 */
+                if (pVCpu->idCpu > 0)
+                {
+                    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
+                    if (   pR0Logger
+                        && RT_UNLIKELY(!pR0Logger->fRegistered))
+                    {
+                        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
+                        pR0Logger->fRegistered = true;
+                    }
+                }
+#endif
+
+                int  rc;
+                bool fPreemptRestored = false;
+                if (!HMR0SuspendPending())
+                {
+                    /*
+                     * Register thread-context hooks if required.
+                     */
+                    if (   VMMR0ThreadCtxHooksAreCreated(pVCpu)
+                        && !VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+                    {
+                        rc = VMMR0ThreadCtxHooksRegister(pVCpu, vmmR0ThreadCtxCallback);
+                        AssertRC(rc);
+                    }
+
+                    /*
+                     * Enter HM context.
+                     */
+                    rc = HMR0Enter(pVM, pVCpu);
+                    if (RT_SUCCESS(rc))
+                    {
+                        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+
+                        /*
+                         * When preemption hooks are in place, enable preemption now that
+                         * we're in HM context.
+                         */
+                        if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+                        {
+                            fPreemptRestored = true;
+                            RTThreadPreemptRestore(&PreemptState);
+                        }
+
+                        /*
+                         * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
+                         */
+                        rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
+
+                        /*
+                         * Assert sanity on the way out.  Using manual assertions code here as normal
+                         * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
+                         */
+                        if (RT_UNLIKELY(   VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
+                                        && RT_SUCCESS_NP(rc)  && rc != VINF_VMM_CALL_HOST ))
+                        {
+                            pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
+                            RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
+                                        "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
+                            rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
+                        }
+                        else if (RT_UNLIKELY(VMMR0ThreadCtxHooksAreRegistered(pVCpu)))
+                        {
+                            pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
+                            RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
+                                        "Thread-context hooks still registered! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
+                            rc = VERR_INVALID_STATE;
+                        }
+
+                        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
+                    }
+                    STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
+                }
+                /*
+                 * The system is about to go into suspend mode; go back to ring 3.
+                 */
+                else
+                    rc = VINF_EM_RAW_INTERRUPT;
+
+                pVCpu->vmm.s.iLastGZRc = rc;
+
+                /*
+                 * Invalidate the host CPU identifiers as we restore preemption.
+                 */
+                pVCpu->iHostCpuSet = UINT32_MAX;
+                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
+
+                if (!fPreemptRestored)
+                    RTThreadPreemptRestore(&PreemptState);
+
+                /* Fire dtrace probe and collect statistics. */
+                VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
+#ifdef VBOX_WITH_STATISTICS
+                vmmR0RecordRC(pVM, pVCpu, rc);
+#endif
+            }
+            /*
+             * Invalid CPU set index or TSC delta in need of measuring.
+             */
+            else
+            {
+                RTThreadPreemptRestore(&PreemptState);
+                if (iHostCpuSet < RTCPUSET_MAX_CPUS)
+                {
+                    int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
+                                                            2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
+                                                            0 /*default cTries*/);
+                    if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
+                        pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
+                    else
+                        pVCpu->vmm.s.iLastGZRc = rc;
+                }
+                else
+                    pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
+            }
             break;
         }
@@ -1197,30 +1305,79 @@
         case VMMR0_DO_CALL_HYPERVISOR:
         {
-            int rc;
-            bool fVTxDisabled;
-
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-            if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
-                return VERR_PGM_NO_CR3_SHADOW_ROOT;
-#endif
-
-            RTCCUINTREG fFlags = ASMIntDisableFlags();
-
-#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-            RTCPUID idHostCpu = RTMpCpuId();
-            CPUMR0SetLApic(&pVM->aCpus[0], idHostCpu);
-#endif
-
-            /* We might need to disable VT-x if the active switcher turns off paging. */
-            rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
-            if (RT_FAILURE(rc))
-                return rc;
-
-            rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
-
-            /* Re-enable VT-x if previously turned off. */
-            HMR0LeaveSwitcher(pVM, fVTxDisabled);
-
-            /** @todo dispatch interrupts? */
+            /*
+             * Validate input / context.
+             */
+            if (RT_UNLIKELY(idCpu != 0))
+                return VERR_INVALID_CPU_ID;
+            if (RT_UNLIKELY(pVM->cCpus != 1))
+                return VERR_INVALID_PARAMETER;
+            PVMCPU pVCpu = &pVM->aCpus[idCpu];
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+            if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
+                return VERR_PGM_NO_CR3_SHADOW_ROOT;
+#endif
+
+            /*
+             * Disable interrupts.
+             */
+            RTCCUINTREG fFlags = ASMIntDisableFlags();
+
+            /*
+             * Get the host CPU identifiers, make sure they are valid and that
+             * we've got a TSC delta for the CPU.
+             */
+            RTCPUID  idHostCpu   = RTMpCpuId();
+            uint32_t iHostCpuSet = RTMpCpuIdToSetIndex(idHostCpu);
+            if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
+            {
+                ASMSetFlags(fFlags);
+                return VERR_INVALID_CPU_INDEX;
+            }
+            if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
+            {
+                ASMSetFlags(fFlags);
+                int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
+                                                        2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
+                                                        0 /*default cTries*/);
+                if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
+                    return rc;
+            }
+
+            /*
+             * Commit the CPU identifiers.
+             */
+#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+            CPUMR0SetLApic(pVCpu, iHostCpuSet);
+#endif
+            pVCpu->iHostCpuSet = iHostCpuSet;
+            ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
+
+            /*
+             * We might need to disable VT-x if the active switcher turns off paging.
+             */
+            bool fVTxDisabled;
+            int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
+            if (RT_SUCCESS(rc))
+            {
+                /*
+                 * Go through the wormhole...
+                 */
+                rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
+
+                /*
+                 * Re-enable VT-x before we dispatch any pending host interrupts.
+                 */
+                HMR0LeaveSwitcher(pVM, fVTxDisabled);
+
+                if (   rc == VINF_EM_RAW_INTERRUPT
+                    || rc == VINF_EM_RAW_INTERRUPT_HYPER)
+                    TRPMR0DispatchHostInterrupt(pVM);
+            }
+
+            /*
+             * Invalidate the host CPU identifiers as we restore interrupts.
+             */
+            pVCpu->iHostCpuSet = UINT32_MAX;
+            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
             ASMSetFlags(fFlags);
             return rc;
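All three dispatch paths touched here (VMMR0_DO_RAW_RUN, VMMR0_DO_HM_RUN and VMMR0_DO_CALL_HYPERVISOR) now follow the same pattern: validate the host CPU set index, check whether a TSC delta has already been measured for that CPU, and if not, measure it and go back to ring-3 instead of running guest code. The following is a condensed sketch of that shared pattern as a hypothetical helper; the name vmmR0CommonTscDeltaCheck is illustrative only and is not part of this changeset, which keeps the logic inline in each case:

    /* Hypothetical helper, not in r54339.  Returns VINF_SUCCESS when it is safe to
       continue on this CPU, otherwise the status the caller stores in iLastGZRc
       before bailing out to ring-3.  Must be called with preemption/interrupts
       enabled again, as the measurement cannot run while they are disabled. */
    static int vmmR0CommonTscDeltaCheck(PVM pVM, uint32_t iHostCpuSet)
    {
        if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
            return VERR_INVALID_CPU_INDEX;
        if (RT_LIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
            return VINF_SUCCESS;

        /* No TSC delta yet for this CPU: measure it now, then ask EM to retry from ring-3. */
        int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
                                                2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
                                                0 /*default cTries*/);
        if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
            return VINF_EM_RAW_TO_R3;
        return rc;
    }

Note that VMMR0_DO_CALL_HYPERVISOR deviates slightly from the other two paths: it performs the measurement inline before committing the CPU identifiers and only fails the call when the measurement itself fails (and the CPU is not simply offline), rather than forcing a ring-3 retry.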