- Timestamp: Jun 12, 2009 1:46:08 PM (16 years ago)
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r20491 → r20516)

@@ -945,4 +945,77 @@
     }

+    /* When external interrupts are pending, we should exit the VM when IF is set. */
+    /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
+    rc = SVMR0CheckPendingInterrupt(pVM, pVCpu, pVMCB, pCtx);
+    if (RT_FAILURE(rc))
+    {
+        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
+        goto end;
+    }
+
+    /* TPR caching using CR8 is only available in 64 bits mode or with 32 bits guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is supported. */
+    /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! */
+    if (pVM->hwaccm.s.fHasIoApic)
+    {
+        bool fPending;
+
+        /* TPR caching in CR8 */
+        int rc = PDMApicGetTPR(pVCpu, &u8LastVTPR, &fPending);
+        AssertRC(rc);
+        pVMCB->ctrl.IntCtrl.n.u8VTPR = u8LastVTPR;
+
+        if (fPending)
+        {
+            /* A TPR change could activate a pending interrupt, so catch cr8 writes. */
+            pVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(8);
+        }
+        else
+            /* No interrupts are pending, so we don't need to be explicitely notified.
+             * There are enough world switches for detecting pending interrupts.
+             */
+            pVMCB->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
+
+        fSyncTPR = !fPending;
+    }
+
+    /* All done! Let's start VM execution. */
+    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatInGC, x);
+
+    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
+    pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hwaccm.s.fNestedPaging;
+
+#ifdef LOG_ENABLED
+    pCpu = HWACCMR0GetCurrentCpu();
+    if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
+        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+    {
+        if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
+            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
+        else
+            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+    }
+    if (pCpu->fFlushTLB)
+        Log(("Force TLB flush: first time cpu %d is used -> flush\n", pCpu->idCpu));
+#endif
+
+    /*
+     * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!
+     *       (until the actual world switch)
+     */
+#ifdef VBOX_STRICT
+    idCpuCheck = RTMpCpuId();
+#endif
+#ifdef LOG_ENABLED
+    VMMR0LogFlushDisable(pVCpu);
+#endif
+
+    /* Load the guest state; *must* be here as it sets up the shadow cr0 for lazy fpu syncing! */
+    rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
+    if (rc != VINF_SUCCESS)
+    {
+        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
+        goto end;
+    }
+
 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     /*
@@ -950,6 +1023,5 @@
      *
      * Interrupts are disabled before the call to make sure we don't miss any interrupt
-     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
-     * further down, but SVMR0CheckPendingInterrupt makes that hard.)
+     * that would flag preemption (IPI, timer tick, ++).
      *
      * Note! Interrupts must be disabled done *before* we check for TLB flushes; TLB
@@ -964,80 +1036,5 @@
     }
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
-#endif
-
-    /* When external interrupts are pending, we should exit the VM when IF is set. */
-    /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
-    rc = SVMR0CheckPendingInterrupt(pVM, pVCpu, pVMCB, pCtx);
-    if (RT_FAILURE(rc))
-    {
-        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
-        goto end;
-    }
-
-    /* TPR caching using CR8 is only available in 64 bits mode or with 32 bits guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is supported. */
-    /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! */
-    if (pVM->hwaccm.s.fHasIoApic)
-    {
-        bool fPending;
-
-        /* TPR caching in CR8 */
-        int rc = PDMApicGetTPR(pVCpu, &u8LastVTPR, &fPending);
-        AssertRC(rc);
-        pVMCB->ctrl.IntCtrl.n.u8VTPR = u8LastVTPR;
-
-        if (fPending)
-        {
-            /* A TPR change could activate a pending interrupt, so catch cr8 writes. */
-            pVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(8);
-        }
-        else
-            /* No interrupts are pending, so we don't need to be explicitely notified.
-             * There are enough world switches for detecting pending interrupts.
-             */
-            pVMCB->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
-
-        fSyncTPR = !fPending;
-    }
-
-    /* All done! Let's start VM execution. */
-    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatInGC, x);
-
-    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
-    pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hwaccm.s.fNestedPaging;
-
-#ifdef LOG_ENABLED
-    pCpu = HWACCMR0GetCurrentCpu();
-    if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
-        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
-    {
-        if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
-            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
-        else
-            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
-    }
-    if (pCpu->fFlushTLB)
-        Log(("Force TLB flush: first time cpu %d is used -> flush\n", pCpu->idCpu));
-#endif
-
-    /*
-     * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!
-     *       (until the actual world switch)
-     */
-#ifdef VBOX_STRICT
-    idCpuCheck = RTMpCpuId();
-#endif
-#ifdef LOG_ENABLED
-    VMMR0LogFlushDisable(pVCpu);
-#endif
-
-    /* Load the guest state; *must* be here as it sets up the shadow cr0 for lazy fpu syncing! */
-    rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
-    if (rc != VINF_SUCCESS)
-    {
-        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
-        goto end;
-    }
-
-#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+#else
     /* Disable interrupts to make sure a poke will interrupt execution.
      * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
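The net effect of the HWSVMR0.cpp change: everything that can still fail or jump back to ring 3 (SVMR0CheckPendingInterrupt, PDMApicGetTPR, SVMR0LoadGuestState) now runs before interrupts are disabled for the preemption check, and the former #ifdef/#ifndef pair collapses into a single #ifdef/#else. The standalone C model below illustrates that ordering; check_pending_interrupt, load_guest_state, disable_interrupts, restore_interrupts and preempt_pending are simplified stand-ins for the VMM primitives, not VirtualBox APIs.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real primitives (SVMR0CheckPendingInterrupt,
 * SVMR0LoadGuestState, ASMIntDisableFlags, RTThreadPreemptIsPending). */
static int      check_pending_interrupt(void) { return 0; }   /* 0 = success */
static int      load_guest_state(void)        { return 0; }
static unsigned disable_interrupts(void)      { puts("interrupts off"); return 1; }
static void     restore_interrupts(unsigned f){ (void)f; puts("interrupts restored"); }
static bool     preempt_pending(void)         { return false; }

/* Model of the reordered inner loop: all work that may fail or take a lock
 * happens first; once interrupts are off, the only allowed outcomes are
 * "bail out and retry" or "switch into the guest". */
static int run_guest_once(void)
{
    int rc = check_pending_interrupt();
    if (rc != 0)
        return rc;                   /* back to ring 3 while it is still safe */

    rc = load_guest_state();         /* may take locks -> must precede ints-off */
    if (rc != 0)
        return rc;

    unsigned fFlags = disable_interrupts();
    if (preempt_pending())           /* an IPI or timer tick arrived meanwhile */
    {
        restore_interrupts(fFlags);
        return -1;                   /* VINF_EM_RAW_INTERRUPT in the real code */
    }

    /* ... world switch would happen here, interrupts still disabled ... */
    restore_interrupts(fFlags);
    return 0;
}

int main(void)
{
    printf("rc=%d\n", run_guest_once());
    return 0;
}

The design point the changeset enforces is simply that once interrupts are off, nothing may jump back to ring 3: the code either bails out with VINF_EM_RAW_INTERRUPT or completes the world switch.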
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r20491 → r20516)

@@ -2180,25 +2180,4 @@
     }

-#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /*
-     * Exit to ring-3 preemption/work is pending.
-     *
-     * Interrupts are disabled before the call to make sure we don't miss any interrupt
-     * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
-     * further down, but VMXR0CheckPendingInterrupt makes that hard.)
-     *
-     * Note! Interrupts must be disabled done *before* we check for TLB flushes; TLB
-     *       shootdowns rely on this.
-     */
-    uOldEFlags = ASMIntDisableFlags();
-    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
-    {
-        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
-        rc = VINF_EM_RAW_INTERRUPT;
-        goto end;
-    }
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
-#endif
-
     /* When external interrupts are pending, we should exit the VM when IF is set. */
     /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
@@ -2285,5 +2264,23 @@
     goto end;

-#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    /*
+     * Exit to ring-3 preemption/work is pending.
+     *
+     * Interrupts are disabled before the call to make sure we don't miss any interrupt
+     * that would flag preemption (IPI, timer tick, ++).
+     *
+     * Note! Interrupts must be disabled done *before* we check for TLB flushes; TLB
+     *       shootdowns rely on this.
+     */
+    uOldEFlags = ASMIntDisableFlags();
+    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
+    {
+        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
+        rc = VINF_EM_RAW_INTERRUPT;
+        goto end;
+    }
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+#else
     /* Disable interrupts to make sure a poke will interrupt execution.
      * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
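The HWVMXR0.cpp hunks are the same move on the VT-x side: with the preemption check relocated to sit directly in front of the old #ifndef block, the two mutually exclusive variants can share one #ifdef/#else/#endif. A minimal compilable sketch of that preprocessor pattern, using the real VBOX_WITH_VMMR0_DISABLE_PREEMPTION name but with the branch bodies reduced to prints:

#include <stdio.h>

/* Build with -DVBOX_WITH_VMMR0_DISABLE_PREEMPTION to select the first branch. */
static void before_world_switch(void)
{
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    /* Variant A: disable interrupts, then check RTThreadPreemptIsPending and
     * bail out with VINF_EM_RAW_INTERRUPT if preemption was requested. */
    puts("disable interrupts + explicit preemption check");
#else
    /* Variant B: preemption stays enabled; interrupts are disabled so that a
     * poke (cross-CPU notification) can still interrupt guest execution. */
    puts("disable interrupts so a poke will interrupt execution");
#endif
}

int main(void)
{
    before_world_switch();
    return 0;
}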