Changeset 55306 in vbox
- Timestamp:
- Apr 16, 2015 12:56:05 PM (10 years ago)
- Location:
- trunk
- Files:
- 4 edited
Legend:
- Unmodified
- Added
- Removed
trunk/include/VBox/vmm/hm.h
r55248 r55306 183 183 unsigned uPort, unsigned uAndVal, unsigned cbSize); 184 184 #ifdef VBOX_STRICT 185 # define HM_DISABLE_PREEMPT _IF_NEEDED() \185 # define HM_DISABLE_PREEMPT() \ 186 186 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \ 187 187 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD) || VMMR0ThreadCtxHooksAreRegistered(pVCpu)); \ 188 188 RTThreadPreemptDisable(&PreemptStateInternal); 189 189 #else 190 # define HM_DISABLE_PREEMPT _IF_NEEDED() \190 # define HM_DISABLE_PREEMPT() \ 191 191 RTTHREADPREEMPTSTATE PreemptStateInternal = RTTHREADPREEMPTSTATE_INITIALIZER; \ 192 192 RTThreadPreemptDisable(&PreemptStateInternal); 193 193 #endif /* VBOX_STRICT */ 194 # define HM_RESTORE_PREEMPT _IF_NEEDED()do { RTThreadPreemptRestore(&PreemptStateInternal); } while(0)194 # define HM_RESTORE_PREEMPT() do { RTThreadPreemptRestore(&PreemptStateInternal); } while(0) 195 195 196 196 -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r55290 r55306 2115 2115 static int hmR0SvmLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2116 2116 { 2117 HM_DISABLE_PREEMPT _IF_NEEDED();2117 HM_DISABLE_PREEMPT(); 2118 2118 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 2119 2119 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 2138 2138 int rc = HMR0LeaveCpu(pVCpu); 2139 2139 2140 HM_RESTORE_PREEMPT _IF_NEEDED();2140 HM_RESTORE_PREEMPT(); 2141 2141 return rc; 2142 2142 } … … 2180 2180 VMMRZCallRing3RemoveNotification(pVCpu); 2181 2181 VMMRZCallRing3Disable(pVCpu); 2182 HM_DISABLE_PREEMPT _IF_NEEDED();2182 HM_DISABLE_PREEMPT(); 2183 2183 2184 2184 /* Restore host FPU state if necessary and resync on next R0 reentry .*/ … … 2195 2195 HMR0LeaveCpu(pVCpu); 2196 2196 2197 HM_RESTORE_PREEMPT _IF_NEEDED();2197 HM_RESTORE_PREEMPT(); 2198 2198 return VINF_SUCCESS; 2199 2199 } … … 4664 4664 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */ 4665 4665 VMMRZCallRing3Disable(pVCpu); 4666 HM_DISABLE_PREEMPT _IF_NEEDED();4666 HM_DISABLE_PREEMPT(); 4667 4667 4668 4668 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */ … … 4670 4670 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32); 4671 4671 4672 HM_RESTORE_PREEMPT _IF_NEEDED();4672 HM_RESTORE_PREEMPT(); 4673 4673 VMMRZCallRing3Enable(pVCpu); 4674 4674 … … 4817 4817 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */ 4818 4818 VMMRZCallRing3Disable(pVCpu); 4819 HM_DISABLE_PREEMPT _IF_NEEDED();4819 HM_DISABLE_PREEMPT(); 4820 4820 4821 4821 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck); … … 4836 4836 rcStrict = rcStrict2; 4837 4837 4838 HM_RESTORE_PREEMPT _IF_NEEDED();4838 HM_RESTORE_PREEMPT(); 4839 4839 VMMRZCallRing3Enable(pVCpu); 4840 4840 } … … 5208 5208 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. 
*/ 5209 5209 VMMRZCallRing3Disable(pVCpu); 5210 HM_DISABLE_PREEMPT _IF_NEEDED();5210 HM_DISABLE_PREEMPT(); 5211 5211 5212 5212 int rc; … … 5226 5226 } 5227 5227 5228 HM_RESTORE_PREEMPT _IF_NEEDED();5228 HM_RESTORE_PREEMPT(); 5229 5229 VMMRZCallRing3Enable(pVCpu); 5230 5230 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r55292 r55306 5981 5981 */ 5982 5982 VMMRZCallRing3Disable(pVCpu); 5983 HM_DISABLE_PREEMPT _IF_NEEDED();5983 HM_DISABLE_PREEMPT(); 5984 5984 5985 5985 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0)) … … 5998 5998 } 5999 5999 6000 HM_RESTORE_PREEMPT _IF_NEEDED();6000 HM_RESTORE_PREEMPT(); 6001 6001 VMMRZCallRing3Enable(pVCpu); 6002 6002 return VINF_SUCCESS; … … 6275 6275 /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */ 6276 6276 VMMRZCallRing3Disable(pVCpu); 6277 HM_DISABLE_PREEMPT _IF_NEEDED();6277 HM_DISABLE_PREEMPT(); 6278 6278 6279 6279 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */ … … 6284 6284 } 6285 6285 6286 HM_RESTORE_PREEMPT _IF_NEEDED();6286 HM_RESTORE_PREEMPT(); 6287 6287 VMMRZCallRing3Enable(pVCpu); 6288 6288 } … … 7200 7200 DECLINLINE(int) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7201 7201 { 7202 HM_DISABLE_PREEMPT _IF_NEEDED();7202 HM_DISABLE_PREEMPT(); 7203 7203 HMVMX_ASSERT_CPU_SAFE(); 7204 7204 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); … … 7210 7210 { 7211 7211 int rc2 = hmR0VmxLeave(pVM, pVCpu, pMixedCtx, true /* fSaveGuestState */); 7212 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT _IF_NEEDED(), rc2);7212 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2); 7213 7213 pVCpu->hm.s.fLeaveDone = true; 7214 7214 } … … 7228 7228 int rc = HMR0LeaveCpu(pVCpu); 7229 7229 7230 HM_RESTORE_PREEMPT_IF_NEEDED(); 7231 7230 HM_RESTORE_PREEMPT(); 7232 7231 return rc; 7233 7232 } … … 7359 7358 VMMRZCallRing3RemoveNotification(pVCpu); 7360 7359 VMMRZCallRing3Disable(pVCpu); 7361 HM_DISABLE_PREEMPT _IF_NEEDED();7360 HM_DISABLE_PREEMPT(); 7362 7361 7363 7362 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 7393 7392 7394 7393 HMR0LeaveCpu(pVCpu); 7395 HM_RESTORE_PREEMPT _IF_NEEDED();7394 HM_RESTORE_PREEMPT(); 7396 7395 return VINF_SUCCESS; 7397 7396 } … … 11305 11304 /* We're playing with the host CPU state here, make sure we 
don't preempt or longjmp. */ 11306 11305 VMMRZCallRing3Disable(pVCpu); 11307 HM_DISABLE_PREEMPT _IF_NEEDED();11306 HM_DISABLE_PREEMPT(); 11308 11307 11309 11308 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */); … … 11325 11324 rcStrict = rcStrict2; 11326 11325 11327 HM_RESTORE_PREEMPT _IF_NEEDED();11326 HM_RESTORE_PREEMPT(); 11328 11327 VMMRZCallRing3Enable(pVCpu); 11329 11328 } … … 11533 11532 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */ 11534 11533 VMMRZCallRing3Disable(pVCpu); 11535 HM_DISABLE_PREEMPT _IF_NEEDED();11534 HM_DISABLE_PREEMPT(); 11536 11535 11537 11536 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */ … … 11539 11538 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32); 11540 11539 11541 HM_RESTORE_PREEMPT _IF_NEEDED();11540 HM_RESTORE_PREEMPT(); 11542 11541 VMMRZCallRing3Enable(pVCpu); 11543 11542 … … 11819 11818 */ 11820 11819 VMMRZCallRing3Disable(pVCpu); 11821 HM_DISABLE_PREEMPT _IF_NEEDED();11820 HM_DISABLE_PREEMPT(); 11822 11821 11823 11822 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK; … … 11826 11825 ASMSetDR6(pMixedCtx->dr[6]); 11827 11826 11828 HM_RESTORE_PREEMPT _IF_NEEDED();11827 HM_RESTORE_PREEMPT(); 11829 11828 VMMRZCallRing3Enable(pVCpu); 11830 11829 … … 11886 11885 /* We're playing with the host CPU state here, have to disable preemption or longjmp. */ 11887 11886 VMMRZCallRing3Disable(pVCpu); 11888 HM_DISABLE_PREEMPT _IF_NEEDED();11887 HM_DISABLE_PREEMPT(); 11889 11888 11890 11889 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */ … … 11903 11902 } 11904 11903 11905 HM_RESTORE_PREEMPT _IF_NEEDED();11904 HM_RESTORE_PREEMPT(); 11906 11905 VMMRZCallRing3Enable(pVCpu); 11907 11906 -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r54720 r55306 565 565 case RTTHREADCTXEVENT_RESUMED: 566 566 { 567 /** @todo Linux may call us with preemption enabled (really!) but technically we 567 /* 568 * Linux may call us with preemption enabled (really!) but technically we 568 569 * cannot get preempted here, otherwise we end up in an infinite recursion 569 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook... ad570 * infinitum). Let's just disable preemption for now...570 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook... 571 * ad infinitum). Let's just disable preemption for now... 571 572 */ 572 HM_DISABLE_PREEMPT _IF_NEEDED();573 HM_DISABLE_PREEMPT(); 573 574 574 575 /* We need to update the VCPU <-> host CPU mapping. */ … … 588 589 589 590 /* Restore preemption. */ 590 HM_RESTORE_PREEMPT _IF_NEEDED();591 HM_RESTORE_PREEMPT(); 591 592 break; 592 593 }
Note: See TracChangeset for help on using the changeset viewer.