Timestamp:
    Aug 28, 2013 5:27:43 PM
svn:sync-xref-src-repo-rev:
    88512
Location:
    trunk/src/VBox
Files:
    7 edited
trunk/src/VBox/HostDrivers/Support/Makefile.kmk (r46045 → r48132)

 ifdef VBOX_WITHOUT_NATIVE_R0_LOADER
  VBoxDrv_DEFS.win += VBOX_WITHOUT_NATIVE_R0_LOADER
 endif
-ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
- VBoxDrv_DEFS.win += VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-endif
trunk/src/VBox/HostDrivers/Support/win/SUPDrv-win.cpp (r47541 → r48132)

     && pSession->fUnrestricted == true)
 {
-#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     int rc = supdrvIOCtlFast(ulCmd, (unsigned)(uintptr_t)pIrp->UserBuffer /* VMCPU id */, pDevExt, pSession);
-#else
+
+#if 0 /* When preemption was not used i.e. !VBOX_WITH_VMMR0_DISABLE_PREEMPTION. That's no longer required. */
     /* Raise the IRQL to DISPATCH_LEVEL to prevent Windows from rescheduling us to another CPU/core. */
     Assert(KeGetCurrentIrql() <= DISPATCH_LEVEL);
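The Windows-only fallback that this hunk parks under #if 0 pinned the thread by raising the IRQL so the scheduler could not migrate it mid-ioctl. A minimal sketch of that retired pattern, using the standard WDK calls (KIRQL, KeRaiseIrql, KeLowerIrql) plus the identifiers visible in the hunk; the exact surrounding code is elided in the diff:

    KIRQL OldIrql;
    Assert(KeGetCurrentIrql() <= DISPATCH_LEVEL);
    KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);   /* at DISPATCH_LEVEL the thread cannot be rescheduled */
    int rc = supdrvIOCtlFast(ulCmd, (unsigned)(uintptr_t)pIrp->UserBuffer /* VMCPU id */, pDevExt, pSession);
    KeLowerIrql(OldIrql);                    /* normal preemption resumes here */

Running at DISPATCH_LEVEL also forbids page faults and most calls back into the kernel, which is one reason the changeset replaces this trick with the portable preemption-disable bracket used on all hosts (see the VMMR0.cpp hunk below).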
trunk/src/VBox/VMM/Makefile.kmk (r47740 → r48132)

     $(VMM_COMMON_DEFS) RTASSERT_HAVE_SHOULD_PANIC
 ## @todo eliminate IN_GVMM_R0 IN_GMM_R0
-ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
- VMMR0_DEFS += VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-endif
 ifdef VBOX_WITH_PCI_PASSTHROUGH
  VMMR0_DEFS += IN_PCIRAW_R0
trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r48044 → r48132)

 {
     pVM->hm.s.cMaxResumeLoops = 1024;
-#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     if (RTThreadPreemptIsPendingTrusty())
         pVM->hm.s.cMaxResumeLoops = 8192;
-#endif
 }
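cMaxResumeLoops caps how many times the inner ring-0 loop re-enters the guest before bailing out to ring-3; when the host can reliably report pending preemption (RTThreadPreemptIsPendingTrusty()), the cap can be raised from 1024 to 8192 because the loop will still yield promptly. An illustrative sketch of that loop shape; the loop body and the hmR0RunGuestOnce name are hypothetical, not from this changeset:

    uint32_t cLoops = 0;
    for (;;)
    {
        int rc = hmR0RunGuestOnce(pVM, pVCpu);       /* hypothetical: one guest entry/exit */
        if (rc != VINF_SUCCESS)
            return rc;                               /* the exit needs ring-3 handling */
        if (RTThreadPreemptIsPending(NIL_RTTHREAD))  /* the scheduler wants the CPU back */
            return VINF_EM_RAW_INTERRUPT;
        if (++cLoops >= pVM->hm.s.cMaxResumeLoops)   /* hard cap so we always return eventually */
            return VINF_EM_RAW_INTERRUPT;
    }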
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r48130 → r48132)

 hmR0SvmEvaluatePendingEvent(pVCpu, pCtx);

-#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
 /*
  * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
…
 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
-#endif

 return VINF_SUCCESS;
…
 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 Assert(VMMR0IsLogFlushDisabled(pVCpu));
-
-#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
-    /** @todo get rid of this. */
-    pSvmTransient->uEflags = ASMIntDisableFlags();
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
-#endif

 hmR0SvmInjectPendingEvent(pVCpu, pCtx);
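The comment kept in this hunk is the heart of the change: interrupts are masked just before the world switch so an IPI or timer interrupt that flags preemption cannot arrive unnoticed between the check and VMRUN. A sketch of that sequence, under the stated assumption that a pending preemption request simply aborts the entry; the real version lives in the lines elided between the "…" markers:

    pSvmTransient->uEflags = ASMIntDisableFlags();   /* save host RFLAGS, then cli */
    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    {
        ASMSetFlags(pSvmTransient->uEflags);         /* back out and let the host schedule */
        return VINF_EM_RAW_INTERRUPT;                /* assumption: the entry is retried later */
    }
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    return VINF_SUCCESS;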
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r48130 → r48132)

 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);

-#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
 /*
  * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
…
 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
-#endif

 /*
…
 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 Assert(VMMR0IsLogFlushDisabled(pVCpu));
-#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
-    /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
-    /** @todo get rid of this. */
-    pVmxTransient->uEflags = ASMIntDisableFlags();
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
-#endif
 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
 /* 32-bit Windows hosts (4 cores) has trouble with this; causes higher interrupt latency. */
-#if HC_ARCH_BITS == 64 && defined(VBOX_WITH_VMMR0_DISABLE_PREEMPTION)
+#if HC_ARCH_BITS == 64
 Assert(ASMIntAreEnabled());
 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUsePreemptTimer)
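The last hunk drops the VBOX_WITH_VMMR0_DISABLE_PREEMPTION half of the preprocessor test on the external-interrupt VM-exit path. Because it is preemption, not interrupts, that is disabled around guest execution, the host has already serviced the interrupt by the time the exit handler runs. A hedged reading of what the 64-bit branch amounts to; the return values below are an assumption, since the code after the visible lines is elided from the diff:

    Assert(ASMIntAreEnabled());                          /* host serviced the ext. interrupt already */
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUsePreemptTimer)
        return VINF_SUCCESS;                             /* resume the guest; the VMX preemption
                                                            timer still bounds time in the guest */
    return VINF_EM_RAW_INTERRUPT;                        /* otherwise take the slow path */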
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r48130 → r48132)

 /*
  * Run guest code using the available hardware acceleration technology.
- *
- * Disable interrupts before we do anything interesting. On Windows we avoid
- * this by having the support driver raise the IRQL before calling us, this way
- * we hope to get away with page faults and later calling into the kernel.
  */
 case VMMR0_DO_HM_RUN:
 {
-#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     Assert(!VMMR0ThreadCtxHooksAreRegistered(pVCpu));
     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     RTThreadPreemptDisable(&PreemptState);
-#elif !defined(RT_OS_WINDOWS)
-    RTCCUINTREG uFlags = ASMIntDisableFlags();
-#endif
+
     /* Update the VCPU <-> host CPU mapping before doing anything else. */
     ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId());
…
 {
     /* Register thread-context hooks if required. */
-#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     if (   VMMR0ThreadCtxHooksAreCreated(pVCpu)
         && !VMMR0ThreadCtxHooksAreRegistered(pVCpu))
…
         AssertRC(rc);
     }
-#endif

     /* Enter HM context. */
…
     ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);

-#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     if (!fPreemptRestored)
         RTThreadPreemptRestore(&PreemptState);
-#elif !defined(RT_OS_WINDOWS)
-    ASMSetFlags(uFlags);
-#endif

 #ifdef VBOX_WITH_STATISTICS
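Taken together, the VMMR0.cpp hunks replace two host-specific tricks (cli on non-Windows hosts, IRQL raising on Windows) with a single portable bracket that is now compiled in unconditionally. A minimal sketch of the IPRT idiom; only the names visible in the diff are real, the comment placement is illustrative:

    #include <iprt/thread.h>

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);   /* pin the thread to this CPU; interrupts stay enabled */
    /* ... run-guest critical section: pVCpu->idHostCpu remains valid here ... */
    RTThreadPreemptRestore(&PreemptState);   /* hand the CPU back to the scheduler */

The fPreemptRestored test in the final hunk presumably exists because a registered thread-context hook can restore preemption earlier on the way out; restoring twice would unbalance the disable/restore pairing.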