Changeset 55830 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: May 12, 2015 2:05:25 PM (10 years ago)
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (revision 55737)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (revision 55830)
@@ -196,5 +196,5 @@
 {
     /** The host's rflags/eflags. */
-    RTCCUINTREG     uEflags;
+    RTCCUINTREG     fEFlags;
 #if HC_ARCH_BITS == 32
     uint32_t        u32Alignment0;
@@ -340,5 +340,5 @@
 
     /* Paranoid: Disable interrupt as, in theory, interrupt handlers might mess with EFER. */
-    RTCCUINTREG uEflags = ASMIntDisableFlags();
+    RTCCUINTREG fEFlags = ASMIntDisableFlags();
 
     /*
@@ -357,5 +357,5 @@
         if (!pCpu->fIgnoreAMDVInUseError)
         {
-            ASMSetFlags(uEflags);
+            ASMSetFlags(fEFlags);
             return VERR_SVM_IN_USE;
         }
@@ -369,5 +369,5 @@
 
     /* Restore interrupts. */
-    ASMSetFlags(uEflags);
+    ASMSetFlags(fEFlags);
 
     /*
@@ -405,5 +405,5 @@
 
     /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */
-    RTCCUINTREG uEflags = ASMIntDisableFlags();
+    RTCCUINTREG fEFlags = ASMIntDisableFlags();
 
     /* Turn off AMD-V in the EFER MSR. */
@@ -415,5 +415,5 @@
 
     /* Restore interrupts. */
-    ASMSetFlags(uEflags);
+    ASMSetFlags(fEFlags);
 
     return VINF_SUCCESS;
@@ -3025,9 +3025,9 @@
      * executing guest code.
      */
-    pSvmTransient->uEflags = ASMIntDisableFlags();
+    pSvmTransient->fEFlags = ASMIntDisableFlags();
     if (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
         || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
     {
-        ASMSetFlags(pSvmTransient->uEflags);
+        ASMSetFlags(pSvmTransient->fEFlags);
         VMMRZCallRing3Enable(pVCpu);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
@@ -3036,5 +3036,5 @@
     if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     {
-        ASMSetFlags(pSvmTransient->uEflags);
+        ASMSetFlags(pSvmTransient->fEFlags);
         VMMRZCallRing3Enable(pVCpu);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
@@ -3239,4 +3239,4 @@
 
     Assert(!(ASMGetFlags() & X86_EFL_IF));
-    ASMSetFlags(pSvmTransient->uEflags);        /* Enable interrupts. */
+    ASMSetFlags(pSvmTransient->fEFlags);        /* Enable interrupts. */
     VMMRZCallRing3Enable(pVCpu);                /* It is now safe to do longjmps to ring-3!!! */
@@ -4737,4 +4737,4 @@
 
     pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
-    Log4(("hmR0SvmExitXsetbv: New XCR0=%#RX64 fLoadSaveGuestXcr0=%d (cr4=%RX64) rcStrict=%Rrc\n", 
+    Log4(("hmR0SvmExitXsetbv: New XCR0=%#RX64 fLoadSaveGuestXcr0=%d (cr4=%RX64) rcStrict=%Rrc\n",
         pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0, pCtx->cr4, VBOXSTRICTRC_VAL(rcStrict)));
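Every hunk above touches the same pattern: the host RFLAGS value returned by ASMIntDisableFlags() is held while AMD-V is switched on or off in the EFER MSR (or while guest code runs), and is put back with ASMSetFlags() afterwards. The changeset only renames the holder from uEflags to fEFlags, which reads as a naming-convention cleanup: the value is a flags snapshot, so the 'f' prefix fits better than 'u'. Below is a minimal sketch of that save/restore pattern; the function name sketchSvmEnableCpu and its simplified control flow are illustrative assumptions (the IPRT calls and MSR constants are real), not the actual hmR0SvmEnableCpu code.

    #include <iprt/asm-amd64-x86.h>   /* ASMIntDisableFlags, ASMSetFlags, ASMRdMsr, ASMWrMsr */
    #include <iprt/x86.h>             /* MSR_K6_EFER, MSR_K6_EFER_SVME */
    #include <VBox/err.h>             /* VINF_SUCCESS, VERR_SVM_IN_USE */

    /* Illustrative sketch only -- not the real hmR0SvmEnableCpu(). */
    static int sketchSvmEnableCpu(bool fIgnoreInUseError)
    {
        /* Paranoid: disable interrupts, since interrupt handlers might mess with EFER. */
        RTCCUINTREG const fEFlags = ASMIntDisableFlags();

        uint64_t const uHostEfer = ASMRdMsr(MSR_K6_EFER);
        if ((uHostEfer & MSR_K6_EFER_SVME) && !fIgnoreInUseError)
        {
            /* AMD-V already in use: restore RFLAGS before bailing out. */
            ASMSetFlags(fEFlags);
            return VERR_SVM_IN_USE;
        }

        /* Turn on AMD-V in the EFER MSR. */
        ASMWrMsr(MSR_K6_EFER, uHostEfer | MSR_K6_EFER_SVME);

        /* Restore interrupts. */
        ASMSetFlags(fEFlags);
        return VINF_SUCCESS;
    }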
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 55815)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 55830)
@@ -214,5 +214,5 @@
 {
     /** The host's rflags/eflags. */
-    RTCCUINTREG     uEflags;
+    RTCCUINTREG     fEFlags;
 #if HC_ARCH_BITS == 32
     uint32_t        u32Alignment0;
@@ -775,5 +775,5 @@
 
     /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
-    RTCCUINTREG uEflags = ASMIntDisableFlags();
+    RTCCUINTREG fEFlags = ASMIntDisableFlags();
 
     /* Enable the VMX bit in CR4 if necessary. */
@@ -792,5 +792,5 @@
 
     /* Restore interrupts. */
-    ASMSetFlags(uEflags);
+    ASMSetFlags(fEFlags);
     return rc;
 }
@@ -807,5 +807,5 @@
 
     /* Paranoid: Disable interrupts as, in theory, interrupts handlers might mess with CR4. */
-    RTCCUINTREG uEflags = ASMIntDisableFlags();
+    RTCCUINTREG fEFlags = ASMIntDisableFlags();
 
     /* If we're for some reason not in VMX root mode, then don't leave it. */
@@ -824,5 +824,5 @@
 
     /* Restore interrupts. */
-    ASMSetFlags(uEflags);
+    ASMSetFlags(fEFlags);
     return rc;
 }
@@ -5244,5 +5244,5 @@
     PHMGLOBALCPUINFO pCpu;
     RTHCPHYS         HCPhysCpuPage;
-    RTCCUINTREG      uOldEflags;
+    RTCCUINTREG      fOldEFlags;
 
     AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
@@ -5260,5 +5260,5 @@
 
     /* Disable interrupts. */
-    uOldEflags = ASMIntDisableFlags();
+    fOldEFlags = ASMIntDisableFlags();
 
 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
@@ -5298,5 +5298,5 @@
     {
         SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
-        ASMSetFlags(uOldEflags);
+        ASMSetFlags(fOldEFlags);
         pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
         return rc2;
@@ -5306,5 +5306,5 @@
     AssertRC(rc2);
     Assert(!(ASMGetFlags() & X86_EFL_IF));
-    ASMSetFlags(uOldEflags);
+    ASMSetFlags(fOldEFlags);
     return rc;
 }
@@ -8651,5 +8651,5 @@
      * executing guest code.
      */
-    pVmxTransient->uEflags = ASMIntDisableFlags();
+    pVmxTransient->fEFlags = ASMIntDisableFlags();
     if (   (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
             || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
@@ -8658,5 +8658,5 @@
     {
         hmR0VmxClearEventVmcs(pVCpu);
-        ASMSetFlags(pVmxTransient->uEflags);
+        ASMSetFlags(pVmxTransient->fEFlags);
         VMMRZCallRing3Enable(pVCpu);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
@@ -8667,5 +8667,5 @@
     {
         hmR0VmxClearEventVmcs(pVCpu);
-        ASMSetFlags(pVmxTransient->uEflags);
+        ASMSetFlags(pVmxTransient->fEFlags);
         VMMRZCallRing3Enable(pVCpu);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
@@ -8883,4 +8883,4 @@
     hmR0VmxCheckHostEferMsr(pVCpu);         /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
 #endif
-    ASMSetFlags(pVmxTransient->uEflags);    /* Enable interrupts. */
+    ASMSetFlags(pVmxTransient->fEFlags);    /* Enable interrupts. */
     VMMRZCallRing3Enable(pVCpu);            /* It is now safe to do longjmps to ring-3!!! */
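The HMVMXR0.cpp hunks apply the same rename to the VMX transient state: RFLAGS is saved into pVmxTransient->fEFlags just before the final pre-VM-entry checks and only restored after the VM-exit has been handled, plus the analogous uOldEflags to fOldEFlags rename in the 32-to-64 switcher path. A small self-contained sketch of that interrupt-free window follows; SKETCHTRANSIENT and sketchRunGuest() are hypothetical stand-ins, not the real VMXTRANSIENT structure or the VMX run loop in HMVMXR0.cpp.

    #include <iprt/asm-amd64-x86.h>   /* ASMIntDisableFlags, ASMSetFlags, ASMGetFlags */
    #include <iprt/assert.h>          /* Assert */
    #include <iprt/x86.h>             /* X86_EFL_IF */

    /* Hypothetical stand-in for the VMXTRANSIENT/SVMTRANSIENT structures. */
    typedef struct SKETCHTRANSIENT
    {
        /** The host's rflags/eflags (the 'f' prefix is what r55830 introduces). */
        RTCCUINTREG fEFlags;
    } SKETCHTRANSIENT;

    /* Illustrative only: the interrupt-free window around guest execution. */
    static void sketchRunGuest(SKETCHTRANSIENT *pTransient)
    {
        /* Interrupts stay off from here until after the guest has run. */
        pTransient->fEFlags = ASMIntDisableFlags();

        /* ... final host-state sync, VM entry, VM exit, first-pass exit handling ... */

        /* Still expected to be running with interrupts disabled. */
        Assert(!(ASMGetFlags() & X86_EFL_IF));
        ASMSetFlags(pTransient->fEFlags);   /* Enable interrupts again. */
    }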