Changeset 48473 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Sep 13, 2013 2:17:58 PM
- File: 1 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
Diff from r48457 to r48473:

@@ -3115,6 +3115,8 @@
                           | X86_CR0_CD   /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
                           | X86_CR0_NW;  /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
-    /** @todo Temporarily intercept CR0.PE changes with unrestricted. Fix PGM
-     *        enmGuestMode to not be out-of-sync. See @bugref{6398}. */
+
+    /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
+     *        enmGuestMode to be in-sync with the current mode. See @bugref{6398}
+     *        and @bugref{6944}. */
 #if 0
     if (pVM->hm.s.vmx.fUnrestrictedGuest)

@@ -4111,5 +4113,5 @@
      *    Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
      */
-    const bool fResumeVM = !!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
+    const bool fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
     /** @todo Add stats for resume vs launch. */
 #ifdef VBOX_WITH_KERNEL_USING_XMM

@@ -6338,13 +6340,42 @@
  *          may be out-of-sync. Make sure to update the required
  *          fields before using them.
- *
- * @remarks Must never be called with @a enmOperation ==
- *          VMMCALLRING3_VM_R0_ASSERTION. We can't assert it here because if it
- *          it -does- get called with VMMCALLRING3_VM_R0_ASSERTION, we'll end up
- *          with an infinite recursion.
  */
 DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
 {
-    /* VMMRZCallRing3() already makes sure we never get called as a result of an longjmp due to an assertion. */
+    if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
+    {
+        VMMRZCallRing3RemoveNotification(pVCpu);
+        HM_DISABLE_PREEMPT_IF_NEEDED();
+
+        /* If anything here asserts or fails, good luck. */
+        if (CPUMIsGuestFPUStateActive(pVCpu))
+            CPUMR0SaveGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
+
+        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
+
+#if HC_ARCH_BITS == 64
+        /* Restore host-state bits that VT-x only restores partially. */
+        if (   (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
+            && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
+        {
+            VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
+            pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+        }
+#endif
+        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+        if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
+        {
+            VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
+            pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
+        }
+
+        if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
+            VMMR0ThreadCtxHooksDeregister(pVCpu);
+
+        HMR0LeaveCpu(pVCpu);
+        HM_RESTORE_PREEMPT_IF_NEEDED();
+        return VINF_SUCCESS;
+    }
+
     Assert(pVCpu);
     Assert(pvUser);

@@ -6399,6 +6430,6 @@
     /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
     uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
-    bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
-    bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
+    bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
+    bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
 
     Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));

@@ -6481,6 +6512,6 @@
     /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
     uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
-    bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
-    bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
+    bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
+    bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
 
     Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));

@@ -6517,6 +6548,6 @@
     /* Update the interruptibility-state as it could have been changed by
        hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
-    fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
-    fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
+    fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
+    fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
 
 #ifdef VBOX_WITH_STATISTICS

@@ -8062,5 +8093,5 @@
     Assert(u32Val == pVCpu->hm.s.vmx.u32ProcCtls);
 #endif
-    bool const fLongModeGuest = !!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
+    bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
 
     /*

@@ -10308,4 +10339,6 @@
         PVM pVM = pVCpu->CTX_SUFF(pVM);
         rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+
         if (rc == VINF_SUCCESS)
         {

@@ -10313,9 +10346,7 @@
             HM_RESTORE_PREEMPT_IF_NEEDED();
 
-            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
             return VINF_SUCCESS;
         }
-
         HM_RESTORE_PREEMPT_IF_NEEDED();
 
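Most of the mechanical changes above replace the double-negation idiom !!(x & MASK) with IPRT's RT_BOOL() macro wherever a masked flag test is assigned to a bool. The sketch below is a minimal, self-contained illustration of that idiom, not code from this changeset: RT_BOOL_LOCAL is a hypothetical stand-in (assumed to expand to !!(Value), which is how RT_BOOL is conventionally understood to behave), and X_BLOCK_MOVSS is a made-up flag value used only for this example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for IPRT's RT_BOOL(); assumed to collapse any non-zero value to 1. */
#define RT_BOOL_LOCAL(Value)   ( !!(Value) )

/* Made-up flag bit standing in for an interruptibility-state flag such as BLOCK_MOVSS. */
#define X_BLOCK_MOVSS          UINT32_C(0x00000002)

int main(void)
{
    uint32_t uIntrState = UINT32_C(0x00000002);  /* pretend this was read from the VMCS */

    /* The masked value (0x2 here) is reduced to an unambiguous 0/1 before the bool
       assignment, which states the intent explicitly and avoids the int-to-bool
       conversion warnings some compilers emit for a plain assignment. */
    bool fBlockMovSS = RT_BOOL_LOCAL(uIntrState & X_BLOCK_MOVSS);

    printf("fBlockMovSS = %d\n", fBlockMovSS);   /* prints 1 when the bit is set, 0 otherwise */
    return 0;
}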