Changeset 45575 in vbox for trunk/src/VBox
- Timestamp: Apr 16, 2013 3:47:57 PM (12 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r45558 r45575 4737 4737 { 4738 4738 if ( hmR0VmxIsBenignXcpt(uIdtVector) 4739 || hmR0VmxIsBenignXcpt(uExitVector) 4740 || ( hmR0VmxIsContributoryXcpt(uIdtVector) 4741 && uExitVector == X86_XCPT_PF)) 4739 || hmR0VmxIsBenignXcpt(uExitVector)) 4742 4740 { 4743 4741 enmReflect = VMXREFLECTXCPT_XCPT; 4742 } 4743 if ( hmR0VmxIsContributoryXcpt(uIdtVector) 4744 && uExitVector == X86_XCPT_PF) 4745 { 4746 enmReflect = VMXREFLECTXCPT_XCPT; 4747 GCPtrFaultAddress = pMixedCtx->cr2; 4748 Log(("IDT: Contributory #PF uCR2=%#RGv\n", pMixedCtx->cr2)); 4744 4749 } 4745 4750 else if ( uExitVector == X86_XCPT_PF … … 4749 4754 enmReflect = VMXREFLECTXCPT_XCPT; 4750 4755 GCPtrFaultAddress = pMixedCtx->cr2; 4751 Log((" Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2));4756 Log(("IDT: Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2)); 4752 4757 } 4753 4758 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & VMX_CONTRIBUTORY_XCPT_BITMAP) … … 4760 4765 else if (uIdtVector == X86_XCPT_DF) 4761 4766 enmReflect = VMXREFLECTXCPT_TF; 4767 else 4768 AssertMsgFailed(("Invalid!\n")); 4762 4769 } 4763 4770 else if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT … … 4786 4793 0 /* cbInstr */, u32ErrCode, GCPtrFaultAddress); 4787 4794 rc = VINF_SUCCESS; 4788 Log((" Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));4795 Log(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode)); 4789 4796 break; 4790 4797 } … … 4794 4801 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx); 4795 4802 rc = VINF_VMX_DOUBLE_FAULT; 4796 Log((" Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uIdtVector,4803 Log(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uIdtVector, 4797 4804 uExitVector)); 4798 4805 break; … … 4801 4808 case VMXREFLECTXCPT_TF: 4802 4809 { 4803 Log((" Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", 
uIdtVector, uExitVector));4810 Log(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector)); 4804 4811 rc = VINF_EM_RESET; 4805 4812 break; … … 6284 6291 6285 6292 /* Inject. */ 6286 Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));6293 Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RGv\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2)); 6287 6294 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo); 6288 6295 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo)) … … 6901 6908 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \ 6902 6909 VMX_ASSERT_PREEMPT_CPUID_VAR(); \ 6903 LogFunc(("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n")); \ 6910 LogFunc(("vcpu[%u] vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n", \ 6911 (unsigned)pVCpu->idCpu)); \ 6904 6912 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \ 6905 6913 if (VMMR0IsLogFlushDisabled(pVCpu)) \ … … 7628 7636 { 7629 7637 VMX_VALIDATE_EXIT_HANDLER_PARAMS(); 7638 PVM pVM = pVCpu->CTX_SUFF(pVM); 7630 7639 int rc = VINF_SUCCESS; 7631 PVM pVM = pVCpu->CTX_SUFF(pVM);7632 7640 7633 7641 /* If TPR patching is active, LSTAR holds the guest TPR, writes to it must be propagated to the APIC. */ … … 7647 7655 return VINF_SUCCESS; 7648 7656 } 7649 7650 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. 
*/7651 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS))7652 {7653 switch (pMixedCtx->ecx)7654 {7655 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;7656 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;7657 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;7658 case MSR_K8_FS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_FS_BASE_MSR; break;7659 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_GS_BASE_MSR; break;7660 }7661 }7662 #ifdef VBOX_STRICT7663 else7664 {7665 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */7666 switch (pMixedCtx->ecx)7667 {7668 case MSR_IA32_SYSENTER_CS:7669 case MSR_IA32_SYSENTER_EIP:7670 case MSR_IA32_SYSENTER_ESP:7671 case MSR_K8_FS_BASE:7672 case MSR_K8_GS_BASE:7673 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%RX32\n", pMixedCtx->ecx));7674 return VERR_VMX_UNEXPECTED_EXIT_CODE;7675 case MSR_K8_LSTAR:7676 case MSR_K6_STAR:7677 case MSR_K8_SF_MASK:7678 case MSR_K8_TSC_AUX:7679 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%RX32\n", pMixedCtx->ecx));7680 return VERR_VMX_UNEXPECTED_EXIT_CODE;7681 }7682 }7683 #endif7684 7657 7685 7658 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */ … … 7704 7677 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE; 7705 7678 } 7679 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */ 7680 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS; 7681 7682 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. 
*/ 7683 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS))) 7684 { 7685 switch (pMixedCtx->ecx) 7686 { 7687 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break; 7688 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break; 7689 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break; 7690 case MSR_K8_FS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_FS_BASE_MSR; break; 7691 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_GS_BASE_MSR; break; 7692 case MSR_K8_KERNEL_GS_BASE: /* If we auto-load it, update HM_CHANGED_VMX_GUEST_AUTO_MSRS. */ break; 7693 } 7694 } 7695 #ifdef VBOX_STRICT 7696 else 7697 { 7698 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */ 7699 switch (pMixedCtx->ecx) 7700 { 7701 case MSR_IA32_SYSENTER_CS: 7702 case MSR_IA32_SYSENTER_EIP: 7703 case MSR_IA32_SYSENTER_ESP: 7704 case MSR_K8_FS_BASE: 7705 case MSR_K8_GS_BASE: 7706 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx)); 7707 return VERR_VMX_UNEXPECTED_EXIT_CODE; 7708 7709 case MSR_K8_LSTAR: 7710 case MSR_K6_STAR: 7711 case MSR_K8_SF_MASK: 7712 case MSR_K8_TSC_AUX: 7713 { 7714 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", 7715 pMixedCtx->ecx)); 7716 return VERR_VMX_UNEXPECTED_EXIT_CODE; 7717 } 7718 7719 case MSR_K8_KERNEL_GS_BASE: 7720 { 7721 AssertMsgFailed(("Unexpected WRMSR for an MSR that is manually loaded/stored on every VM-exit. ecx=%#RX32\n", 7722 pMixedCtx->ecx)); 7723 return VERR_VMX_UNEXPECTED_EXIT_CODE; 7724 } 7725 } 7726 } 7727 #endif /* VBOX_STRICT */ 7706 7728 } 7707 7729 return rc;
Note: See TracChangeset for help on using the changeset viewer.