Changeset 46391 in vbox
- Timestamp: Jun 5, 2013 9:43:58 AM (12 years ago)
- File: 1 edited
Legend: lines present in both revisions carry both line numbers (r46381, then r46391); lines removed in r46391 carry only the r46381 number and a '-' marker; lines added carry only the r46391 number and a '+' marker; '…' separates hunks.
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r46381 r46391
…
1566 1566  {
1567 1567  /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1568      -  Log(("hmR0VmxSetupTaggedTlb: VPID supported without INVEPT support. Ignoring VPID.\n"));
     1568 +  Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVEPT support. Ignoring VPID.\n"));
1569 1569  pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1570 1570  pVM->hm.s.vmx.fVpid = false;
…
2714 2714  rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2715 2715  AssertRCReturn(rc, rc);
2716      -  Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
     2716 +  Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
2717 2717  pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2718 2718  }
…
2784 2784  AssertRCReturn(rc, rc);
2785 2785
2786      -  Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
     2786 +  Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
2787 2787  pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2788 2788  }
…
2844 2844  rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
2845 2845  AssertRCReturn(rc, rc);
2846      -  Log(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
     2846 +  Log4(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
2847 2847
2848 2848  /* Setup VT-x's view of the guest CR0. */
…
2955 2955  rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
2956 2956  AssertRCReturn(rc, rc);
2957      -  Log(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
     2957 +  Log4(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
2958 2958
2959 2959  /*
…
3020 3020  rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3021 3021  AssertRCReturn(rc, rc);
3022      -  Log(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
     3022 +  Log4(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3023 3023
3024 3024  if ( pVM->hm.s.vmx.fUnrestrictedGuest
…
3057 3057  }
3058 3058
3059      -  Log(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
     3059 +  Log4(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
3060 3060  rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3061 3061  }
…
3065 3065  RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3066 3066
3067      -  Log(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
     3067 +  Log4(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
3068 3068  rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3069 3069  }
…
3084 3084  rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3085 3085  AssertRCReturn(rc, rc);
3086      -  Log(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
     3086 +  Log4(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
3087 3087
3088 3088  /* Setup VT-x's view of the guest CR4. */
…
3152 3152
3153 3153  /* Write VT-x's view of the guest CR4 into the VMCS. */
3154      -  Log(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
     3154 +  Log4(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
3155 3155  rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3156 3156  AssertRCReturn(rc, rc);
…
3554 3554  in real-mode (e.g. OpenBSD 4.0) */
3555 3555  REMFlushTBs(pVM);
3556      -  Log(("Load: Switch to protected mode detected!\n"));
     3556 +  Log4(("Load: Switch to protected mode detected!\n"));
3557 3557  pVCpu->hm.s.vmx.fWasInRealMode = false;
3558 3558  }
…
3578 3578  AssertRCReturn(rc, rc);
3579 3579
3580      -  Log(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
     3580 +  Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
3581 3581  pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
3582 3582  #ifdef VBOX_STRICT
…
3648 3648  rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
3649 3649
3650      -  Log(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
     3650 +  Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3651 3651  pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3652 3652  }
…
3661 3661
3662 3662  Assert(!(pMixedCtx->gdtr.cbGdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
3663      -  Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
     3663 +  Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
3664 3664  pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3665 3665  }
…
3697 3697  }
3698 3698
3699      -  Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
     3699 +  Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
3700 3700  pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3701 3701  }
…
3710 3710
3711 3711  Assert(!(pMixedCtx->idtr.cbIdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
3712      -  Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
     3712 +  Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
3713 3713  pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3714 3714  }
…
3933 3933  Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3934 3934
3935      -  Log(("VM-entry failure: %Rrc\n", rcVMRun));
     3935 +  Log4(("VM-entry failure: %Rrc\n", rcVMRun));
3936 3936  switch (rcVMRun)
3937 3937  {
…
3948 3948
3949 3949  #ifdef VBOX_STRICT
3950      -  Log(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
     3950 +  Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
3951 3951  pVmxTransient->uExitReason));
3952      -  Log(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
     3952 +  Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
3953      -  Log(("InstrError %#RX32\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
     3953 +  Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
3954 3954  if (pVCpu->hm.s.vmx.lasterror.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
3955      -  Log(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
     3955 +  Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
3956 3956  else
3957      -  Log(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
     3957 +  Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
3958 3958
3959 3959  /* VMX control bits. */
…
3962 3962  HMVMXHCUINTREG uHCReg;
3963 3963  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
3964      -  Log(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
     3964 +  Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
3965 3965  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
3966      -  Log(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
     3966 +  Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
3967 3967  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
3968      -  Log(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
     3968 +  Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
3969 3969  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
3970      -  Log(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
     3970 +  Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
3971 3971  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
3972      -  Log(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
     3972 +  Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
3973 3973  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
3974      -  Log(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
     3974 +  Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
3975 3975  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
3976      -  Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
     3976 +  Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
3977 3977  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
3978      -  Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
     3978 +  Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
3979 3979  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
3980      -  Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
     3980 +  Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
3981 3981  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
3982      -  Log(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
     3982 +  Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
3983 3983  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
3984      -  Log(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
     3984 +  Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
3985 3985  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3986      -  Log(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
     3986 +  Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
3987 3987  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3988      -  Log(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
     3988 +  Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
3989 3989  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
3990      -  Log(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
     3990 +  Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
3991 3991  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
3992      -  Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
     3992 +  Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
3993 3993  rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
3994      -  Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
     3994 +  Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
3995 3995  rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
3996      -  Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
     3996 +  Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
3997 3997  rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
3998      -  Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
     3998 +  Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
3999 3999  rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4000      -  Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
     4000 +  Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4001 4001  rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4002      -  Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
     4002 +  Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4003 4003  rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4004      -  Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
     4004 +  Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4005 4005
4006 4006  /* Guest bits. */
4007 4007  rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4008      -  Log(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
     4008 +  Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
4009 4009  rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
4010      -  Log(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
     4010 +  Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
4011 4011  rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
4012      -  Log(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
     4012 +  Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
4013 4013  rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
4014      -  Log(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
     4014 +  Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
4015 4015
4016 4016  /* Host bits. */
4017 4017  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
4018      -  Log(("Host CR0 %#RHr\n", uHCReg));
     4018 +  Log4(("Host CR0 %#RHr\n", uHCReg));
4019 4019  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
4020      -  Log(("Host CR3 %#RHr\n", uHCReg));
     4020 +  Log4(("Host CR3 %#RHr\n", uHCReg));
4021 4021  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
4022      -  Log(("Host CR4 %#RHr\n", uHCReg));
     4022 +  Log4(("Host CR4 %#RHr\n", uHCReg));
4023 4023
4024 4024  RTGDTR HostGdtr;
…
4026 4026  ASMGetGDTR(&HostGdtr);
4027 4027  rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
4028      -  Log(("Host CS %#08x\n", u32Val));
     4028 +  Log4(("Host CS %#08x\n", u32Val));
4029 4029  if (u32Val < HostGdtr.cbGdt)
4030 4030  {
…
4034 4034
4035 4035  rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
4036      -  Log(("Host DS %#08x\n", u32Val));
     4036 +  Log4(("Host DS %#08x\n", u32Val));
4037 4037  if (u32Val < HostGdtr.cbGdt)
4038 4038  {
…
4042 4042
4043 4043  rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
4044      -  Log(("Host ES %#08x\n", u32Val));
     4044 +  Log4(("Host ES %#08x\n", u32Val));
4045 4045  if (u32Val < HostGdtr.cbGdt)
4046 4046  {
…
4050 4050
4051 4051  rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
4052      -  Log(("Host FS %#08x\n", u32Val));
     4052 +  Log4(("Host FS %#08x\n", u32Val));
4053 4053  if (u32Val < HostGdtr.cbGdt)
4054 4054  {
…
4058 4058
4059 4059  rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
4060      -  Log(("Host GS %#08x\n", u32Val));
     4060 +  Log4(("Host GS %#08x\n", u32Val));
4061 4061  if (u32Val < HostGdtr.cbGdt)
4062 4062  {
…
4066 4066
4067 4067  rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
4068      -  Log(("Host SS %#08x\n", u32Val));
     4068 +  Log4(("Host SS %#08x\n", u32Val));
4069 4069  if (u32Val < HostGdtr.cbGdt)
4070 4070  {
…
4074 4074
4075 4075  rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
4076      -  Log(("Host TR %#08x\n", u32Val));
     4076 +  Log4(("Host TR %#08x\n", u32Val));
4077 4077  if (u32Val < HostGdtr.cbGdt)
4078 4078  {
…
4082 4082
4083 4083  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
4084      -  Log(("Host TR Base %#RHv\n", uHCReg));
     4084 +  Log4(("Host TR Base %#RHv\n", uHCReg));
4085 4085  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
4086      -  Log(("Host GDTR Base %#RHv\n", uHCReg));
     4086 +  Log4(("Host GDTR Base %#RHv\n", uHCReg));
4087 4087  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
4088      -  Log(("Host IDTR Base %#RHv\n", uHCReg));
     4088 +  Log4(("Host IDTR Base %#RHv\n", uHCReg));
4089 4089  rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
4090      -  Log(("Host SYSENTER CS %#08x\n", u32Val));
     4090 +  Log4(("Host SYSENTER CS %#08x\n", u32Val));
4091 4091  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
4092      -  Log(("Host SYSENTER EIP %#RHv\n", uHCReg));
     4092 +  Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
4093 4093  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
4094      -  Log(("Host SYSENTER ESP %#RHv\n", uHCReg));
     4094 +  Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
4095 4095  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
4096      -  Log(("Host RSP %#RHv\n", uHCReg));
     4096 +  Log4(("Host RSP %#RHv\n", uHCReg));
4097 4097  rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
4098      -  Log(("Host RIP %#RHv\n", uHCReg));
     4098 +  Log4(("Host RIP %#RHv\n", uHCReg));
4099 4099  # if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4100 4100  if (HMVMX_IS_64BIT_HOST_MODE())
4101 4101  {
4102      -  Log(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
4103      -  Log(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
4104      -  Log(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
4105      -  Log(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
4106      -  Log(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
4107      -  Log(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
     4102 +  Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
     4103 +  Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
     4104 +  Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
     4105 +  Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
     4106 +  Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
     4107 +  Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
4108 4108  }
4109 4109  # endif
…
4766 4766  && uExitVector == X86_XCPT_PF)
4767 4767  {
4768      -  Log(("IDT: Contributory #PF uCR2=%#RX64\n", pMixedCtx->cr2));
     4768 +  Log4(("IDT: Contributory #PF uCR2=%#RX64\n", pMixedCtx->cr2));
4769 4769  }
4770 4770  #endif
…
4773 4773  {
4774 4774  pVmxTransient->fVectoringPF = true;
4775      -  Log(("IDT: Vectoring #PF uCR2=%#RX64\n", pMixedCtx->cr2));
     4775 +  Log4(("IDT: Vectoring #PF uCR2=%#RX64\n", pMixedCtx->cr2));
4776 4776  }
4777 4777  else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
…
4812 4812  0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
4813 4813  rc = VINF_SUCCESS;
4814      -  Log(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo,
     4814 +  Log4(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo,
4815 4815  pVCpu->hm.s.Event.u32ErrCode));
4816 4816  break;
…
4821 4821  hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
4822 4822  rc = VINF_VMX_DOUBLE_FAULT;
4823      -  Log(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
     4823 +  Log4(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
4824 4824  uIdtVector, uExitVector));
4825 4825  break;
…
4828 4828  case VMXREFLECTXCPT_TF:
4829 4829  {
4830      -  Log(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
     4830 +  Log4(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
4831 4831  rc = VINF_EM_RESET;
4832 4832  break;
…
4981 4981  {
4982 4982  Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4983      -  Log(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
     4983 +  Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
4984 4984
4985 4985  pMixedCtx->eflags.Bits.u1VM = 0;
…
5643 5643  {
5644 5644  AssertRC(rc);
5645      -  Log(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
     5645 +  Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
5646 5646  return rc;
5647 5647  }
…
5655 5655  STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5656 5656  rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5657      -  Log(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
     5657 +  Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
5658 5658  return rc;
5659 5659  }
…
5663 5663  || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5664 5664  {
5665      -  Log(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
     5665 +  Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5666 5666  return VINF_EM_PENDING_REQUEST;
5667 5667  }
…
5670 5670  if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5671 5671  {
5672      -  Log(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
     5672 +  Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5673 5673  return VINF_PGM_POOL_FLUSH_PENDING;
5674 5674  }
…
5677 5677  if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5678 5678  {
5679      -  Log(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
     5679 +  Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5680 5680  return VINF_EM_RAW_TO_R3;
5681 5681  }
…
5751 5751  rc = TRPMResetTrap(pVCpu);
5752 5752  AssertRC(rc);
5753      -  Log(("TRPM->HM event: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
     5753 +  Log4(("TRPM->HM event: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
5754 5754  u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
5755 5755  hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
…
5796 5796  }
5797 5797
5798      -  Log(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
     5798 +  Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
5799 5799
5800 5800  int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
…
5913 5913  /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
5914 5914  VMMRZCallRing3Disable(pVCpu);
5915      -  Log(("hmR0VmxExitToRing3: rcExit=%d\n", rcExit));
     5915 +  Log4(("hmR0VmxExitToRing3: rcExit=%d\n", rcExit));
5916 5916
5917 5917  /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
…
5968 5968  VMMRZCallRing3Disable(pVCpu);
5969 5969  Assert(VMMR0IsLogFlushDisabled(pVCpu));
5970      -  Log(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3\n"));
     5970 +  Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3\n"));
5971 5971  hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
5972 5972  VMMRZCallRing3Enable(pVCpu);
…
6045 6045  if (fInject)
6046 6046  {
6047      -  Log(("Injecting pending event\n"));
     6047 +  Log4(("Injecting pending event\n"));
6048 6048  rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
6049 6049  pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
…
6061 6061  && !fBlockSti)
6062 6062  {
6063      -  Log(("Injecting NMI\n"));
     6063 +  Log4(("Injecting NMI\n"));
6064 6064  uint32_t u32IntrInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
6065 6065  u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
…
6086 6086  if (RT_SUCCESS(rc))
6087 6087  {
6088      -  Log(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
     6088 +  Log4(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
6089 6089  uint32_t u32IntrInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
6090 6090  u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
…
6421 6421  Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6422 6422  && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6423      -  Log(("Clearing inhibition due to STI.\n"));
     6423 +  Log4(("Clearing inhibition due to STI.\n"));
6424 6424  *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
6425 6425  }
6426      -  Log(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
     6426 +  Log4(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6427 6427  }
6428 6428  Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
…
6455 6455  pMixedCtx->cr2 = GCPtrFaultAddress;
6456 6456  }
6457      -  Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RX64\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
     6457 +
     6458 +  Log4(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", u32IntrInfo, u32ErrCode, cbInstr,
     6459 +  pMixedCtx->cr2));
6458 6460
6459 6461  AssertRCReturn(rc, rc);
…
6722 6724
6723 6725  /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
6724      -  Log(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
     6726 +  Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
6725 6727  rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
6726 6728  AssertRCReturn(rc, rc);
…
6786 6788  /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
6787 6789  Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
6788      -  Log4(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
     6790 +  Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
6789 6791  #ifdef HMVMX_SYNC_FULL_GUEST_STATE
6790 6792  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
…
6928 6930  if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
6929 6931  {
6930      -  Log(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
     6932 +  Log4(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
6931 6933  return;
6932 6934  }
…
7752 7754  AssertRCReturn(rc, rc);
7753 7755
7754      -  Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
7755      -  Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7756      -  Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7757      -  Log(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
     7756 +  Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
     7757 +  Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
     7758 +  Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
     7759 +  Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
7758 7760
7759 7761  rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
7760      -  Log(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
     7762 +  Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
7761 7763  rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
7762      -  Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
     7764 +  Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
7763 7765  rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
7764      -  Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
     7766 +  Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7765 7767  rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
7766      -  Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
     7768 +  Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
7767 7769  rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
7768      -  Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
     7770 +  Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7769 7771  rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7770      -  Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
     7772 +  Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7771 7773
7772 7774  PVM pVM = pVCpu->CTX_SUFF(pVM);
…
7903 7905  rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7904 7906  AssertRCReturn(rc, rc);
7905      -  Log(("ecx=%#RX32\n", pMixedCtx->ecx));
     7907 +  Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
7906 7908
7907 7909  rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
…
8054 8056  {
8055 8057  case 0: /* CR0 */
8056      -  Log(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
     8058 +  Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
8057 8059  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8058 8060  break;
…
8062 8064  case 3: /* CR3 */
8063 8065  Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
8064      -  Log(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
     8066 +  Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
8065 8067  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
8066 8068  break;
8067 8069  case 4: /* CR4 */
8068      -  Log(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
     8070 +  Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
8069 8071  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
8070 8072  break;
…
8101 8103  Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8102 8104  STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
8103      -  Log(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
     8105 +  Log4(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
8104 8106  break;
8105 8107  }
…
8113 8115  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8114 8116  STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
8115      -  Log(("CRX CLTS write rc=%d\n", rc));
     8117 +  Log4(("CRX CLTS write rc=%d\n", rc));
8116 8118  break;
8117 8119  }
…
8125 8127  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8126 8128  STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
8127      -  Log(("CRX LMSW write rc=%d\n", rc));
     8129 +  Log4(("CRX LMSW write rc=%d\n", rc));
8128 8130  break;
8129 8131  }
…
8167 8169  /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8168 8170  AssertRCReturn(rc, rc);
8169      -  Log(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
     8171 +  Log4(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8170 8172
8171 8173  /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
…
8363 8365  pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
8364 8366  }
8365      -  Log(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
     8367 +  Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
8366 8368  }
8367 8369  }
…
8433 8435  GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
8434 8436  PVM pVM = pVCpu->CTX_SUFF(pVM);
8435      -  Log(("ApicAccess uAccessType=%#x GCPhys=%#RGv Off=%#x\n", uAccessType, GCPhys,
     8437 +  Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGv Off=%#x\n", uAccessType, GCPhys,
8436 8438  VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
8437 8439
…
8440 8442  CPUMCTX2CORE(pMixedCtx), GCPhys);
8441 8443  rc = VBOXSTRICTRC_VAL(rc2);
8442      -  Log(("ApicAccess rc=%d\n", rc));
     8444 +  Log4(("ApicAccess rc=%d\n", rc));
8443 8445  if ( rc == VINF_SUCCESS
8444 8446  || rc == VERR_PAGE_TABLE_NOT_PRESENT
…
8453 8455
8454 8456  default:
8455      -  Log(("ApicAccess uAccessType=%#x\n", uAccessType));
     8457 +  Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
8456 8458  rc = VINF_EM_RAW_EMULATE_INSTR;
8457 8459  break;
…
8580 8582  VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
8581 8583  rc = VBOXSTRICTRC_VAL(rc2);
8582      -  Log(("EPT misconfig at %#RGv RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
     8584 +  Log4(("EPT misconfig at %#RGv RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
8583 8585  if ( rc == VINF_SUCCESS
8584 8586  || rc == VERR_PAGE_TABLE_NOT_PRESENT
…
8635 8637  TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8636 8638
8637      -  Log(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
     8639 +  Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
8638 8640  uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8639 8641
…
8655 8657  }
8656 8658
8657      -  Log(("EPT return to ring-3 rc=%d\n"));
     8659 +  Log4(("EPT return to ring-3 rc=%d\n"));
8658 8660  return rc;
8659 8661  }
…
8826 8828  rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8827 8829  AssertRCReturn(rc, rc);
8828      -  Log(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
     8830 +  Log4(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
8829 8831  pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
8830 8832  hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
…
8853 8855  rc = VINF_SUCCESS;
8854 8856  Assert(cbOp == pDis->cbInstr);
8855      -  Log(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
     8857 +  Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8856 8858  switch (pDis->pCurInstr->uOpcode)
8857 8859  {
…
8881 8883  case OP_POPF:
8882 8884  {
8883      -  Log(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
     8885 +  Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8884 8886  uint32_t cbParm = 0;
8885 8887  uint32_t uMask = 0;
…
8911 8913  break;
8912 8914  }
8913      -  Log(("POPF %x -> %#RX64 mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
     8915 +  Log4(("POPF %x -> %#RX64 mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
8914 8916  pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8915 8917  | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
…
8960 8962  break;
8961 8963  }
8962      -  Log(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
     8964 +  Log4(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
8963 8965  pMixedCtx->esp -= cbParm;
8964 8966  pMixedCtx->esp &= uMask;
…
9000 9002  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
9001 9003  | HM_CHANGED_GUEST_RFLAGS;
9002      -  Log(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
     9004 +  Log4(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
9003 9005  STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
9004 9006  break;
…
9029 9031  rc = VBOXSTRICTRC_VAL(rc2);
9030 9032  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
9031      -  Log(("#GP rc=%Rrc\n", rc));
     9033 +  Log4(("#GP rc=%Rrc\n", rc));
9032 9034  break;
9033 9035  }
…
9094 9096  pVCpu->hm.s.Event.fPending = false; /* A vectoring #PF. */
9095 9097  hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
9096      -  Log(("Pending #DF due to vectoring #PF. NP\n"));
     9098 +  Log4(("Pending #DF due to vectoring #PF. NP\n"));
9097 9099  }
9098 9100  STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
…
9137 9139  AssertRCReturn(rc, rc);
9138 9140
9139      -  Log(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
9140      -  pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
     9141 +  Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
     9142 +  pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
9141 9143
9142 9144  TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
9143 9145  (RTGCPTR)pVmxTransient->uExitQualification);
9144 9146
9146      -  Log(("#PF: rc=%Rrc\n", rc));
     9148 +  Log4(("#PF: rc=%Rrc\n", rc));
9147 9149  if (rc == VINF_SUCCESS
9148 9150  {
…
9174 9176  pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF for replace it with #DF. */
9175 9177  hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
9176      -  Log(("#PF: Pending #DF due to vectoring #PF\n"));
     9178 +  Log4(("#PF: Pending #DF due to vectoring #PF\n"));
9177 9179
9178 9180
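
Every hunk above makes the same substitution: a statement that previously logged at the default Log() level now logs via Log4(), and the one message that was already at Log4() (the LoadFlags dump) moves up to Log5(), so these verbose VMCS and guest/host-state dumps stay silent unless a deeper log level is explicitly enabled. A minimal, self-contained C sketch of that leveled-macro pattern follows; the macro and mask names are hypothetical stand-ins for illustration, not IPRT's actual definitions.

    #include <stdio.h>

    /* Hypothetical stand-ins for leveled logging macros: each message carries a
     * level, and it is emitted only if that level's bit is set in a runtime mask. */
    static unsigned g_fLogLevels = 1u << 0;            /* only level 1 enabled by default */

    #define LOG_LVL(lvl, a) do { if (g_fLogLevels & (1u << ((lvl) - 1))) printf a; } while (0)
    #define Log(a)  LOG_LVL(1, a)                      /* default-level messages */
    #define Log4(a) LOG_LVL(4, a)                      /* verbose dumps, as in this change */

    int main(void)
    {
        Log(("visible by default\n"));
        Log4(("suppressed until level 4 is enabled\n")); /* like the VMCS dumps above */
        g_fLogLevels |= 1u << 3;                       /* opt in to level 4 */
        Log4(("now visible\n"));
        return 0;
    }

The double parentheses in Log((...)) let the entire printf-style argument list pass through a single macro parameter, which is why this changeset can be a pure one-token edit (Log to Log4) at every call site.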