Timestamp: Jun 17, 2013 2:35:56 PM (12 years ago)
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46588)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46594)
@@ -101,5 +101,5 @@
 #define HMSVM_VMCB_CLEAN_NP                     RT_BIT(4)
 /** Control registers (CR0, CR3, CR4, EFER). */
-#define HMSVM_VMCB_CLEAN_CRX                    RT_BIT(5)
+#define HMSVM_VMCB_CLEAN_CRX_EFER               RT_BIT(5)
 /** Debug registers (DR6, DR7). */
 #define HMSVM_VMCB_CLEAN_DRX                    RT_BIT(6)
@@ -955,5 +955,5 @@
 
         pVmcb->guest.u64CR0 = u64GuestCR0;
-        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     }
@@ -992,5 +992,5 @@
         pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
 
-        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
     }
@@ -1037,5 +1037,5 @@
 
         pVmcb->guest.u64CR4 = u64GuestCR4;
-        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
     }
@@ -1126,5 +1126,10 @@
      * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
      */
-    pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_EFER_MSR)
+    {
+        pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_SVM_GUEST_EFER_MSR;
+    }
 
     /* 64-bit MSRs. */
@@ -1137,6 +1142,11 @@
     {
         /* If the guest isn't in 64-bit mode, clear MSR_K6_LME bit from guest EFER otherwise AMD-V expects amd64 shadow paging. */
-        pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
-    }
+        if (pCtx->msrEFER & MSR_K6_EFER_LME)
+        {
+            pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
+            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
+        }
+    }
+
 
     /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might
@@ -2885,7 +2895,5 @@
 static int hmR0SvmEmulateMovTpr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    int rc;
     Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));
-
     for (;;)
     {
@@ -2900,5 +2908,6 @@
     {
         case HMTPRINSTR_READ:
-            rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
+        {
+            int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
             AssertRC(rc);
 
@@ -2907,11 +2916,13 @@
             pCtx->rip += pPatch->cbOp;
             break;
+        }
 
         case HMTPRINSTR_WRITE_REG:
         case HMTPRINSTR_WRITE_IMM:
+        {
             if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
             {
                 uint32_t u32Val;
-                rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
+                int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
                 AssertRC(rc);
                 u8Tpr = u32Val;
@@ -2920,8 +2931,9 @@
                 u8Tpr = (uint8_t)pPatch->uSrcOperand;
 
-            rc = PDMApicSetTPR(pVCpu, u8Tpr);
-            AssertRC(rc);
+            int rc2 = PDMApicSetTPR(pVCpu, u8Tpr);
+            AssertRC(rc2);
             pCtx->rip += pPatch->cbOp;
             break;
+        }
 
         default:
@@ -3247,4 +3259,7 @@
         rc = EMInterpretWrmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
         AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
+
+        if (pCtx->ecx == MSR_K6_EFER)
+            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_EFER_MSR;
     }
     else
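
The pattern in this changeset is worth spelling out: AMD-V's VMCB clean bits let the CPU reuse cached VMCB state across VMRUNs, so the hypervisor must clear the relevant clean bit whenever it rewrites a guarded field, and it can skip the rewrite entirely when its own software dirty flag (here HM_CHANGED_SVM_GUEST_EFER_MSR, raised on a WRMSR to EFER in the last hunk) says nothing changed. The following is a minimal, self-contained C sketch of that dirty-flag/clean-bit handshake; all names in it (GuestCtx, Vmcb, loadGuestEfer, onWrmsrExit, and the flag constants) are hypothetical stand-ins, not VirtualBox APIs.

/*
 * Sketch of the dirty-flag / VMCB-clean-bit pattern from the diff above.
 * Hypothetical names throughout; only the technique -- "write a guest
 * register into the VMCB only when a software dirty flag says it changed,
 * and clear the matching hardware clean bit when you do" -- is from r46594.
 */
#include <stdint.h>

#define CTX_CHANGED_EFER     (1u << 0)   /* software dirty flag, cf. HM_CHANGED_SVM_GUEST_EFER_MSR */
#define VMCB_CLEAN_CRX_EFER  (1u << 5)   /* hardware clean bit, cf. HMSVM_VMCB_CLEAN_CRX_EFER */
#define MSR_EFER             0xC0000080u

typedef struct GuestCtx { uint64_t msrEFER; uint32_t fChangedFlags; } GuestCtx;
typedef struct Vmcb     { uint64_t u64EFER; uint64_t u64CleanBits;  } Vmcb;

/* Before VMRUN: sync EFER into the VMCB only if the guest modified it. */
static void loadGuestEfer(GuestCtx *pCtx, Vmcb *pVmcb)
{
    if (pCtx->fChangedFlags & CTX_CHANGED_EFER)
    {
        pVmcb->u64EFER       = pCtx->msrEFER;
        pVmcb->u64CleanBits &= ~VMCB_CLEAN_CRX_EFER; /* force the CPU to reload this VMCB group */
        pCtx->fChangedFlags &= ~CTX_CHANGED_EFER;    /* consume the software dirty flag */
    }
    /* else: the clean bit stays set and the CPU may keep its cached copy. */
}

/* On a WRMSR #VMEXIT: mark EFER dirty so the next loadGuestEfer() syncs it. */
static void onWrmsrExit(GuestCtx *pCtx, uint32_t idMsr, uint64_t uValue)
{
    if (idMsr == MSR_EFER)
    {
        pCtx->msrEFER = uValue;
        pCtx->fChangedFlags |= CTX_CHANGED_EFER;
    }
}

int main(void)
{
    GuestCtx ctx  = { 0, 0 };
    Vmcb     vmcb = { 0, ~0ull };           /* all VMCB groups initially clean */
    loadGuestEfer(&ctx, &vmcb);             /* no dirty flag: VMCB untouched */
    onWrmsrExit(&ctx, MSR_EFER, 0x500);     /* guest writes EFER */
    loadGuestEfer(&ctx, &vmcb);             /* syncs EFER and clears the clean bit */
    return 0;
}

The WRMSR hunk at the end of the changeset is the other half of this contract: any exit path that lets the guest modify EFER must raise the dirty flag, otherwise the VMCB keeps serving a stale cached value.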