Changeset 45585 in vbox for trunk/src/VBox
- Timestamp: Apr 17, 2013 11:31:45 AM
- svn:sync-xref-src-repo-rev: 85079
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r45576 → r45585)

@@ -36,4 +36,6 @@
 #endif
 #ifdef DEBUG_ramshankar
+#define VBOX_ALWAYS_SAVE_FULL_VTX_STATE
+#define VBOX_ALWAYS_SYNC_FULL_VTX_STATE
 #define VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS
 #endif
@@ -3530,31 +3532,4 @@
     }
 
-    /*
-     * Guest FS & GS base MSRs.
-     * We already initialized the FS & GS base as part of the guest segment registers, but the guest's FS/GS base
-     * MSRs might have changed (e.g. due to WRMSR) and we need to update the bases if that happened. These MSRs
-     * are only available in 64-bit mode.
-     */
-    /** @todo Avoid duplication of this code in assembly (see MYPUSHSEGS) - it
-     *        should not be necessary to do it in assembly again. */
-    if (CPUMIsGuestInLongModeEx(pCtx))
-    {
-        if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_FS_BASE_MSR)
-        {
-            rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_FS_BASE, pCtx->fs.u64Base);
-            AssertRCReturn(rc, rc);
-            pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_FS_BASE_MSR;
-        }
-
-        if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GS_BASE_MSR)
-        {
-            rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GS_BASE, pCtx->gs.u64Base);
-            AssertRCReturn(rc, rc);
-            pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GS_BASE_MSR;
-        }
-    }
-    else
-        pVCpu->hm.s.fContextUseFlags &= ~(HM_CHANGED_GUEST_FS_BASE_MSR | HM_CHANGED_GUEST_GS_BASE_MSR);
-
     return VINF_SUCCESS;
 }
@@ -5147,6 +5122,6 @@
         switch (pMsr->u32IndexMSR)
         {
-            case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR = pMsr->u64Value;break;
-            case MSR_K6_STAR:           pMixedCtx->msrSTAR = pMsr->u64Value;break;
+            case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR  = pMsr->u64Value;  break;
+            case MSR_K6_STAR:           pMixedCtx->msrSTAR   = pMsr->u64Value;  break;
             case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK = pMsr->u64Value;  break;
             case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
@@ -5440,4 +5415,6 @@
 
     VMMRZCallRing3Disable(pVCpu);
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));
+    LogFunc(("\n"));
 
     int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
@@ -5738,4 +5715,5 @@
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));
 
     int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
@@ -5854,5 +5832,6 @@
 
     VMMRZCallRing3Disable(pVCpu);
-    Log(("hmR0VmxLongJmpToRing3\n"));
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));
+    Log(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3\n"));
     hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
     VMMRZCallRing3Enable(pVCpu);
@@ -6593,4 +6572,7 @@
     Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
     Log4(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
+#ifdef VBOX_ALWAYS_SYNC_FULL_VTX_STATE
+    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
+#endif
     int rc = VINF_SUCCESS;
     if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
@@ -6717,4 +6699,8 @@
     /* Update the guest interruptibility-state from the VMCS. */
    hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
+#if defined(VBOX_ALWAYS_SYNC_FULL_VTX_STATE) || defined(VBOX_ALWAYS_SAVE_FULL_VTX_STATE)
+    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
+    AssertRC(rc);
+#endif
     /*
      * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever …
@@ -7661,4 +7646,5 @@
     rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
+    Log(("ecx=%#RX32\n", pMixedCtx->ecx));
 
     rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
@@ -7678,5 +7664,9 @@
     }
     else if (pMixedCtx->ecx == MSR_K6_EFER)    /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
+    {
+        rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
+        AssertRCReturn(rc, rc);
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
+    }
 
     /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
@@ -7688,5 +7678,5 @@
             case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
             case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
-            case MSR_K8_FS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_FS_BASE_MSR;break;
-            case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_GS_BASE_MSR;break;
+            case MSR_K8_FS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;  break;
+            case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;  break;
             case MSR_K8_KERNEL_GS_BASE: /* If we auto-load it, update HM_CHANGED_VMX_GUEST_AUTO_MSRS. */ break;
@@ -7921,4 +7911,5 @@
     /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
     AssertRCReturn(rc, rc);
+    Log(("CS:RIP=%04x:%#RGv\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
 
     /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
@@ -8141,8 +8132,7 @@
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
 
     /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
-    rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
+    int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
     if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
         return VINF_SUCCESS;
@@ -8160,3 +8150,4 @@
     rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
 #endif
+    rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     AssertRCReturn(rc, rc);
@@ -8172,4 +8163,4 @@
         && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
     {
-        AssertMsgFailed(("hmR0VmxExitApicAccess: can't touch TPR offset while using TPR shadowing.\n"));
+        AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
     }
@@ -8179,10 +8170,12 @@
             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
             PVM pVM = pVCpu->CTX_SUFF(pVM);
+            Log(("ApicAccess uAccessType=%#x GCPhys=%RGp Off=%#x\n", uAccessType, GCPhys,
+                 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
+
             VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
                                                   (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
                                                   CPUMCTX2CORE(pMixedCtx), GCPhys);
             rc = VBOXSTRICTRC_VAL(rc2);
-            Log(("ApicAccess %RGp %#x rc=%d\n", GCPhys,
-                 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification), rc));
+            Log(("ApicAccess rc=%d\n", rc));
             if (   rc == VINF_SUCCESS
                 || rc == VERR_PAGE_TABLE_NOT_PRESENT
@@ -8197,4 +8190,5 @@
 
         default:
+            Log(("ApicAccess uAccessType=%#x\n", uAccessType));
            rc = VINF_EM_RAW_EMULATE_INSTR;
            break;
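The functional core of this changeset is visible in the hunks above: the dedicated HM_CHANGED_GUEST_FS_BASE_MSR / HM_CHANGED_GUEST_GS_BASE_MSR dirty flags are dropped, a guest WRMSR to MSR_K8_FS_BASE or MSR_K8_GS_BASE now marks HM_CHANGED_GUEST_SEGMENT_REGS instead, and the segment-register load path becomes the single place that writes the bases into the VMCS, which is what makes the removed block in the MSR loader redundant. Below is a minimal, self-contained sketch of that dirty-flag pattern; the type and function names (SketchCpu, onGuestWrmsrBase, loadGuestSegmentRegsIfDirty) and the flag value are hypothetical stand-ins for illustration, not VirtualBox APIs:

#include <stdint.h>

/* Stand-in dirty-flag bit, mirroring the HM_CHANGED_* scheme
   (hypothetical value; see HMInternal.h for the real flags). */
#define CHANGED_SEGMENT_REGS  (UINT32_C(1) << 10)

typedef struct SketchCpu
{
    uint64_t u64FsBase;      /* guest FS.base, updated by the WRMSR interpreter */
    uint64_t u64GsBase;      /* guest GS.base */
    uint32_t fChangedFlags;  /* dirty flags checked before every VM-entry */
} SketchCpu;

/* After this changeset, a WRMSR to either base MSR just re-marks the whole
   segment-register block dirty; no per-MSR flag, no separate VMCS write path. */
static void onGuestWrmsrBase(SketchCpu *pCpu, int fIsGs, uint64_t u64Value)
{
    if (fIsGs)
        pCpu->u64GsBase = u64Value;
    else
        pCpu->u64FsBase = u64Value;
    pCpu->fChangedFlags |= CHANGED_SEGMENT_REGS;
}

/* The segment-register loader is now the single place that pushes the bases
   into the VMCS (the removed hmR0VmxLoadGuestMsrs block duplicated this). */
static void loadGuestSegmentRegsIfDirty(SketchCpu *pCpu)
{
    if (pCpu->fChangedFlags & CHANGED_SEGMENT_REGS)
    {
        /* ... write selectors, limits, attributes and both bases here ... */
        pCpu->fChangedFlags &= ~CHANGED_SEGMENT_REGS;
    }
}

The design point is that one flag per coherent VMCS block keeps the pre-entry loader simple: any write that can affect the block, whether a segment load or a WRMSR, funnels into the same lazy sync.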
trunk/src/VBox/VMM/include/HMInternal.h (r45531 → r45585)

@@ -111,14 +111,12 @@
 # define HM_CHANGED_GUEST_SEGMENT_REGS           RT_BIT(10)
 # define HM_CHANGED_GUEST_DEBUG                  RT_BIT(11)
-# define HM_CHANGED_GUEST_FS_BASE_MSR            RT_BIT(12)
-# define HM_CHANGED_GUEST_GS_BASE_MSR            RT_BIT(13)
-# define HM_CHANGED_GUEST_SYSENTER_CS_MSR        RT_BIT(14)
-# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR       RT_BIT(15)
-# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR       RT_BIT(16)
-# define HM_CHANGED_VMX_GUEST_AUTO_MSRS          RT_BIT(17)
-# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE     RT_BIT(18)
-# define HM_CHANGED_VMX_GUEST_APIC_STATE         RT_BIT(19)
-# define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(20)
-# define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(21)
-
-# define HM_CHANGED_HOST_CONTEXT                 RT_BIT(22)
+# define HM_CHANGED_GUEST_SYSENTER_CS_MSR        RT_BIT(12)
+# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR       RT_BIT(13)
+# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR       RT_BIT(14)
+# define HM_CHANGED_VMX_GUEST_AUTO_MSRS          RT_BIT(15)
+# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE     RT_BIT(16)
+# define HM_CHANGED_VMX_GUEST_APIC_STATE         RT_BIT(17)
+# define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(18)
+# define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(19)
+
+# define HM_CHANGED_HOST_CONTEXT                 RT_BIT(20)
@@ -136,6 +134,4 @@ # define HM_CHANGED_ALL_GUEST (  HM_CHANGED_GUEST_RIP \
                                                   | HM_CHANGED_GUEST_SEGMENT_REGS \
                                                   | HM_CHANGED_GUEST_DEBUG \
-                                                  | HM_CHANGED_GUEST_FS_BASE_MSR \
-                                                  | HM_CHANGED_GUEST_GS_BASE_MSR \
                                                   | HM_CHANGED_GUEST_SYSENTER_CS_MSR \
                                                   | HM_CHANGED_GUEST_SYSENTER_EIP_MSR \
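With the two flags removed, every later HM_CHANGED_* bit shifts down by two and HM_CHANGED_ALL_GUEST shrinks accordingly, so any ring-0 code still using stale literal bit values would silently target the wrong state. As an illustration (not part of the changeset), compile-time checks in the style of IPRT's AssertCompile could pin the new layout:

#include <iprt/assert.h>   /* AssertCompile, RT_BIT */
#include "HMInternal.h"    /* HM_CHANGED_* flags (ring-0 internal header) */

/* Illustrative only: fail the build if the renumbered bits ever drift. */
AssertCompile(HM_CHANGED_GUEST_SYSENTER_CS_MSR == RT_BIT(12));
AssertCompile(HM_CHANGED_VMX_EXIT_CTLS         == RT_BIT(19));
AssertCompile(HM_CHANGED_HOST_CONTEXT          == RT_BIT(20));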