Changeset 46566 in vbox
- Timestamp:
- Jun 14, 2013 4:07:45 PM (12 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r46562 r46566 2651 2651 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold); 2652 2652 AssertRCReturn(rc, rc); 2653 2654 /* 32-bit guests use the LSTAR MSR for patching guest code which touches the TPR. */2655 if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)2656 {2657 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* EFER always up-to-date. */2658 pMixedCtx->msrLSTAR = u8Tpr;2659 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)2660 {2661 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */2662 if (fPendingIntr)2663 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);2664 else2665 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);2666 }2667 }2668 2653 } 2669 2654 … … 6857 6842 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu); 6858 6843 6859 /*6860 * TPR patching (only active for 32-bit guests on 64-bit capable CPUs) when the CPU does not support virtualizing6861 * the APIC-accesses feature (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).6862 */6863 if (pVM->hm.s.fTPRPatchingActive)6864 {6865 Assert(!CPUMIsGuestInLongMode(pVCpu));6866 6867 /* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */6868 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);6869 AssertRC(rc);6870 6871 /* The patch code uses the LSTAR as it's not used by a guest in 32-bit mode implicitly (i.e. SYSCALL is 64-bit only). */6872 pVmxTransient->u64LStarMsr = ASMRdMsr(MSR_K8_LSTAR);6873 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR); /* pMixedCtx->msrLSTAR contains the guest's TPR,6874 see hmR0VmxLoadGuestApicState(). 
*/6875 }6876 6877 6844 #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 6878 6845 /* … … 6939 6906 Assert(!(ASMGetFlags() & X86_EFL_IF)); 6940 6907 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM); 6941 6942 /* Restore the effects of TPR patching if any. */6943 if (pVM->hm.s.fTPRPatchingActive)6944 {6945 int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);6946 AssertRC(rc);6947 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); /* MSR_K8_LSTAR contains the guest TPR. */6948 ASMWrMsr(MSR_K8_LSTAR, pVmxTransient->u64LStarMsr);6949 }6950 6908 6951 6909 ASMSetFlags(pVmxTransient->uEFlags); /* Enable interrupts. */ … … 7920 7878 PVM pVM = pVCpu->CTX_SUFF(pVM); 7921 7879 int rc = VINF_SUCCESS; 7922 7923 /* If TPR patching is active, LSTAR holds the guest TPR, writes to it must be propagated to the APIC. */7924 if ( pVM->hm.s.fTPRPatchingActive7925 && pMixedCtx->ecx == MSR_K8_LSTAR)7926 {7927 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* Requires EFER but it's always up-to-date. */7928 if ((pMixedCtx->eax & 0xff) != pVmxTransient->u8GuestTpr)7929 {7930 rc = PDMApicSetTPR(pVCpu, pMixedCtx->eax & 0xff);7931 AssertRC(rc);7932 }7933 7934 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);7935 Assert(pVmxTransient->cbInstr == 2);7936 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);7937 return VINF_SUCCESS;7938 }7939 7880 7940 7881 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */ … … 9152 9093 #endif 9153 9094 9154 #ifdef VBOX_HM_WITH_GUEST_PATCHING9155 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);9156 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);9157 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);9158 AssertRCReturn(rc, rc);9159 /* Shortcut for APIC TPR access, only for 32-bit guests. 
*/9160 if ( pVM->hm.s.fTRPPatchingAllowed9161 && pVM->hm.s.pGuestPatchMem9162 && (pVmxTransient->uExitQualification & 0xfff) == 0x80 /* TPR offset */9163 && !(pVmxTransient->uExitIntrErrorCode & X86_TRAP_PF_P) /* Page not present */9164 && CPUMGetGuestCPL(pVCpu) == 0 /* Requires CR0, EFLAGS, segments. */9165 && !CPUMIsGuestInLongModeEx(pMixedCtx) /* Requires EFER. */9166 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))9167 {9168 RTGCPHYS GCPhys;9169 RTGCPHYS GCPhysApicBase = (pMixedCtx->msrApicBase & PAGE_BASE_GC_MASK);9170 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pVmxTransient->uExitQualification, NULL /* pfFlags */, &GCPhys);9171 if ( rc == VINF_SUCCESS9172 && GCPhys == GCPhysApicBase)9173 {9174 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);9175 AssertRCReturn(rc, rc);9176 9177 /* Only attempt to patch the instruction once. */9178 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pMixedCtx->eip);9179 if (!pPatch)9180 return VINF_EM_HM_PATCH_TPR_INSTR;9181 }9182 }9183 #endif9184 9185 9095 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 9186 9096 AssertRCReturn(rc, rc);
Note:
See TracChangeset
for help on using the changeset viewer.