Changeset 72590 in vbox
- Timestamp: Jun 17, 2018 7:26:27 PM (6 years ago)
- Location: trunk
- Files: 7 edited
Legend:
- Unmodified
- Added
- Removed
trunk/include/VBox/vmm/em.h
r72569 r72590 313 313 #endif 314 314 VMM_INT_DECL(int) EMInterpretCpuId(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame); 315 #if 1 /** @todo Remove after testing and enabling @bugref{6973}. */ 315 316 VMM_INT_DECL(int) EMInterpretRdtsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame); 317 VMM_INT_DECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 318 #endif 316 319 VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame); 317 VMM_INT_DECL(int) EMInterpretRdtscp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);318 320 VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pAddrGC); 319 321 VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame); -
trunk/include/VBox/vmm/iem.h
r72569 r72590 252 252 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage); 253 253 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc); 254 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr); 255 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr); 256 254 257 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 255 258 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr); -
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
r72582 r72590 1866 1866 1867 1867 1868 #if 1 /** @todo Remove after testing and enabling @bugref{6973}. */ 1869 1868 1870 /** 1869 1871 * Interpret RDTSC. … … 1939 1941 return VINF_SUCCESS; 1940 1942 } 1943 1944 #endif /* Trying to use IEM APIs instead. */ 1941 1945 1942 1946 /** -
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r72586 r72590 14944 14944 14945 14945 /** 14946 * Interface for HM and EM to emulate the RDTSC instruction. 14947 * 14948 * @returns Strict VBox status code. 14949 * @retval VINF_EM_RESCHEDULE (VINF_IEM_RAISED_XCPT) if exception is raised. 14950 * 14951 * @param pVCpu The cross context virtual CPU structure. 14952 * @param cbInstr The instruction length in bytes. 14953 * 14954 * @remarks Not all of the state needs to be synced in. 14955 */ 14956 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr) 14957 { 14958 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2); 14959 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS); 14960 14961 iemInitExec(pVCpu, false /*fBypassHandlers*/); 14962 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc); 14963 Assert(!pVCpu->iem.s.cActiveMappings); 14964 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 14965 } 14966 14967 14968 /** 14969 * Interface for HM and EM to emulate the RDTSCP instruction. 14970 * 14971 * @returns Strict VBox status code. 14972 * @retval VINF_EM_RESCHEDULE (VINF_IEM_RAISED_XCPT) if exception is raised. 14973 * 14974 * @param pVCpu The cross context virtual CPU structure. 14975 * @param cbInstr The instruction length in bytes. 14976 * 14977 * @remarks Not all of the state needs to be synced in. Recommended 14978 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid extra fetch call. 14979 */ 14980 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr) 14981 { 14982 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2); 14983 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS); 14984 14985 iemInitExec(pVCpu, false /*fBypassHandlers*/); 14986 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp); 14987 Assert(!pVCpu->iem.s.cActiveMappings); 14988 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 14989 } 14990 14991 14992 /** 14946 14993 * Checks if IEM is in the process of delivering an event (interrupt or 14947 14994 * exception). -
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r72569 r72590 6094 6094 return iemRaiseUndefinedOpcode(pVCpu); 6095 6095 6096 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 6097 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD) 6098 && pVCpu->iem.s.uCpl != 0) 6099 { 6100 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl)); 6101 return iemRaiseGeneralProtectionFault0(pVCpu); 6096 if (pVCpu->iem.s.uCpl != 0) 6097 { 6098 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 6099 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD) 6100 { 6101 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl)); 6102 return iemRaiseGeneralProtectionFault0(pVCpu); 6103 } 6102 6104 } 6103 6105 … … 6118 6120 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks); 6119 6121 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks); 6122 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); /* For IEMExecDecodedRdtsc. */ 6120 6123 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6121 6124 return VINF_SUCCESS; … … 6134 6137 return iemRaiseUndefinedOpcode(pVCpu); 6135 6138 6136 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 6137 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD) 6138 && pVCpu->iem.s.uCpl != 0) 6139 { 6140 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl)); 6141 return iemRaiseGeneralProtectionFault0(pVCpu); 6139 if (pVCpu->iem.s.uCpl != 0) 6140 { 6141 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 6142 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD) 6143 { 6144 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", pVCpu->iem.s.uCpl)); 6145 return iemRaiseGeneralProtectionFault0(pVCpu); 6146 } 6142 6147 } 6143 6148 … … 6166 6171 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks); 6167 6172 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks); 6173 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX); /* For IEMExecDecodedRdtscp. */ 6168 6174 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6169 6175 } -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r72560 r72590 6124 6124 6125 6125 6126 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM6127 6126 /** 6128 6127 * Gets the length of the current instruction if the CPU supports the NRIP_SAVE … … 6146 6145 return cbLikely; 6147 6146 } 6148 #endif6149 6147 6150 6148 … … 6257 6255 { 6258 6256 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6259 PVM pVM = pVCpu->CTX_SUFF(pVM); 6260 int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 6261 if (RT_LIKELY(rc == VINF_SUCCESS)) 6262 { 6257 #if 0 /** @todo Needs testing. @bugref{6973} */ 6258 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 2)); 6259 if (rcStrict == VINF_SUCCESS) 6263 6260 pSvmTransient->fUpdateTscOffsetting = true; 6264 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2); 6261 else if (rcStrict == VINF_EM_RESCHEDULE) 6262 rcStrict = VINF_SUCCESS; 6263 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict); 6264 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc); 6265 return VBOXSTRICTRC_TODO(rcStrict); 6266 #else 6267 int rc = EMInterpretRdtsc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 6268 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 6269 { 6270 pSvmTransient->fUpdateTscOffsetting = true; 6265 6271 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2); 6266 6272 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6267 6273 } … … 6272 6278 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc); 6273 6279 return rc; 6280 #endif 6274 6281 } 6275 6282 … … 6281 6288 { 6282 6289 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6290 #if 0 /** @todo Needs testing. @bugref{6973} */ 6291 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 2)); 6292 if (rcStrict == VINF_SUCCESS) 6293 pSvmTransient->fUpdateTscOffsetting = true; 6294 else if (rcStrict == VINF_EM_RESCHEDULE) 6295 rcStrict = VINF_SUCCESS; 6296 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict); 6297 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp); 6298 return VBOXSTRICTRC_TODO(rcStrict); 6299 #else 6283 6300 int rc = EMInterpretRdtscp(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx); 6284 6301 if (RT_LIKELY(rc == VINF_SUCCESS)) … … 6295 6312 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp); 6296 6313 return rc; 6314 #endif 6297 6315 } 6298 6316 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72561 r72590 7100 7100 if (fNeedRsp) 7101 7101 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx); 7102 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 7102 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /** @todo Only CS and SS are required here. */ 7103 7103 if (!fMemory) 7104 7104 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); … … 11829 11829 { 11830 11830 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11831 #if 0 /** @todo Needs testing. @bugref{6973} */ 11832 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /* Needed for CPL < 0 only, really. */ 11833 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/); 11834 AssertRCReturn(rc, rc); 11835 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbInstr); 11836 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 11837 { 11838 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */ 11839 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING) 11840 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true; 11841 } 11842 else if (rcStrict == VINF_EM_RESCHEDULE) 11843 rcStrict = VINF_SUCCESS; 11844 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc); 11845 return rcStrict; 11846 #else 11831 11847 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); 11832 11848 AssertRCReturn(rc, rc); … … 11846 11862 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc); 11847 11863 return rc; 11864 #endif 11848 11865 } 11849 11866 … … 11855 11872 { 11856 11873 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 11874 #if 0 /** @todo Needs testing. @bugref{6973} */ 11875 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /* Needed for CPL < 0 only, really. */ 11876 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/); 11877 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */ 11878 AssertRCReturn(rc, rc); 11879 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr); 11880 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 11881 { 11882 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */ 11883 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING) 11884 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true; 11885 } 11886 else if (rcStrict == VINF_EM_RESCHEDULE) 11887 rcStrict = VINF_SUCCESS; 11888 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp); 11889 return rcStrict; 11890 #else 11857 11891 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); 11858 11892 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */ … … 11874 11908 rc = VERR_EM_INTERPRETER; 11875 11909 } 11876 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc );11910 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp); 11877 11911 return rc; 11912 #endif 11878 11913
Note: See TracChangeset for help on using the changeset viewer.