Changeset 72522 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jun 12, 2018 8:45:27 AM (7 years ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 5 edited
Legend:
- Unmodified lines are shown with a leading space
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r72358 → r72522

 }
 
-#ifdef IN_RING0
 
 /**
…
  * @thread EMT(pVCpu)
  */
-VMMR0_INT_DECL(uint64_t) CPUMR0GetGuestTscAux(PVMCPU pVCpu)
-{
+VMM_INT_DECL(uint64_t) CPUMGetGuestTscAux(PVMCPU pVCpu)
+{
+    Assert(!(pVCpu->cpum.s.Guest.fExtrn & CPUMCTX_EXTRN_TSC_AUX));
     return pVCpu->cpum.s.GuestMsrs.msr.TscAux;
 }
…
  * @thread EMT(pVCpu)
  */
-VMMR0_INT_DECL(void) CPUMR0SetGuestTscAux(PVMCPU pVCpu, uint64_t uValue)
-{
+VMM_INT_DECL(void) CPUMSetGuestTscAux(PVMCPU pVCpu, uint64_t uValue)
+{
+    pVCpu->cpum.s.Guest.fExtrn &= ~CPUMCTX_EXTRN_TSC_AUX;
     pVCpu->cpum.s.GuestMsrs.msr.TscAux = uValue;
 }
…
  * @thread EMT(pVCpu)
  */
-VMMR0_INT_DECL(uint64_t) CPUMR0GetGuestSpecCtrl(PVMCPU pVCpu)
+VMM_INT_DECL(uint64_t) CPUMGetGuestSpecCtrl(PVMCPU pVCpu)
 {
     return pVCpu->cpum.s.GuestMsrs.msr.SpecCtrl;
…
  * @thread EMT(pVCpu)
  */
-VMMR0_INT_DECL(void) CPUMR0SetGuestSpecCtrl(PVMCPU pVCpu, uint64_t uValue)
+VMM_INT_DECL(void) CPUMSetGuestSpecCtrl(PVMCPU pVCpu, uint64_t uValue)
 {
     pVCpu->cpum.s.GuestMsrs.msr.SpecCtrl = uValue;
 }
 
-#endif /* IN_RING0 */
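The accessors above drop the ring-0-only declaration (VMMR0_INT_DECL becomes VMM_INT_DECL), and the TSC_AUX pair is now tied to the CPUMCTX_EXTRN_TSC_AUX "externalized" flag: the setter clears it and the getter asserts it is clear. The following is a minimal, hypothetical caller sketch, assuming only the VirtualBox VMM headers; exampleSyncTscAux is not a VirtualBox function.

    /* Hypothetical illustration only: exampleSyncTscAux is a made-up name. */
    #include <VBox/vmm/cpum.h>

    static void exampleSyncTscAux(PVMCPU pVCpu, uint64_t uHostTscAux)
    {
        /* The setter clears CPUMCTX_EXTRN_TSC_AUX, marking the value as present... */
        CPUMSetGuestTscAux(pVCpu, uHostTscAux);
        /* ...so the getter's new Assert on fExtrn is satisfied for this read. */
        uint64_t uGuestTscAux = CPUMGetGuestTscAux(pVCpu);
        RT_NOREF(uGuestTscAux);
    }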
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
r72518 → r72522

 }
 
+
 /** Opcode 0xf3 0x0f 0xae 11b/1. */
 FNIEMOP_DEF_1(iemOp_Grp15_rdgsbase, uint8_t, bRm)
…
     return VINF_SUCCESS;
 }
+
 
 /** Opcode 0xf3 0x0f 0xae 11b/2. */
…
     return VINF_SUCCESS;
 }
+
 
 /** Opcode 0xf3 0x0f 0xae 11b/3. */
trunk/src/VBox/VMM/VMMAll/NEMAll.cpp
r72484 → r72522

 #endif
 
+#ifndef VBOX_WITH_NATIVE_NEM
+VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPU pVCpu, uint64_t *pcTicks, uint32_t *puAux)
+{
+    RT_NOREF(pVCpu, pcTicks, puAux);
+    AssertFailed();
+    return VERR_NEM_IPE_9;
+}
+#endif
+
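This stub lets common code call NEMHCQueryCpuTick() unconditionally; on builds without a native NEM backend it asserts and returns VERR_NEM_IPE_9, so callers have to fall back to another TSC source. A minimal, hypothetical caller sketch, assuming only the VMM headers; exampleReadGuestTicks is a made-up name and the fallback simply mirrors the TMAllCpu.cpp hunk further down.

    /* Hypothetical helper, not part of the changeset. */
    #include <VBox/vmm/nem.h>
    #include <VBox/sup.h>
    #include <VBox/err.h>

    static uint64_t exampleReadGuestTicks(PVMCPU pVCpu)
    {
        uint64_t cTicks = 0;
        int rc = NEMHCQueryCpuTick(pVCpu, &cTicks, NULL /* puAux */);
        if (RT_FAILURE(rc))
            cTicks = SUPReadTsc(); /* fall back to the host TSC when NEM cannot answer */
        return cTicks;
    }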
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r72490 → r72522

     return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
 #endif
+}
+
+
+/**
+ * Query the CPU tick counter and optionally the TSC_AUX MSR value.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context CPU structure.
+ * @param   pcTicks     Where to return the CPU tick count.
+ * @param   puAux       Where to return the TSC_AUX register value.
+ */
+VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPU pVCpu, uint64_t *pcTicks, uint32_t *puAux)
+{
+#ifdef IN_RING3
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
+    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
+
+# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
+    /* Call ring-0 and get the values. */
+    int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
+    AssertLogRelRCReturn(rc, rc);
+    *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
+    if (puAux)
+        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
+               ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
+    return VINF_SUCCESS;
+
+# else
+    /* Call the offical API. */
+    WHV_REGISTER_NAME  aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
+    WHV_REGISTER_VALUE aValues[2]   = { {0, 0}, {0, 0} };
+    Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
+    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
+    AssertLogRelMsgReturn(SUCCEEDED(hrc),
+                          ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
+                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
+                          , VERR_NEM_GET_REGISTERS_FAILED);
+    *pcTicks = aValues[0].Reg64;
+    if (puAux)
+        *pcTicks = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[0].Reg64 : CPUMGetGuestTscAux(pVCpu);
+    return VINF_SUCCESS;
+# endif
+#else /* IN_RING0 */
+    /** @todo improve and secure this translation */
+    PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
+    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
+    VMCPUID idCpu = pVCpu->idCpu;
+    ASMCompilerBarrier();
+    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
+
+    int rc = nemR0WinQueryCpuTick(pGVM, &pGVM->aCpus[idCpu], pcTicks, puAux);
+    if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
+        *puAux = CPUMGetGuestTscAux(pVCpu);
+    return rc;
+#endif /* IN_RING0 */
 }
 
…
 }
 
+
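The ring-3, non-hypercall path above goes through the Windows Hypervisor Platform API. Below is a standalone sketch of just that call pattern, assuming only the Windows 10 SDK headers and an already created partition handle; queryTscPair and its reduced error handling are illustrative, not VirtualBox code. The register values come back in the same order as the names array, so index 1 holds the TSC_AUX value.

    /* Illustrative only: queryTscPair is a made-up helper. */
    #include <Windows.h>
    #include <WinHvPlatform.h>
    #include <string.h>

    static HRESULT queryTscPair(WHV_PARTITION_HANDLE hPartition, UINT32 idVp,
                                UINT64 *pcTicks, UINT64 *puTscAux)
    {
        const WHV_REGISTER_NAME aNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
        WHV_REGISTER_VALUE      aValues[2];
        memset(aValues, 0, sizeof(aValues));

        HRESULT hrc = WHvGetVirtualProcessorRegisters(hPartition, idVp, aNames, 2, aValues);
        if (SUCCEEDED(hrc))
        {
            *pcTicks  = aValues[0].Reg64; /* WHvX64RegisterTsc */
            *puTscAux = aValues[1].Reg64; /* WHvX64RegisterTscAux */
        }
        return hrc;
    }

Building this requires the Windows 10 SDK (1803 or later) and linking against WinHvPlatform.lib.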
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r69111 → r72522

 #define LOG_GROUP LOG_GROUP_TM
 #include <VBox/vmm/tm.h>
+#include <VBox/vmm/gim.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/nem.h>
 #include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
 #include "TMInternal.h"
 #include <VBox/vmm/vm.h>
-#include <VBox/vmm/gim.h>
-#include <VBox/vmm/dbgf.h>
 #include <VBox/sup.h>
 
…
     /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
      * unpaused before the virtual time and stopped after it. */
-    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-        pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
-    else
-        pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
-                                 - pVCpu->tm.s.u64TSC;
+    switch (pVM->tm.s.enmTSCMode)
+    {
+        case TMTSCMODE_REAL_TSC_OFFSET:
+            pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
+            break;
+        case TMTSCMODE_VIRT_TSC_EMULATED:
+        case TMTSCMODE_DYNAMIC:
+            pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
+                                     - pVCpu->tm.s.u64TSC;
+            break;
+        case TMTSCMODE_NATIVE_API:
+            pVCpu->tm.s.offTSCRawSrc = 0; /** @todo ?? */
+            break;
+        default:
+            AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+    }
     return VINF_SUCCESS;
 }
…
 
     /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
-    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-        pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
-    else
-        pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
-                                 - pVM->tm.s.u64LastPausedTSC;
+    switch (pVM->tm.s.enmTSCMode)
+    {
+        case TMTSCMODE_REAL_TSC_OFFSET:
+            pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
+            break;
+        case TMTSCMODE_VIRT_TSC_EMULATED:
+        case TMTSCMODE_DYNAMIC:
+            pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
+                                     - pVM->tm.s.u64LastPausedTSC;
+            break;
+        case TMTSCMODE_NATIVE_API:
+            pVCpu->tm.s.offTSCRawSrc = 0; /** @todo ?? */
+            break;
+        default:
+            AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
+    }
 
     /* Calculate the offset for other VCPUs to use. */
…
     {
         PVM pVM = pVCpu->CTX_SUFF(pVM);
-        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-            u64 = SUPReadTsc();
-        else
-            u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
+        switch (pVM->tm.s.enmTSCMode)
+        {
+            case TMTSCMODE_REAL_TSC_OFFSET:
+                u64 = SUPReadTsc();
+                break;
+            case TMTSCMODE_VIRT_TSC_EMULATED:
+            case TMTSCMODE_DYNAMIC:
+                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
+                break;
+#ifndef IN_RC
+            case TMTSCMODE_NATIVE_API:
+            {
+                u64 = 0;
+                int rcNem = NEMHCQueryCpuTick(pVCpu, &u64, NULL);
+                AssertLogRelRCReturn(rcNem, SUPReadTsc());
+                break;
+            }
+#endif
+            default:
+                AssertFailedBreakStmt(u64 = SUPReadTsc());
+        }
         u64 -= pVCpu->tm.s.offTSCRawSrc;
 
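All of the switch statements above feed the same offset bookkeeping: offTSCRawSrc is the difference between the chosen raw tick source and the guest TSC, a read returns raw source minus offset, and resume re-anchors the offset at the last paused guest value so the guest TSC never goes backwards. A self-contained sketch of that arithmetic, using made-up names and a plain counter instead of SUPReadTsc()/NEMHCQueryCpuTick():

    /* Illustrative only: DEMOVCPU, demoTickGet and demoTickResume are made-up names. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct DEMOVCPU
    {
        uint64_t offTSCRawSrc; /* raw source minus guest TSC, fixed while the VM runs */
        uint64_t u64PausedTSC; /* guest TSC captured when the VM was paused */
    } DEMOVCPU;

    static uint64_t demoTickGet(const DEMOVCPU *pVCpu, uint64_t u64RawSrc)
    {
        return u64RawSrc - pVCpu->offTSCRawSrc; /* guest TSC = raw source - offset */
    }

    static void demoTickResume(DEMOVCPU *pVCpu, uint64_t u64RawSrc)
    {
        /* Re-anchor so the guest TSC continues from the value it had when paused. */
        pVCpu->offTSCRawSrc = u64RawSrc - pVCpu->u64PausedTSC;
    }

    int main(void)
    {
        DEMOVCPU VCpu = { 0, 1000 };   /* guest TSC was 1000 at pause time */
        demoTickResume(&VCpu, 500000); /* raw source meanwhile reached 500000 */
        printf("%llu\n", (unsigned long long)demoTickGet(&VCpu, 500010)); /* prints 1010 */
        return 0;
    }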