- Timestamp: Jun 13, 2018 3:45:39 PM (7 years ago)
- Location: trunk
- Files: 9 edited
Legend:
- Unmodified lines are shown with a leading space
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/include/VBox/err.h (r72490 → r72546)

 /** Get register caller must update the APIC base. */
 #define VINF_NEM_UPDATE_APIC_BASE               (6811)
+/** NEM failed to set TSC. */
+#define VERR_NEM_SET_TSC                        (-6812)
 
 /** NEM internal processing error \#0. */

trunk/include/VBox/vmm/nem.h (r72541 → r72546)

 VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t fWhat);
 VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVM pVM, VMCPUID idCpu);
+VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t uPausedTscValue);
 VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu);
 VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVM pVM, VMCPUID idCpu);
…
 
 VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPU pVCpu, uint64_t *pcTicks, uint32_t *puAux);
+VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVM pVM, PVMCPU pVCpu, uint64_t uPausedTscValue);
+
 /** @} */
 

trunk/include/VBox/vmm/vmm.h (r72541 → r72546)

     /** Call NEMR0QueryCpuTick() (host specific). */
     VMMR0_DO_NEM_QUERY_CPU_TICK,
+    /** Call NEMR0ResumeCpuTickOnAll() (host specific). */
+    VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL,
     /** Call NEMR0UpdateStatistics() (host specific). */
     VMMR0_DO_NEM_UPDATE_STATISTICS,

trunk/src/VBox/VMM/VMMAll/NEMAll.cpp (r72522 → r72546)

 #endif
 
+
 #ifndef VBOX_WITH_NATIVE_NEM
 VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPU pVCpu, uint64_t *pcTicks, uint32_t *puAux)
…
 #endif
 
+
+#ifndef VBOX_WITH_NATIVE_NEM
+VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVM pVM, PVMCPU pVCpu, uint64_t uPausedTscValue)
+{
+    RT_NOREF(pVM, pVCpu, uPausedTscValue);
+    AssertFailed();
+    return VERR_NEM_IPE_9;
+}
+#endif
+

trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h (r72542 → r72546)

     return rc;
 #endif /* IN_RING0 */
+}
+
+
+/**
+ * Resumes CPU clock (TSC) on all virtual CPUs.
+ *
+ * This is called by TM when the VM is started, restored, resumed or similar.
+ *
+ * @returns VBox status code.
+ * @param   pVM             The cross context VM structure.
+ * @param   pVCpu           The cross context CPU structure of the calling EMT.
+ * @param   uPausedTscValue The TSC value at the time of pausing.
+ */
+VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVM pVM, PVMCPU pVCpu, uint64_t uPausedTscValue)
+{
+#ifdef IN_RING0
+    /** @todo improve and secure this translation */
+    PGVM pGVM = GVMMR0ByHandle(pVM->hSelf);
+    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
+    VMCPUID idCpu = pVCpu->idCpu;
+    ASMCompilerBarrier();
+    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);
+
+    return nemR0WinResumeCpuTickOnAll(pGVM, &pGVM->aCpus[idCpu], uPausedTscValue);
+#else /* IN_RING3 */
+    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
+    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
+
+# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
+    /* Call ring-0 and do it all there. */
+    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL, uPausedTscValue, NULL);
+
+# else
+    /*
+     * Call the official API to do the job.
+     */
+    if (pVM->cCpus > 1)
+        RTThreadYield(); /* Try to decrease the chance that we get rescheduled in the middle. */
+
+    /* Start with the first CPU. */
+    WHV_REGISTER_NAME  enmName = WHvX64RegisterTsc;
+    WHV_REGISTER_VALUE Value   = {0, 0};
+    Value.Reg64 = uPausedTscValue;
+    uint64_t const uFirstTsc = ASMReadTSC();
+    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, 0 /*iCpu*/, &enmName, 1, &Value);
+    AssertLogRelMsgReturn(SUCCEEDED(hrc),
+                          ("WHvSetVirtualProcessorRegisters(%p, 0,{tsc},2,%#RX64) -> %Rhrc (Last=%#x/%u)\n",
+                           pVM->nem.s.hPartition, uPausedTscValue, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
+                          , VERR_NEM_SET_TSC);
+
+    /* Do the other CPUs, adjusting for elapsed TSC and keeping fingers crossed
+       that we don't introduce too much drift here. */
+    for (VMCPUID iCpu = 1; iCpu < pVM->cCpus; iCpu++)
+    {
+        Assert(enmName == WHvX64RegisterTsc);
+        const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
+        Value.Reg64 = uPausedTscValue + offDelta;
+        hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, iCpu, &enmName, 1, &Value);
+        AssertLogRelMsgReturn(SUCCEEDED(hrc),
+                              ("WHvSetVirtualProcessorRegisters(%p, %u,{tsc},2,%#RX64 + %#RX64) -> %Rhrc (Last=%#x/%u)\n",
+                               pVM->nem.s.hPartition, iCpu, uPausedTscValue, offDelta, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
+                              , VERR_NEM_SET_TSC);
+    }
+
+    return VINF_SUCCESS;
+# endif
+#endif /* IN_RING3 */
 }

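For readers who just want the arithmetic of the ring-3 branch above, here is a minimal, self-contained user-mode sketch (not VirtualBox code): SetVCpuTsc() is a hypothetical stand-in for WHvSetVirtualProcessorRegisters, and the compiler intrinsic __rdtsc() plays the role of ASMReadTSC().

    /* User-mode sketch of the elapsed-TSC compensation: the first vCPU gets the
     * paused value verbatim, every later one gets it bumped by the host ticks
     * that have passed since the first write was prepared. */
    #include <stdint.h>
    #include <stdio.h>
    #if defined(_MSC_VER)
    # include <intrin.h>
    #else
    # include <x86intrin.h>
    #endif

    #define NUM_VCPUS 4

    static uint64_t g_aGuestTsc[NUM_VCPUS];

    static void SetVCpuTsc(unsigned iCpu, uint64_t uValue) /* hypothetical stand-in */
    {
        g_aGuestTsc[iCpu] = uValue;
    }

    int main(void)
    {
        uint64_t const uPausedTscValue = UINT64_C(0x0123456789ab);

        /* Snapshot the host TSC right before the first write... */
        uint64_t const uFirstTsc = __rdtsc();
        SetVCpuTsc(0, uPausedTscValue);

        /* ...then compensate every later write for the time that has elapsed,
           so the remaining skew is only the latency of each individual call. */
        for (unsigned iCpu = 1; iCpu < NUM_VCPUS; iCpu++)
            SetVCpuTsc(iCpu, uPausedTscValue + (__rdtsc() - uFirstTsc));

        for (unsigned iCpu = 0; iCpu < NUM_VCPUS; iCpu++)
            printf("vCPU%u TSC seeded with %llu (+%llu ticks)\n", iCpu,
                   (unsigned long long)g_aGuestTsc[iCpu],
                   (unsigned long long)(g_aGuestTsc[iCpu] - uPausedTscValue));
        return 0;
    }
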
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp (r72522 → r72546)

             case TMTSCMODE_NATIVE_API:
                 pVCpu->tm.s.offTSCRawSrc = 0; /** @todo ?? */
+                /* Looks like this is only used by weird modes and MSR TSC writes.  We cannot support either on NEM/win. */
                 break;
             default:
…
                 break;
             case TMTSCMODE_NATIVE_API:
-                pVCpu->tm.s.offTSCRawSrc = 0; /** @todo ?? */
+            {
+#ifndef IN_RC
+                int rc = NEMHCResumeCpuTickOnAll(pVM, pVCpu, pVM->tm.s.u64LastPausedTSC);
+                AssertRCReturn(rc, rc);
+                pVCpu->tm.s.offTSCRawSrc = offTSCRawSrcOld = 0;
+#else
+                AssertFailedReturn(VERR_INTERNAL_ERROR_2);
+#endif
                 break;
+            }
             default:
                 AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
         }
 
-        /* Calculate the offset for other VCPUs to use. */
+        /* Calculate the offset addendum for other VCPUs to use. */
         pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
     }

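For context on the TM hunk above: the offset-based TSC modes resume by recomputing pVCpu->tm.s.offTSCRawSrc, while TMTSCMODE_NATIVE_API has no VMM-side offset to adjust and instead pushes the paused value into the hypervisor via NEMHCResumeCpuTickOnAll(). A hedged sketch of that distinction, assuming the offset-based modes derive the guest TSC as raw source minus offset (tmSketchResumeOffset() is illustrative only, not a VirtualBox function):

    #include <stdbool.h>
    #include <stdint.h>

    /* Models how the resume path picks the per-VCPU TSC offset. */
    uint64_t tmSketchResumeOffset(bool fNativeApi, uint64_t uRawSourceNow, uint64_t u64LastPausedTSC)
    {
        if (fNativeApi)
            /* The hypervisor owns the TSC: the paused value is handed to it
               (NEMHCResumeCpuTickOnAll above) and no VMM-side offset is kept. */
            return 0;
        /* Otherwise choose the offset so (rawSourceNow - offset) == paused value,
           i.e. the guest TSC continues exactly where it stopped. */
        return uRawSourceNow - u64LastPausedTSC;
    }
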
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp (r72544 → r72546)

 NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat);
 NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
+NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
 DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, uint32_t uFunction, void *pvInput, uint32_t cbInput,
                                              void *pvOutput, uint32_t cbOutput);
…
     HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
     AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
+    AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
 
     pInput->PartitionId = pGVM->nem.s.idHvPartition;
…
     HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
     AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
+    AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
 
     fWhat &= pCtx->fExtrn;
…
     HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
     AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
+    AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
 
     pInput->PartitionId = pGVM->nem.s.idHvPartition;
…
         rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
                                   &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
+    }
+    return rc;
+}
+
+
+/**
+ * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
+ *
+ * @returns VBox status code.
+ * @param   pGVM            The ring-0 VM handle.
+ * @param   pGVCpu          The ring-0 VCPU handle.
+ * @param   uPausedTscValue The TSC value at the time of pausing.
+ */
+NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
+{
+    AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
+
+    /*
+     * Set up the hypercall parameters.
+     */
+    HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nem.s.HypercallData.pbPage;
+    AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
+
+    pInput->PartitionId = pGVM->nem.s.idHvPartition;
+    pInput->VpIndex     = 0;
+    pInput->RsvdZ       = 0;
+    pInput->Elements[0].Name = HvX64RegisterTsc;
+    pInput->Elements[0].Pad0 = 0;
+    pInput->Elements[0].Pad1 = 0;
+    pInput->Elements[0].Value.Reg128.High64 = 0;
+    pInput->Elements[0].Value.Reg64 = uPausedTscValue;
+
+    /*
+     * Disable interrupts and do the first virtual CPU.
+     */
+    RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
+    uint64_t const   uFirstTsc   = ASMReadTSC();
+    uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
+                                               pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
+    AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
+                              ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
+
+    /*
+     * Do secondary processors, adjusting for elapsed TSC and keeping fingers crossed
+     * that we don't introduce too much drift here.
+     */
+    for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
+    {
+        Assert(pInput->PartitionId == pGVM->nem.s.idHvPartition);
+        Assert(pInput->RsvdZ == 0);
+        Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
+        Assert(pInput->Elements[0].Pad0 == 0);
+        Assert(pInput->Elements[0].Pad1 == 0);
+        Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
+
+        pInput->VpIndex = iCpu;
+        const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
+        pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
+
+        uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
+                                          pGVCpu->nem.s.HypercallData.HCPhysPage, 0 /* no output */);
+        AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
+                                  ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
+                                  ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
+    }
+
+    /*
+     * Done.
+     */
+    ASMSetFlags(fSavedFlags);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets the TSC register to @a uPausedTscValue on all CPUs.
+ *
+ * @returns VBox status code.
+ * @param   pGVM            The ring-0 VM handle.
+ * @param   pVM             The cross context VM handle.
+ * @param   idCpu           The calling EMT.  Necessary for getting the
+ *                          hypercall page and arguments.
+ * @param   uPausedTscValue The TSC value at the time of pausing.
+ */
+VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVM pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
+{
+    /*
+     * Validate the call.
+     */
+    int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
+    if (RT_SUCCESS(rc))
+    {
+        PVMCPU  pVCpu  = &pVM->aCpus[idCpu];
+        PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
+        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
+
+        /*
+         * Call worker.
+         */
+        pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
+        pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
+        rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
     }
     return rc;
…
     if (RT_SUCCESS(rc))
     {
-        AssertPtrReturn(g_pfnHvlInvokeHypercall, VERR_INTERNAL_ERROR_3);
+        AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
 
         PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];

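The ring-0 worker above disables interrupts so that as little host time as possible passes between sampling offDelta and the hypercall taking effect; whatever time does pass is skew the compensation cannot see. The following user-mode sketch (not VirtualBox code) makes that residual window visible; g_uSink merely stands in for the set-registers hypercall.

    #include <stdint.h>
    #include <stdio.h>
    #if defined(_MSC_VER)
    # include <intrin.h>
    #else
    # include <x86intrin.h>
    #endif

    static volatile uint64_t g_uSink; /* stands in for the hypercall side effect */

    int main(void)
    {
        uint64_t const uPaused   = UINT64_C(1000000);
        uint64_t const uFirstTsc = __rdtsc();

        for (unsigned iCpu = 1; iCpu < 4; iCpu++)
        {
            uint64_t const offDelta = __rdtsc() - uFirstTsc;  /* compensation input */
            g_uSink = uPaused + offDelta;                     /* "set" the register */
            uint64_t const uAfter  = __rdtsc() - uFirstTsc;   /* where the host really is */
            printf("vCPU%u: residual lag ~%llu ticks (the set operation's own latency)\n",
                   iCpu, (unsigned long long)(uAfter - offDelta));
        }
        return 0;
    }
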
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r72541 → r72546)

             break;
 
+        case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
+            if (pReqHdr || idCpu == NIL_VMCPUID)
+                return VERR_INVALID_PARAMETER;
+            rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
+            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+            break;
+
         case VMMR0_DO_NEM_UPDATE_STATISTICS:
             if (u64Arg || pReqHdr)

trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp (r72544 → r72546)

  *
  *
- * - How do we modify the TSC offset (or bias if you like).
- *
- *   This is a show stopper as it breaks both pausing the VM and restoring
- *   of saved state.
+ * - We need a way to directly modify the TSC offset (or bias if you like).
+ *
+ *   The current approach of setting the WHvX64RegisterTsc register one by one
+ *   on each virtual CPU in sequence will introduce random inaccuracies,
+ *   especially if the thread doing the job is rescheduled at a bad time.
  *
  *

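The "TSC offset (or bias)" the note asks for comes down to one line of arithmetic: with a single partition-wide offset, every vCPU derives its TSC from the host TSC at read time, so resuming is one write and cannot introduce inter-vCPU skew. A hedged sketch of that model (calcPartitionTscOffset() and readGuestTsc() are illustrative; no such WinHvPlatform API is assumed to exist):

    #include <stdint.h>

    /* Pick the offset so that (hostTscNow + offset) == uPausedTscValue.
       Unsigned wrap-around is intentional: TSC arithmetic is modulo 2^64. */
    uint64_t calcPartitionTscOffset(uint64_t uHostTscNow, uint64_t uPausedTscValue)
    {
        return uPausedTscValue - uHostTscNow;
    }

    /* What every vCPU would then observe, no matter when it is resumed. */
    uint64_t readGuestTsc(uint64_t uHostTsc, uint64_t uTscOffset)
    {
        return uHostTsc + uTscOffset;
    }
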