VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp @ 93666

Last change on this file since 93666 was 93657, checked in by vboxsync, 3 years ago

VMM/TM,VMM/*: Moved RTTIMENANOTSDATAR0 into the ring-0 only part of the VM structure. Added a VMCC_CTX macro for selecting between tm and tmr0 VM components depending on the compilation context. Added a bunch of missing padding checks for GVM. [build fix] bugref:10094

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 43.4 KB
1/* $Id: TMAllVirtual.cpp 93657 2022-02-08 14:01:17Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/dbgftrace.h>
25#ifdef IN_RING3
26# include <iprt/thread.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vmm/vmcc.h>
30#include <VBox/vmm/vmm.h>
31#include <VBox/err.h>
32#include <VBox/log.h>
33#include <VBox/sup.h>
34
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39
40
41
42/**
43 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
44 */
45DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
46 uint64_t u64PrevNanoTS)
47{
48 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
49 pData->cBadPrev++;
50 if ((int64_t)u64DeltaPrev < 0)
51 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
52 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
53 else
54 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
55 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
56}
57
58
59#ifdef IN_RING3
60/**
61 * @callback_method_impl{FNTIMENANOTSINTERNAL, For driverless mode.}
62 */
63static DECLCALLBACK(uint64_t) tmR3VirtualNanoTSDriverless(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
64{
65 RT_NOREF(pData);
66 if (pExtra)
67 pExtra->uTSCValue = ASMReadTSC();
68 return RTTimeSystemNanoTS();
69}
70#endif
71
72
73/**
74 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
75 *
76 * This is the initial worker, so the first call in each context ends up here.
77 * It is also used should the delta rating of the host CPUs change or if the
78 * fGetGipCpu feature the current worker relies upon becomes unavailable. The
79 * last two events may occur as CPUs are taken online.
80 */
81DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
82{
83 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
84 PFNTIMENANOTSINTERNAL pfnWorker;
85
86 /*
87 * We require a valid GIP for the selection below.
88 * Invalid GIP is fatal, though we have to allow no GIP in driverless mode (ring-3 only).
89 */
90 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
91#ifdef IN_RING3
92 if (pGip)
93#endif
94 {
95 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
96 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
97 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
98 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
99
100 /*
101 * Determine the new worker.
102 */
103 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
104 switch (pGip->u32Mode)
105 {
106 case SUPGIPMODE_SYNC_TSC:
107 case SUPGIPMODE_INVARIANT_TSC:
108#ifdef IN_RING0
109 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
110 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
111 else
112 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
113#else
114 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
115 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
116 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
117 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
118 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
119 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
120 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
121 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
122 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
123 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
124 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
125 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
126 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
127 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
128 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
129 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
130 else
131 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
132 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
133 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
134#endif
135 break;
136
137 case SUPGIPMODE_ASYNC_TSC:
138#ifdef IN_RING0
139 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
140#else
141 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
142 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
143 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
144 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
145 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
146 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
147 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
148 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
149 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
150 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
151 else
152 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
153#endif
154 break;
155
156 default:
157 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
158 }
159 }
160#ifdef IN_RING3
161 else
162 pfnWorker = tmR3VirtualNanoTSDriverless;
163#endif
164
165 /*
166 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
167 */
168 ASMAtomicWritePtr((void * volatile *)&pVM->VMCC_CTX(tm).s.pfnVirtualGetRaw, (void *)(uintptr_t)pfnWorker);
169 return pfnWorker(pData, pExtra);
170}
171
172
173/**
174 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
175 */
176DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra,
177 uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
178{
179 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
180 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x pExtra=%p\n", pVM, idApic, iCpuSet, iGipCpu, pExtra));
181#ifndef _MSC_VER
182 return UINT64_MAX;
183#endif
184}
185
186
187/**
188 * Wrapper around the IPRT GIP time methods.
189 */
190DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
191{
192#ifdef IN_RING3
193 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, NULL /*pExtra*/);
194#elif defined(IN_RING0)
195 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
196 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, NULL /*pExtra*/);
197 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
198 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
199#else
200# error "unsupported context"
201#endif
202 /*DBGFTRACE_POS_U64(pVM, u64);*/
203 return u64;
204}
205
206
207/**
208 * Wrapper around the IPRT GIP time methods, extended version.
209 */
210DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
211{
212 RTITMENANOTSEXTRA Extra;
213#ifdef IN_RING3
214 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, &Extra);
215#elif defined(IN_RING0)
216 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
217 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, &Extra);
218 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
219 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
220#else
221# error "unsupported context"
222#endif
223 if (puTscNow)
224 *puTscNow = Extra.uTSCValue;
225 /*DBGFTRACE_POS_U64(pVM, u64);*/
226 return u64;
227}
228
229
230/**
231 * Get the time when we're not running at 100%
232 *
233 * @returns The timestamp.
234 * @param pVM The cross context VM structure.
235 * @param puTscNow Where to return the TSC corresponding to the returned
236 * timestamp (delta adjusted). Optional.
237 */
238static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
239{
240 /*
241 * Recalculate the RTTimeNanoTS() value for the period where
242 * warp drive has been enabled.
243 */
244 uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
245 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
246 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
247 u64 /= 100;
248 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
249
250 /*
251 * Now we apply the virtual time offset.
252 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
253 * machine started if it had been running continuously without any suspends.)
254 */
255 u64 -= pVM->tm.s.u64VirtualOffset;
256 return u64;
257}
258
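#if 0 /* Illustrative sketch (hypothetical helper, not used anywhere): the warp
         drive scaling above rebases the raw nanosecond count to the moment warp
         drive was engaged, scales only the elapsed part, and adds the base back.
         Plain numbers: a raw reading 500 ns after the warp start with a 200%
         factor yields 1000 ns of warped time past the start. */
static uint64_t tmDemoWarpScale(uint64_t u64Raw, uint64_t u64WarpStart, uint32_t uWarpPct)
{
    uint64_t u64 = u64Raw;
    u64 -= u64WarpStart;        /* nanoseconds elapsed on the host since warp start */
    u64 *= uWarpPct;            /* scale by the warp percentage (100 = normal speed) */
    u64 /= 100;
    u64 += u64WarpStart;        /* rebase onto the warp drive start */
    return u64;                 /* tmDemoWarpScale(1000000500, 1000000000, 200) == 1000001000 */
}
#endif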
259
260/**
261 * Get the raw virtual time.
262 *
263 * @returns The current time stamp.
264 * @param pVM The cross context VM structure.
265 */
266DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
267{
268 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
269 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
270 return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
271}
272
273
274/**
275 * Get the raw virtual time, extended version.
276 *
277 * @returns The current time stamp.
278 * @param pVM The cross context VM structure.
279 * @param puTscNow Where to return the TSC corresponding to the returned
280 * timestamp (delta adjusted). Optional.
281 */
282DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
283{
284 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
285 return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
286 return tmVirtualGetRawNonNormal(pVM, puTscNow);
287}
288
289
290/**
291 * Inlined version of tmVirtualGetEx.
292 */
293DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
294{
295 uint64_t u64;
296 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
297 {
298 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
299 u64 = tmVirtualGetRaw(pVM);
300
301 /*
302 * Use the chance to check for expired timers.
303 */
304 if (fCheckTimers)
305 {
306 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
307 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
308 && !pVM->tm.s.fRunningQueues
309 && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64
310 || ( pVM->tm.s.fVirtualSyncTicking
311 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
312 )
313 )
314 && !pVM->tm.s.fRunningQueues
315 )
316 {
317 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
318 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
319 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
320#ifdef IN_RING3
321 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
322#endif
323 }
324 }
325 }
326 else
327 u64 = pVM->tm.s.u64Virtual;
328 return u64;
329}
330
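#if 0 /* Illustrative sketch (hypothetical helper): the expiry test above, unrolled.
         The timer FF is raised when the TMCLOCK_VIRTUAL queue head has expired, or
         when the virtual sync clock is ticking and its queue head has expired
         relative to the sync time (u64 - offVirtualSync). */
static bool tmDemoAnyQueueExpired(uint64_t u64, uint64_t u64VirtExpire,
                                  bool fSyncTicking, uint64_t u64SyncExpire, uint64_t offVirtualSync)
{
    return u64VirtExpire <= u64
        || (fSyncTicking && u64SyncExpire <= u64 - offVirtualSync);
}
#endif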
331
332/**
333 * Gets the current TMCLOCK_VIRTUAL time
334 *
335 * @returns The timestamp.
336 * @param pVM The cross context VM structure.
337 *
338 * @remark While the flow of time will never go backwards, the speed of its
339 * progress varies due to inaccuracies in RTTimeNanoTS and the TSC. The latter can be
340 * influenced by power saving (SpeedStep, PowerNow!), while the former
341 * makes use of the TSC and kernel timers.
342 */
343VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
344{
345 return tmVirtualGet(pVM, true /*fCheckTimers*/);
346}
347
348
349/**
350 * Gets the current TMCLOCK_VIRTUAL time without checking
351 * timers or anything.
352 *
353 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
354 *
355 * @returns The timestamp.
356 * @param pVM The cross context VM structure.
357 *
358 * @remarks See TMVirtualGet.
359 */
360VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
361{
362 return tmVirtualGet(pVM, false /*fCheckTimers*/);
363}
364
365
366/**
367 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nanoseconds.
368 *
369 * @returns Host nanosecond count.
370 * @param pVM The cross context VM structure.
371 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
372 */
373DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
374{
375 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
376 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
377 return cVirtTicksToDeadline;
378}
379
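#if 0 /* Illustrative sketch (hypothetical helper): the scaling done by
         ASMMultU64ByU32DivByU32 above, written with plain 64-bit arithmetic.  At a
         200% warp factor, 1000 TMCLOCK_VIRTUAL ticks to the deadline correspond to
         only 500 host nanoseconds of waiting. */
static uint64_t tmDemoVirtTicksToHostNs(uint64_t cVirtTicksToDeadline, uint32_t uWarpPct)
{
    return cVirtTicksToDeadline * 100 / uWarpPct;   /* may overflow for huge inputs, unlike the real helper */
}
#endif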
380
381/**
382 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
383 *
384 * @returns The timestamp.
385 * @param pVM The cross context VM structure.
386 * @param u64 raw virtual time.
387 * @param off offVirtualSync.
388 * @param pcNsToDeadline Where to return the number of nano seconds to
389 * the next virtual sync timer deadline. Can be
390 * NULL.
391 * @param pnsAbsDeadline Where to return the absolute deadline.
392 * Optional.
393 */
394DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
395 uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
396{
397 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
398
399 /*
400 * Don't make updates until we've checked the timer queue.
401 */
402 bool fUpdatePrev = true;
403 bool fUpdateOff = true;
404 bool fStop = false;
405 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
406 uint64_t u64Delta = u64 - u64Prev;
407 if (RT_LIKELY(!(u64Delta >> 32)))
408 {
409 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
410 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
411 {
412 off -= u64Sub;
413 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
414 }
415 else
416 {
417 /* we've completely caught up. */
418 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
419 off = pVM->tm.s.offVirtualSyncGivenUp;
420 fStop = true;
421 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
422 }
423 }
424 else
425 {
426 /* More than 4 seconds since last time (or negative), ignore it. */
427 fUpdateOff = false;
428 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
429 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
430 }
431
432 /*
433 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
434 * approach is to never pass the head timer. So, when we would pass it, we stop
435 * the clock and set the timer pending flag.
436 */
437 u64 -= off;
438
439 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
440 if (u64Last > u64)
441 {
442 u64 = u64Last + 1;
443 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
444 }
445
446 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
447 if (pnsAbsDeadline)
448 *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
449 through this code over and over again even if there aren't any timer changes. */
450 if (u64 < u64Expire)
451 {
452 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
453 if (fUpdateOff)
454 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
455 if (fStop)
456 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
457 if (fUpdatePrev)
458 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
459 if (pcNsToDeadline)
460 {
461 uint64_t cNsToDeadline = u64Expire - u64;
462 if (pVM->tm.s.fVirtualSyncCatchUp)
463 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
464 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
465 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
466 }
467 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
468 }
469 else
470 {
471 u64 = u64Expire;
472 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
473 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
474
475 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
476 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
477 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
478 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
479 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
480 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
481
482 if (pcNsToDeadline)
483 *pcNsToDeadline = 0;
484#ifdef IN_RING3
485 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
486#endif
487 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
488 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
489 }
490 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
491
492 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
493 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
494 return u64;
495}
496
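#if 0 /* Illustrative sketch (hypothetical numbers): the catch-up arithmetic used
         above.  At a 25% catch-up percentage, 1,000,000 ns elapsed since the
         previous read repays 250,000 ns of the lag, and a 1,000,000 ns distance to
         the next expiry is reported as 800,000 ns (scaled by 100 / (25 + 100)) so
         the caller polls again early enough while catching up. */
static void tmDemoCatchUpStep(void)
{
    uint32_t const uPct          = 25;                                /* u32VirtualSyncCatchUpPercentage */
    uint64_t const cNsElapsed    = UINT64_C(1000000);                 /* u64 - u64VirtualSyncCatchUpPrev */
    uint64_t const cNsSub        = cNsElapsed * uPct / 100;           /* 250000 ns of lag repaid */
    uint64_t const cNsToExpiry   = UINT64_C(1000000);
    uint64_t const cNsToDeadline = cNsToExpiry * 100 / (uPct + 100);  /* 800000 ns */
    (void)cNsSub; (void)cNsToDeadline;
}
#endif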
497
498/**
499 * tmVirtualSyncGetEx worker for when we get the lock.
500 *
501 * @returns The timestamp.
502 * @param pVM The cross context VM structure.
503 * @param u64 The virtual clock timestamp.
504 * @param pcNsToDeadline Where to return the number of nano seconds to
505 * the next virtual sync timer deadline. Can be
506 * NULL.
507 * @param pnsAbsDeadline Where to return the absolute deadline.
508 * Optional.
509 */
510DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
511{
512 /*
513 * Not ticking?
514 */
515 if (!pVM->tm.s.fVirtualSyncTicking)
516 {
517 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
518 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
519 if (pcNsToDeadline)
520 *pcNsToDeadline = 0;
521 if (pnsAbsDeadline)
522 *pnsAbsDeadline = u64;
523 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
524 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
525 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
526 return u64;
527 }
528
529 /*
530 * Handle catch up in a separate function.
531 */
532 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
533 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
534 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);
535
536 /*
537 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
538 * approach is to never pass the head timer. So, when we would pass it, we stop
539 * the clock and set the timer pending flag.
540 */
541 u64 -= off;
542
543 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
544 if (u64Last > u64)
545 {
546 u64 = u64Last + 1;
547 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
548 }
549
550 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
551 if (pnsAbsDeadline)
552 *pnsAbsDeadline = u64Expire;
553 if (u64 < u64Expire)
554 {
555 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
556 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
557 if (pcNsToDeadline)
558 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
559 }
560 else
561 {
562 u64 = u64Expire;
563 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
564 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
565
566 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
567 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
568 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
569 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
570 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
571 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
572
573#ifdef IN_RING3
574 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
575#endif
576 if (pcNsToDeadline)
577 *pcNsToDeadline = 0;
578 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
579 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
580 }
581 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
582 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
583 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
584 return u64;
585}
586
587
588/**
589 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
590 *
591 * @returns The timestamp.
592 * @param pVM The cross context VM structure.
593 * @param fCheckTimers Whether to check timers.
594 * @param pcNsToDeadline Where to return the number of nano seconds to
595 * the next virtual sync timer deadline. Can be
596 * NULL.
597 * @param pnsAbsDeadline Where to return the absolute deadline.
598 * Optional.
599 * @param puTscNow Where to return the TSC corresponding to the
600 * returned timestamp (delta adjusted). Optional.
601 * @thread EMT.
602 */
603DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
604 uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
605{
606 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
607
608 uint64_t u64;
609 if (!pVM->tm.s.fVirtualSyncTicking)
610 {
611 if (pcNsToDeadline)
612 *pcNsToDeadline = 0;
613 u64 = pVM->tm.s.u64VirtualSync;
614 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
615 return u64;
616 }
617
618 /*
619 * Query the virtual clock and do the usual expired timer check.
620 */
621 Assert(pVM->tm.s.cVirtualTicking);
622 u64 = tmVirtualGetRawEx(pVM, puTscNow);
623 if (fCheckTimers)
624 {
625 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
626 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
627 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64)
628 {
629 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
630 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
631#ifdef IN_RING3
632 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
633#endif
634 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
635 }
636 }
637
638 /*
639 * If we can get the lock, get it. The result is much more reliable.
640 *
641 * Note! This is where all clock source devices branch off because they
642 * will be owning the lock already. The 'else' is taken by code
643 * which is less picky or hasn't been adjusted yet.
644 */
645 /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
646 * here and the remainder of this function in a static worker. */
647 if (PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
648 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
649
650 /*
651 * When the clock is ticking, not doing catch ups and not running into an
652 * expired time, we can get away without locking. Try this first.
653 */
654 uint64_t off;
655 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
656 {
657 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
658 {
659 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
660 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
661 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
662 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
663 {
664 off = u64 - off;
665 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
666 if (off < u64Expire)
667 {
668 if (pnsAbsDeadline)
669 *pnsAbsDeadline = u64Expire;
670 if (pcNsToDeadline)
671 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
672 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
673 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
674 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
675 return off;
676 }
677 }
678 }
679 }
680 else
681 {
682 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
683 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
684 {
685 if (pcNsToDeadline)
686 *pcNsToDeadline = 0;
687 if (pnsAbsDeadline)
688 *pnsAbsDeadline = off;
689 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
690 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
691 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
692 return off;
693 }
694 }
695
696 /*
697 * Read the offset and adjust if we're playing catch-up.
698 *
699 * The catch-up adjustment works by decrementing the offset by a percentage of
700 * the time elapsed since the previous TMVirtualGetSync call.
701 *
702 * It's possible to get a very long or even negative interval between two reads
703 * for the following reasons:
704 * - Someone might have suspended the process execution, frequently the case when
705 * debugging the process.
706 * - We might be on a different CPU whose TSC isn't quite in sync with the
707 * other CPUs in the system.
708 * - Another thread is racing us and we might have been preempted while inside
709 * this function.
710 *
711 * Assuming nanosecond virtual time, we can simply ignore any interval that has
712 * any of the upper 32 bits set.
713 */
714 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
715 int cOuterTries = 42;
716 for (;; cOuterTries--)
717 {
718 /* Try grab the lock, things get simpler when owning the lock. */
719 int rcLock = PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock);
720 if (RT_SUCCESS_NP(rcLock))
721 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
722
723 /* Re-check the ticking flag. */
724 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
725 {
726 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
727 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
728 && cOuterTries > 0)
729 continue;
730 if (pcNsToDeadline)
731 *pcNsToDeadline = 0;
732 if (pnsAbsDeadline)
733 *pnsAbsDeadline = off;
734 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
735 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
736 return off;
737 }
738
739 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
740 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
741 {
742 /* No changes allowed, try get a consistent set of parameters. */
743 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
744 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
745 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
746 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
747 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
748 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
749 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
750 || cOuterTries <= 0)
751 {
752 uint64_t u64Delta = u64 - u64Prev;
753 if (RT_LIKELY(!(u64Delta >> 32)))
754 {
755 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
756 if (off > u64Sub + offGivenUp)
757 {
758 off -= u64Sub;
759 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
760 }
761 else
762 {
763 /* we've completely caught up. */
764 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
765 off = offGivenUp;
766 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
767 }
768 }
769 else
770 /* More than 4 seconds since last time (or negative), ignore it. */
771 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
772
773 /* Check that we're still running and in catch up. */
774 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
775 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
776 break;
777 if (cOuterTries <= 0)
778 break; /* enough */
779 }
780 }
781 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
782 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
783 break; /* Got a consistent offset. */
784 else if (cOuterTries <= 0)
785 break; /* enough */
786 }
787 if (cOuterTries <= 0)
788 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
789
790 /*
791 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
792 * approach is to never pass the head timer. So, when we would pass it, we stop
793 * the clock and set the timer pending flag.
794 */
795 u64 -= off;
796/** @todo u64VirtualSyncLast */
797 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
798 if (pnsAbsDeadline)
799 *pnsAbsDeadline = u64Expire;
800 if (u64 >= u64Expire)
801 {
802 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
803 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
804 {
805 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
806 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
807 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
808#ifdef IN_RING3
809 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
810#endif
811 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
812 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
813 }
814 else
815 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
816 if (pcNsToDeadline)
817 *pcNsToDeadline = 0;
818 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
819 }
820 else if (pcNsToDeadline)
821 {
822 uint64_t cNsToDeadline = u64Expire - u64;
823 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
824 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
825 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
826 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
827 }
828
829 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
830 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
831 return u64;
832}
833
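#if 0 /* Illustrative sketch (hypothetical helper): the lockless read pattern used in
         tmVirtualSyncGetEx above.  The guarding flags are read, the value is read,
         and then the guards are re-checked; only if nothing changed in between is
         the unlocked snapshot trusted, otherwise the caller falls back to taking
         VirtualSyncLock or to the retry loop. */
static bool tmDemoLocklessSnapshot(bool volatile *pfTicking, uint64_t volatile *poff, uint64_t *poffSnapshot)
{
    if (*pfTicking)
    {
        uint64_t const off = *poff;
        if (*pfTicking && off == *poff)     /* guards unchanged -> consistent snapshot */
        {
            *poffSnapshot = off;
            return true;
        }
    }
    return false;                           /* racing writer detected */
}
#endif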
834
835/**
836 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
837 *
838 * @returns The timestamp.
839 * @param pVM The cross context VM structure.
840 * @thread EMT.
841 * @remarks May set the timer and virtual sync FFs.
842 */
843VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
844{
845 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
846}
847
848
849/**
850 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
851 * TMCLOCK_VIRTUAL.
852 *
853 * @returns The timestamp.
854 * @param pVM The cross context VM structure.
855 * @thread EMT.
856 * @remarks May set the timer and virtual sync FFs.
857 */
858VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
859{
860 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
861}
862
863
864/**
865 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
866 * TMCLOCK_VIRTUAL, also returning corresponding TSC value.
867 *
868 * @returns The timestamp.
869 * @param pVM The cross context VM structure.
870 * @param puTscNow Where to return the TSC value that the return
871 * value is relative to. This is delta adjusted.
872 * @thread EMT.
873 * @remarks May set the timer and virtual sync FFs.
874 */
875VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
876{
877 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
878}
879
880
881/**
882 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
883 *
884 * @returns The timestamp.
885 * @param pVM The cross context VM structure.
886 * @param fCheckTimers Check timers on the virtual clock or not.
887 * @thread EMT.
888 * @remarks May set the timer and virtual sync FFs.
889 */
890VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
891{
892 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
893}
894
895
896/**
897 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
898 * without checking timers running on TMCLOCK_VIRTUAL.
899 *
900 * @returns The timestamp.
901 * @param pVM The cross context VM structure.
902 * @param pcNsToDeadline Where to return the number of nano seconds to
903 * the next virtual sync timer deadline.
904 * @param puTscNow Where to return the TSC value that the return
905 * value is relative to. This is delta adjusted.
906 * @param puDeadlineVersion Where to return the deadline "version" number.
907 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
908 * to check if the absolute deadline is still up to
909 * date and the caller can skip calling this
910 * function.
911 * @thread EMT.
912 * @remarks May set the timer and virtual sync FFs.
913 */
914VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
915 uint64_t *puDeadlineVersion, uint64_t *puTscNow)
916{
917 uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
918 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
919 *pcNsToDeadline = cNsToDeadlineTmp;
920 return u64Now;
921}
922
923
924/**
925 * Gets the number of nano seconds to the next virtual sync deadline.
926 *
927 * @returns The number of TMCLOCK_VIRTUAL ticks.
928 * @param pVM The cross context VM structure.
929 * @param puTscNow Where to return the TSC value that the return
930 * value is relative to. This is delta adjusted.
931 * @param puDeadlineVersion Where to return the deadline "version" number.
932 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
933 * to check if the absolute deadline is still up to
934 * date and the caller can skip calling this
935 * function.
936 * @thread EMT.
937 * @remarks May set the timer and virtual sync FFs.
938 */
939VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
940{
941 uint64_t cNsToDeadline;
942 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
943 return cNsToDeadline;
944}
945
946
947/**
948 * Checks if the given deadline is still current.
949 *
950 * @retval true if the deadline is still current.
951 * @retval false if the deadline is outdated.
952 * @param pVM The cross context VM structure.
953 * @param uDeadlineVersion The deadline version to check.
954 */
955VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
956{
957 /** @todo Try use ASMAtomicUoReadU64 instead. */
958 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
959 return u64Expire == uDeadlineVersion;
960}
961
962
963/**
964 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
965 *
966 * @return The current lag.
967 * @param pVM The cross context VM structure.
968 */
969VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
970{
971 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
972}
973
974
975/**
976 * Get the current catch-up percent.
977 *
978 * @return The current catch-up percentage. 0 means running at the same speed as the virtual clock.
979 * @param pVM The cross context VM structure.
980 */
981VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
982{
983 if (pVM->tm.s.fVirtualSyncCatchUp)
984 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
985 return 0;
986}
987
988
989/**
990 * Gets the current TMCLOCK_VIRTUAL frequency.
991 *
992 * @returns The frequency.
993 * @param pVM The cross context VM structure.
994 */
995VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
996{
997 NOREF(pVM);
998 return TMCLOCK_FREQ_VIRTUAL;
999}
1000
1001
1002/**
1003 * Worker for TMR3PauseClocks.
1004 *
1005 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1006 * @param pVM The cross context VM structure.
1007 */
1008int tmVirtualPauseLocked(PVMCC pVM)
1009{
1010 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
1011 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1012 if (c == 0)
1013 {
1014 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
1015 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
1016 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
1017 }
1018 return VINF_SUCCESS;
1019}
1020
1021
1022/**
1023 * Worker for TMR3ResumeClocks.
1024 *
1025 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1026 * @param pVM The cross context VM structure.
1027 */
1028int tmVirtualResumeLocked(PVMCC pVM)
1029{
1030 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
1031 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1032 if (c == 1)
1033 {
1034 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
1035 pVM->tm.s.u64VirtualRawPrev = 0;
1036 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
1037 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
1038 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
1039 }
1040 return VINF_SUCCESS;
1041}
1042
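#if 0 /* Illustrative sketch (hypothetical numbers): the bookkeeping done by the
         pause/resume workers above.  Pausing freezes u64Virtual at the current raw
         time; resuming recomputes u64VirtualOffset so that tmVirtualGetRaw()
         continues from the frozen value without a jump. */
static void tmDemoPauseResume(void)
{
    uint64_t const u64VirtualAtPause = UINT64_C(5000);                      /* frozen virtual time */
    uint64_t const u64RawAtResume    = UINT64_C(9000);                      /* host nano TS at resume */
    uint64_t const u64VirtualOffset  = u64RawAtResume - u64VirtualAtPause;  /* 4000 */
    /* A raw reading of 9100 a little later yields 9100 - 4000 = 5100: the virtual
       clock has advanced by exactly the 100 ns that passed since the resume. */
    (void)u64VirtualOffset;
}
#endif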
1043
1044/**
1045 * Converts from virtual ticks to nanoseconds.
1046 *
1047 * @returns nanoseconds.
1048 * @param pVM The cross context VM structure.
1049 * @param u64VirtualTicks The virtual ticks to convert.
1050 * @remark There could be rounding errors here. We just do a simple integer divide
1051 * without any adjustments.
1052 */
1053VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
1054{
1055 NOREF(pVM);
1056 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1057 return u64VirtualTicks;
1058}
1059
1060
1061/**
1062 * Converts from virtual ticks to microseconds.
1063 *
1064 * @returns microseconds.
1065 * @param pVM The cross context VM structure.
1066 * @param u64VirtualTicks The virtual ticks to convert.
1067 * @remark There could be rounding errors here. We just do a simple integer divide
1068 * without any adjustments.
1069 */
1070VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
1071{
1072 NOREF(pVM);
1073 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1074 return u64VirtualTicks / 1000;
1075}
1076
1077
1078/**
1079 * Converts from virtual ticks to milliseconds.
1080 *
1081 * @returns milliseconds.
1082 * @param pVM The cross context VM structure.
1083 * @param u64VirtualTicks The virtual ticks to convert.
1084 * @remark There could be rounding errors here. We just do a simple integer divide
1085 * without any adjustments.
1086 */
1087VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
1088{
1089 NOREF(pVM);
1090 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1091 return u64VirtualTicks / 1000000;
1092}
1093
1094
1095/**
1096 * Converts from nanoseconds to virtual ticks.
1097 *
1098 * @returns virtual ticks.
1099 * @param pVM The cross context VM structure.
1100 * @param u64NanoTS The nanosecond value to convert.
1101 * @remark There could be rounding and overflow errors here.
1102 */
1103VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
1104{
1105 NOREF(pVM);
1106 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1107 return u64NanoTS;
1108}
1109
1110
1111/**
1112 * Converts from microseconds to virtual ticks.
1113 *
1114 * @returns virtual ticks.
1115 * @param pVM The cross context VM structure.
1116 * @param u64MicroTS The microsecond value to convert.
1117 * @remark There could be rounding and overflow errors here.
1118 */
1119VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
1120{
1121 NOREF(pVM);
1122 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1123 return u64MicroTS * 1000;
1124}
1125
1126
1127/**
1128 * Converts from milliseconds to virtual ticks.
1129 *
1130 * @returns virtual ticks.
1131 * @param pVM The cross context VM structure.
1132 * @param u64MilliTS The millisecond value to convert.
1133 * @remark There could be rounding and overflow errors here.
1134 */
1135VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1136{
1137 NOREF(pVM);
1138 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1139 return u64MilliTS * 1000000;
1140}
1141
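#if 0 /* Illustrative sketch: because TMCLOCK_FREQ_VIRTUAL is 1,000,000,000 Hz, the
         conversion helpers above are plain nanosecond arithmetic.  Converting 2.5 ms
         therefore gives 2,500,000 virtual ticks, which round-trips back to 2500 us
         and 2 ms with the integer divides noted in their remarks. */
static void tmDemoConversions(PVM pVM)
{
    uint64_t const cTicks = TMVirtualFromMilli(pVM, 2) + TMVirtualFromMicro(pVM, 500); /* 2500000 */
    uint64_t const cMicro = TMVirtualToMicro(pVM, cTicks);                             /* 2500 */
    uint64_t const cMilli = TMVirtualToMilli(pVM, cTicks);                             /* 2 */
    (void)cMicro; (void)cMilli;
}
#endif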