VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@ 37518

Last change on this file since 37518 was 37517, checked in by vboxsync, 14 years ago

TM: Simplified the virtual sync timers by requiring changes to be done while holding the virtual sync lock. This means we can skip all the pending states and move timers on and off the active list immediately, avoiding the problems with timers being on the pending-scheduling list. Also made u64VirtualSync keep track of the last time stamp all the time (when under the lock) and thus really making sure time does not jump backwards.
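A minimal sketch of the locking discipline described above, using assumed names rather than code from the file below (tmVirtualSyncLock is assumed to be the blocking counterpart of the tmVirtualSyncTryLock/tmVirtualSyncUnlock helpers used in the source):

    /* All virtual sync updates happen while holding the virtual sync lock, and the
       last returned timestamp is tracked so the clock is never seen going backwards. */
    static void tmExampleVirtualSyncSet(PVM pVM, uint64_t u64New) /* hypothetical helper */
    {
        tmVirtualSyncLock(pVM);                     /* changes only under the lock */
        if (u64New <= pVM->tm.s.u64VirtualSync)     /* keep the clock monotonic */
            u64New = pVM->tm.s.u64VirtualSync + 1;
        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64New);
        tmVirtualSyncUnlock(pVM);
    }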

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 37.6 KB
/* $Id: TMAllVirtual.cpp 37517 2011-06-16 19:24:00Z vboxsync $ */
/** @file
 * TM - Timeout Manager, Virtual Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgftrace.h>
#ifdef IN_RING3
# include <VBox/vmm/rem.h>
# include <iprt/thread.h>
#endif
#include "TMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>

#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>



/**
 * Helper function that's used by the assembly routines when something goes bust.
 *
 * @param   pData           Pointer to the data structure.
 * @param   u64NanoTS       The calculated nano ts.
 * @param   u64DeltaPrev    The delta relative to the previously returned timestamp.
 * @param   u64PrevNanoTS   The previously returned timestamp (as it was read).
 */
DECLEXPORT(void) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev, uint64_t u64PrevNanoTS)
{
    //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
    pData->cBadPrev++;
    if ((int64_t)u64DeltaPrev < 0)
        LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64\n",
                u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
    else
        Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 (debugging?)\n",
             u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
}


/**
 * Called the first time somebody asks for the time or when the GIP
 * is mapped/unmapped.
 *
 * This should never ever happen.
 */
DECLEXPORT(uint64_t) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData)
{
    //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
    AssertFatalMsgFailed(("pGip=%p u32Magic=%#x\n", pGip, VALID_PTR(pGip) ? pGip->u32Magic : 0));
    return 0; /* gcc false positive warning */
}


#if 1

/**
 * Wrapper around the IPRT GIP time methods.
 */
DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVM pVM)
{
# ifdef IN_RING3
    uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData));
# else  /* !IN_RING3 */
    uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
    uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
    if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
        VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
# endif /* !IN_RING3 */
    /*DBGFTRACE_POS_U64(pVM, u64);*/
    return u64;
}

#else

/**
 * This is (mostly) the same as rtTimeNanoTSInternal() except
 * for the two globals which live in TM.
 *
 * @returns Nanosecond timestamp.
 * @param   pVM     The VM handle.
 */
static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
{
    uint64_t    u64Delta;
    uint32_t    u32NanoTSFactor0;
    uint64_t    u64TSC;
    uint64_t    u64NanoTS;
    uint32_t    u32UpdateIntervalTSC;
    uint64_t    u64PrevNanoTS;

    /*
     * Read the GIP data and the previous value.
     */
    for (;;)
    {
        uint32_t u32TransactionId;
        PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
#ifdef IN_RING3
        if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
            return RTTimeSystemNanoTS();
#endif

        if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
        {
            u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#ifdef RT_OS_L4
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
            u64NanoTS = pGip->aCPUs[0].u64NanoTS;
            u64TSC = pGip->aCPUs[0].u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
            if (RT_UNLIKELY(    pGip->aCPUs[0].u32TransactionId != u32TransactionId
                            ||  (u32TransactionId & 1)))
                continue;
        }
        else
        {
            /* SUPGIPMODE_ASYNC_TSC */
            PSUPGIPCPU pGipCpu;

            uint8_t u8ApicId = ASMGetApicId();
            if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
                pGipCpu = &pGip->aCPUs[u8ApicId];
            else
            {
                AssertMsgFailed(("%x\n", u8ApicId));
                pGipCpu = &pGip->aCPUs[0];
            }

            u32TransactionId = pGipCpu->u32TransactionId;
#ifdef RT_OS_L4
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
            u64NanoTS = pGipCpu->u64NanoTS;
            u64TSC = pGipCpu->u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
#ifdef IN_RC
            Assert(!(ASMGetFlags() & X86_EFL_IF));
#else
            if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
                continue;
            if (RT_UNLIKELY(    pGipCpu->u32TransactionId != u32TransactionId
                            ||  (u32TransactionId & 1)))
                continue;
#endif
        }
        break;
    }

    /*
     * Calc NanoTS delta.
     */
    u64Delta -= u64TSC;
    if (u64Delta > u32UpdateIntervalTSC)
    {
        /*
         * We've expired the interval, cap it. If we're here for the 2nd
         * time without any GIP update in-between, the checks against
         * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
         */
        u64Delta = u32UpdateIntervalTSC;
    }
#if !defined(_MSC_VER) || defined(RT_ARCH_AMD64) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
    u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
    u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
#else
    __asm
    {
        mov     eax, dword ptr [u64Delta]
        mul     dword ptr [u32NanoTSFactor0]
        div     dword ptr [u32UpdateIntervalTSC]
        mov     dword ptr [u64Delta], eax
        xor     edx, edx
        mov     dword ptr [u64Delta + 4], edx
    }
#endif

    /*
     * Calculate the time and compare it with the previously returned value.
     *
     * Since this function is called *very* frequently when the VM is running
     * and then mostly on EMT, we can restrict the valid range of the delta
     * (-1s to 2*GipUpdates) and simplify/optimize the default path.
     */
    u64NanoTS += u64Delta;
    uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
    if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
        /* frequent - less than 1s since last call. */;
    else if (   (int64_t)u64DeltaPrev < 0
             && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
    {
        /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
        ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps);
        u64NanoTS = u64PrevNanoTS + 1;
#ifndef IN_RING3
        VM_FF_SET(pVM, VM_FF_TO_R3); /* S10 hack */
#endif
    }
    else if (u64PrevNanoTS)
    {
        /* Something has gone bust; if the offset is negative it's really bad. */
        ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).cBadPrev);
        if ((int64_t)u64DeltaPrev < 0)
            LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
                    u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
        else
            Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
                 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
#ifdef DEBUG_bird
        /** @todo there are some hiccups during boot and reset that can cause 2-5 seconds delays. Investigate... */
        AssertMsg(u64PrevNanoTS > UINT64_C(100000000000) /* 100s */,
                  ("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
                   u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
#endif
    }
    /* else: We're resuming (see TMVirtualResume). */
    if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
        return u64NanoTS;

    /*
     * Attempt updating the previous value, provided we're still ahead of it.
     *
     * There is no point in recalculating u64NanoTS because we got preempted or
     * raced somebody while the GIP was updated, since these are events
     * that might occur at any point in the return path as well.
     */
    for (int cTries = 50;;)
    {
        u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
        if (u64PrevNanoTS >= u64NanoTS)
            break;
        if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
            break;
        AssertBreak(--cTries <= 0);
        if (cTries < 25 && !VM_IS_EMT(pVM)) /* give up early */
            break;
    }

    return u64NanoTS;
}

#endif


/**
 * Get the time when we're not running at 100%
 *
 * @returns The timestamp.
 * @param   pVM     The VM handle.
 */
static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
{
    /*
     * Recalculate the RTTimeNanoTS() value for the period where
     * warp drive has been enabled.
     */
    uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
    u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
    u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
    u64 /= 100;
    u64 += pVM->tm.s.u64VirtualWarpDriveStart;
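    /* Example with assumed numbers: at u32VirtualWarpDrivePercentage = 200, ten
       seconds of host time since the warp drive was engaged count as twenty
       seconds of raw virtual time here. */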

    /*
     * Now we apply the virtual time offset.
     * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
     * machine started if it had been running continuously without any suspends.)
     */
    u64 -= pVM->tm.s.u64VirtualOffset;
    return u64;
}


/**
 * Get the raw virtual time.
 *
 * @returns The current time stamp.
 * @param   pVM     The VM handle.
 */
DECLINLINE(uint64_t) tmVirtualGetRaw(PVM pVM)
{
    if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
        return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
    return tmVirtualGetRawNonNormal(pVM);
}


/**
 * Inlined version of tmVirtualGetEx.
 */
DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
        u64 = tmVirtualGetRaw(pVM);

        /*
         * Use the chance to check for expired timers.
         */
        if (fCheckTimers)
        {
            PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
            if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
                &&  !pVM->tm.s.fRunningQueues
                &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
                     || (   pVM->tm.s.fVirtualSyncTicking
                         && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
                        )
                    )
                &&  !pVM->tm.s.fRunningQueues
               )
            {
                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
                Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
                REMR3NotifyTimerPending(pVM, pVCpuDst);
                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
            }
        }
    }
    else
        u64 = pVM->tm.s.u64Virtual;
    return u64;
}


/**
 * Gets the current TMCLOCK_VIRTUAL time
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 *
 * @remark  While the flow of time will never go backwards, the speed of the
 *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
 *          influenced by power saving (SpeedStep, PowerNow!), while the former
 *          makes use of TSC and kernel timers.
 */
VMM_INT_DECL(uint64_t) TMVirtualGet(PVM pVM)
{
    return tmVirtualGet(pVM, true /*fCheckTimers*/);
}


/**
 * Gets the current TMCLOCK_VIRTUAL time without checking
 * timers or anything.
 *
 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 *
 * @remarks See TMVirtualGet.
 */
VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVM pVM)
{
    return tmVirtualGet(pVM, false /*fCheckTimers*/);
}


/**
 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
 *
 * @returns Host nano second count.
 * @param   pVM                     The VM handle.
 * @param   cVirtTicksToDeadline    The TMCLOCK_VIRTUAL interval.
 */
DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
{
    if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
        return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
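    /* I.e. host ns = virtual ticks * 100 / warp percentage; with an assumed 200%
       warp drive a 10 ms virtual interval corresponds to roughly 5 ms of host time. */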
    return cVirtTicksToDeadline;
}


/**
 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
 *
 * @returns The timestamp.
 * @param   pVM             VM handle.
 * @param   u64             raw virtual time.
 * @param   off             offVirtualSync.
 * @param   pcNsToDeadline  Where to return the number of nano seconds to
 *                          the next virtual sync timer deadline.  Can be
 *                          NULL.
 */
DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, uint64_t off, uint64_t *pcNsToDeadline)
{
    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);

    /*
     * Don't make updates until we've checked the timer queue.
     */
    bool fUpdatePrev = true;
    bool fUpdateOff  = true;
    bool fStop       = false;
    const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
    uint64_t u64Delta = u64 - u64Prev;
    if (RT_LIKELY(!(u64Delta >> 32)))
    {
        uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
        if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
        {
            off -= u64Sub;
            Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
        }
        else
        {
            /* we've completely caught up. */
            STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
            off = pVM->tm.s.offVirtualSyncGivenUp;
            fStop = true;
            Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
        }
    }
    else
    {
        /* More than 4 seconds since last time (or negative), ignore it. */
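        /* (Any bit set in the upper half means u64Delta >= 2^32 ns, i.e. roughly 4.3 seconds.) */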
        fUpdateOff = false;
        fUpdatePrev = !(u64Delta & RT_BIT_64(63));
        Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
    }

    /*
     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.  The current
     * approach is to never pass the head timer; so, when we do, we stop the clock and
     * set the timer pending flag.
     */
    u64 -= off;

    uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
    if (u64Last > u64)
    {
        u64 = u64Last + 1;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
    }

    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (u64 < u64Expire)
    {
        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
        if (fUpdateOff)
            ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
        if (fStop)
            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
        if (fUpdatePrev)
            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
        if (pcNsToDeadline)
        {
            uint64_t cNsToDeadline = u64Expire - u64;
            if (pVM->tm.s.fVirtualSyncCatchUp)
                cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
                                                        pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
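            /* While catching up, the virtual sync clock advances at
               (100 + u32VirtualSyncCatchUpPercentage)% of the virtual clock,
               so the deadline is reached correspondingly sooner. */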
            *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
        }
        tmVirtualSyncUnlock(pVM);
    }
    else
    {
        u64 = u64Expire;
        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);

        VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
        Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        tmVirtualSyncUnlock(pVM);

        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
#ifdef IN_RING3
        REMR3NotifyTimerPending(pVM, pVCpuDst);
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
    }
    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);

    Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
    DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
    return u64;
}


/**
 * tmVirtualSyncGetEx worker for when we get the lock.
 *
 * @returns The timestamp.
 * @param   pVM             The VM handle.
 * @param   u64             The virtual clock timestamp.
 * @param   pcNsToDeadline  Where to return the number of nano seconds to
 *                          the next virtual sync timer deadline.  Can be
 *                          NULL.
 */
DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVM pVM, uint64_t u64, uint64_t *pcNsToDeadline)
{
    /*
     * Not ticking?
     */
    if (!pVM->tm.s.fVirtualSyncTicking)
    {
        u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
        tmVirtualSyncUnlock(pVM);
        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
        Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
        DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
        return u64;
    }

    /*
     * Handle catch up in a separate function.
     */
    uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline);

    /*
     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.  The current
     * approach is to never pass the head timer; so, when we do, we stop the clock and
     * set the timer pending flag.
     */
    u64 -= off;

    uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
    if (u64Last > u64)
    {
        u64 = u64Last + 1;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
    }

    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (u64 < u64Expire)
    {
        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
        tmVirtualSyncUnlock(pVM);
        if (pcNsToDeadline)
            *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
    }
    else
    {
        u64 = u64Expire;
        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);

        VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
        Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        tmVirtualSyncUnlock(pVM);

#ifdef IN_RING3
        REMR3NotifyTimerPending(pVM, pVCpuDst);
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
    }
    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
    Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
    DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
    return u64;
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM             VM handle.
 * @param   fCheckTimers    Check timers or not
 * @param   pcNsToDeadline  Where to return the number of nano seconds to
 *                          the next virtual sync timer deadline.  Can be
 *                          NULL.
 * @thread  EMT.
 */
DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers, uint64_t *pcNsToDeadline)
{
    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);

    uint64_t u64;
    if (!pVM->tm.s.fVirtualSyncTicking)
    {
        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
        u64 = pVM->tm.s.u64VirtualSync;
        DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
        return u64;
    }

    /*
     * Query the virtual clock and do the usual expired timer check.
     */
    Assert(pVM->tm.s.cVirtualTicking);
    u64 = tmVirtualGetRaw(pVM);
    if (fCheckTimers)
    {
        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
        if (    !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
            &&  pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
        {
            Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
#endif
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
        }
    }

    /*
     * If we can get the lock, get it.  The result is much more reliable.
     *
     * Note! This is where all clock source devices branch off because they
     *       will be owning the lock already.  The 'else' is taken by code
     *       which is less picky or hasn't been adjusted yet.
     */
    if (tmVirtualSyncTryLock(pVM) == VINF_SUCCESS)
        return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);

    /*
     * When the clock is ticking, not doing catch ups and not running into an
     * expired time, we can get away without locking.  Try this first.
     */
    uint64_t off;
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
            {
                off = u64 - off;
                uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
                if (off < u64Expire)
                {
                    if (pcNsToDeadline)
                        *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
                    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
                    Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
                    DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
                    return off;
                }
            }
        }
    }
    else
    {
        off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
        {
            if (pcNsToDeadline)
                *pcNsToDeadline = 0;
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
            Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
            DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
            return off;
        }
    }

    /*
     * Read the offset and adjust if we're playing catch-up.
     *
     * The catch-up adjusting works by decrementing the offset by a percentage of
     * the time elapsed since the previous TMVirtualGetSync call.
     *
     * It's possible to get a very long or even negative interval between two reads
     * for the following reasons:
     *  - Someone might have suspended the process execution, frequently the case when
     *    debugging the process.
     *  - We might be on a different CPU whose TSC isn't quite in sync with the
     *    other CPUs in the system.
     *  - Another thread is racing us and we might have been preempted while inside
     *    this function.
     *
     * Assuming nano second virtual time, we can simply ignore any interval which has
     * any of the upper 32 bits set.
     */
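    /*
     * Worked example with assumed numbers: at u32VirtualSyncCatchUpPercentage = 25
     * and 1 000 000 ns elapsed since the previous call, the offset is reduced by
     * 250 000 ns, i.e. the virtual sync clock runs at 125% speed until it has
     * caught up (or the remaining lag is given up on).
     */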
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    int cOuterTries = 42;
    for (;; cOuterTries--)
    {
        /* Try to grab the lock; things get simpler when owning the lock. */
        int rcLock = tmVirtualSyncTryLock(pVM);
        if (RT_SUCCESS_NP(rcLock))
            return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);

        /* Re-check the ticking flag. */
        if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
            if (   ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
                && cOuterTries > 0)
                continue;
            if (pcNsToDeadline)
                *pcNsToDeadline = 0;
            Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
            DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
            return off;
        }

        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            /* No changes allowed; try to get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            uint32_t const u32Pct     = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64 - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                    {
                        off -= u64Sub;
                        Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
                    }
                    else
                    {
                        /* we've completely caught up. */
                        STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
                        off = offGivenUp;
                        Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
                    }
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
                if (cOuterTries <= 0)
                    break; /* enough */
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
            break; /* Got a consistent offset */
        else if (cOuterTries <= 0)
            break; /* enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);

    /*
     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.  The current
     * approach is to never pass the head timer; so, when we do, we stop the clock and
     * set the timer pending flag.
     */
    u64 -= off;
/** @todo u64VirtualSyncLast */
    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (u64 >= u64Expire)
    {
        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
            Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        }
        else
            Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
    }
    else if (pcNsToDeadline)
    {
        uint64_t cNsToDeadline = u64Expire - u64;
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
            cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
                                                    ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
        *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
    }

    Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
    DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
    return u64;
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVM pVM)
{
    return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
 * TMCLOCK_VIRTUAL.
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
{
    return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM             VM handle.
 * @param   fCheckTimers    Check timers on the virtual clock or not.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
{
    return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/);
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
 * without checking timers running on TMCLOCK_VIRTUAL.
 *
 * @returns The timestamp.
 * @param   pVM             VM handle.
 * @param   pcNsToDeadline  Where to return the number of nano seconds to
 *                          the next virtual sync timer deadline.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVM pVM, uint64_t *pcNsToDeadline)
{
    uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
    uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp);
    *pcNsToDeadline = cNsToDeadlineTmp;
    return u64Now;
}


/**
 * Gets the number of nano seconds to the next virtual sync deadline.
 *
 * @returns The number of host nano seconds to the next virtual sync timer deadline.
 * @param   pVM     VM handle.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVM pVM)
{
    uint64_t cNsToDeadline;
    tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline);
    return cNsToDeadline;
}


/**
 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
 *
 * @return  The current lag.
 * @param   pVM     VM handle.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
{
    return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
}


/**
 * Get the current catch-up percent.
 *
 * @return  The current catch-up percent.  0 means running at the same speed as the virtual clock.
 * @param   pVM     VM handle.
 */
VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
{
    if (pVM->tm.s.fVirtualSyncCatchUp)
        return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
    return 0;
}


/**
 * Gets the current TMCLOCK_VIRTUAL frequency.
 *
 * @returns The frequency.
 * @param   pVM     VM handle.
 */
VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
{
    return TMCLOCK_FREQ_VIRTUAL;
}


/**
 * Worker for TMR3PauseClocks.
 *
 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
 * @param   pVM     The VM handle.
 */
int tmVirtualPauseLocked(PVM pVM)
{
    uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
    AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_INTERNAL_ERROR);
    if (c == 0)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
        pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
    }
    return VINF_SUCCESS;
}


/**
 * Worker for TMR3ResumeClocks.
 *
 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
 * @param   pVM     The VM handle.
 */
int tmVirtualResumeLocked(PVM pVM)
{
    uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
    AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_INTERNAL_ERROR);
    if (c == 1)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
        pVM->tm.s.u64VirtualRawPrev         = 0;
        pVM->tm.s.u64VirtualWarpDriveStart  = tmVirtualGetRawNanoTS(pVM);
        pVM->tm.s.u64VirtualOffset          = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
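        /* From here on, raw NanoTS minus this offset yields the virtual time,
           so the virtual clock resumes exactly at the value it was paused at. */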
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
    }
    return VINF_SUCCESS;
}


/**
 * Converts from virtual ticks to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pVM             The VM handle.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks;
}


/**
 * Converts from virtual ticks to microseconds.
 *
 * @returns microseconds.
 * @param   pVM             The VM handle.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks / 1000;
}


/**
 * Converts from virtual ticks to milliseconds.
 *
 * @returns milliseconds.
 * @param   pVM             The VM handle.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks / 1000000;
}


/**
 * Converts from nanoseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle.
 * @param   u64NanoTS   The nanosecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64NanoTS;
}


/**
 * Converts from microseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle.
 * @param   u64MicroTS  The microsecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64MicroTS * 1000;
}


/**
 * Converts from milliseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle.
 * @param   u64MilliTS  The millisecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64MilliTS * 1000000;
}