VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@ 85416

Last change on this file since 85416 was 85186, checked in by vboxsync, 4 years ago

VMM/TMAllVirtual.cpp: Missing DECLCALLBACK on tmVirtualNanoTSBadCpuIndex definition. bugref:9794

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 37.0 KB
1/* $Id: TMAllVirtual.cpp 85186 2020-07-10 13:23:30Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/dbgftrace.h>
25#ifdef IN_RING3
26# include <iprt/thread.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vmm/vmcc.h>
30#include <VBox/vmm/vmm.h>
31#include <VBox/err.h>
32#include <VBox/log.h>
33#include <VBox/sup.h>
34
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39
40
41
42/**
43 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
44 */
45DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
46 uint64_t u64PrevNanoTS)
47{
48 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
49 pData->cBadPrev++;
50 if ((int64_t)u64DeltaPrev < 0)
51 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
52 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
53 else
54 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
55 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
56}
57
58
59/**
60 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
61 *
62 * This is the initial worker, so the first call in each context ends up here.
63 * It is also used should the delta rating of the host CPUs change or if the
64 * fGetGipCpu feature the current worker relies upon becomes unavailable. The
65 * last two events may occur as CPUs are taken online.
66 */
67DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData)
68{
69 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
70
71 /*
72 * We require a valid GIP for the selection below. Invalid GIP is fatal.
73 */
74 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
75 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
76 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
77 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
78 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
79
80 /*
81 * Determine the new worker.
82 */
83 PFNTIMENANOTSINTERNAL pfnWorker;
84 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
85 switch (pGip->u32Mode)
86 {
87 case SUPGIPMODE_SYNC_TSC:
88 case SUPGIPMODE_INVARIANT_TSC:
89#ifdef IN_RING0
90 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
91 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
92 else
93 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
94#else
95 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
96 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
97 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
98 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
99 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
100 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
101 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
102 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
103 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
104 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
105 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
106 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
107 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
108 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
109 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
110 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
111 else
112 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
113 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
114 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
115#endif
116 break;
117
118 case SUPGIPMODE_ASYNC_TSC:
119#ifdef IN_RING0
120 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
121#else
122 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
123 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
124 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
125 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
126 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
127 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
128 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
129 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
130 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
131 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
132 else
133 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
134#endif
135 break;
136
137 default:
138 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
139 }
140
141 /*
142 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
143 */
144 ASMAtomicWritePtr((void * volatile *)&CTX_SUFF(pVM->tm.s.pfnVirtualGetRaw), (void *)(uintptr_t)pfnWorker);
145 return pfnWorker(pData);
146}
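/* Descriptive note on the selection above: in ring-0 the choice is only between
 * the LFENCE/legacy and no-delta/with-delta worker variants, while outside
 * ring-0 a CPU identification method (IDTR limit, RDTSCP, extended 0Bh/8000001Eh
 * APIC ID, or plain APIC ID) is additionally chosen from the pGip->fGetGipCpu
 * capability bits; that identification is what the with-delta and async workers
 * need in order to look up the per-CPU GIP data. */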
147
148
149/**
150 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
151 */
152DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, uint16_t idApic, uint16_t iCpuSet,
153 uint16_t iGipCpu)
154{
155 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
156 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x\n", pVM, idApic, iCpuSet, iGipCpu));
157#ifndef _MSC_VER
158 return UINT64_MAX;
159#endif
160}
161
162
163/**
164 * Wrapper around the IPRT GIP time methods.
165 */
166DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
167{
168# ifdef IN_RING3
169 uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData));
170# else /* !IN_RING3 */
171 uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
172 uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
173 if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
174 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
175# endif /* !IN_RING3 */
176 /*DBGFTRACE_POS_U64(pVM, u64);*/
177 return u64;
178}
179
180
181/**
182 * Get the time when we're not running at 100%
183 *
184 * @returns The timestamp.
185 * @param pVM The cross context VM structure.
186 */
187static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM)
188{
189 /*
190 * Recalculate the RTTimeNanoTS() value for the period where
191 * warp drive has been enabled.
192 */
193 uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
194 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
195 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
196 u64 /= 100;
197 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
198
199 /*
200 * Now we apply the virtual time offset.
201 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
202 * machine started if it had been running continuously without any suspends.)
203 */
204 u64 -= pVM->tm.s.u64VirtualOffset;
205 return u64;
206}
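/* Worked example for the scaling above, using assumed numbers: with
 * u32VirtualWarpDrivePercentage = 200 and a raw clock that is 10 000 000 000 ns
 * past u64VirtualWarpDriveStart, the result is
 *     10 000 000 000 * 200 / 100 = 20 000 000 000 ns
 * past the warp start point, i.e. virtual time advances twice as fast as the
 * host clock while warp drive is active; u64VirtualOffset is then subtracted
 * exactly as in the normal (100%) case. */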
207
208
209/**
210 * Get the raw virtual time.
211 *
212 * @returns The current time stamp.
213 * @param pVM The cross context VM structure.
214 */
215DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
216{
217 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
218 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
219 return tmVirtualGetRawNonNormal(pVM);
220}
221
222
223/**
224 * Inlined version of tmVirtualGetEx.
225 */
226DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
227{
228 uint64_t u64;
229 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
230 {
231 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
232 u64 = tmVirtualGetRaw(pVM);
233
234 /*
235 * Use the chance to check for expired timers.
236 */
237 if (fCheckTimers)
238 {
239 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
240 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
241 && !pVM->tm.s.fRunningQueues
242 && ( pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
243 || ( pVM->tm.s.fVirtualSyncTicking
244 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
245 )
246 )
247 && !pVM->tm.s.fRunningQueues
248 )
249 {
250 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
251 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
252 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
253#ifdef IN_RING3
254 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
255#endif
256 }
257 }
258 }
259 else
260 u64 = pVM->tm.s.u64Virtual;
261 return u64;
262}
263
264
265/**
266 * Gets the current TMCLOCK_VIRTUAL time
267 *
268 * @returns The timestamp.
269 * @param pVM The cross context VM structure.
270 *
271 * @remark While the flow of time will never go backwards, the speed of the
272 * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
273 * influenced by power saving (SpeedStep, PowerNow!), while the former
274 * makes use of TSC and kernel timers.
275 */
276VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
277{
278 return tmVirtualGet(pVM, true /*fCheckTimers*/);
279}
280
281
282/**
283 * Gets the current TMCLOCK_VIRTUAL time without checking
284 * timers or anything.
285 *
286 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
287 *
288 * @returns The timestamp.
289 * @param pVM The cross context VM structure.
290 *
291 * @remarks See TMVirtualGet.
292 */
293VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
294{
295 return tmVirtualGet(pVM, false /*fCheckTimers*/);
296}
297
298
299/**
300 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
301 *
302 * @returns Host nano second count.
303 * @param pVM The cross context VM structure.
304 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
305 */
306DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
307{
308 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
309 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
310 return cVirtTicksToDeadline;
311}
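/* Example with assumed numbers: at a 200% warp drive percentage, a deadline
 * 400 virtual ns away is ASMMultU64ByU32DivByU32(400, 100, 200) = 200 host ns
 * away, since the virtual clock ticks twice for every host nanosecond. */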
312
313
314/**
315 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
316 *
317 * @returns The timestamp.
318 * @param pVM The cross context VM structure.
319 * @param u64 The raw virtual time.
320 * @param off The offVirtualSync value.
321 * @param pcNsToDeadline Where to return the number of nano seconds to
322 * the next virtual sync timer deadline. Can be
323 * NULL.
324 */
325DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off, uint64_t *pcNsToDeadline)
326{
327 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
328
329 /*
330 * Don't make updates until we've checked the timer queue.
331 */
332 bool fUpdatePrev = true;
333 bool fUpdateOff = true;
334 bool fStop = false;
335 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
336 uint64_t u64Delta = u64 - u64Prev;
337 if (RT_LIKELY(!(u64Delta >> 32)))
338 {
339 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
340 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
341 {
342 off -= u64Sub;
343 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
344 }
345 else
346 {
347 /* we've completely caught up. */
348 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
349 off = pVM->tm.s.offVirtualSyncGivenUp;
350 fStop = true;
351 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
352 }
353 }
354 else
355 {
356 /* More than 4 seconds since last time (or negative), ignore it. */
357 fUpdateOff = false;
358 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
359 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
360 }
361
362 /*
363 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
364 * approach is to never pass the head timer. So, when we do, stop the clock and
365 * set the timer pending flag.
366 */
367 u64 -= off;
368
369 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
370 if (u64Last > u64)
371 {
372 u64 = u64Last + 1;
373 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
374 }
375
376 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
377 if (u64 < u64Expire)
378 {
379 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
380 if (fUpdateOff)
381 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
382 if (fStop)
383 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
384 if (fUpdatePrev)
385 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
386 if (pcNsToDeadline)
387 {
388 uint64_t cNsToDeadline = u64Expire - u64;
389 if (pVM->tm.s.fVirtualSyncCatchUp)
390 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
391 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
392 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
393 }
394 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
395 }
396 else
397 {
398 u64 = u64Expire;
399 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
400 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
401
402 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
403 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
404 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
405 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
406 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
407 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
408
409 if (pcNsToDeadline)
410 *pcNsToDeadline = 0;
411#ifdef IN_RING3
412 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
413#endif
414 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
415 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
416 }
417 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
418
419 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
420 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
421 return u64;
422}
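/* Numeric sketch of the catch-up math above, with assumed figures: at a 25%
 * catch-up percentage, an elapsed u64Delta of 1 000 000 ns yields
 *     u64Sub = ASMMultU64ByU32DivByU32(1 000 000, 25, 100) = 250 000 ns,
 * which is taken off the offset, so the virtual sync clock effectively runs at
 * 125% speed until 'off' drops to offVirtualSyncGivenUp and catch-up stops. */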
423
424
425/**
426 * tmVirtualSyncGetEx worker for when we get the lock.
427 *
428 * @returns The timestamp.
429 * @param pVM The cross context VM structure.
430 * @param u64 The virtual clock timestamp.
431 * @param pcNsToDeadline Where to return the number of nano seconds to
432 * the next virtual sync timer deadline. Can be
433 * NULL.
434 */
435DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline)
436{
437 /*
438 * Not ticking?
439 */
440 if (!pVM->tm.s.fVirtualSyncTicking)
441 {
442 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
443 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
444 if (pcNsToDeadline)
445 *pcNsToDeadline = 0;
446 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
447 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
448 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
449 return u64;
450 }
451
452 /*
453 * Handle catch up in a separate function.
454 */
455 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
456 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
457 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline);
458
459 /*
460 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
461 * approach is to never pass the head timer. So, when we do, stop the clock and
462 * set the timer pending flag.
463 */
464 u64 -= off;
465
466 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
467 if (u64Last > u64)
468 {
469 u64 = u64Last + 1;
470 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
471 }
472
473 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
474 if (u64 < u64Expire)
475 {
476 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
477 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
478 if (pcNsToDeadline)
479 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
480 }
481 else
482 {
483 u64 = u64Expire;
484 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
485 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
486
487 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
488 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
489 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
490 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
491 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
492 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
493
494#ifdef IN_RING3
495 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
496#endif
497 if (pcNsToDeadline)
498 *pcNsToDeadline = 0;
499 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
500 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
501 }
502 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
503 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
504 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
505 return u64;
506}
507
508
509/**
510 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
511 *
512 * @returns The timestamp.
513 * @param pVM The cross context VM structure.
514 * @param fCheckTimers Check timers or not
515 * @param pcNsToDeadline Where to return the number of nano seconds to
516 * the next virtual sync timer deadline. Can be
517 * NULL.
518 * @thread EMT.
519 */
520DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline)
521{
522 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
523
524 uint64_t u64;
525 if (!pVM->tm.s.fVirtualSyncTicking)
526 {
527 if (pcNsToDeadline)
528 *pcNsToDeadline = 0;
529 u64 = pVM->tm.s.u64VirtualSync;
530 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
531 return u64;
532 }
533
534 /*
535 * Query the virtual clock and do the usual expired timer check.
536 */
537 Assert(pVM->tm.s.cVirtualTicking);
538 u64 = tmVirtualGetRaw(pVM);
539 if (fCheckTimers)
540 {
541 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
542 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
543 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
544 {
545 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
546 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
547#ifdef IN_RING3
548 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
549#endif
550 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
551 }
552 }
553
554 /*
555 * If we can get the lock, get it. The result is much more reliable.
556 *
557 * Note! This is where all clock source devices branch off because they
558 * will be owning the lock already. The 'else' is taken by code
559 * which is less picky or hasn't been adjusted yet.
560 */
561 if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
562 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
563
564 /*
565 * When the clock is ticking, not doing catch ups and not running into an
566 * expired time, we can get away without locking. Try this first.
567 */
568 uint64_t off;
569 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
570 {
571 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
572 {
573 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
574 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
575 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
576 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
577 {
578 off = u64 - off;
579 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
580 if (off < u64Expire)
581 {
582 if (pcNsToDeadline)
583 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
584 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
585 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
586 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
587 return off;
588 }
589 }
590 }
591 }
592 else
593 {
594 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
595 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
596 {
597 if (pcNsToDeadline)
598 *pcNsToDeadline = 0;
599 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
600 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
601 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
602 return off;
603 }
604 }
605
606 /*
607 * Read the offset and adjust if we're playing catch-up.
608 *
609 * The catch-up adjustment works by decrementing the offset by a percentage of
610 * the time elapsed since the previous TMVirtualGetSync call.
611 *
612 * It's possible to get a very long or even negative interval between two reads
613 * for the following reasons:
614 * - Someone might have suspended the process execution, frequently the case when
615 * debugging the process.
616 * - We might be on a different CPU whose TSC isn't quite in sync with the
617 * other CPUs in the system.
618 * - Another thread is racing us and we might have been preempted while inside
619 * this function.
620 *
621 * Assuming nano second virtual time, we can simply ignore any interval which has
622 * any of the upper 32 bits set.
623 */
624 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
625 int cOuterTries = 42;
626 for (;; cOuterTries--)
627 {
628 /* Try grab the lock, things get simpler when owning the lock. */
629 int rcLock = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
630 if (RT_SUCCESS_NP(rcLock))
631 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
632
633 /* Re-check the ticking flag. */
634 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
635 {
636 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
637 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
638 && cOuterTries > 0)
639 continue;
640 if (pcNsToDeadline)
641 *pcNsToDeadline = 0;
642 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
643 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
644 return off;
645 }
646
647 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
648 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
649 {
650 /* No changes allowed, try get a consistent set of parameters. */
651 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
652 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
653 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
654 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
655 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
656 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
657 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
658 || cOuterTries <= 0)
659 {
660 uint64_t u64Delta = u64 - u64Prev;
661 if (RT_LIKELY(!(u64Delta >> 32)))
662 {
663 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
664 if (off > u64Sub + offGivenUp)
665 {
666 off -= u64Sub;
667 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
668 }
669 else
670 {
671 /* we've completely caught up. */
672 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
673 off = offGivenUp;
674 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
675 }
676 }
677 else
678 /* More than 4 seconds since last time (or negative), ignore it. */
679 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
680
681 /* Check that we're still running and in catch up. */
682 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
683 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
684 break;
685 if (cOuterTries <= 0)
686 break; /* enough */
687 }
688 }
689 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
690 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
691 break; /* Got a consistent offset */
692 else if (cOuterTries <= 0)
693 break; /* enough */
694 }
695 if (cOuterTries <= 0)
696 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
697
698 /*
699 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
700 * approach is to never pass the head timer. So, when we do, stop the clock and
701 * set the timer pending flag.
702 */
703 u64 -= off;
704/** @todo u64VirtualSyncLast */
705 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
706 if (u64 >= u64Expire)
707 {
708 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
709 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
710 {
711 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
712 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
713 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
714#ifdef IN_RING3
715 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
716#endif
717 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
718 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
719 }
720 else
721 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
722 if (pcNsToDeadline)
723 *pcNsToDeadline = 0;
724 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
725 }
726 else if (pcNsToDeadline)
727 {
728 uint64_t cNsToDeadline = u64Expire - u64;
729 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
730 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
731 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
732 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
733 }
734
735 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
736 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
737 return u64;
738}
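/* Descriptive note on the lock-free path above: when the lock cannot be taken,
 * the loop retries up to 42 times to observe a consistent set of
 * u64VirtualSyncCatchUpPrev, offVirtualSyncGivenUp and the catch-up percentage;
 * if it never manages to, StatVirtualSyncGetELoop is bumped and the values last
 * read are used anyway. The '@todo u64VirtualSyncLast' marker appears to note
 * that, unlike the locked paths, this path does not yet clamp the result
 * against the last value handed out. */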
739
740
741/**
742 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
743 *
744 * @returns The timestamp.
745 * @param pVM The cross context VM structure.
746 * @thread EMT.
747 * @remarks May set the timer and virtual sync FFs.
748 */
749VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
750{
751 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
752}
753
754
755/**
756 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
757 * TMCLOCK_VIRTUAL.
758 *
759 * @returns The timestamp.
760 * @param pVM The cross context VM structure.
761 * @thread EMT.
762 * @remarks May set the timer and virtual sync FFs.
763 */
764VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
765{
766 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
767}
768
769
770/**
771 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
772 *
773 * @returns The timestamp.
774 * @param pVM The cross context VM structure.
775 * @param fCheckTimers Check timers on the virtual clock or not.
776 * @thread EMT.
777 * @remarks May set the timer and virtual sync FFs.
778 */
779VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
780{
781 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/);
782}
783
784
785/**
786 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
787 * without checking timers running on TMCLOCK_VIRTUAL.
788 *
789 * @returns The timestamp.
790 * @param pVM The cross context VM structure.
791 * @param pcNsToDeadline Where to return the number of nano seconds to
792 * the next virtual sync timer deadline.
793 * @thread EMT.
794 * @remarks May set the timer and virtual sync FFs.
795 */
796VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline)
797{
798 uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
799 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp);
800 *pcNsToDeadline = cNsToDeadlineTmp;
801 return u64Now;
802}
803
804
805/**
806 * Gets the number of nano seconds to the next virtual sync deadline.
807 *
808 * @returns The number of TMCLOCK_VIRTUAL ticks.
809 * @param pVM The cross context VM structure.
810 * @thread EMT.
811 * @remarks May set the timer and virtual sync FFs.
812 */
813VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM)
814{
815 uint64_t cNsToDeadline;
816 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline);
817 return cNsToDeadline;
818}
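/* Hypothetical usage sketch (caller and constants outside this file, shown for
 * illustration only): a ring-3 caller could bound a wait on this value so it
 * never sleeps past the next virtual sync timer deadline, e.g.:
 *     uint64_t const cNsToDeadline = TMVirtualSyncGetNsToDeadline(pVM);
 *     if (cNsToDeadline > RT_NS_1MS)
 *         RTThreadSleep((RTMSINTERVAL)RT_MIN(cNsToDeadline / RT_NS_1MS, 50));
 */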
819
820
821/**
822 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
823 *
824 * @return The current lag.
825 * @param pVM The cross context VM structure.
826 */
827VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
828{
829 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
830}
831
832
833/**
834 * Get the current catch-up percent.
835 *
836 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
837 * @param pVM The cross context VM structure.
838 */
839VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
840{
841 if (pVM->tm.s.fVirtualSyncCatchUp)
842 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
843 return 0;
844}
845
846
847/**
848 * Gets the current TMCLOCK_VIRTUAL frequency.
849 *
850 * @returns The frequency.
851 * @param pVM The cross context VM structure.
852 */
853VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
854{
855 NOREF(pVM);
856 return TMCLOCK_FREQ_VIRTUAL;
857}
858
859
860/**
861 * Worker for TMR3PauseClocks.
862 *
863 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
864 * @param pVM The cross context VM structure.
865 */
866int tmVirtualPauseLocked(PVMCC pVM)
867{
868 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
869 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
870 if (c == 0)
871 {
872 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
873 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
874 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
875 }
876 return VINF_SUCCESS;
877}
878
879
880/**
881 * Worker for TMR3ResumeClocks.
882 *
883 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
884 * @param pVM The cross context VM structure.
885 */
886int tmVirtualResumeLocked(PVMCC pVM)
887{
888 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
889 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
890 if (c == 1)
891 {
892 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
893 pVM->tm.s.u64VirtualRawPrev = 0;
894 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
895 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
896 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
897 }
898 return VINF_SUCCESS;
899}
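/* Bookkeeping note for the pause/resume pair above: the last EMT to pause
 * freezes the clock by storing the current raw time in u64Virtual, and the
 * first EMT to resume recomputes u64VirtualOffset (and the warp drive start)
 * so the clock continues from that frozen value. This is what keeps suspended
 * periods invisible to TMCLOCK_VIRTUAL readers. */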
900
901
902/**
903 * Converts from virtual ticks to nanoseconds.
904 *
905 * @returns nanoseconds.
906 * @param pVM The cross context VM structure.
907 * @param u64VirtualTicks The virtual ticks to convert.
908 * @remark There could be rounding errors here. We just do a simple integer divide
909 * without any adjustments.
910 */
911VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
912{
913 NOREF(pVM);
914 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
915 return u64VirtualTicks;
916}
917
918
919/**
920 * Converts from virtual ticks to microseconds.
921 *
922 * @returns microseconds.
923 * @param pVM The cross context VM structure.
924 * @param u64VirtualTicks The virtual ticks to convert.
925 * @remark There could be rounding errors here. We just do a simple integer divide
926 * without any adjustments.
927 */
928VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
929{
930 NOREF(pVM);
931 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
932 return u64VirtualTicks / 1000;
933}
934
935
936/**
937 * Converts from virtual ticks to milliseconds.
938 *
939 * @returns milliseconds.
940 * @param pVM The cross context VM structure.
941 * @param u64VirtualTicks The virtual ticks to convert.
942 * @remark There could be rounding errors here. We just do a simple integer divide
943 * without any adjustments.
944 */
945VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
946{
947 NOREF(pVM);
948 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
949 return u64VirtualTicks / 1000000;
950}
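/* Rounding example for the three conversions above (assumed input):
 * 2 999 999 999 virtual ticks convert to 2 999 999 us and 2 999 ms; the
 * remainders are simply truncated by the integer divides, as the remarks say. */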
951
952
953/**
954 * Converts from nanoseconds to virtual ticks.
955 *
956 * @returns virtual ticks.
957 * @param pVM The cross context VM structure.
958 * @param u64NanoTS The nanosecond value to convert.
959 * @remark There could be rounding and overflow errors here.
960 */
961VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
962{
963 NOREF(pVM);
964 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
965 return u64NanoTS;
966}
967
968
969/**
970 * Converts from microseconds to virtual ticks.
971 *
972 * @returns virtual ticks.
973 * @param pVM The cross context VM structure.
974 * @param u64MicroTS The microsecond value to convert.
975 * @remark There could be rounding and overflow errors here.
976 */
977VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
978{
979 NOREF(pVM);
980 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
981 return u64MicroTS * 1000;
982}
983
984
985/**
986 * Converts from milliseconds to virtual ticks.
987 *
988 * @returns virtual ticks.
989 * @param pVM The cross context VM structure.
990 * @param u64MilliTS The millisecond value to convert.
991 * @remark There could be rounding and overflow errors here.
992 */
993VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
994{
995 NOREF(pVM);
996 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
997 return u64MilliTS * 1000000;
998}
999