VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@ 107608

Last change on this file since 107608 was 107203, checked in by vboxsync, 2 months ago

VMM/TM: darwin.arm64 build fix. jiraref:VBP-1466

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 44.9 KB
1/* $Id: TMAllVirtual.cpp 107203 2024-11-30 01:59:26Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_TM
33#include <VBox/vmm/tm.h>
34#include <VBox/vmm/dbgftrace.h>
35#ifdef IN_RING3
36# include <iprt/thread.h>
37#endif
38#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
39# include <iprt/x86.h>
40#endif
41#include "TMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/vmm/vmm.h>
44#include <VBox/err.h>
45#include <VBox/log.h>
46#include <VBox/sup.h>
47
48#include <iprt/time.h>
49#include <iprt/assert.h>
50#include <iprt/asm.h>
51#include <iprt/asm-math.h>
52
53
54
55/**
56 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
57 */
58DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
59 uint64_t u64PrevNanoTS)
60{
61 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
62 pData->cBadPrev++;
63 if ((int64_t)u64DeltaPrev < 0)
64 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
65 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
66 else
67 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
68 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
69}
70
71
72#ifdef IN_RING3
73/**
74 * @callback_method_impl{FNTIMENANOTSINTERNAL, For driverless mode.}
75 */
76static DECLCALLBACK(uint64_t) tmR3VirtualNanoTSDriverless(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
77{
78 RT_NOREF(pData);
79 if (pExtra)
80 pExtra->uTSCValue = ASMReadTSC();
81 return RTTimeSystemNanoTS();
82}
83#endif
84
85
86/**
87 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
88 *
89 * This is the initial worker, so the first call in each context ends up here.
90 * It is also used should the delta rating of the host CPUs change or if the
91 * fGetGipCpu feature the current worker relies upon becomes unavailable. The
92 * last two events may occur as CPUs are taken online.
93 */
94DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
95{
96 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
97 PFNTIMENANOTSINTERNAL pfnWorker;
98
99 /*
100 * We require a valid GIP for the selection below.
101 * Invalid GIP is fatal, though we have to allow no GIP in driverless mode (ring-3 only).
102 */
103#ifdef VBOX_WITH_R0_MODULES
104 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
105# ifdef IN_RING3
106 if (pGip)
107# endif
108 {
109 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
110 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
111 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
112 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
113
114 /*
115 * Determine the new worker.
116 */
117# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
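 /* LFENCE was introduced with SSE2, so the SSE2 CPUID feature bit tells us whether
    the 'LFence' flavour of the workers below can be used; otherwise the 'Legacy'
    flavour is selected. */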
118 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
119# endif
120 switch (pGip->u32Mode)
121 {
122 case SUPGIPMODE_INVARIANT_TSC:
123 case SUPGIPMODE_SYNC_TSC:
124# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
125# ifdef IN_RING0
126 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
127 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
128 else
129 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
130# else
131 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
132 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
133 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
134 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
135 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
136 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
137 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
138 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
139 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
140 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
141 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
142 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
143 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
144 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
145 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
146 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
147 else
148 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
149 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
150 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
151# endif
152# else /* !AMD64 && !X86 */
153 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
154 pfnWorker = RTTimeNanoTSSyncInvarNoDelta;
155 else
156 {
157# ifdef IN_RING0
158 pfnWorker = RTTimeNanoTSSyncInvarWithDelta;
159# elif defined(RT_ARCH_ARM64)
160 AssertFatal(pGip->fGetGipCpu & SUPGIPGETCPU_TPIDRRO_EL0);
161 pfnWorker = RTTimeNanoTSSyncInvarWithDeltaUseTpIdRRo;
162# else
163# error "port me"
164# endif
165 }
166# endif /* !AMD64 && !X86 */
167 break;
168
169 case SUPGIPMODE_ASYNC_TSC:
170# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
171# ifdef IN_RING0
172 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
173# else
174 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
175 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
176 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
177 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
178 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
179 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
180 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
181 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
182 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
183 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
184 else
185 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
186# endif
187# else /* !AMD64 && !X86 */
188# ifdef IN_RING0
189 pfnWorker = RTTimeNanoTSASync;
190# elif defined(RT_ARCH_ARM64)
191 AssertFatal(pGip->fGetGipCpu & SUPGIPGETCPU_TPIDRRO_EL0);
192 pfnWorker = RTTimeNanoTSSyncInvarWithDeltaUseTpIdRRo;
193# else
194# error "port me"
195# endif
196# endif /* !AMD64 && !X86 */
197 break;
198
199 default:
200 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
201 }
202 }
203# ifdef IN_RING3
204 else
205# endif
206#endif /* VBOX_WITH_R0_MODULES */
207#ifdef IN_RING3
208 pfnWorker = tmR3VirtualNanoTSDriverless;
209#endif
210
211 /*
212 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
213 */
214 ASMAtomicWritePtr((void * volatile *)&pVM->VMCC_CTX(tm).s.pfnVirtualGetRaw, (void *)(uintptr_t)pfnWorker);
215 return pfnWorker(pData, pExtra);
216}
217
218
219/**
220 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
221 */
222DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra,
223 uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
224{
225 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
226 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x pExtra=%p\n", pVM, idApic, iCpuSet, iGipCpu, pExtra));
227#ifndef _MSC_VER
228 return UINT64_MAX;
229#endif
230}
231
232
233/**
234 * Wrapper around the IPRT GIP time methods.
235 */
236DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
237{
238#ifdef IN_RING3
239 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, NULL /*pExtra*/);
240#elif defined(IN_RING0)
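 /* c1nsSteps is bumped by the worker whenever it has to apply a 1ns step to keep
    the returned time monotonic; if that happened, schedule a trip back to ring-3. */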
241 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
242 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, NULL /*pExtra*/);
243 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
244 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
245#else
246# error "unsupported context"
247#endif
248 /*DBGFTRACE_POS_U64(pVM, u64);*/
249 return u64;
250}
251
252
253/**
254 * Wrapper around the IPRT GIP time methods, extended version.
255 */
256DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
257{
258 RTITMENANOTSEXTRA Extra;
259#ifdef IN_RING3
260 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, &Extra);
261#elif defined(IN_RING0)
262 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
263 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, &Extra);
264 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
265 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
266#else
267# error "unsupported context"
268#endif
269 if (puTscNow)
270 *puTscNow = Extra.uTSCValue;
271 /*DBGFTRACE_POS_U64(pVM, u64);*/
272 return u64;
273}
274
275
276/**
277 * Get the time when we're not running at 100%
278 *
279 * @returns The timestamp.
280 * @param pVM The cross context VM structure.
281 * @param puTscNow Where to return the TSC corresponding to the returned
282 * timestamp (delta adjusted). Optional.
283 */
284static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
285{
286 /*
287 * Recalculate the RTTimeNanoTS() value for the period where
288 * warp drive has been enabled.
289 */
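 /* I.e. u64 = warpstart + (raw - warpstart) * percentage / 100. With a 200% warp
    drive one real second after u64VirtualWarpDriveStart yields two seconds of
    virtual time; with 50% it yields half a second. */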
290 uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
291 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
292 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
293 u64 /= 100;
294 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
295
296 /*
297 * Now we apply the virtual time offset.
298 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
299 * machine started if it had been running continuously without any suspends.)
300 */
301 u64 -= pVM->tm.s.u64VirtualOffset;
302 return u64;
303}
304
305
306/**
307 * Get the raw virtual time.
308 *
309 * @returns The current time stamp.
310 * @param pVM The cross context VM structure.
311 */
312DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
313{
314 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
315 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
316 return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
317}
318
319
320/**
321 * Get the raw virtual time, extended version.
322 *
323 * @returns The current time stamp.
324 * @param pVM The cross context VM structure.
325 * @param puTscNow Where to return the TSC corresponding to the returned
326 * timestamp (delta adjusted). Optional.
327 */
328DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
329{
330 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
331 return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
332 return tmVirtualGetRawNonNormal(pVM, puTscNow);
333}
334
335
336/**
337 * Inlined version of tmVirtualGetEx.
338 */
339DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
340{
341 uint64_t u64;
342 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
343 {
344 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
345 u64 = tmVirtualGetRaw(pVM);
346
347 /*
348 * Use the chance to check for expired timers.
349 */
350 if (fCheckTimers)
351 {
352 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
353 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
354 && !pVM->tm.s.fRunningQueues
355 && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64
356 || ( pVM->tm.s.fVirtualSyncTicking
357 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
358 )
359 )
360 && !pVM->tm.s.fRunningQueues
361 )
362 {
363 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
364 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
365 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
366#ifdef IN_RING3
367 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
368#endif
369 }
370 }
371 }
372 else
373 u64 = pVM->tm.s.u64Virtual;
374 return u64;
375}
376
377
378/**
379 * Gets the current TMCLOCK_VIRTUAL time
380 *
381 * @returns The timestamp.
382 * @param pVM The cross context VM structure.
383 *
384 * @remark While the flow of time will never go backwards, the rate of its
385 * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
386 * influenced by power saving (SpeedStep, PowerNow!), while the former
387 * makes use of TSC and kernel timers.
388 */
389VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
390{
391 return tmVirtualGet(pVM, true /*fCheckTimers*/);
392}
393
394
395/**
396 * Gets the current TMCLOCK_VIRTUAL time without checking
397 * timers or anything.
398 *
399 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
400 *
401 * @returns The timestamp.
402 * @param pVM The cross context VM structure.
403 *
404 * @remarks See TMVirtualGet.
405 */
406VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
407{
408 return tmVirtualGet(pVM, false /*fCheckTimers*/);
409}
410
411
412/**
413 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nanoseconds.
414 *
415 * @returns Host nano second count.
416 * @param pVM The cross context VM structure.
417 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
418 */
419DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
420{
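 /* Under warp drive the virtual clock runs at u32VirtualWarpDrivePercentage/100
    times real time, so the host wait is the interval scaled by the inverse:
    cVirtTicksToDeadline * 100 / percentage. */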
421 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
422 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
423 return cVirtTicksToDeadline;
424}
425
426
427/**
428 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
429 *
430 * @returns The timestamp.
431 * @param pVM The cross context VM structure.
432 * @param u64 The raw virtual time.
433 * @param off The current offVirtualSync value.
434 * @param pcNsToDeadline Where to return the number of nano seconds to
435 * the next virtual sync timer deadline. Can be
436 * NULL.
437 * @param pnsAbsDeadline Where to return the absolute deadline.
438 * Optional.
439 */
440DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
441 uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
442{
443 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
444
445 /*
446 * Don't make updates until we've checked the timer queue.
447 */
448 bool fUpdatePrev = true;
449 bool fUpdateOff = true;
450 bool fStop = false;
451 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
452 uint64_t u64Delta = u64 - u64Prev;
453 if (RT_LIKELY(!(u64Delta >> 32)))
454 {
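 /* While catching up the sync clock runs at (100 + u32VirtualSyncCatchUpPercentage)%
    of the virtual clock: each elapsed u64Delta shaves u64Delta * pct / 100 off the
    offset, until only the given-up part (offVirtualSyncGivenUp) remains. */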
455 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
456 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
457 {
458 off -= u64Sub;
459 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
460 }
461 else
462 {
463 /* we've completely caught up. */
464 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
465 off = pVM->tm.s.offVirtualSyncGivenUp;
466 fStop = true;
467 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
468 }
469 }
470 else
471 {
472 /* More than 4 seconds since last time (or negative), ignore it. */
473 fUpdateOff = false;
474 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
475 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
476 }
477
478 /*
479 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
480 * approach is to never pass the head timer. So, when we would pass it we stop
481 * the clock and set the timer pending flag instead.
482 */
483 u64 -= off;
484
485 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
486 if (u64Last > u64)
487 {
488 u64 = u64Last + 1;
489 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
490 }
491
492 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
493 if (pnsAbsDeadline)
494 *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
495 through this code over and over again even if there aren't any timer changes. */
496 if (u64 < u64Expire)
497 {
498 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
499 if (fUpdateOff)
500 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
501 if (fStop)
502 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
503 if (fUpdatePrev)
504 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
505 if (pcNsToDeadline)
506 {
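 /* The deadline distance is in sync clock ticks; while catching up the sync clock
    runs (pct + 100)/100 times faster than the virtual clock, so scale the distance
    back down before the warp drive conversion. */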
507 uint64_t cNsToDeadline = u64Expire - u64;
508 if (pVM->tm.s.fVirtualSyncCatchUp)
509 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
510 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
511 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
512 }
513 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
514 }
515 else
516 {
517 u64 = u64Expire;
518 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
519 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
520
521 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
522 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
523 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
524 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
525 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
526 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
527
528 if (pcNsToDeadline)
529 *pcNsToDeadline = 0;
530#ifdef IN_RING3
531 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
532#endif
533 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
534 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
535 }
536 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
537
538 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
539 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
540 return u64;
541}
542
543
544/**
545 * tmVirtualSyncGetEx worker for when we get the lock.
546 *
547 * @returns The timestamp.
548 * @param pVM The cross context VM structure.
549 * @param u64 The virtual clock timestamp.
550 * @param pcNsToDeadline Where to return the number of nano seconds to
551 * the next virtual sync timer deadline. Can be
552 * NULL.
553 * @param pnsAbsDeadline Where to return the absolute deadline.
554 * Optional.
555 */
556DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
557{
558 /*
559 * Not ticking?
560 */
561 if (!pVM->tm.s.fVirtualSyncTicking)
562 {
563 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
564 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
565 if (pcNsToDeadline)
566 *pcNsToDeadline = 0;
567 if (pnsAbsDeadline)
568 *pnsAbsDeadline = u64;
569 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
570 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
571 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
572 return u64;
573 }
574
575 /*
576 * Handle catch up in a separate function.
577 */
578 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
579 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
580 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);
581
582 /*
583 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
584 * approach is to never pass the head timer. So, when we would pass it we stop
585 * the clock and set the timer pending flag instead.
586 */
587 u64 -= off;
588
589 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
590 if (u64Last > u64)
591 {
592 u64 = u64Last + 1;
593 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
594 }
595
596 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
597 if (pnsAbsDeadline)
598 *pnsAbsDeadline = u64Expire;
599 if (u64 < u64Expire)
600 {
601 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
602 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
603 if (pcNsToDeadline)
604 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
605 }
606 else
607 {
608 u64 = u64Expire;
609 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
610 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
611
612 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
613 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
614 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
615 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
616 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
617 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
618
619#ifdef IN_RING3
620 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
621#endif
622 if (pcNsToDeadline)
623 *pcNsToDeadline = 0;
624 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
625 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
626 }
627 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
628 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
629 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
630 return u64;
631}
632
633
634/**
635 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
636 *
637 * @returns The timestamp.
638 * @param pVM The cross context VM structure.
639 * @param fCheckTimers Check timers or not.
640 * @param pcNsToDeadline Where to return the number of nano seconds to
641 * the next virtual sync timer deadline. Can be
642 * NULL.
643 * @param pnsAbsDeadline Where to return the absolute deadline.
644 * Optional.
645 * @param puTscNow Where to return the TSC corresponding to the
646 * returned timestamp (delta adjusted). Optional.
647 * @thread EMT.
648 */
649DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
650 uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
651{
652 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
653
654 uint64_t u64;
655 if (!pVM->tm.s.fVirtualSyncTicking)
656 {
657 if (pcNsToDeadline)
658 *pcNsToDeadline = 0;
659 u64 = pVM->tm.s.u64VirtualSync;
660 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
661 return u64;
662 }
663
664 /*
665 * Query the virtual clock and do the usual expired timer check.
666 */
667 Assert(pVM->tm.s.cVirtualTicking);
668 u64 = tmVirtualGetRawEx(pVM, puTscNow);
669 if (fCheckTimers)
670 {
671 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
672 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
673 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64)
674 {
675 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
676 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
677#ifdef IN_RING3
678 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
679#endif
680 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
681 }
682 }
683
684 /*
685 * If we can get the lock, get it. The result is much more reliable.
686 *
687 * Note! This is where all clock source devices branch off because they
688 * will be owning the lock already. The 'else' is taken by code
689 * which is less picky or hasn't been adjusted yet.
690 */
691 /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
692 * here and the remainder of this function in a static worker. */
693 if (PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
694 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
695
696 /*
697 * When the clock is ticking, not doing catch ups and not running into an
698 * expired time, we can get away without locking. Try this first.
699 */
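 /* The lockless pattern is read, re-check, re-read: the offset read below is only
    trusted if the ticking and catch-up flags as well as the offset itself are
    unchanged on the second read; otherwise we fall through to the retry loop. */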
700 uint64_t off;
701 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
702 {
703 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
704 {
705 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
706 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
707 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
708 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
709 {
710 off = u64 - off;
711 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
712 if (off < u64Expire)
713 {
714 if (pnsAbsDeadline)
715 *pnsAbsDeadline = u64Expire;
716 if (pcNsToDeadline)
717 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
718 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
719 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
720 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
721 return off;
722 }
723 }
724 }
725 }
726 else
727 {
728 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
729 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
730 {
731 if (pcNsToDeadline)
732 *pcNsToDeadline = 0;
733 if (pnsAbsDeadline)
734 *pnsAbsDeadline = off;
735 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
736 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
737 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
738 return off;
739 }
740 }
741
742 /*
743 * Read the offset and adjust if we're playing catch-up.
744 *
745 * The catch-up adjustment works by decrementing the offset by a percentage of
746 * the time elapsed since the previous TMVirtualGetSync call.
747 *
748 * It's possible to get a very long or even negative interval between two reads
749 * for the following reasons:
750 * - Someone might have suspended the process execution, frequently the case when
751 * debugging the process.
752 * - We might be on a different CPU whose TSC isn't quite in sync with the
753 * other CPUs in the system.
754 * - Another thread is racing us and we might have been preempted while inside
755 * this function.
756 *
757 * Assuming nanosecond virtual time, we can simply ignore any interval which has
758 * any of the upper 32 bits set.
759 */
760 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
761 int cOuterTries = 42;
762 for (;; cOuterTries--)
763 {
764 /* Try grab the lock, things get simpler when owning the lock. */
765 int rcLock = PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock);
766 if (RT_SUCCESS_NP(rcLock))
767 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
768
769 /* Re-check the ticking flag. */
770 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
771 {
772 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
773 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
774 && cOuterTries > 0)
775 continue;
776 if (pcNsToDeadline)
777 *pcNsToDeadline = 0;
778 if (pnsAbsDeadline)
779 *pnsAbsDeadline = off;
780 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
781 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
782 return off;
783 }
784
785 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
786 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
787 {
788 /* No changes allowed, try get a consistent set of parameters. */
789 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
790 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
791 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
792 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
793 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
794 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
795 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
796 || cOuterTries <= 0)
797 {
798 uint64_t u64Delta = u64 - u64Prev;
799 if (RT_LIKELY(!(u64Delta >> 32)))
800 {
801 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
802 if (off > u64Sub + offGivenUp)
803 {
804 off -= u64Sub;
805 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
806 }
807 else
808 {
809 /* we've completely caught up. */
810 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
811 off = offGivenUp;
812 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
813 }
814 }
815 else
816 /* More than 4 seconds since last time (or negative), ignore it. */
817 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
818
819 /* Check that we're still running and in catch up. */
820 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
821 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
822 break;
823 if (cOuterTries <= 0)
824 break; /* enough */
825 }
826 }
827 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
828 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
829 break; /* Got a consistent offset */
830 else if (cOuterTries <= 0)
831 break; /* enough */
832 }
833 if (cOuterTries <= 0)
834 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
835
836 /*
837 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
838 * approach is to never pass the head timer. So, when we would pass it we stop
839 * the clock and set the timer pending flag instead.
840 */
841 u64 -= off;
842/** @todo u64VirtualSyncLast */
843 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
844 if (pnsAbsDeadline)
845 *pnsAbsDeadline = u64Expire;
846 if (u64 >= u64Expire)
847 {
848 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
849 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
850 {
851 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
852 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
853 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
854#ifdef IN_RING3
855 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
856#endif
857 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
858 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
859 }
860 else
861 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
862 if (pcNsToDeadline)
863 *pcNsToDeadline = 0;
864 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
865 }
866 else if (pcNsToDeadline)
867 {
868 uint64_t cNsToDeadline = u64Expire - u64;
869 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
870 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
871 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
872 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
873 }
874
875 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
876 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
877 return u64;
878}
879
880
881/**
882 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
883 *
884 * @returns The timestamp.
885 * @param pVM The cross context VM structure.
886 * @thread EMT.
887 * @remarks May set the timer and virtual sync FFs.
888 */
889VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
890{
891 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
892}
893
894
895/**
896 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
897 * TMCLOCK_VIRTUAL.
898 *
899 * @returns The timestamp.
900 * @param pVM The cross context VM structure.
901 * @thread EMT.
902 * @remarks May set the timer and virtual sync FFs.
903 */
904VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
905{
906 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
907}
908
909
910/**
911 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
912 * TMCLOCK_VIRTUAL, also returning corresponding TSC value.
913 *
914 * @returns The timestamp.
915 * @param pVM The cross context VM structure.
916 * @param puTscNow Where to return the TSC value that the return
917 * value is relative to. This is delta adjusted.
918 * @thread EMT.
919 * @remarks May set the timer and virtual sync FFs.
920 */
921VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
922{
923 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
924}
925
926
927/**
928 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
929 *
930 * @returns The timestamp.
931 * @param pVM The cross context VM structure.
932 * @param fCheckTimers Check timers on the virtual clock or not.
933 * @thread EMT.
934 * @remarks May set the timer and virtual sync FFs.
935 */
936VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
937{
938 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
939}
940
941
942/**
943 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
944 * without checking timers running on TMCLOCK_VIRTUAL.
945 *
946 * @returns The timestamp.
947 * @param pVM The cross context VM structure.
948 * @param pcNsToDeadline Where to return the number of nano seconds to
949 * the next virtual sync timer deadline.
950 * @param puTscNow Where to return the TSC value that the return
951 * value is relative to. This is delta adjusted.
952 * @param puDeadlineVersion Where to return the deadline "version" number.
953 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
954 * to check if the absolute deadline is still up to
955 * date and the caller can skip calling this
956 * function.
957 * @thread EMT.
958 * @remarks May set the timer and virtual sync FFs.
959 */
960VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
961 uint64_t *puDeadlineVersion, uint64_t *puTscNow)
962{
963 uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
964 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
965 *pcNsToDeadline = cNsToDeadlineTmp;
966 return u64Now;
967}
968
969
970/**
971 * Gets the number of nano seconds to the next virtual sync deadline.
972 *
973 * @returns The number of TMCLOCK_VIRTUAL ticks.
974 * @param pVM The cross context VM structure.
975 * @param puTscNow Where to return the TSC value that the return
976 * value is relative to. This is delta adjusted.
977 * @param puDeadlineVersion Where to return the deadline "version" number.
978 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
979 * to check if the absolute deadline is still up to
980 * date and the caller can skip calling this
981 * function.
982 * @thread EMT.
983 * @remarks May set the timer and virtual sync FFs.
984 */
985VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
986{
987 uint64_t cNsToDeadline;
988 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
989 return cNsToDeadline;
990}
991
992
993/**
994 * Checks if the given deadline is still current.
995 *
996 * @retval true if the deadline is still current.
997 * @retval false if the deadline is outdated.
998 * @param pVM The cross context VM structure.
999 * @param uDeadlineVersion The deadline version to check.
1000 */
1001VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
1002{
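 /* The deadline 'version' handed out via pnsAbsDeadline in tmVirtualSyncGetEx is
    simply the absolute expire time of the head virtual sync timer, so any change
    to the head timer invalidates it. */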
1003 /** @todo Try use ASMAtomicUoReadU64 instead. */
1004 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
1005 return u64Expire == uDeadlineVersion;
1006}
1007
1008
1009/**
1010 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
1011 *
1012 * @return The current lag.
1013 * @param pVM The cross context VM structure.
1014 */
1015VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
1016{
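 /* offVirtualSyncGivenUp is the portion of the offset we have stopped trying to
    catch up on, so the difference is the lag that is still being compensated for. */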
1017 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
1018}
1019
1020
1021/**
1022 * Get the current catch-up percent.
1023 *
1024 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
1025 * @param pVM The cross context VM structure.
1026 */
1027VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
1028{
1029 if (pVM->tm.s.fVirtualSyncCatchUp)
1030 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
1031 return 0;
1032}
1033
1034
1035/**
1036 * Gets the current TMCLOCK_VIRTUAL frequency.
1037 *
1038 * @returns The frequency.
1039 * @param pVM The cross context VM structure.
1040 */
1041VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
1042{
1043 NOREF(pVM);
1044 return TMCLOCK_FREQ_VIRTUAL;
1045}
1046
1047
1048/**
1049 * Worker for TMR3PauseClocks.
1050 *
1051 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1052 * @param pVM The cross context VM structure.
1053 */
1054int tmVirtualPauseLocked(PVMCC pVM)
1055{
1056 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
1057 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1058 if (c == 0)
1059 {
1060 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
1061 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
1062 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
1063 }
1064 return VINF_SUCCESS;
1065}
1066
1067
1068/**
1069 * Worker for TMR3ResumeClocks.
1070 *
1071 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1072 * @param pVM The cross context VM structure.
1073 */
1074int tmVirtualResumeLocked(PVMCC pVM)
1075{
1076 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
1077 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1078 if (c == 1)
1079 {
1080 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
1081 pVM->tm.s.u64VirtualRawPrev = 0;
1082 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
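 /* Choose the new offset so that tmVirtualGetRaw() resumes exactly at the value
    the clock had when it was paused (u64Virtual). */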
1083 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
1084 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
1085 }
1086 return VINF_SUCCESS;
1087}
1088
1089
1090/**
1091 * Converts from virtual ticks to nanoseconds.
1092 *
1093 * @returns nanoseconds.
1094 * @param pVM The cross context VM structure.
1095 * @param u64VirtualTicks The virtual ticks to convert.
1096 * @remark There could be rounding errors here. We just do a simple integer divide
1097 * without any adjustments.
1098 */
1099VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
1100{
1101 NOREF(pVM);
1102 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1103 return u64VirtualTicks;
1104}
1105
1106
1107/**
1108 * Converts from virtual ticks to microseconds.
1109 *
1110 * @returns microseconds.
1111 * @param pVM The cross context VM structure.
1112 * @param u64VirtualTicks The virtual ticks to convert.
1113 * @remark There could be rounding errors here. We just do a simple integer divide
1114 * without any adjustments.
1115 */
1116VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
1117{
1118 NOREF(pVM);
1119 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1120 return u64VirtualTicks / 1000;
1121}
1122
1123
1124/**
1125 * Converts from virtual ticks to milliseconds.
1126 *
1127 * @returns milliseconds.
1128 * @param pVM The cross context VM structure.
1129 * @param u64VirtualTicks The virtual ticks to convert.
1130 * @remark There could be rounding errors here. We just do a simple integer divide
1131 * without any adjustments.
1132 */
1133VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
1134{
1135 NOREF(pVM);
1136 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1137 return u64VirtualTicks / 1000000;
1138}
1139
1140
1141/**
1142 * Converts from nanoseconds to virtual ticks.
1143 *
1144 * @returns virtual ticks.
1145 * @param pVM The cross context VM structure.
1146 * @param u64NanoTS The nanosecond value to convert.
1147 * @remark There could be rounding and overflow errors here.
1148 */
1149VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
1150{
1151 NOREF(pVM);
1152 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1153 return u64NanoTS;
1154}
1155
1156
1157/**
1158 * Converts from microseconds to virtual ticks.
1159 *
1160 * @returns virtual ticks.
1161 * @param pVM The cross context VM structure.
1162 * @param u64MicroTS The microsecond value to convert.
1163 * @remark There could be rounding and overflow errors here.
1164 */
1165VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
1166{
1167 NOREF(pVM);
1168 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1169 return u64MicroTS * 1000;
1170}
1171
1172
1173/**
1174 * Converts from milliseconds to virtual ticks.
1175 *
1176 * @returns virtual ticks.
1177 * @param pVM The cross context VM structure.
1178 * @param u64MilliTS The millisecond value to convert.
1179 * @remark There could be rounding and overflow errors here.
1180 */
1181VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1182{
1183 NOREF(pVM);
1184 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1185 return u64MilliTS * 1000000;
1186}
1187