VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@100764

Last change on this file since 100764 was 98103, checked in by vboxsync, 2 years ago

Copyright year updates by scm.

1/* $Id: TMAllVirtual.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_TM
33#include <VBox/vmm/tm.h>
34#include <VBox/vmm/dbgftrace.h>
35#ifdef IN_RING3
36# include <iprt/thread.h>
37#endif
38#include "TMInternal.h"
39#include <VBox/vmm/vmcc.h>
40#include <VBox/vmm/vmm.h>
41#include <VBox/err.h>
42#include <VBox/log.h>
43#include <VBox/sup.h>
44
45#include <iprt/time.h>
46#include <iprt/assert.h>
47#include <iprt/asm.h>
48#include <iprt/asm-math.h>
49
50
51
52/**
53 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
54 */
55DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
56 uint64_t u64PrevNanoTS)
57{
58 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
59 pData->cBadPrev++;
60 if ((int64_t)u64DeltaPrev < 0)
61 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
62 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
63 else
64 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
65 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
66}
67
68
69#ifdef IN_RING3
70/**
71 * @callback_method_impl{FNTIMENANOTSINTERNAL, For driverless mode.}
72 */
73static DECLCALLBACK(uint64_t) tmR3VirtualNanoTSDriverless(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
74{
75 RT_NOREF(pData);
76 if (pExtra)
77 pExtra->uTSCValue = ASMReadTSC();
78 return RTTimeSystemNanoTS();
79}
80#endif
81
82
83/**
84 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
85 *
86 * This is the initial worker, so the first call in each context ends up here.
87 * It is also used should the delta rating of the host CPUs change or if the
88 * fGetGipCpu feature the current worker relies upon becomes unavailable. The
89 * last two events may occur as CPUs are taken online.
90 */
91DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
92{
93 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
94 PFNTIMENANOTSINTERNAL pfnWorker;
95
96 /*
97 * We require a valid GIP for the selection below.
98 * Invalid GIP is fatal, though we have to allow no GIP in driverless mode (ring-3 only).
99 */
100 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
101#ifdef IN_RING3
102 if (pGip)
103#endif
104 {
105 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
106 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
107 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
108 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
109
110 /*
111 * Determine the new worker.
112 */
113#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
114 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
115#endif
116 switch (pGip->u32Mode)
117 {
118#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
119 case SUPGIPMODE_SYNC_TSC:
120 case SUPGIPMODE_INVARIANT_TSC:
121# ifdef IN_RING0
122 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
123 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
124 else
125 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
126# else
127 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
128 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
129 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
130 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
131 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
132 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
133 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
134 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
135 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
136 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
137 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
138 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
139 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
140 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
141 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
142 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
143 else
144 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
145 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
146 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
147# endif
148 break;
149
150 case SUPGIPMODE_ASYNC_TSC:
151# ifdef IN_RING0
152 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
153# else
154 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
155 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
156 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
157 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
158 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
159 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
160 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
161 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
162 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
163 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
164 else
165 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
166# endif
167 break;
168#endif
169 default:
170 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
171 }
172 }
173#ifdef IN_RING3
174 else
175 pfnWorker = tmR3VirtualNanoTSDriverless;
176#endif
177
178 /*
179 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
180 */
181 ASMAtomicWritePtr((void * volatile *)&pVM->VMCC_CTX(tm).s.pfnVirtualGetRaw, (void *)(uintptr_t)pfnWorker);
182 return pfnWorker(pData, pExtra);
183}
184
185
186/**
187 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
188 */
189DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra,
190 uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
191{
192 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
193 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x pExtra=%p\n", pVM, idApic, iCpuSet, iGipCpu, pExtra));
194#ifndef _MSC_VER
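    /* This return is unreachable after the fatal assertion above; presumably it is
       only compiled for non-MSC compilers to silence missing-return warnings, while
       MSC would instead warn about unreachable code. */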
195 return UINT64_MAX;
196#endif
197}
198
199
200/**
201 * Wrapper around the IPRT GIP time methods.
202 */
203DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
204{
205#ifdef IN_RING3
206 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, NULL /*pExtra*/);
207#elif defined(IN_RING0)
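    /* c1nsSteps is advanced by the IPRT worker whenever it has to nudge the returned
       timestamp 1 ns past the previous one to keep the clock monotonic; if that
       happened during this call, force the EMT back to ring-3 (presumably so the
       condition can be examined and handled there). */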
208 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
209 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, NULL /*pExtra*/);
210 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
211 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
212#else
213# error "unsupported context"
214#endif
215 /*DBGFTRACE_POS_U64(pVM, u64);*/
216 return u64;
217}
218
219
220/**
221 * Wrapper around the IPRT GIP time methods, extended version.
222 */
223DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
224{
225 RTITMENANOTSEXTRA Extra;
226#ifdef IN_RING3
227 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, &Extra);
228#elif defined(IN_RING0)
229 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
230 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, &Extra);
231 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
232 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
233#else
234# error "unsupported context"
235#endif
236 if (puTscNow)
237 *puTscNow = Extra.uTSCValue;
238 /*DBGFTRACE_POS_U64(pVM, u64);*/
239 return u64;
240}
241
242
243/**
244 * Get the time when we're not running at 100%
245 *
246 * @returns The timestamp.
247 * @param pVM The cross context VM structure.
248 * @param puTscNow Where to return the TSC corresponding to the returned
249 * timestamp (delta adjusted). Optional.
250 */
251static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
252{
253 /*
254 * Recalculate the RTTimeNanoTS() value for the period where
255 * warp drive has been enabled.
256 */
257 uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
258 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
259 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
260 u64 /= 100;
261 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
262
263 /*
264 * Now we apply the virtual time offset.
265 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
266 * machine started if it had been running continuously without any suspends.)
267 */
268 u64 -= pVM->tm.s.u64VirtualOffset;
269 return u64;
270}
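/*
 * Worked example (illustrative, numbers made up): the scaling above amounts to
 *     virtual = (raw - warpstart) * pct / 100 + warpstart - offset
 * With a 50% warp drive engaged at warpstart = 10'000'000'000 ns and a raw value
 * of 12'000'000'000 ns, the warped part evaluates to 11'000'000'000 ns, i.e. only
 * one second of guest virtual time for the two host seconds that elapsed since
 * the warp drive was engaged; the virtual offset is then subtracted as usual.
 */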
271
272
273/**
274 * Get the raw virtual time.
275 *
276 * @returns The current time stamp.
277 * @param pVM The cross context VM structure.
278 */
279DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
280{
281 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
282 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
283 return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
284}
285
286
287/**
288 * Get the raw virtual time, extended version.
289 *
290 * @returns The current time stamp.
291 * @param pVM The cross context VM structure.
292 * @param puTscNow Where to return the TSC corresponding to the returned
293 * timestamp (delta adjusted). Optional.
294 */
295DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
296{
297 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
298 return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
299 return tmVirtualGetRawNonNormal(pVM, puTscNow);
300}
301
302
303/**
304 * Inlined version of tmVirtualGetEx.
305 */
306DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
307{
308 uint64_t u64;
309 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
310 {
311 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
312 u64 = tmVirtualGetRaw(pVM);
313
314 /*
315 * Use the chance to check for expired timers.
316 */
317 if (fCheckTimers)
318 {
319 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
320 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
321 && !pVM->tm.s.fRunningQueues
322 && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64
323 || ( pVM->tm.s.fVirtualSyncTicking
324 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
325 )
326 )
327 && !pVM->tm.s.fRunningQueues
328 )
329 {
330 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
331 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
332 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
333#ifdef IN_RING3
334 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
335#endif
336 }
337 }
338 }
339 else
340 u64 = pVM->tm.s.u64Virtual;
341 return u64;
342}
343
344
345/**
346 * Gets the current TMCLOCK_VIRTUAL time
347 *
348 * @returns The timestamp.
349 * @param pVM The cross context VM structure.
350 *
351 * @remark While the flow of time will never go backwards, the speed of the
352 * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
353 * influenced by power saving (SpeedStep, PowerNow!), while the former
354 * makes use of TSC and kernel timers.
355 */
356VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
357{
358 return tmVirtualGet(pVM, true /*fCheckTimers*/);
359}
360
361
362/**
363 * Gets the current TMCLOCK_VIRTUAL time without checking
364 * timers or anything.
365 *
366 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
367 *
368 * @returns The timestamp.
369 * @param pVM The cross context VM structure.
370 *
371 * @remarks See TMVirtualGet.
372 */
373VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
374{
375 return tmVirtualGet(pVM, false /*fCheckTimers*/);
376}
377
378
379/**
380 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
381 *
382 * @returns Host nano second count.
383 * @param pVM The cross context VM structure.
384 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
385 */
386DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
387{
388 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
389 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
390 return cVirtTicksToDeadline;
391}
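/*
 * Worked example (illustrative, numbers made up): with a 50% warp drive an
 * interval of 1'000'000 TMCLOCK_VIRTUAL ticks converts to
 * 1'000'000 * 100 / 50 = 2'000'000 host nanoseconds, i.e. the host must wait
 * twice as long as the guest-perceived interval before the deadline is reached.
 */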
392
393
394/**
395 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
396 *
397 * @returns The timestamp.
398 * @param pVM The cross context VM structure.
399 * @param u64 raw virtual time.
400 * @param off offVirtualSync.
401 * @param pcNsToDeadline Where to return the number of nano seconds to
402 * the next virtual sync timer deadline. Can be
403 * NULL.
404 * @param pnsAbsDeadline Where to return the absolute deadline.
405 * Optional.
406 */
407DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
408 uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
409{
410 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
411
412 /*
413 * Don't make updates until we've checked the timer queue.
414 */
415 bool fUpdatePrev = true;
416 bool fUpdateOff = true;
417 bool fStop = false;
418 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
419 uint64_t u64Delta = u64 - u64Prev;
420 if (RT_LIKELY(!(u64Delta >> 32)))
421 {
422 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
423 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
424 {
425 off -= u64Sub;
426 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
427 }
428 else
429 {
430 /* we've completely caught up. */
431 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
432 off = pVM->tm.s.offVirtualSyncGivenUp;
433 fStop = true;
434 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
435 }
436 }
437 else
438 {
439 /* More than 4 seconds since last time (or negative), ignore it. */
440 fUpdateOff = false;
441 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
442 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
443 }
444
445 /*
446 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
447 * approach is to never pass the head timer; when we would, we stop the clock and
448 * set the timer pending flag.
449 */
450 u64 -= off;
451
452 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
453 if (u64Last > u64)
454 {
455 u64 = u64Last + 1;
456 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
457 }
458
459 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
460 if (pnsAbsDeadline)
461 *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
462 through this code over and over again even if there aren't any timer changes. */
463 if (u64 < u64Expire)
464 {
465 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
466 if (fUpdateOff)
467 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
468 if (fStop)
469 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
470 if (fUpdatePrev)
471 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
472 if (pcNsToDeadline)
473 {
474 uint64_t cNsToDeadline = u64Expire - u64;
475 if (pVM->tm.s.fVirtualSyncCatchUp)
476 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
477 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
478 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
479 }
480 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
481 }
482 else
483 {
484 u64 = u64Expire;
485 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
486 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
487
488 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
489 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
490 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
491 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
492 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
493 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
494
495 if (pcNsToDeadline)
496 *pcNsToDeadline = 0;
497#ifdef IN_RING3
498 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
499#endif
500 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
501 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
502 }
503 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
504
505 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
506 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
507 return u64;
508}
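/*
 * Worked example (illustrative, numbers made up): with u32VirtualSyncCatchUpPercentage
 * set to 25 and 8'000'000 ns of virtual time elapsed since the previous call, u64Sub
 * is 8'000'000 * 25 / 100 = 2'000'000 ns, so the virtual sync clock advances 10 ms
 * while only 8 ms of virtual time passed, shrinking the lag by 2 ms (unless that
 * would cut below offVirtualSyncGivenUp, in which case catch-up is declared done).
 */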
509
510
511/**
512 * tmVirtualSyncGetEx worker for when we get the lock.
513 *
514 * @returns The timestamp.
515 * @param pVM The cross context VM structure.
516 * @param u64 The virtual clock timestamp.
517 * @param pcNsToDeadline Where to return the number of nano seconds to
518 * the next virtual sync timer deadline. Can be
519 * NULL.
520 * @param pnsAbsDeadline Where to return the absolute deadline.
521 * Optional.
522 */
523DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
524{
525 /*
526 * Not ticking?
527 */
528 if (!pVM->tm.s.fVirtualSyncTicking)
529 {
530 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
531 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
532 if (pcNsToDeadline)
533 *pcNsToDeadline = 0;
534 if (pnsAbsDeadline)
535 *pnsAbsDeadline = u64;
536 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
537 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
538 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
539 return u64;
540 }
541
542 /*
543 * Handle catch up in a separate function.
544 */
545 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
546 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
547 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);
548
549 /*
550 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
551 * approach is to never pass the head timer; when we would, we stop the clock and
552 * set the timer pending flag.
553 */
554 u64 -= off;
555
556 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
557 if (u64Last > u64)
558 {
559 u64 = u64Last + 1;
560 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
561 }
562
563 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
564 if (pnsAbsDeadline)
565 *pnsAbsDeadline = u64Expire;
566 if (u64 < u64Expire)
567 {
568 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
569 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
570 if (pcNsToDeadline)
571 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
572 }
573 else
574 {
575 u64 = u64Expire;
576 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
577 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
578
579 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
580 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
581 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
582 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
583 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
584 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
585
586#ifdef IN_RING3
587 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
588#endif
589 if (pcNsToDeadline)
590 *pcNsToDeadline = 0;
591 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
592 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
593 }
594 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
595 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
596 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
597 return u64;
598}
599
600
601/**
602 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
603 *
604 * @returns The timestamp.
605 * @param pVM The cross context VM structure.
606 * @param fCheckTimers Check timers or not
607 * @param pcNsToDeadline Where to return the number of nano seconds to
608 * the next virtual sync timer deadline. Can be
609 * NULL.
610 * @param pnsAbsDeadline Where to return the absolute deadline.
611 * Optional.
612 * @param puTscNow Where to return the TSC corresponding to the
613 * returned timestamp (delta adjusted). Optional.
614 * @thread EMT.
615 */
616DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
617 uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
618{
619 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
620
621 uint64_t u64;
622 if (!pVM->tm.s.fVirtualSyncTicking)
623 {
624 if (pcNsToDeadline)
625 *pcNsToDeadline = 0;
626 u64 = pVM->tm.s.u64VirtualSync;
627 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
628 return u64;
629 }
630
631 /*
632 * Query the virtual clock and do the usual expired timer check.
633 */
634 Assert(pVM->tm.s.cVirtualTicking);
635 u64 = tmVirtualGetRawEx(pVM, puTscNow);
636 if (fCheckTimers)
637 {
638 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
639 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
640 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64)
641 {
642 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
643 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
644#ifdef IN_RING3
645 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
646#endif
647 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
648 }
649 }
650
651 /*
652 * If we can get the lock, get it. The result is much more reliable.
653 *
654 * Note! This is where all clock source devices branch off because they
655 * will be owning the lock already. The 'else' is taken by code
656 * which is less picky or hasn't been adjusted yet.
657 */
658 /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
659 * here and the remainder of this function in a static worker. */
660 if (PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
661 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
662
663 /*
664 * When the clock is ticking, not doing catch ups and not running into an
665 * expired time, we can get away without locking. Try this first.
666 */
667 uint64_t off;
668 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
669 {
670 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
671 {
672 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
673 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
674 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
675 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
676 {
677 off = u64 - off;
678 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
679 if (off < u64Expire)
680 {
681 if (pnsAbsDeadline)
682 *pnsAbsDeadline = u64Expire;
683 if (pcNsToDeadline)
684 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
685 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
686 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
687 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
688 return off;
689 }
690 }
691 }
692 }
693 else
694 {
695 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
696 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
697 {
698 if (pcNsToDeadline)
699 *pcNsToDeadline = 0;
700 if (pnsAbsDeadline)
701 *pnsAbsDeadline = off;
702 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
703 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
704 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
705 return off;
706 }
707 }
708
709 /*
710 * Read the offset and adjust if we're playing catch-up.
711 *
712 * The catch-up adjusting works by decrementing the offset by a percentage of
713 * the time elapsed since the previous TMVirtualGetSync call.
714 *
715 * It's possible to get a very long or even negative interval between two reads
716 * for the following reasons:
717 * - Someone might have suspended the process execution, frequently the case when
718 * debugging the process.
719 * - We might be on a different CPU whose TSC isn't quite in sync with the
720 * other CPUs in the system.
721 * - Another thread is racing us and we might have been preempted while inside
722 * this function.
723 *
724 * Assuming nano second virtual time, we can simply ignore any intervals which have
725 * any of the upper 32 bits set (i.e. intervals longer than roughly 4.3 seconds).
726 */
727 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
728 int cOuterTries = 42;
729 for (;; cOuterTries--)
730 {
731 /* Try to grab the lock, things get simpler when owning the lock. */
732 int rcLock = PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock);
733 if (RT_SUCCESS_NP(rcLock))
734 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
735
736 /* Re-check the ticking flag. */
737 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
738 {
739 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
740 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
741 && cOuterTries > 0)
742 continue;
743 if (pcNsToDeadline)
744 *pcNsToDeadline = 0;
745 if (pnsAbsDeadline)
746 *pnsAbsDeadline = off;
747 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
748 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
749 return off;
750 }
751
752 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
753 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
754 {
755 /* No changes allowed, try to get a consistent set of parameters. */
756 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
757 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
758 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
759 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
760 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
761 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
762 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
763 || cOuterTries <= 0)
764 {
765 uint64_t u64Delta = u64 - u64Prev;
766 if (RT_LIKELY(!(u64Delta >> 32)))
767 {
768 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
769 if (off > u64Sub + offGivenUp)
770 {
771 off -= u64Sub;
772 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
773 }
774 else
775 {
776 /* we've completely caught up. */
777 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
778 off = offGivenUp;
779 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
780 }
781 }
782 else
783 /* More than 4 seconds since last time (or negative), ignore it. */
784 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
785
786 /* Check that we're still running and in catch up. */
787 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
788 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
789 break;
790 if (cOuterTries <= 0)
791 break; /* enough */
792 }
793 }
794 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
795 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
796 break; /* Got a consistent offset */
797 else if (cOuterTries <= 0)
798 break; /* enough */
799 }
800 if (cOuterTries <= 0)
801 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
802
803 /*
804 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
805 * approach is to never pass the head timer; when we would, we stop the clock and
806 * set the timer pending flag.
807 */
808 u64 -= off;
809/** @todo u64VirtualSyncLast */
810 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
811 if (pnsAbsDeadline)
812 *pnsAbsDeadline = u64Expire;
813 if (u64 >= u64Expire)
814 {
815 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
816 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
817 {
818 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
819 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
820 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
821#ifdef IN_RING3
822 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
823#endif
824 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
825 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
826 }
827 else
828 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
829 if (pcNsToDeadline)
830 *pcNsToDeadline = 0;
831 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
832 }
833 else if (pcNsToDeadline)
834 {
835 uint64_t cNsToDeadline = u64Expire - u64;
836 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
837 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
838 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
839 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
840 }
841
842 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
843 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
844 return u64;
845}
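/*
 * Summary of the above (descriptive only): tmVirtualSyncGetEx tries three
 * strategies in order: (1) take VirtualSyncLock and let tmVirtualSyncGetLocked do
 * the work, (2) a lockless fast path that is only valid while the clock is ticking,
 * no catch-up is active and the head timer has not expired, and (3) a bounded retry
 * loop (up to 42 iterations) that keeps re-sampling until it gets a consistent
 * offset snapshot or gives up and uses the possibly slightly stale values.
 */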
846
847
848/**
849 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
850 *
851 * @returns The timestamp.
852 * @param pVM The cross context VM structure.
853 * @thread EMT.
854 * @remarks May set the timer and virtual sync FFs.
855 */
856VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
857{
858 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
859}
860
861
862/**
863 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
864 * TMCLOCK_VIRTUAL.
865 *
866 * @returns The timestamp.
867 * @param pVM The cross context VM structure.
868 * @thread EMT.
869 * @remarks May set the timer and virtual sync FFs.
870 */
871VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
872{
873 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
874}
875
876
877/**
878 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
879 * TMCLOCK_VIRTUAL, also returning corresponding TSC value.
880 *
881 * @returns The timestamp.
882 * @param pVM The cross context VM structure.
883 * @param puTscNow Where to return the TSC value that the return
884 * value is relative to. This is delta adjusted.
885 * @thread EMT.
886 * @remarks May set the timer and virtual sync FFs.
887 */
888VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
889{
890 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
891}
892
893
894/**
895 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
896 *
897 * @returns The timestamp.
898 * @param pVM The cross context VM structure.
899 * @param fCheckTimers Check timers on the virtual clock or not.
900 * @thread EMT.
901 * @remarks May set the timer and virtual sync FFs.
902 */
903VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
904{
905 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
906}
907
908
909/**
910 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
911 * without checking timers running on TMCLOCK_VIRTUAL.
912 *
913 * @returns The timestamp.
914 * @param pVM The cross context VM structure.
915 * @param pcNsToDeadline Where to return the number of nano seconds to
916 * the next virtual sync timer deadline.
917 * @param puTscNow Where to return the TSC value that the return
918 * value is relative to. This is delta adjusted.
919 * @param puDeadlineVersion Where to return the deadline "version" number.
920 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
921 * to check if the absolute deadline is still up to
922 * date and the caller can skip calling this
923 * function.
924 * @thread EMT.
925 * @remarks May set the timer and virtual sync FFs.
926 */
927VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
928 uint64_t *puDeadlineVersion, uint64_t *puTscNow)
929{
930 uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
931 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
932 *pcNsToDeadline = cNsToDeadlineTmp;
933 return u64Now;
934}
935
936
937/**
938 * Gets the number of nano seconds to the next virtual sync deadline.
939 *
940 * @returns The number of TMCLOCK_VIRTUAL ticks.
941 * @param pVM The cross context VM structure.
942 * @param puTscNow Where to return the TSC value that the return
943 * value is relative to. This is delta adjusted.
944 * @param puDeadlineVersion Where to return the deadline "version" number.
945 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
946 * to check if the absolute deadline is still up to
947 * date and the caller can skip calling this
948 * function.
949 * @thread EMT.
950 * @remarks May set the timer and virtual sync FFs.
951 */
952VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
953{
954 uint64_t cNsToDeadline;
955 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
956 return cNsToDeadline;
957}
958
959
960/**
961 * Checks if the given deadline is still current.
962 *
963 * @retval true if the deadline is still current.
964 * @retval false if the deadline is outdated.
965 * @param pVM The cross context VM structure.
966 * @param uDeadlineVersion The deadline version to check.
967 */
968VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
969{
970 /** @todo Try use ASMAtomicUoReadU64 instead. */
971 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
972 return u64Expire == uDeadlineVersion;
973}
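/*
 * Usage sketch (illustrative only, caller-side variable names are made up): a
 * caller can cache the deadline information and only re-query it when the
 * "version" (the absolute expiry) has changed:
 *
 *     uint64_t uVersion, uTsc;
 *     uint64_t cNsToDeadline = TMVirtualSyncGetNsToDeadline(pVM, &uVersion, &uTsc);
 *     // ... later, on a hot path ...
 *     if (!TMVirtualSyncIsCurrentDeadlineVersion(pVM, uVersion))
 *         cNsToDeadline = TMVirtualSyncGetNsToDeadline(pVM, &uVersion, &uTsc);
 */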
974
975
976/**
977 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
978 *
979 * @return The current lag.
980 * @param pVM The cross context VM structure.
981 */
982VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
983{
984 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
985}
986
987
988/**
989 * Get the current catch-up percent.
990 *
991 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
992 * @param pVM The cross context VM structure.
993 */
994VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
995{
996 if (pVM->tm.s.fVirtualSyncCatchUp)
997 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
998 return 0;
999}
1000
1001
1002/**
1003 * Gets the current TMCLOCK_VIRTUAL frequency.
1004 *
1005 * @returns The frequency.
1006 * @param pVM The cross context VM structure.
1007 */
1008VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
1009{
1010 NOREF(pVM);
1011 return TMCLOCK_FREQ_VIRTUAL;
1012}
1013
1014
1015/**
1016 * Worker for TMR3PauseClocks.
1017 *
1018 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1019 * @param pVM The cross context VM structure.
1020 */
1021int tmVirtualPauseLocked(PVMCC pVM)
1022{
1023 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
1024 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1025 if (c == 0)
1026 {
1027 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
1028 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
1029 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
1030 }
1031 return VINF_SUCCESS;
1032}
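/*
 * Note (descriptive): cVirtualTicking acts as a reference count, at most one
 * reference per VCPU (cf. the assertion above), so the virtual clock only really
 * stops when the last VCPU pauses it; at that point the current raw time is
 * captured in u64Virtual so the clock can later be resumed from the same value.
 */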
1033
1034
1035/**
1036 * Worker for TMR3ResumeClocks.
1037 *
1038 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1039 * @param pVM The cross context VM structure.
1040 */
1041int tmVirtualResumeLocked(PVMCC pVM)
1042{
1043 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
1044 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1045 if (c == 1)
1046 {
1047 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
1048 pVM->tm.s.u64VirtualRawPrev = 0;
1049 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
1050 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
1051 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
1052 }
1053 return VINF_SUCCESS;
1054}
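/*
 * Worked example (illustrative, numbers made up): if the clock was paused with
 * u64Virtual = 5'000'000'000 ns and the raw host clock reads 100'000'000'000 ns
 * when the last reference is resumed, u64VirtualOffset becomes 95'000'000'000 ns,
 * so tmVirtualGetRaw() (raw time minus the offset) continues from the 5 second
 * mark as if no host time had passed while the VM was paused.
 */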
1055
1056
1057/**
1058 * Converts from virtual ticks to nanoseconds.
1059 *
1060 * @returns nanoseconds.
1061 * @param pVM The cross context VM structure.
1062 * @param u64VirtualTicks The virtual ticks to convert.
1063 * @remark There could be rounding errors here. We just do a simple integer divide
1064 * without any adjustments.
1065 */
1066VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
1067{
1068 NOREF(pVM);
1069 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1070 return u64VirtualTicks;
1071}
1072
1073
1074/**
1075 * Converts from virtual ticks to microseconds.
1076 *
1077 * @returns microseconds.
1078 * @param pVM The cross context VM structure.
1079 * @param u64VirtualTicks The virtual ticks to convert.
1080 * @remark There could be rounding errors here. We just do a simple integer divide
1081 * without any adjustments.
1082 */
1083VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
1084{
1085 NOREF(pVM);
1086 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1087 return u64VirtualTicks / 1000;
1088}
1089
1090
1091/**
1092 * Converts from virtual ticks to milliseconds.
1093 *
1094 * @returns milliseconds.
1095 * @param pVM The cross context VM structure.
1096 * @param u64VirtualTicks The virtual ticks to convert.
1097 * @remark There could be rounding errors here. We just do a simple integer divide
1098 * without any adjustments.
1099 */
1100VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
1101{
1102 NOREF(pVM);
1103 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1104 return u64VirtualTicks / 1000000;
1105}
1106
1107
1108/**
1109 * Converts from nanoseconds to virtual ticks.
1110 *
1111 * @returns virtual ticks.
1112 * @param pVM The cross context VM structure.
1113 * @param u64NanoTS The nanosecond value to convert.
1114 * @remark There could be rounding and overflow errors here.
1115 */
1116VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
1117{
1118 NOREF(pVM);
1119 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1120 return u64NanoTS;
1121}
1122
1123
1124/**
1125 * Converts from microseconds to virtual ticks.
1126 *
1127 * @returns virtual ticks.
1128 * @param pVM The cross context VM structure.
1129 * @param u64MicroTS The microsecond value to convert.
1130 * @remark There could be rounding and overflow errors here.
1131 */
1132VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
1133{
1134 NOREF(pVM);
1135 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1136 return u64MicroTS * 1000;
1137}
1138
1139
1140/**
1141 * Converts from milliseconds to virtual ticks.
1142 *
1143 * @returns virtual ticks.
1144 * @param pVM The cross context VM structure.
1145 * @param u64MilliTS The millisecond value to convert.
1146 * @remark There could be rounding and overflow errors here.
1147 */
1148VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1149{
1150 NOREF(pVM);
1151 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1152 return u64MilliTS * 1000000;
1153}
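/*
 * Illustrative note: since TMCLOCK_FREQ_VIRTUAL is 1'000'000'000 Hz these
 * conversions are plain scalings by powers of 1000. The multiplications can wrap
 * for huge inputs, e.g. TMVirtualFromMilli() overflows for values above
 * 2^64 / 1'000'000 milliseconds (roughly 584 years worth), so the overflow remark
 * above is mostly a theoretical concern.
 */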
1154