source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@ 103832

Last change on this file since 103832 was 101025, checked in by vboxsync, 15 months ago

VMM/VMMAll: Compilation fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 43.9 KB
1/* $Id: TMAllVirtual.cpp 101025 2023-09-06 08:29:11Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_TM
33#include <VBox/vmm/tm.h>
34#include <VBox/vmm/dbgftrace.h>
35#ifdef IN_RING3
36# include <iprt/thread.h>
37#endif
38#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
39# include <iprt/x86.h>
40#endif
41#include "TMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/vmm/vmm.h>
44#include <VBox/err.h>
45#include <VBox/log.h>
46#include <VBox/sup.h>
47
48#include <iprt/time.h>
49#include <iprt/assert.h>
50#include <iprt/asm.h>
51#include <iprt/asm-math.h>
52
53
54
55/**
56 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
57 */
58DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
59 uint64_t u64PrevNanoTS)
60{
61 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
62 pData->cBadPrev++;
63 if ((int64_t)u64DeltaPrev < 0)
64 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
65 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
66 else
67 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
68 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
69}
70
71
72#ifdef IN_RING3
73/**
74 * @callback_method_impl{FNTIMENANOTSINTERNAL, For driverless mode.}
75 */
76static DECLCALLBACK(uint64_t) tmR3VirtualNanoTSDriverless(PRTTIMENANOTSDATA pData, PRTTIMENANOTSEXTRA pExtra)
77{
78 RT_NOREF(pData);
79 if (pExtra)
80 pExtra->uTSCValue = ASMReadTSC();
81 return RTTimeSystemNanoTS();
82}
83#endif
84
85
86/**
87 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
88 *
89 * This is the initial worker, so the first call in each context ends up here.
90 * It is also used should the delta rating of the host CPUs change or if the
91 * fGetGipCpu feature the current worker relies upon becomes unavailable. The
92 * last two events may occur as CPUs are taken online.
93 */
94DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTTIMENANOTSEXTRA pExtra)
95{
96 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
97 PFNTIMENANOTSINTERNAL pfnWorker;
98
99 /*
100 * We require a valid GIP for the selection below.
101 * Invalid GIP is fatal, though we have to allow no GIP in driverless mode (ring-3 only).
102 */
103 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
104#ifdef IN_RING3
105 if (pGip)
106#endif
107 {
108 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
109 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
110 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
111 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
112
113 /*
114 * Determine the new worker.
115 */
116#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
117 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
118#endif
119 switch (pGip->u32Mode)
120 {
121#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
122 case SUPGIPMODE_SYNC_TSC:
123 case SUPGIPMODE_INVARIANT_TSC:
124# ifdef IN_RING0
125 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
126 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
127 else
128 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
129# else
130 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
131 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
132 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
133 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
134 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
135 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
136 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
137 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
138 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
139 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
140 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
141 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
142 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
143 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
144 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
145 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
146 else
147 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
148 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
149 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
150# endif
151 break;
152
153 case SUPGIPMODE_ASYNC_TSC:
154# ifdef IN_RING0
155 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
156# else
157 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
158 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
159 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
160 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
161 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
162 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
163 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
164 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
165 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
166 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
167 else
168 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
169# endif
170 break;
171#endif
172 default:
173 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
174 }
175 }
176#ifdef IN_RING3
177 else
178 pfnWorker = tmR3VirtualNanoTSDriverless;
179#endif
180
181 /*
182 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
183 */
184 ASMAtomicWritePtr((void * volatile *)&pVM->VMCC_CTX(tm).s.pfnVirtualGetRaw, (void *)(uintptr_t)pfnWorker);
185 return pfnWorker(pData, pExtra);
186}
187
188
189/**
190 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
191 */
192DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTTIMENANOTSEXTRA pExtra,
193 uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
194{
195 PVMCC pVM = RT_FROM_CPP_MEMBER(pData, VMCC, VMCC_CTX(tm).s.VirtualGetRawData);
196 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x pExtra=%p\n", pVM, idApic, iCpuSet, iGipCpu, pExtra));
197#ifndef _MSC_VER
198 return UINT64_MAX;
199#endif
200}
201
202
203/**
204 * Wrapper around the IPRT GIP time methods.
205 */
206DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
207{
208#ifdef IN_RING3
209 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, NULL /*pExtra*/);
210#elif defined(IN_RING0)
211 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
212 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, NULL /*pExtra*/);
213 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
214 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
215#else
216# error "unsupported context"
217#endif
218 /*DBGFTRACE_POS_U64(pVM, u64);*/
219 return u64;
220}
221
222
223/**
224 * Wrapper around the IPRT GIP time methods, extended version.
225 */
226DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
227{
228 RTTIMENANOTSEXTRA Extra;
229#ifdef IN_RING3
230 uint64_t u64 = pVM->tm.s.pfnVirtualGetRaw(&pVM->tm.s.VirtualGetRawData, &Extra);
231#elif defined(IN_RING0)
232 uint32_t cPrevSteps = pVM->tmr0.s.VirtualGetRawData.c1nsSteps;
233 uint64_t u64 = pVM->tmr0.s.pfnVirtualGetRaw(&pVM->tmr0.s.VirtualGetRawData, &Extra);
234 if (cPrevSteps != pVM->tmr0.s.VirtualGetRawData.c1nsSteps)
235 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
236#else
237# error "unsupported context"
238#endif
239 if (puTscNow)
240 *puTscNow = Extra.uTSCValue;
241 /*DBGFTRACE_POS_U64(pVM, u64);*/
242 return u64;
243}
244
245
246/**
247 * Get the time when we're not running at 100%
248 *
249 * @returns The timestamp.
250 * @param pVM The cross context VM structure.
251 * @param puTscNow Where to return the TSC corresponding to the returned
252 * timestamp (delta adjusted). Optional.
253 */
254static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
255{
256 /*
257 * Recalculate the RTTimeNanoTS() value for the period where
258 * warp drive has been enabled.
259 */
260 uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
261 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
262 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
263 u64 /= 100;
264 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
265
266 /*
267 * Now we apply the virtual time offset.
268 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
269 * machine started if it had been running continuously without any suspends.)
270 */
271 u64 -= pVM->tm.s.u64VirtualOffset;
272 return u64;
273}
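/*
 * Worked example for the warp drive math above (illustrative numbers only, not
 * from the source): with u32VirtualWarpDrivePercentage = 200 and a raw reading
 * 10ms past u64VirtualWarpDriveStart, the scaled value is 10ms * 200 / 100 =
 * 20ms past the warp start, i.e. guest time advances twice as fast as host
 * time; a percentage of 50 would slow it to 5ms instead.  The virtual offset
 * is then subtracted just as in the normal (non-warped) path.
 */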
274
275
276/**
277 * Get the raw virtual time.
278 *
279 * @returns The current time stamp.
280 * @param pVM The cross context VM structure.
281 */
282DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
283{
284 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
285 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
286 return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
287}
288
289
290/**
291 * Get the raw virtual time, extended version.
292 *
293 * @returns The current time stamp.
294 * @param pVM The cross context VM structure.
295 * @param puTscNow Where to return the TSC corresponding to the returned
296 * timestamp (delta adjusted). Optional.
297 */
298DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
299{
300 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
301 return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
302 return tmVirtualGetRawNonNormal(pVM, puTscNow);
303}
304
305
306/**
307 * Inlined version of tmVirtualGetEx.
308 */
309DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
310{
311 uint64_t u64;
312 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
313 {
314 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
315 u64 = tmVirtualGetRaw(pVM);
316
317 /*
318 * Use the chance to check for expired timers.
319 */
320 if (fCheckTimers)
321 {
322 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
323 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
324 && !pVM->tm.s.fRunningQueues
325 && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64
326 || ( pVM->tm.s.fVirtualSyncTicking
327 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
328 )
329 )
330 && !pVM->tm.s.fRunningQueues
331 )
332 {
333 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
334 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
335 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
336#ifdef IN_RING3
337 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
338#endif
339 }
340 }
341 }
342 else
343 u64 = pVM->tm.s.u64Virtual;
344 return u64;
345}
346
347
348/**
349 * Gets the current TMCLOCK_VIRTUAL time
350 *
351 * @returns The timestamp.
352 * @param pVM The cross context VM structure.
353 *
354 * @remark While the flow of time will never go backwards, the speed at which it
355 * progresses varies due to inaccuracies in RTTimeNanoTS and the TSC. The latter can be
356 * influenced by power saving (SpeedStep, PowerNow!), while the former
357 * makes use of TSC and kernel timers.
358 */
359VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
360{
361 return tmVirtualGet(pVM, true /*fCheckTimers*/);
362}
363
364
365/**
366 * Gets the current TMCLOCK_VIRTUAL time without checking
367 * timers or anything.
368 *
369 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
370 *
371 * @returns The timestamp.
372 * @param pVM The cross context VM structure.
373 *
374 * @remarks See TMVirtualGet.
375 */
376VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
377{
378 return tmVirtualGet(pVM, false /*fCheckTimers*/);
379}
380
381
382/**
383 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
384 *
385 * @returns Host nano second count.
386 * @param pVM The cross context VM structure.
387 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
388 */
389DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
390{
391 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
392 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
393 return cVirtTicksToDeadline;
394}
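/*
 * Worked example (illustrative numbers only): with a warp drive percentage of
 * 200, a deadline of 10ms in TMCLOCK_VIRTUAL ticks maps to 10ms * 100 / 200 =
 * 5ms of host time, since the warped virtual clock advances twice as fast as
 * the host clock.  Without warp drive the interval is returned unchanged.
 */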
395
396
397/**
398 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
399 *
400 * @returns The timestamp.
401 * @param pVM The cross context VM structure.
402 * @param u64 raw virtual time.
403 * @param off offVirtualSync.
404 * @param pcNsToDeadline Where to return the number of nano seconds to
405 * the next virtual sync timer deadline. Can be
406 * NULL.
407 * @param pnsAbsDeadline Where to return the absolute deadline.
408 * Optional.
409 */
410DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
411 uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
412{
413 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
414
415 /*
416 * Don't make updates until we've checked the timer queue.
417 */
418 bool fUpdatePrev = true;
419 bool fUpdateOff = true;
420 bool fStop = false;
421 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
422 uint64_t u64Delta = u64 - u64Prev;
423 if (RT_LIKELY(!(u64Delta >> 32)))
424 {
425 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
426 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
427 {
428 off -= u64Sub;
429 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
430 }
431 else
432 {
433 /* we've completely caught up. */
434 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
435 off = pVM->tm.s.offVirtualSyncGivenUp;
436 fStop = true;
437 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
438 }
439 }
440 else
441 {
442 /* More than 4 seconds since last time (or negative), ignore it. */
443 fUpdateOff = false;
444 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
445 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
446 }
447
448 /*
449 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
450 * approach is to never pass the head timer. So, when we would pass it, we
451 * stop the clock and set the timer pending flag.
452 */
453 u64 -= off;
454
455 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
456 if (u64Last > u64)
457 {
458 u64 = u64Last + 1;
459 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
460 }
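    /*
     * Note: the "+ 1" above keeps the TMCLOCK_VIRTUAL_SYNC values handed out to
     * callers monotonically increasing even when offset/catch-up updates would
     * otherwise let a later read compute a slightly smaller value than an
     * earlier one.
     */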
461
462 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
463 if (pnsAbsDeadline)
464 *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
465 thru this code over and over again even if there aren't any timer changes. */
466 if (u64 < u64Expire)
467 {
468 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
469 if (fUpdateOff)
470 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
471 if (fStop)
472 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
473 if (fUpdatePrev)
474 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
475 if (pcNsToDeadline)
476 {
477 uint64_t cNsToDeadline = u64Expire - u64;
478 if (pVM->tm.s.fVirtualSyncCatchUp)
479 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
480 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
481 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
482 }
483 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
484 }
485 else
486 {
487 u64 = u64Expire;
488 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
489 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
490
491 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
492 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
493 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
494 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
495 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
496 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
497
498 if (pcNsToDeadline)
499 *pcNsToDeadline = 0;
500#ifdef IN_RING3
501 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
502#endif
503 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
504 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
505 }
506 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
507
508 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
509 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
510 return u64;
511}
512
513
514/**
515 * tmVirtualSyncGetEx worker for when we get the lock.
516 *
517 * @returns The timestamp.
518 * @param pVM The cross context VM structure.
519 * @param u64 The virtual clock timestamp.
520 * @param pcNsToDeadline Where to return the number of nano seconds to
521 * the next virtual sync timer deadline. Can be
522 * NULL.
523 * @param pnsAbsDeadline Where to return the absolute deadline.
524 * Optional.
525 */
526DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
527{
528 /*
529 * Not ticking?
530 */
531 if (!pVM->tm.s.fVirtualSyncTicking)
532 {
533 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
534 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
535 if (pcNsToDeadline)
536 *pcNsToDeadline = 0;
537 if (pnsAbsDeadline)
538 *pnsAbsDeadline = u64;
539 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
540 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
541 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
542 return u64;
543 }
544
545 /*
546 * Handle catch up in a separate function.
547 */
548 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
549 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
550 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);
551
552 /*
553 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
554 * approach is to never pass the head timer. So, when we would pass it, we
555 * stop the clock and set the timer pending flag.
556 */
557 u64 -= off;
558
559 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
560 if (u64Last > u64)
561 {
562 u64 = u64Last + 1;
563 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
564 }
565
566 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
567 if (pnsAbsDeadline)
568 *pnsAbsDeadline = u64Expire;
569 if (u64 < u64Expire)
570 {
571 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
572 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
573 if (pcNsToDeadline)
574 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
575 }
576 else
577 {
578 u64 = u64Expire;
579 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
580 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
581
582 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
583 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
584 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
585 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
586 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
587 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
588
589#ifdef IN_RING3
590 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
591#endif
592 if (pcNsToDeadline)
593 *pcNsToDeadline = 0;
594 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
595 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
596 }
597 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
598 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
599 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
600 return u64;
601}
602
603
604/**
605 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
606 *
607 * @returns The timestamp.
608 * @param pVM The cross context VM structure.
609 * @param fCheckTimers Check timers or not
610 * @param pcNsToDeadline Where to return the number of nano seconds to
611 * the next virtual sync timer deadline. Can be
612 * NULL.
613 * @param pnsAbsDeadline Where to return the absolute deadline.
614 * Optional.
615 * @param puTscNow Where to return the TSC corresponding to the
616 * returned timestamp (delta adjusted). Optional.
617 * @thread EMT.
618 */
619DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
620 uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
621{
622 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
623
624 uint64_t u64;
625 if (!pVM->tm.s.fVirtualSyncTicking)
626 {
627 if (pcNsToDeadline)
628 *pcNsToDeadline = 0;
629 u64 = pVM->tm.s.u64VirtualSync;
630 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
631 return u64;
632 }
633
634 /*
635 * Query the virtual clock and do the usual expired timer check.
636 */
637 Assert(pVM->tm.s.cVirtualTicking);
638 u64 = tmVirtualGetRawEx(pVM, puTscNow);
639 if (fCheckTimers)
640 {
641 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
642 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
643 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64)
644 {
645 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
646 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
647#ifdef IN_RING3
648 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
649#endif
650 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
651 }
652 }
653
654 /*
655 * If we can get the lock, get it. The result is much more reliable.
656 *
657 * Note! This is where all clock source devices branch off because they
658 * will be owning the lock already. The 'else' is taken by code
659 * which is less picky or hasn't been adjusted yet.
660 */
661 /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
662 * here and the remainder of this function in a static worker. */
663 if (PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
664 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
665
666 /*
667 * When the clock is ticking, not doing catch ups and not running into an
668 * expired time, we can get away without locking. Try this first.
669 */
670 uint64_t off;
671 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
672 {
673 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
674 {
675 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
676 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
677 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
678 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
679 {
680 off = u64 - off;
681 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
682 if (off < u64Expire)
683 {
684 if (pnsAbsDeadline)
685 *pnsAbsDeadline = u64Expire;
686 if (pcNsToDeadline)
687 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
688 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
689 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
690 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
691 return off;
692 }
693 }
694 }
695 }
696 else
697 {
698 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
699 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
700 {
701 if (pcNsToDeadline)
702 *pcNsToDeadline = 0;
703 if (pnsAbsDeadline)
704 *pnsAbsDeadline = off;
705 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
706 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
707 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
708 return off;
709 }
710 }
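    /*
     * Note on the lockless attempt above: the offset is read, the ticking and
     * catch-up flags are re-checked, and the offset is read a second time; the
     * unlocked result is only used when nothing changed in between, otherwise
     * execution falls through to the retry loop below.  It is a read/re-check
     * pattern rather than a compare-and-swap, since this path never writes.
     */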
711
712 /*
713 * Read the offset and adjust if we're playing catch-up.
714 *
715 * The catch-up adjusting works by decrementing the offset by a percentage of
716 * the time elapsed since the previous TMVirtualGetSync call.
717 *
718 * It's possible to get a very long or even negative interval between two reads
719 * for the following reasons:
720 * - Someone might have suspended the process execution, frequently the case when
721 * debugging the process.
722 * - We might be on a different CPU whose TSC isn't quite in sync with the
723 * other CPUs in the system.
724 * - Another thread is racing us and we might have been preempted while inside
725 * this function.
726 *
727 * Assuming nano second virtual time, we can simply ignore any intervals which have
728 * any of the upper 32 bits set.
729 */
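    /*
     * Worked example for the catch-up adjustment (illustrative numbers only):
     * with u32VirtualSyncCatchUpPercentage = 25 and 8ms elapsed since the
     * previous call, the offset is reduced by 8ms * 25 / 100 = 2ms, i.e. the
     * virtual sync clock runs at 125% of the virtual clock until the lag
     * (beyond the given-up part) has been eliminated.
     */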
730 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
731 int cOuterTries = 42;
732 for (;; cOuterTries--)
733 {
734 /* Try grab the lock, things get simpler when owning the lock. */
735 int rcLock = PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock);
736 if (RT_SUCCESS_NP(rcLock))
737 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
738
739 /* Re-check the ticking flag. */
740 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
741 {
742 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
743 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
744 && cOuterTries > 0)
745 continue;
746 if (pcNsToDeadline)
747 *pcNsToDeadline = 0;
748 if (pnsAbsDeadline)
749 *pnsAbsDeadline = off;
750 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
751 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
752 return off;
753 }
754
755 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
756 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
757 {
758 /* No changes allowed, try to get a consistent set of parameters. */
759 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
760 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
761 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
762 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
763 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
764 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
765 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
766 || cOuterTries <= 0)
767 {
768 uint64_t u64Delta = u64 - u64Prev;
769 if (RT_LIKELY(!(u64Delta >> 32)))
770 {
771 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
772 if (off > u64Sub + offGivenUp)
773 {
774 off -= u64Sub;
775 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
776 }
777 else
778 {
779 /* we've completely caught up. */
780 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
781 off = offGivenUp;
782 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
783 }
784 }
785 else
786 /* More than 4 seconds since last time (or negative), ignore it. */
787 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
788
789 /* Check that we're still running and in catch up. */
790 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
791 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
792 break;
793 if (cOuterTries <= 0)
794 break; /* enough */
795 }
796 }
797 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
798 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
799 break; /* Got a consistent offset */
800 else if (cOuterTries <= 0)
801 break; /* enough */
802 }
803 if (cOuterTries <= 0)
804 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
805
806 /*
807 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
808 * approach is to never pass the head timer. So, when we would pass it, we
809 * stop the clock and set the timer pending flag.
810 */
811 u64 -= off;
812/** @todo u64VirtualSyncLast */
813 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
814 if (pnsAbsDeadline)
815 *pnsAbsDeadline = u64Expire;
816 if (u64 >= u64Expire)
817 {
818 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
819 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
820 {
821 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
822 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
823 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
824#ifdef IN_RING3
825 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
826#endif
827 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
828 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
829 }
830 else
831 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
832 if (pcNsToDeadline)
833 *pcNsToDeadline = 0;
834 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
835 }
836 else if (pcNsToDeadline)
837 {
838 uint64_t cNsToDeadline = u64Expire - u64;
839 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
840 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
841 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
842 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
843 }
844
845 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
846 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
847 return u64;
848}
849
850
851/**
852 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
853 *
854 * @returns The timestamp.
855 * @param pVM The cross context VM structure.
856 * @thread EMT.
857 * @remarks May set the timer and virtual sync FFs.
858 */
859VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
860{
861 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
862}
863
864
865/**
866 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
867 * TMCLOCK_VIRTUAL.
868 *
869 * @returns The timestamp.
870 * @param pVM The cross context VM structure.
871 * @thread EMT.
872 * @remarks May set the timer and virtual sync FFs.
873 */
874VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
875{
876 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
877}
878
879
880/**
881 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
882 * TMCLOCK_VIRTUAL, also returning corresponding TSC value.
883 *
884 * @returns The timestamp.
885 * @param pVM The cross context VM structure.
886 * @param puTscNow Where to return the TSC value that the return
887 * value is relative to. This is delta adjusted.
888 * @thread EMT.
889 * @remarks May set the timer and virtual sync FFs.
890 */
891VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
892{
893 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
894}
895
896
897/**
898 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
899 *
900 * @returns The timestamp.
901 * @param pVM The cross context VM structure.
902 * @param fCheckTimers Check timers on the virtual clock or not.
903 * @thread EMT.
904 * @remarks May set the timer and virtual sync FFs.
905 */
906VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
907{
908 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
909}
910
911
912/**
913 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
914 * without checking timers running on TMCLOCK_VIRTUAL.
915 *
916 * @returns The timestamp.
917 * @param pVM The cross context VM structure.
918 * @param pcNsToDeadline Where to return the number of nano seconds to
919 * the next virtual sync timer deadline.
920 * @param puTscNow Where to return the TSC value that the return
921 * value is relative to. This is delta adjusted.
922 * @param puDeadlineVersion Where to return the deadline "version" number.
923 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
924 * to check if the absolute deadline is still up to
925 * date and the caller can skip calling this
926 * function.
927 * @thread EMT.
928 * @remarks May set the timer and virtual sync FFs.
929 */
930VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
931 uint64_t *puDeadlineVersion, uint64_t *puTscNow)
932{
933 uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
934 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
935 *pcNsToDeadline = cNsToDeadlineTmp;
936 return u64Now;
937}
938
939
940/**
941 * Gets the number of nano seconds to the next virtual sync deadline.
942 *
943 * @returns The number of TMCLOCK_VIRTUAL ticks.
944 * @param pVM The cross context VM structure.
945 * @param puTscNow Where to return the TSC value that the return
946 * value is relative to. This is delta adjusted.
947 * @param puDeadlineVersion Where to return the deadline "version" number.
948 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
949 * to check if the absolute deadline is still up to
950 * date and the caller can skip calling this
951 * function.
952 * @thread EMT.
953 * @remarks May set the timer and virtual sync FFs.
954 */
955VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
956{
957 uint64_t cNsToDeadline;
958 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
959 return cNsToDeadline;
960}
961
962
963/**
964 * Checks if the given deadline is still current.
965 *
966 * @retval true if the deadline is still current.
967 * @retval false if the deadline is outdated.
968 * @param pVM The cross context VM structure.
969 * @param uDeadlineVersion The deadline version to check.
970 */
971VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
972{
973 /** @todo Try use ASMAtomicUoReadU64 instead. */
974 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
975 return u64Expire == uDeadlineVersion;
976}
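/*
 * Usage sketch (hypothetical caller, not part of this file): a scheduler can
 * cache the deadline and only requery TM when the version goes stale:
 *
 *     uint64_t uDeadlineVersion, uTscNow;
 *     uint64_t cNsToDeadline = TMVirtualSyncGetNsToDeadline(pVM, &uDeadlineVersion, &uTscNow);
 *     // ... arm a host timer for cNsToDeadline nanoseconds ...
 *     if (!TMVirtualSyncIsCurrentDeadlineVersion(pVM, uDeadlineVersion))
 *     {
 *         // a timer was armed or changed; re-read the deadline
 *     }
 */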
977
978
979/**
980 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
981 *
982 * @return The current lag.
983 * @param pVM The cross context VM structure.
984 */
985VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
986{
987 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
988}
989
990
991/**
992 * Get the current catch-up percent.
993 *
994 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
995 * @param pVM The cross context VM structure.
996 */
997VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
998{
999 if (pVM->tm.s.fVirtualSyncCatchUp)
1000 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
1001 return 0;
1002}
1003
1004
1005/**
1006 * Gets the current TMCLOCK_VIRTUAL frequency.
1007 *
1008 * @returns The frequency.
1009 * @param pVM The cross context VM structure.
1010 */
1011VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
1012{
1013 NOREF(pVM);
1014 return TMCLOCK_FREQ_VIRTUAL;
1015}
1016
1017
1018/**
1019 * Worker for TMR3PauseClocks.
1020 *
1021 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1022 * @param pVM The cross context VM structure.
1023 */
1024int tmVirtualPauseLocked(PVMCC pVM)
1025{
1026 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
1027 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1028 if (c == 0)
1029 {
1030 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
1031 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
1032 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
1033 }
1034 return VINF_SUCCESS;
1035}
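/*
 * Note: cVirtualTicking acts as a reference count checked against cCpus, so
 * each EMT is expected to contribute one pause/resume; only when the count
 * drops to zero is the current raw time latched into u64Virtual and the sync
 * clock marked as stopped.
 */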
1036
1037
1038/**
1039 * Worker for TMR3ResumeClocks.
1040 *
1041 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1042 * @param pVM The cross context VM structure.
1043 */
1044int tmVirtualResumeLocked(PVMCC pVM)
1045{
1046 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
1047 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1048 if (c == 1)
1049 {
1050 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
1051 pVM->tm.s.u64VirtualRawPrev = 0;
1052 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
1053 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
1054 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
1055 }
1056 return VINF_SUCCESS;
1057}
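/*
 * Illustrative note on the resume path: the offset is recomputed so the virtual
 * clock continues exactly where the pause left it.  E.g. if the clock was paused
 * at u64Virtual = 5s and the raw NanoTS source now reads 90s, the new
 * u64VirtualOffset becomes 90s - 5s = 85s, so the next tmVirtualGetRaw() again
 * yields ~5s and the time spent paused is invisible to the guest.
 */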
1058
1059
1060/**
1061 * Converts from virtual ticks to nanoseconds.
1062 *
1063 * @returns nanoseconds.
1064 * @param pVM The cross context VM structure.
1065 * @param u64VirtualTicks The virtual ticks to convert.
1066 * @remark There could be rounding errors here. We just do a simple integer divide
1067 * without any adjustments.
1068 */
1069VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
1070{
1071 NOREF(pVM);
1072 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1073 return u64VirtualTicks;
1074}
1075
1076
1077/**
1078 * Converts from virtual ticks to microseconds.
1079 *
1080 * @returns microseconds.
1081 * @param pVM The cross context VM structure.
1082 * @param u64VirtualTicks The virtual ticks to convert.
1083 * @remark There could be rounding errors here. We just do a simple integer divide
1084 * without any adjustments.
1085 */
1086VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
1087{
1088 NOREF(pVM);
1089 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1090 return u64VirtualTicks / 1000;
1091}
1092
1093
1094/**
1095 * Converts from virtual ticks to milliseconds.
1096 *
1097 * @returns milliseconds.
1098 * @param pVM The cross context VM structure.
1099 * @param u64VirtualTicks The virtual ticks to convert.
1100 * @remark There could be rounding errors here. We just do a simple integer divide
1101 * without any adjustments.
1102 */
1103VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
1104{
1105 NOREF(pVM);
1106 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1107 return u64VirtualTicks / 1000000;
1108}
1109
1110
1111/**
1112 * Converts from nanoseconds to virtual ticks.
1113 *
1114 * @returns virtual ticks.
1115 * @param pVM The cross context VM structure.
1116 * @param u64NanoTS The nanosecond value to convert.
1117 * @remark There could be rounding and overflow errors here.
1118 */
1119VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
1120{
1121 NOREF(pVM);
1122 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1123 return u64NanoTS;
1124}
1125
1126
1127/**
1128 * Converts from microseconds to virtual ticks.
1129 *
1130 * @returns virtual ticks.
1131 * @param pVM The cross context VM structure.
1132 * @param u64MicroTS The microsecond value to convert.
1133 * @remark There could be rounding and overflow errors here.
1134 */
1135VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
1136{
1137 NOREF(pVM);
1138 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1139 return u64MicroTS * 1000;
1140}
1141
1142
1143/**
1144 * Converts from milliseconds to virtual ticks.
1145 *
1146 * @returns virtual ticks.
1147 * @param pVM The cross context VM structure.
1148 * @param u64MilliTS The millisecond value to convert.
1149 * @remark There could be rounding and overflow errors here.
1150 */
1151VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1152{
1153 NOREF(pVM);
1154 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1155 return u64MilliTS * 1000000;
1156}
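/*
 * Worked example (illustrative numbers only): TMCLOCK_FREQ_VIRTUAL is 1GHz, so
 * one virtual tick equals one nanosecond.  TMVirtualFromMilli(pVM, 10) therefore
 * yields 10000000 ticks, and TMVirtualToMilli(pVM, 10000000) converts that back
 * to 10; the divides simply truncate any sub-millisecond remainder.
 */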
1157