VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@92780

Last change on this file since 92780 was 92709, checked in by vboxsync, 3 years ago

VMM/TM,SUP: Made it thru TM init in driverless mode... bugref:10138

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 43.4 KB
1/* $Id: TMAllVirtual.cpp 92709 2021-12-02 13:56:44Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/dbgftrace.h>
25#ifdef IN_RING3
26# include <iprt/thread.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vmm/vmcc.h>
30#include <VBox/vmm/vmm.h>
31#include <VBox/err.h>
32#include <VBox/log.h>
33#include <VBox/sup.h>
34
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39
40
41
42/**
43 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
44 */
45DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
46 uint64_t u64PrevNanoTS)
47{
48 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
49 pData->cBadPrev++;
50 if ((int64_t)u64DeltaPrev < 0)
51 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
52 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
53 else
54 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
55 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
56}
57
58
59#ifdef IN_RING3
60/**
61 * @callback_method_impl{FNTIMENANOTSINTERNAL, For driverless mode.}
62 */
63static DECLCALLBACK(uint64_t) tmR3VirtualNanoTSDriverless(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
64{
65 RT_NOREF(pData);
66 if (pExtra)
67 pExtra->uTSCValue = ASMReadTSC();
68 return RTTimeSystemNanoTS();
69}
70#endif
71
72
73/**
74 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
75 *
76 * This is the initial worker, so the first call in each context ends up here.
77 * It is also used should the delta rating of the host CPUs change or if the
78 * fGetGipCpu feature the current worker relies upon becomes unavailable. The
79 * last two events may occur as CPUs are taken online.
80 */
81DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
82{
83 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
84 PFNTIMENANOTSINTERNAL pfnWorker;
85
86 /*
87 * We require a valid GIP for the selection below.
88 * Invalid GIP is fatal, though we have to allow no GIP in driverless mode (ring-3 only).
89 */
90 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
91#ifdef IN_RING3
92 if (pGip)
93#endif
94 {
95 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
96 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
97 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
98 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
99
100 /*
101 * Determine the new worker.
102 */
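 /* Note: LFENCE arrived with SSE2, hence the CPUID leaf 1 EDX check below; fLFence
    selects the LFence flavour of the RTTimeNanoTS workers, otherwise the Legacy
    flavour is used. */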
103 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
104 switch (pGip->u32Mode)
105 {
106 case SUPGIPMODE_SYNC_TSC:
107 case SUPGIPMODE_INVARIANT_TSC:
108#ifdef IN_RING0
109 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
110 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
111 else
112 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
113#else
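 /* In ring-3 the worker must identify the current CPU itself whenever per-CPU
    TSC deltas have to be applied, so the fGetGipCpu flags below pick the cheapest
    available method: IDTR limit, RDTSCP aux value, or one of the APIC ID variants. */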
114 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
115 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
116 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
117 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
118 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
119 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
120 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
121 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
122 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
123 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
124 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
125 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
126 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
127 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
128 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
129 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
130 else
131 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
132 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
133 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
134#endif
135 break;
136
137 case SUPGIPMODE_ASYNC_TSC:
138#ifdef IN_RING0
139 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
140#else
141 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
142 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
143 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
144 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
145 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
146 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
147 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
148 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
149 else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
150 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
151 else
152 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
153#endif
154 break;
155
156 default:
157 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
158 }
159 }
160#ifdef IN_RING3
161 else
162 pfnWorker = tmR3VirtualNanoTSDriverless;
163#endif
164
165 /*
166 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
167 */
168 ASMAtomicWritePtr((void * volatile *)&CTX_SUFF(pVM->tm.s.pfnVirtualGetRaw), (void *)(uintptr_t)pfnWorker);
169 return pfnWorker(pData, pExtra);
170}
171
172
173/**
174 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
175 */
176DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra,
177 uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
178{
179 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
180 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x pExtra=%p\n", pVM, idApic, iCpuSet, iGipCpu, pExtra));
181#ifndef _MSC_VER
182 return UINT64_MAX;
183#endif
184}
185
186
187/**
188 * Wrapper around the IPRT GIP time methods.
189 */
190DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
191{
192# ifdef IN_RING3
193 uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData), NULL /*pExtra*/);
194# else /* !IN_RING3 */
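 /* The worker bumps c1nsSteps whenever it has to step the returned value forward
    to keep it monotonic; if that happened during this call, force the EMT back to
    ring-3 so the situation can be handled there. */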
195 uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
196 uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData), NULL /*pExtra*/);
197 if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
198 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
199# endif /* !IN_RING3 */
200 /*DBGFTRACE_POS_U64(pVM, u64);*/
201 return u64;
202}
203
204
205/**
206 * Wrapper around the IPRT GIP time methods, extended version.
207 */
208DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
209{
210 RTITMENANOTSEXTRA Extra;
211# ifdef IN_RING3
212 uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData), &Extra);
213# else /* !IN_RING3 */
214 uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
215 uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData), &Extra);
216 if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
217 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
218# endif /* !IN_RING3 */
219 if (puTscNow)
220 *puTscNow = Extra.uTSCValue;
221 /*DBGFTRACE_POS_U64(pVM, u64);*/
222 return u64;
223}
224
225
226/**
227 * Get the time when we're not running at 100%.
228 *
229 * @returns The timestamp.
230 * @param pVM The cross context VM structure.
231 * @param puTscNow Where to return the TSC corresponding to the returned
232 * timestamp (delta adjusted). Optional.
233 */
234static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
235{
236 /*
237 * Recalculate the RTTimeNanoTS() value for the period where
238 * warp drive has been enabled.
239 */
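 /* I.e. u64 = warpStart + (raw - warpStart) * pct / 100, so guest virtual time
    advances at u32VirtualWarpDrivePercentage percent of the host rate from the
    moment the warp drive was engaged (e.g. at 200% one host second adds two
    seconds of virtual time). */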
240 uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
241 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
242 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
243 u64 /= 100;
244 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
245
246 /*
247 * Now we apply the virtual time offset.
248 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
249 * machine started if it had been running continuously without any suspends.)
250 */
251 u64 -= pVM->tm.s.u64VirtualOffset;
252 return u64;
253}
254
255
256/**
257 * Get the raw virtual time.
258 *
259 * @returns The current time stamp.
260 * @param pVM The cross context VM structure.
261 */
262DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
263{
264 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
265 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
266 return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
267}
268
269
270/**
271 * Get the raw virtual time, extended version.
272 *
273 * @returns The current time stamp.
274 * @param pVM The cross context VM structure.
275 * @param puTscNow Where to return the TSC corresponding to the returned
276 * timestamp (delta adjusted). Optional.
277 */
278DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
279{
280 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
281 return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
282 return tmVirtualGetRawNonNormal(pVM, puTscNow);
283}
284
285
286/**
287 * Inlined version of tmVirtualGetEx.
288 */
289DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
290{
291 uint64_t u64;
292 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
293 {
294 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
295 u64 = tmVirtualGetRaw(pVM);
296
297 /*
298 * Use the chance to check for expired timers.
299 */
300 if (fCheckTimers)
301 {
302 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
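 /* The virtual sync clock lags the virtual clock by offVirtualSync, so a virtual
    sync timer has expired once its expire time is at or below u64 - offVirtualSync. */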
303 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
304 && !pVM->tm.s.fRunningQueues
305 && ( pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64
306 || ( pVM->tm.s.fVirtualSyncTicking
307 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
308 )
309 )
310 && !pVM->tm.s.fRunningQueues
311 )
312 {
313 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
314 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
315 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
316#ifdef IN_RING3
317 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
318#endif
319 }
320 }
321 }
322 else
323 u64 = pVM->tm.s.u64Virtual;
324 return u64;
325}
326
327
328/**
329 * Gets the current TMCLOCK_VIRTUAL time.
330 *
331 * @returns The timestamp.
332 * @param pVM The cross context VM structure.
333 *
334 * @remark While the flow of time will never go backwards, the speed at which it
335 * progresses varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
336 * influenced by power saving (SpeedStep, PowerNow!), while the former
337 * makes use of TSC and kernel timers.
338 */
339VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
340{
341 return tmVirtualGet(pVM, true /*fCheckTimers*/);
342}
343
344
345/**
346 * Gets the current TMCLOCK_VIRTUAL time without checking
347 * timers or anything.
348 *
349 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
350 *
351 * @returns The timestamp.
352 * @param pVM The cross context VM structure.
353 *
354 * @remarks See TMVirtualGet.
355 */
356VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
357{
358 return tmVirtualGet(pVM, false /*fCheckTimers*/);
359}
360
361
362/**
363 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
364 *
365 * @returns Host nano second count.
366 * @param pVM The cross context VM structure.
367 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
368 */
369DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
370{
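 /* With the warp drive active, virtual ticks pass at pct/100 of the host rate, so
    the host wait is ticks * 100 / pct; e.g. a 500ms virtual interval at 50% warp
    takes a full host second. */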
371 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
372 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
373 return cVirtTicksToDeadline;
374}
375
376
377/**
378 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
379 *
380 * @returns The timestamp.
381 * @param pVM The cross context VM structure.
382 * @param u64 The raw virtual time.
383 * @param off The current offVirtualSync value.
384 * @param pcNsToDeadline Where to return the number of nano seconds to
385 * the next virtual sync timer deadline. Can be
386 * NULL.
387 * @param pnsAbsDeadline Where to return the absolute deadline.
388 * Optional.
389 */
390DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
391 uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
392{
393 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
394
395 /*
396 * Don't make any updates until we've checked the timer queue.
397 */
398 bool fUpdatePrev = true;
399 bool fUpdateOff = true;
400 bool fStop = false;
401 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
402 uint64_t u64Delta = u64 - u64Prev;
403 if (RT_LIKELY(!(u64Delta >> 32)))
404 {
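 /* The amount of lag we can shave off is the elapsed virtual time scaled by the
    catch-up percentage, e.g. 1ms elapsed at a 25% catch-up rate removes 250us from
    the offset (the virtual sync clock effectively runs at 125%). */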
405 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
406 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
407 {
408 off -= u64Sub;
409 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
410 }
411 else
412 {
413 /* we've completely caught up. */
414 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
415 off = pVM->tm.s.offVirtualSyncGivenUp;
416 fStop = true;
417 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
418 }
419 }
420 else
421 {
422 /* More than 4 seconds since last time (or negative), ignore it. */
423 fUpdateOff = false;
424 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
425 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
426 }
427
428 /*
429 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
430 * approach is to never pass the head timer, so when we would, we stop the clock
431 * and set the timer-pending flag instead.
432 */
433 u64 -= off;
434
435 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
436 if (u64Last > u64)
437 {
438 u64 = u64Last + 1;
439 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
440 }
441
442 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
443 if (pnsAbsDeadline)
444 *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
445 through this code over and over again even if there aren't any timer changes. */
446 if (u64 < u64Expire)
447 {
448 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
449 if (fUpdateOff)
450 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
451 if (fStop)
452 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
453 if (fUpdatePrev)
454 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
455 if (pcNsToDeadline)
456 {
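 /* Report how far away the next timer is: while catching up the virtual sync clock
    runs at (100 + pct)% of the virtual clock, so the deadline is reached sooner and
    the distance is scaled by 100 / (100 + pct) before the warp-drive conversion. */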
457 uint64_t cNsToDeadline = u64Expire - u64;
458 if (pVM->tm.s.fVirtualSyncCatchUp)
459 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
460 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
461 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
462 }
463 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
464 }
465 else
466 {
467 u64 = u64Expire;
468 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
469 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
470
471 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
472 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
473 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
474 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
475 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
476 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
477
478 if (pcNsToDeadline)
479 *pcNsToDeadline = 0;
480#ifdef IN_RING3
481 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
482#endif
483 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
484 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
485 }
486 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
487
488 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
489 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
490 return u64;
491}
492
493
494/**
495 * tmVirtualSyncGetEx worker for when we get the lock.
496 *
497 * @returns The timestamp.
498 * @param pVM The cross context VM structure.
499 * @param u64 The virtual clock timestamp.
500 * @param pcNsToDeadline Where to return the number of nano seconds to
501 * the next virtual sync timer deadline. Can be
502 * NULL.
503 * @param pnsAbsDeadline Where to return the absolute deadline.
504 * Optional.
505 */
506DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
507{
508 /*
509 * Not ticking?
510 */
511 if (!pVM->tm.s.fVirtualSyncTicking)
512 {
513 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
514 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
515 if (pcNsToDeadline)
516 *pcNsToDeadline = 0;
517 if (pnsAbsDeadline)
518 *pnsAbsDeadline = u64;
519 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
520 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
521 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
522 return u64;
523 }
524
525 /*
526 * Handle catch up in a separate function.
527 */
528 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
529 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
530 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);
531
532 /*
533 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
534 * approach is to never pass the head timer, so when we would, we stop the clock
535 * and set the timer-pending flag instead.
536 */
537 u64 -= off;
538
539 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
540 if (u64Last > u64)
541 {
542 u64 = u64Last + 1;
543 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
544 }
545
546 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
547 if (pnsAbsDeadline)
548 *pnsAbsDeadline = u64Expire;
549 if (u64 < u64Expire)
550 {
551 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
552 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
553 if (pcNsToDeadline)
554 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
555 }
556 else
557 {
558 u64 = u64Expire;
559 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
560 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
561
562 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
563 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
564 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
565 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
566 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
567 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
568
569#ifdef IN_RING3
570 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
571#endif
572 if (pcNsToDeadline)
573 *pcNsToDeadline = 0;
574 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
575 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
576 }
577 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
578 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
579 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
580 return u64;
581}
582
583
584/**
585 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
586 *
587 * @returns The timestamp.
588 * @param pVM The cross context VM structure.
589 * @param fCheckTimers Check timers or not.
590 * @param pcNsToDeadline Where to return the number of nano seconds to
591 * the next virtual sync timer deadline. Can be
592 * NULL.
593 * @param pnsAbsDeadline Where to return the absolute deadline.
594 * Optional.
595 * @param puTscNow Where to return the TSC corresponding to the
596 * returned timestamp (delta adjusted). Optional.
597 * @thread EMT.
598 */
599DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
600 uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
601{
602 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
603
604 uint64_t u64;
605 if (!pVM->tm.s.fVirtualSyncTicking)
606 {
607 if (pcNsToDeadline)
608 *pcNsToDeadline = 0;
609 u64 = pVM->tm.s.u64VirtualSync;
610 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
611 return u64;
612 }
613
614 /*
615 * Query the virtual clock and do the usual expired timer check.
616 */
617 Assert(pVM->tm.s.cVirtualTicking);
618 u64 = tmVirtualGetRawEx(pVM, puTscNow);
619 if (fCheckTimers)
620 {
621 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
622 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
623 && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64)
624 {
625 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
626 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
627#ifdef IN_RING3
628 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
629#endif
630 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
631 }
632 }
633
634 /*
635 * If we can get the lock, get it. The result is much more reliable.
636 *
637 * Note! This is where all clock source devices branch off because they
638 * will already be holding the lock. The 'else' is taken by code
639 * which is less picky or hasn't been adjusted yet.
640 */
641 /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
642 * here and the remainder of this function in a static worker. */
643 if (PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
644 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
645
646 /*
647 * When the clock is ticking, not doing catch-ups and not running into an
648 * expired time, we can get away without locking. Try this first.
649 */
650 uint64_t off;
651 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
652 {
653 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
654 {
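 /* Optimistic, lock-free snapshot: read the offset, then re-check the ticking and
    catch-up flags and re-read the offset; if nothing changed in between, the values
    are consistent and can be used without taking the lock. */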
655 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
656 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
657 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
658 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
659 {
660 off = u64 - off;
661 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
662 if (off < u64Expire)
663 {
664 if (pnsAbsDeadline)
665 *pnsAbsDeadline = u64Expire;
666 if (pcNsToDeadline)
667 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
668 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
669 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
670 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
671 return off;
672 }
673 }
674 }
675 }
676 else
677 {
678 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
679 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
680 {
681 if (pcNsToDeadline)
682 *pcNsToDeadline = 0;
683 if (pnsAbsDeadline)
684 *pnsAbsDeadline = off;
685 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
686 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
687 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
688 return off;
689 }
690 }
691
692 /*
693 * Read the offset and adjust if we're playing catch-up.
694 *
695 * The catch-up adjustment works by decrementing the offset by a percentage of
696 * the time elapsed since the previous TMVirtualGetSync call.
697 *
698 * It's possible to get a very long or even negative interval between two reads
699 * for the following reasons:
700 * - Someone might have suspended the process execution, frequently the case when
701 * debugging the process.
702 * - We might be on a different CPU whose TSC isn't quite in sync with the
703 * other CPUs in the system.
704 * - Another thread is racing us and we might have been preempted while inside
705 * this function.
706 *
707 * Assuming nano second virtual time, we can simply ignore any interval which has
708 * any of the upper 32 bits set.
709 */
710 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
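 /* Bounded retry loop: keep trying to either grab the lock or to snapshot a
    consistent set of catch-up parameters; after cOuterTries iterations we settle
    for whatever we last read rather than loop forever. */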
711 int cOuterTries = 42;
712 for (;; cOuterTries--)
713 {
714 /* Try grab the lock, things get simpler when owning the lock. */
715 int rcLock = PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock);
716 if (RT_SUCCESS_NP(rcLock))
717 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
718
719 /* Re-check the ticking flag. */
720 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
721 {
722 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
723 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
724 && cOuterTries > 0)
725 continue;
726 if (pcNsToDeadline)
727 *pcNsToDeadline = 0;
728 if (pnsAbsDeadline)
729 *pnsAbsDeadline = off;
730 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
731 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
732 return off;
733 }
734
735 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
736 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
737 {
738 /* No changes allowed, try to get a consistent set of parameters. */
739 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
740 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
741 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
742 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
743 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
744 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
745 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
746 || cOuterTries <= 0)
747 {
748 uint64_t u64Delta = u64 - u64Prev;
749 if (RT_LIKELY(!(u64Delta >> 32)))
750 {
751 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
752 if (off > u64Sub + offGivenUp)
753 {
754 off -= u64Sub;
755 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
756 }
757 else
758 {
759 /* we've completely caught up. */
760 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
761 off = offGivenUp;
762 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
763 }
764 }
765 else
766 /* More than 4 seconds since last time (or negative), ignore it. */
767 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
768
769 /* Check that we're still running and in catch up. */
770 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
771 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
772 break;
773 if (cOuterTries <= 0)
774 break; /* enough */
775 }
776 }
777 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
778 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
779 break; /* Got a consistent offset. */
780 else if (cOuterTries <= 0)
781 break; /* enough */
782 }
783 if (cOuterTries <= 0)
784 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
785
786 /*
787 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
788 * approach is to never pass the head timer, so when we would, we stop the clock
789 * and set the timer-pending flag instead.
790 */
791 u64 -= off;
792/** @todo u64VirtualSyncLast */
793 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
794 if (pnsAbsDeadline)
795 *pnsAbsDeadline = u64Expire;
796 if (u64 >= u64Expire)
797 {
798 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
799 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
800 {
801 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
802 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
803 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
804#ifdef IN_RING3
805 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
806#endif
807 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
808 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
809 }
810 else
811 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
812 if (pcNsToDeadline)
813 *pcNsToDeadline = 0;
814 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
815 }
816 else if (pcNsToDeadline)
817 {
818 uint64_t cNsToDeadline = u64Expire - u64;
819 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
820 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
821 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
822 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
823 }
824
825 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
826 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
827 return u64;
828}
829
830
831/**
832 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
833 *
834 * @returns The timestamp.
835 * @param pVM The cross context VM structure.
836 * @thread EMT.
837 * @remarks May set the timer and virtual sync FFs.
838 */
839VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
840{
841 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
842}
843
844
845/**
846 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
847 * TMCLOCK_VIRTUAL.
848 *
849 * @returns The timestamp.
850 * @param pVM The cross context VM structure.
851 * @thread EMT.
852 * @remarks May set the timer and virtual sync FFs.
853 */
854VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
855{
856 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
857}
858
859
860/**
861 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
862 * TMCLOCK_VIRTUAL, also returning corresponding TSC value.
863 *
864 * @returns The timestamp.
865 * @param pVM The cross context VM structure.
866 * @param puTscNow Where to return the TSC value that the return
867 * value is relative to. This is delta adjusted.
868 * @thread EMT.
869 * @remarks May set the timer and virtual sync FFs.
870 */
871VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
872{
873 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
874}
875
876
877/**
878 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
879 *
880 * @returns The timestamp.
881 * @param pVM The cross context VM structure.
882 * @param fCheckTimers Check timers on the virtual clock or not.
883 * @thread EMT.
884 * @remarks May set the timer and virtual sync FFs.
885 */
886VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
887{
888 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
889}
890
891
892/**
893 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
894 * without checking timers running on TMCLOCK_VIRTUAL.
895 *
896 * @returns The timestamp.
897 * @param pVM The cross context VM structure.
898 * @param pcNsToDeadline Where to return the number of nano seconds to
899 * the next virtual sync timer deadline.
900 * @param puTscNow Where to return the TSC value that the return
901 * value is relative to. This is delta adjusted.
902 * @param puDeadlineVersion Where to return the deadline "version" number.
903 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
904 * to check if the absolute deadline is still up to
905 * date and the caller can skip calling this
906 * function.
907 * @thread EMT.
908 * @remarks May set the timer and virtual sync FFs.
909 */
910VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
911 uint64_t *puDeadlineVersion, uint64_t *puTscNow)
912{
913 uint64_t cNsToDeadlineTmp; /* Try to convince the compiler to skip the if tests. */
914 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
915 *pcNsToDeadline = cNsToDeadlineTmp;
916 return u64Now;
917}
918
919
920/**
921 * Gets the number of nano seconds to the next virtual sync deadline.
922 *
923 * @returns The number of TMCLOCK_VIRTUAL ticks.
924 * @param pVM The cross context VM structure.
925 * @param puTscNow Where to return the TSC value that the return
926 * value is relative to. This is delta adjusted.
927 * @param puDeadlineVersion Where to return the deadline "version" number.
928 * Use with TMVirtualSyncIsCurrentDeadlineVersion()
929 * to check if the absolute deadline is still up to
930 * date and the caller can skip calling this
931 * function.
932 * @thread EMT.
933 * @remarks May set the timer and virtual sync FFs.
934 */
935VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
936{
937 uint64_t cNsToDeadline;
938 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
939 return cNsToDeadline;
940}
941
942
943/**
944 * Checks if the given deadline is still current.
945 *
946 * @retval true if the deadline is still current.
947 * @retval false if the deadline is outdated.
948 * @param pVM The cross context VM structure.
949 * @param uDeadlineVersion The deadline version to check.
950 */
951VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
952{
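 /* The deadline "version" handed out by tmVirtualSyncGetEx is simply the absolute
    expire time of the head virtual sync timer, so the deadline is current for as
    long as that value hasn't moved. */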
953 /** @todo Try use ASMAtomicUoReadU64 instead. */
954 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
955 return u64Expire == uDeadlineVersion;
956}
957
958
959/**
960 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
961 *
962 * @return The current lag.
963 * @param pVM The cross context VM structure.
964 */
965VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
966{
967 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
968}
969
970
971/**
972 * Get the current catch-up percent.
973 *
974 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
975 * @param pVM The cross context VM structure.
976 */
977VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
978{
979 if (pVM->tm.s.fVirtualSyncCatchUp)
980 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
981 return 0;
982}
983
984
985/**
986 * Gets the current TMCLOCK_VIRTUAL frequency.
987 *
988 * @returns The frequency.
989 * @param pVM The cross context VM structure.
990 */
991VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
992{
993 NOREF(pVM);
994 return TMCLOCK_FREQ_VIRTUAL;
995}
996
997
998/**
999 * Worker for TMR3PauseClocks.
1000 *
1001 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1002 * @param pVM The cross context VM structure.
1003 */
1004int tmVirtualPauseLocked(PVMCC pVM)
1005{
1006 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
1007 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1008 if (c == 0)
1009 {
1010 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
1011 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
1012 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
1013 }
1014 return VINF_SUCCESS;
1015}
1016
1017
1018/**
1019 * Worker for TMR3ResumeClocks.
1020 *
1021 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
1022 * @param pVM The cross context VM structure.
1023 */
1024int tmVirtualResumeLocked(PVMCC pVM)
1025{
1026 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
1027 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
1028 if (c == 1)
1029 {
1030 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
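 /* Rebase the clock: the new offset is chosen such that raw-now minus the offset
    equals the virtual time we paused at, so the clock resumes exactly where it
    stopped. */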
1031 pVM->tm.s.u64VirtualRawPrev = 0;
1032 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
1033 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
1034 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
1035 }
1036 return VINF_SUCCESS;
1037}
1038
1039
1040/**
1041 * Converts from virtual ticks to nanoseconds.
1042 *
1043 * @returns nanoseconds.
1044 * @param pVM The cross context VM structure.
1045 * @param u64VirtualTicks The virtual ticks to convert.
1046 * @remark There could be rounding errors here. We just do a simple integer divide
1047 * without any adjustments.
1048 */
1049VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
1050{
1051 NOREF(pVM);
1052 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1053 return u64VirtualTicks;
1054}
1055
1056
1057/**
1058 * Converts from virtual ticks to microseconds.
1059 *
1060 * @returns microseconds.
1061 * @param pVM The cross context VM structure.
1062 * @param u64VirtualTicks The virtual ticks to convert.
1063 * @remark There could be rounding errors here. We just do a simple integer divide
1064 * without any adjustments.
1065 */
1066VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
1067{
1068 NOREF(pVM);
1069 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1070 return u64VirtualTicks / 1000;
1071}
1072
1073
1074/**
1075 * Converts from virtual ticks to milliseconds.
1076 *
1077 * @returns milliseconds.
1078 * @param pVM The cross context VM structure.
1079 * @param u64VirtualTicks The virtual ticks to convert.
1080 * @remark There could be rounding errors here. We just do a simple integer divide
1081 * without any adjustments.
1082 */
1083VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
1084{
1085 NOREF(pVM);
1086 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1087 return u64VirtualTicks / 1000000;
1088}
1089
1090
1091/**
1092 * Converts from nanoseconds to virtual ticks.
1093 *
1094 * @returns virtual ticks.
1095 * @param pVM The cross context VM structure.
1096 * @param u64NanoTS The nanosecond value to convert.
1097 * @remark There could be rounding and overflow errors here.
1098 */
1099VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
1100{
1101 NOREF(pVM);
1102 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1103 return u64NanoTS;
1104}
1105
1106
1107/**
1108 * Converts from microseconds to virtual ticks.
1109 *
1110 * @returns virtual ticks.
1111 * @param pVM The cross context VM structure.
1112 * @param u64MicroTS The microsecond value to convert.
1113 * @remark There could be rounding and overflow errors here.
1114 */
1115VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
1116{
1117 NOREF(pVM);
1118 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1119 return u64MicroTS * 1000;
1120}
1121
1122
1123/**
1124 * Converts from milliseconds to virtual ticks.
1125 *
1126 * @returns virtual ticks.
1127 * @param pVM The cross context VM structure.
1128 * @param u64MilliTS The millisecond value to convert.
1129 * @remark There could be rounding and overflow errors here.
1130 */
1131VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1132{
1133 NOREF(pVM);
1134 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1135 return u64MilliTS * 1000000;
1136}
1137