VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@ 92561

Last change on this file since 92561 was 90346, checked in by vboxsync, 3 years ago:
  • VMM: Pass pVM to PDMCritSect APIs. bugref:9218 bugref:10074
  • DrvNetShaper: Do bandwidth allocation via PDMDrvHlp. bugref:10074

Properties: svn:eol-style set to native; svn:keywords set to Id Revision
File size: 42.7 KB
/* $Id: TMAllVirtual.cpp 90346 2021-07-26 19:55:53Z vboxsync $ */
/** @file
 * TM - Timeout Manager, Virtual Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgftrace.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif
#include "TMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>

#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>


/**
 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
 */
DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
                                                  uint64_t u64PrevNanoTS)
{
    PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
    pData->cBadPrev++;
    if ((int64_t)u64DeltaPrev < 0)
        LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
                u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
    else
        Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
             u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
}


/**
 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
 *
 * This is the initial worker, so the first call in each context ends up here.
 * It is also used should the delta rating of the host CPUs change or if the
 * fGetGipCpu feature the current worker relies upon becomes unavailable.  The
 * last two events may occur as CPUs are taken online.
 */
DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra)
{
    PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));

    /*
     * We require a valid GIP for the selection below.  Invalid GIP is fatal.
     */
    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
    AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
    AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
    AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
                   ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));

    /*
     * Determine the new worker.
     */
    PFNTIMENANOTSINTERNAL pfnWorker;
    bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
    switch (pGip->u32Mode)
    {
        case SUPGIPMODE_SYNC_TSC:
        case SUPGIPMODE_INVARIANT_TSC:
#ifdef IN_RING0
            if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
                pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta   : RTTimeNanoTSLegacySyncInvarNoDelta;
            else
                pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
#else
            if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
                pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                          ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta             : RTTimeNanoTSLegacySyncInvarNoDelta
                          : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
            else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
                pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                          ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta            : RTTimeNanoTSLegacySyncInvarNoDelta
                          : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
            else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
                pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                          ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta                : RTTimeNanoTSLegacySyncInvarNoDelta
                          : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt0B : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt0B;
            else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
                pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                          ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta                       : RTTimeNanoTSLegacySyncInvarNoDelta
                          : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicIdExt8000001E : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicIdExt8000001E;
            else
                pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                          ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta            : RTTimeNanoTSLegacySyncInvarNoDelta
                          : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
#endif
            break;

        case SUPGIPMODE_ASYNC_TSC:
#ifdef IN_RING0
            pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
#else
            if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
                pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim     : RTTimeNanoTSLegacyAsyncUseIdtrLim;
            else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
                pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp      : RTTimeNanoTSLegacyAsyncUseRdtscp;
            else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
                pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
            else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_0B)
                pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt0B : RTTimeNanoTSLegacyAsyncUseApicIdExt0B;
            else if (pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID_EXT_8000001E)
                pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicIdExt8000001E : RTTimeNanoTSLegacyAsyncUseApicIdExt8000001E;
            else
                pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId      : RTTimeNanoTSLegacyAsyncUseApicId;
#endif
            break;

        default:
            AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
    }

    /*
     * Update the pfnVirtualGetRaw pointer and call the worker we selected.
     */
    ASMAtomicWritePtr((void * volatile *)&CTX_SUFF(pVM->tm.s.pfnVirtualGetRaw), (void *)(uintptr_t)pfnWorker);
    return pfnWorker(pData, pExtra);
}


/**
 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
 */
DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra,
                                                              uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
{
    PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
    AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x pExtra=%p\n", pVM, idApic, iCpuSet, iGipCpu, pExtra));
#ifndef _MSC_VER
    return UINT64_MAX;
#endif
}


/**
 * Wrapper around the IPRT GIP time methods.
 */
DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVMCC pVM)
{
# ifdef IN_RING3
    uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData), NULL /*pExtra*/);
# else  /* !IN_RING3 */
    uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
    uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData), NULL /*pExtra*/);
    if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
        VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
# endif /* !IN_RING3 */
    /*DBGFTRACE_POS_U64(pVM, u64);*/
    return u64;
}


/**
 * Wrapper around the IPRT GIP time methods, extended version.
 */
DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
{
    RTITMENANOTSEXTRA Extra;
# ifdef IN_RING3
    uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData), &Extra);
# else  /* !IN_RING3 */
    uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
    uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData), &Extra);
    if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
        VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
# endif /* !IN_RING3 */
    if (puTscNow)
        *puTscNow = Extra.uTSCValue;
    /*DBGFTRACE_POS_U64(pVM, u64);*/
    return u64;
}


/**
 * Get the time when we're not running at 100%
 *
 * @returns The timestamp.
 * @param   pVM         The cross context VM structure.
 * @param   puTscNow    Where to return the TSC corresponding to the returned
 *                      timestamp (delta adjusted). Optional.
 */
static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
{
    /*
     * Recalculate the RTTimeNanoTS() value for the period where
     * warp drive has been enabled.
     */
    uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
    u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
    u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
    u64 /= 100;
    u64 += pVM->tm.s.u64VirtualWarpDriveStart;
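    /* Example (hypothetical numbers): with u32VirtualWarpDrivePercentage = 200 and
       10e9 ns of raw time elapsed since u64VirtualWarpDriveStart, the scaled period
       is 10e9 * 200 / 100 = 20e9 ns, i.e. guest time advances twice as fast as host
       time while warp drive is active. */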

    /*
     * Now we apply the virtual time offset.
     * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
     * machine started if it had been running continuously without any suspends.)
     */
    u64 -= pVM->tm.s.u64VirtualOffset;
    return u64;
}


/**
 * Get the raw virtual time.
 *
 * @returns The current time stamp.
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
{
    if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
        return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
    return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
}


/**
 * Get the raw virtual time, extended version.
 *
 * @returns The current time stamp.
 * @param   pVM         The cross context VM structure.
 * @param   puTscNow    Where to return the TSC corresponding to the returned
 *                      timestamp (delta adjusted). Optional.
 */
DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
{
    if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
        return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
    return tmVirtualGetRawNonNormal(pVM, puTscNow);
}


/**
 * Inlined version of tmVirtualGetEx.
 */
DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
        u64 = tmVirtualGetRaw(pVM);

        /*
         * Use the chance to check for expired timers.
         */
        if (fCheckTimers)
        {
            PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
            if (    !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
                &&  !pVM->tm.s.fRunningQueues
                &&  (   pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64
                     || (   pVM->tm.s.fVirtualSyncTicking
                         && pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
                        )
                    )
                &&  !pVM->tm.s.fRunningQueues
               )
            {
                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
                Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
                VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
            }
        }
    }
    else
        u64 = pVM->tm.s.u64Virtual;
    return u64;
}


/**
 * Gets the current TMCLOCK_VIRTUAL time
 *
 * @returns The timestamp.
 * @param   pVM     The cross context VM structure.
 *
 * @remark  While the flow of time will never go backwards, the speed of the
 *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
 *          influenced by power saving (SpeedStep, PowerNow!), while the former
 *          makes use of TSC and kernel timers.
 */
VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
{
    return tmVirtualGet(pVM, true /*fCheckTimers*/);
}


/**
 * Gets the current TMCLOCK_VIRTUAL time without checking
 * timers or anything.
 *
 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
 *
 * @returns The timestamp.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks See TMVirtualGet.
 */
VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
{
    return tmVirtualGet(pVM, false /*fCheckTimers*/);
}


/**
 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
 *
 * @returns Host nano second count.
 * @param   pVM                     The cross context VM structure.
 * @param   cVirtTicksToDeadline    The TMCLOCK_VIRTUAL interval.
 */
DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
{
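    /* Example (hypothetical numbers): at 200% warp drive the guest clock runs twice
       as fast as the host clock, so a deadline 1000000 virtual ticks away is only
       1000000 * 100 / 200 = 500000 host nanoseconds away. */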
    if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
        return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
    return cVirtTicksToDeadline;
}


/**
 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
 *
 * @returns The timestamp.
 * @param   pVM                 The cross context VM structure.
 * @param   u64                 raw virtual time.
 * @param   off                 offVirtualSync.
 * @param   pcNsToDeadline      Where to return the number of nano seconds to
 *                              the next virtual sync timer deadline.  Can be
 *                              NULL.
 * @param   pnsAbsDeadline      Where to return the absolute deadline.
 *                              Optional.
 */
DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
                                                         uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
{
    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);

    /*
     * Don't make updates until we've checked the timer queue.
     */
    bool            fUpdatePrev = true;
    bool            fUpdateOff  = true;
    bool            fStop       = false;
    const uint64_t  u64Prev     = pVM->tm.s.u64VirtualSyncCatchUpPrev;
    uint64_t        u64Delta    = u64 - u64Prev;
    if (RT_LIKELY(!(u64Delta >> 32)))
    {
        uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
        if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
        {
            off -= u64Sub;
            Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
        }
        else
        {
            /* we've completely caught up. */
            STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
            off = pVM->tm.s.offVirtualSyncGivenUp;
            fStop = true;
            Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
        }
    }
    else
    {
        /* More than 4 seconds since last time (or negative), ignore it. */
        fUpdateOff = false;
        fUpdatePrev = !(u64Delta & RT_BIT_64(63));
        Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
    }
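    /* Example (hypothetical numbers): with u32VirtualSyncCatchUpPercentage = 100, an
       elapsed u64Delta of 2000000 ns gives u64Sub = 2000000 * 100 / 100 = 2000000 ns,
       so the lag (off) shrinks by as much as the virtual clock advanced, i.e. the
       virtual sync clock effectively runs at double speed until it has caught up. */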

    /*
     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.  The current
     * approach is to never pass the head timer.  So, when we do, we stop the clock and
     * set the timer pending flag.
     */
    u64 -= off;

    uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
    if (u64Last > u64)
    {
        u64 = u64Last + 1;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
    }

    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (pnsAbsDeadline)
        *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
                                        thru this code over and over again even if there aren't any timer changes. */
    if (u64 < u64Expire)
    {
        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
        if (fUpdateOff)
            ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
        if (fStop)
            ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
        if (fUpdatePrev)
            ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
        if (pcNsToDeadline)
        {
            uint64_t cNsToDeadline = u64Expire - u64;
            if (pVM->tm.s.fVirtualSyncCatchUp)
                cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
                                                        pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
            *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
        }
        PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
    }
    else
    {
        u64 = u64Expire;
        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);

        VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
        PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
        Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);

        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
#ifdef IN_RING3
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
    }
    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);

    Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
    DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
    return u64;
}


/**
 * tmVirtualSyncGetEx worker for when we get the lock.
 *
 * @returns timestamp.
 * @param   pVM                 The cross context VM structure.
 * @param   u64                 The virtual clock timestamp.
 * @param   pcNsToDeadline      Where to return the number of nano seconds to
 *                              the next virtual sync timer deadline.  Can be
 *                              NULL.
 * @param   pnsAbsDeadline      Where to return the absolute deadline.
 *                              Optional.
 */
DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
{
    /*
     * Not ticking?
     */
    if (!pVM->tm.s.fVirtualSyncTicking)
    {
        u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
        PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
        if (pnsAbsDeadline)
            *pnsAbsDeadline = u64;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
        Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
        DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
        return u64;
    }

    /*
     * Handle catch up in a separate function.
     */
    uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);

    /*
     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.  The current
     * approach is to never pass the head timer.  So, when we do, we stop the clock and
     * set the timer pending flag.
     */
    u64 -= off;

    uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
    if (u64Last > u64)
    {
        u64 = u64Last + 1;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
    }

    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (pnsAbsDeadline)
        *pnsAbsDeadline = u64Expire;
    if (u64 < u64Expire)
    {
        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
        PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
        if (pcNsToDeadline)
            *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
    }
    else
    {
        u64 = u64Expire;
        ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);

        VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
        PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
        Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);

#ifdef IN_RING3
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
    }
    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
    Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
    DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
    return u64;
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM                 The cross context VM structure.
 * @param   fCheckTimers        Check timers or not
 * @param   pcNsToDeadline      Where to return the number of nano seconds to
 *                              the next virtual sync timer deadline.  Can be
 *                              NULL.
 * @param   pnsAbsDeadline      Where to return the absolute deadline.
 *                              Optional.
 * @param   puTscNow            Where to return the TSC corresponding to the
 *                              returned timestamp (delta adjusted). Optional.
 * @thread  EMT.
 */
DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
                                        uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
{
    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);

    uint64_t u64;
    if (!pVM->tm.s.fVirtualSyncTicking)
    {
        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
        u64 = pVM->tm.s.u64VirtualSync;
        DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
        return u64;
    }

    /*
     * Query the virtual clock and do the usual expired timer check.
     */
    Assert(pVM->tm.s.cVirtualTicking);
    u64 = tmVirtualGetRawEx(pVM, puTscNow);
    if (fCheckTimers)
    {
        PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
        if (    !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
            &&  pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire <= u64)
        {
            Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
#endif
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
        }
    }

    /*
     * If we can get the lock, get it.  The result is much more reliable.
     *
     * Note! This is where all clock source devices branch off because they
     *       will be owning the lock already.  The 'else' is taken by code
     *       which is less picky or hasn't been adjusted yet.
     */
    /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
     *        here and the remainder of this function in a static worker. */
    if (PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
        return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);

    /*
     * When the clock is ticking, not doing catch ups and not running into an
     * expired time, we can get away without locking.  Try this first.
     */
    uint64_t off;
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
            {
                off = u64 - off;
                uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
                if (off < u64Expire)
                {
                    if (pnsAbsDeadline)
                        *pnsAbsDeadline = u64Expire;
                    if (pcNsToDeadline)
                        *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
                    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
                    Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
                    DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
                    return off;
                }
            }
        }
    }
    else
    {
        off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
        {
            if (pcNsToDeadline)
                *pcNsToDeadline = 0;
            if (pnsAbsDeadline)
                *pnsAbsDeadline = off;
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
            Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
            DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
            return off;
        }
    }

    /*
     * Read the offset and adjust if we're playing catch-up.
     *
     * The catch-up adjustment works by decrementing the offset by a percentage of
     * the time elapsed since the previous TMVirtualGetSync call.
     *
     * It's possible to get a very long or even negative interval between two reads
     * for the following reasons:
     *  - Someone might have suspended the process execution, frequently the case when
     *    debugging the process.
     *  - We might be on a different CPU whose TSC isn't quite in sync with the
     *    other CPUs in the system.
     *  - Another thread is racing us and we might have been preempted while inside
     *    this function.
     *
     * Assuming nano second virtual time, we can simply ignore any intervals which have
     * any of the upper 32 bits set.
     */
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    int cOuterTries = 42;
    for (;; cOuterTries--)
    {
        /* Try grab the lock, things get simpler when owning the lock. */
        int rcLock = PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock);
        if (RT_SUCCESS_NP(rcLock))
            return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);

        /* Re-check the ticking flag. */
        if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
            if (   ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
                && cOuterTries > 0)
                continue;
            if (pcNsToDeadline)
                *pcNsToDeadline = 0;
            if (pnsAbsDeadline)
                *pnsAbsDeadline = off;
            Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
            DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
            return off;
        }

        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            uint32_t const u32Pct     = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64 - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                    {
                        off -= u64Sub;
                        Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
                    }
                    else
                    {
                        /* we've completely caught up. */
                        STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
                        off = offGivenUp;
                        Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
                    }
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
                if (cOuterTries <= 0)
                    break; /* enough */
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
            break; /* Got a consistent offset */
        else if (cOuterTries <= 0)
            break; /* enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);

    /*
     * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.  The current
     * approach is to never pass the head timer.  So, when we do, we stop the clock and
     * set the timer pending flag.
     */
    u64 -= off;
/** @todo u64VirtualSyncLast */
    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (pnsAbsDeadline)
        *pnsAbsDeadline = u64Expire;
    if (u64 >= u64Expire)
    {
        PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
            Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        }
        else
            Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        if (pcNsToDeadline)
            *pcNsToDeadline = 0;
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
    }
    else if (pcNsToDeadline)
    {
        uint64_t cNsToDeadline = u64Expire - u64;
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
            cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
                                                    ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
        *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
    }

    Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
    DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
    return u64;
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM     The cross context VM structure.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
{
    return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
 * TMCLOCK_VIRTUAL.
 *
 * @returns The timestamp.
 * @param   pVM     The cross context VM structure.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
{
    return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
 * TMCLOCK_VIRTUAL, also returning corresponding TSC value.
 *
 * @returns The timestamp.
 * @param   pVM         The cross context VM structure.
 * @param   puTscNow    Where to return the TSC value that the return
 *                      value is relative to.  This is delta adjusted.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
{
    return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM             The cross context VM structure.
 * @param   fCheckTimers    Check timers on the virtual clock or not.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
{
    return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
 * without checking timers running on TMCLOCK_VIRTUAL.
 *
 * @returns The timestamp.
 * @param   pVM                 The cross context VM structure.
 * @param   pcNsToDeadline      Where to return the number of nano seconds to
 *                              the next virtual sync timer deadline.
 * @param   puTscNow            Where to return the TSC value that the return
 *                              value is relative to.  This is delta adjusted.
 * @param   puDeadlineVersion   Where to return the deadline "version" number.
 *                              Use with TMVirtualSyncIsCurrentDeadlineVersion()
 *                              to check if the absolute deadline is still up to
 *                              date and the caller can skip calling this
 *                              function.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
                                                           uint64_t *puDeadlineVersion, uint64_t *puTscNow)
{
    uint64_t cNsToDeadlineTmp;  /* try to convince the compiler to skip the if tests. */
    uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
    *pcNsToDeadline = cNsToDeadlineTmp;
    return u64Now;
}


/**
 * Gets the number of nano seconds to the next virtual sync deadline.
 *
 * @returns The number of TMCLOCK_VIRTUAL ticks.
 * @param   pVM                 The cross context VM structure.
 * @param   puTscNow            Where to return the TSC value that the return
 *                              value is relative to.  This is delta adjusted.
 * @param   puDeadlineVersion   Where to return the deadline "version" number.
 *                              Use with TMVirtualSyncIsCurrentDeadlineVersion()
 *                              to check if the absolute deadline is still up to
 *                              date and the caller can skip calling this
 *                              function.
 * @thread  EMT.
 * @remarks May set the timer and virtual sync FFs.
 */
VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
{
    uint64_t cNsToDeadline;
    tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
    return cNsToDeadline;
}


/**
 * Checks if the given deadline is still current.
 *
 * @retval  true if the deadline is still current.
 * @retval  false if the deadline is outdated.
 * @param   pVM                 The cross context VM structure.
 * @param   uDeadlineVersion    The deadline version to check.
 */
VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
{
    /** @todo Try use ASMAtomicUoReadU64 instead. */
    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    return u64Expire == uDeadlineVersion;
}


/**
 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
 *
 * @return  The current lag.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
{
    return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
}


/**
 * Get the current catch-up percent.
 *
 * @return  The current catch-up percent. 0 means running at the same speed as the virtual clock.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
{
    if (pVM->tm.s.fVirtualSyncCatchUp)
        return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
    return 0;
}


/**
 * Gets the current TMCLOCK_VIRTUAL frequency.
 *
 * @returns The frequency.
 * @param   pVM     The cross context VM structure.
 */
VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
{
    NOREF(pVM);
    return TMCLOCK_FREQ_VIRTUAL;
}


/**
 * Worker for TMR3PauseClocks.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 */
int tmVirtualPauseLocked(PVMCC pVM)
{
    uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
    AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
    if (c == 0)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
        pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
    }
    return VINF_SUCCESS;
}


/**
 * Worker for TMR3ResumeClocks.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 */
int tmVirtualResumeLocked(PVMCC pVM)
{
    uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
    AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
    if (c == 1)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
        pVM->tm.s.u64VirtualRawPrev        = 0;
        pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
        pVM->tm.s.u64VirtualOffset         = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
        ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
    }
    return VINF_SUCCESS;
}


/**
 * Converts from virtual ticks to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pVM             The cross context VM structure.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
{
    NOREF(pVM);
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks;
}


/**
 * Converts from virtual ticks to microseconds.
 *
 * @returns microseconds.
 * @param   pVM             The cross context VM structure.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
{
    NOREF(pVM);
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks / 1000;
}


/**
 * Converts from virtual ticks to milliseconds.
 *
 * @returns milliseconds.
 * @param   pVM             The cross context VM structure.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer divide
 *          without any adjustments.
 */
VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
{
    NOREF(pVM);
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks / 1000000;
}


/**
 * Converts from nanoseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The cross context VM structure.
 * @param   u64NanoTS   The nanosecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
{
    NOREF(pVM);
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64NanoTS;
}


1084 * Converts from microseconds to virtual ticks.
1085 *
1086 * @returns virtual ticks.
1087 * @param pVM The cross context VM structure.
1088 * @param u64MicroTS The microsecond value ticks to convert.
1089 * @remark There could be rounding and overflow errors here.
1090 */
1091VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
1092{
1093 NOREF(pVM);
1094 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1095 return u64MicroTS * 1000;
1096}
1097
1098
1099/**
1100 * Converts from milliseconds to virtual ticks.
1101 *
1102 * @returns virtual ticks.
1103 * @param pVM The cross context VM structure.
1104 * @param u64MilliTS The millisecond value ticks to convert.
1105 * @remark There could be rounding and overflow errors here.
1106 */
1107VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1108{
1109 NOREF(pVM);
1110 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1111 return u64MilliTS * 1000000;
1112}
1113