VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@19979

Last change on this file since 19979 was 19810, checked in by vboxsync, 16 years ago

TM: TMTimerPoll hacking.

1/* $Id: TMAllVirtual.cpp 19810 2009-05-19 09:59:20Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_TM
27#include <VBox/tm.h>
28#ifdef IN_RING3
29# include <VBox/rem.h>
30# include <iprt/thread.h>
31#endif
32#include "TMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/vmm.h>
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <VBox/sup.h>
38
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42
43
44
45/**
46 * Helper function that's used by the assembly routines when something goes bust.
47 *
48 * @param pData Pointer to the data structure.
49 * @param u64NanoTS The calculated nano ts.
50 * @param u64DeltaPrev The delta relative to the previously returned timestamp.
51 * @param u64PrevNanoTS The previously returned timestamp (as it was read).
52 */
53DECLEXPORT(void) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev, uint64_t u64PrevNanoTS)
54{
55 //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
56 pData->cBadPrev++;
57 if ((int64_t)u64DeltaPrev < 0)
58 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64\n",
59 u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
60 else
61 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 (debugging?)\n",
62 u64DeltaPrev, u64PrevNanoTS, u64NanoTS));
63}
64
65
66/**
67 * Called the first time somebody asks for the time or when the GIP
68 * is mapped/unmapped.
69 *
70 * This should never ever happen.
71 */
72DECLEXPORT(uint64_t) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData)
73{
74 //PVM pVM = (PVM)((uint8_t *)pData - RT_OFFSETOF(VM, CTXALLSUFF(s.tm.VirtualGetRawData)));
75 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
76 AssertFatalMsgFailed(("pGip=%p u32Magic=%#x\n", pGip, VALID_PTR(pGip) ? pGip->u32Magic : 0));
77}
78
79
80#if 1
81
82/**
83 * Wrapper around the IPRT GIP time methods.
84 */
85DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVM pVM)
86{
87#ifdef IN_RING3
88 return CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData));
89# else /* !IN_RING3 */
90 uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
91 uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
92 if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
93 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
94 return u64;
95# endif /* !IN_RING3 */
96}
97
98#else
99
100/**
101 * This is (mostly) the same as rtTimeNanoTSInternal() except
102 * for the two globals which live in TM.
103 *
104 * @returns Nanosecond timestamp.
105 * @param pVM The VM handle.
106 */
107static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
108{
109 uint64_t u64Delta;
110 uint32_t u32NanoTSFactor0;
111 uint64_t u64TSC;
112 uint64_t u64NanoTS;
113 uint32_t u32UpdateIntervalTSC;
114 uint64_t u64PrevNanoTS;
115
116 /*
117 * Read the GIP data and the previous value.
118 */
119 for (;;)
120 {
121 uint32_t u32TransactionId;
122 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
123#ifdef IN_RING3
124 if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
125 return RTTimeSystemNanoTS();
126#endif
127
128 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
129 {
130 u32TransactionId = pGip->aCPUs[0].u32TransactionId;
131#ifdef RT_OS_L4
132 Assert((u32TransactionId & 1) == 0);
133#endif
134 u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
135 u64NanoTS = pGip->aCPUs[0].u64NanoTS;
136 u64TSC = pGip->aCPUs[0].u64TSC;
137 u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
138 u64Delta = ASMReadTSC();
139 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
140 if (RT_UNLIKELY( pGip->aCPUs[0].u32TransactionId != u32TransactionId
141 || (u32TransactionId & 1)))
142 continue;
143 }
144 else
145 {
146 /* SUPGIPMODE_ASYNC_TSC */
147 PSUPGIPCPU pGipCpu;
148
149 uint8_t u8ApicId = ASMGetApicId();
150 if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
151 pGipCpu = &pGip->aCPUs[u8ApicId];
152 else
153 {
154 AssertMsgFailed(("%x\n", u8ApicId));
155 pGipCpu = &pGip->aCPUs[0];
156 }
157
158 u32TransactionId = pGipCpu->u32TransactionId;
159#ifdef RT_OS_L4
160 Assert((u32TransactionId & 1) == 0);
161#endif
162 u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
163 u64NanoTS = pGipCpu->u64NanoTS;
164 u64TSC = pGipCpu->u64TSC;
165 u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
166 u64Delta = ASMReadTSC();
167 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
168#ifdef IN_RC
169 Assert(!(ASMGetFlags() & X86_EFL_IF));
170#else
171 if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
172 continue;
173 if (RT_UNLIKELY( pGipCpu->u32TransactionId != u32TransactionId
174 || (u32TransactionId & 1)))
175 continue;
176#endif
177 }
178 break;
179 }
180
181 /*
182 * Calc NanoTS delta.
183 */
184 u64Delta -= u64TSC;
185 if (u64Delta > u32UpdateIntervalTSC)
186 {
187 /*
188 * We've expired the interval, cap it. If we're here for the 2nd
189 * time without any GIP update in between, the checks against
190 * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
191 */
192 u64Delta = u32UpdateIntervalTSC;
193 }
194#if !defined(_MSC_VER) || defined(RT_ARCH_AMD64) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
195 u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
196 u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
197#else
198 __asm
199 {
200 mov eax, dword ptr [u64Delta]
201 mul dword ptr [u32NanoTSFactor0]
202 div dword ptr [u32UpdateIntervalTSC]
203 mov dword ptr [u64Delta], eax
204 xor edx, edx
205 mov dword ptr [u64Delta + 4], edx
206 }
207#endif
208
209 /*
210 * Calculate the time and compare it with the previously returned value.
211 *
212 * Since this function is called *very* frequently when the VM is running
213 * and then mostly on EMT, we can restrict the valid range of the delta
214 * (-1s to 2*GipUpdates) and simplify/optimize the default path.
215 */
216 u64NanoTS += u64Delta;
217 uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
218 if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
219 /* frequent - less than 1s since last call. */;
220 else if ( (int64_t)u64DeltaPrev < 0
221 && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
222 {
223 /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
224 ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps);
225 u64NanoTS = u64PrevNanoTS + 1;
226#ifndef IN_RING3
227 VM_FF_SET(pVM, VM_FF_TO_R3); /* S10 hack */
228#endif
229 }
230 else if (u64PrevNanoTS)
231 {
232 /* Something has gone bust; if the offset is negative, it's really bad. */
233 ASMAtomicIncU32(&pVM->tm.s.CTX_SUFF(VirtualGetRawData).cBadPrev);
234 if ((int64_t)u64DeltaPrev < 0)
235 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
236 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
237 else
238 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
239 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
240#ifdef DEBUG_bird
241 /** @todo there are some hiccups during boot and reset that can cause 2-5 second delays. Investigate... */
242 AssertMsg(u64PrevNanoTS > UINT64_C(100000000000) /* 100s */,
243 ("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
244 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
245#endif
246 }
247 /* else: We're resuming (see TMVirtualResume). */
248 if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
249 return u64NanoTS;
250
251 /*
252 * Attempt updating the previous value, provided we're still ahead of it.
253 *
254 * There is no point in recalculating u64NanoTS because we got preempted or
255 * raced somebody while the GIP was updated, since these are events
256 * that might occur at any point in the return path as well.
257 */
258 for (int cTries = 50;;)
259 {
260 u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
261 if (u64PrevNanoTS >= u64NanoTS)
262 break;
263 if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
264 break;
265 AssertBreak(--cTries <= 0);
266 if (cTries < 25 && !VM_IS_EMT(pVM)) /* give up early */
267 break;
268 }
269
270 return u64NanoTS;
271}
272
273#endif
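
/*
 * A minimal sketch of what the GIP based timestamp above boils down to. The
 * helper name tmVirtualSketchGipToNanoTS is made up for illustration and the
 * block is disabled, just like the reference implementation above: the TSC
 * ticks elapsed since the GIP entry was last updated are scaled by the GIP's
 * ns-per-interval / tsc-per-interval ratio and added to the entry's nanosecond
 * timestamp.
 */
#if 0 /* illustrative sketch only, not part of the build */
static uint64_t tmVirtualSketchGipToNanoTS(uint64_t u64Tsc, uint64_t u64GipTsc, uint64_t u64GipNanoTS,
                                           uint32_t u32UpdateIntervalTSC, uint32_t u32UpdateIntervalNS)
{
    uint64_t u64Delta = u64Tsc - u64GipTsc;         /* TSC ticks since the GIP entry was updated. */
    if (u64Delta > u32UpdateIntervalTSC)            /* Cap runaway deltas, same as the code above. */
        u64Delta = u32UpdateIntervalTSC;
    /* delta_ns = delta_tsc * interval_ns / interval_tsc */
    return u64GipNanoTS + ASMMultU64ByU32DivByU32(u64Delta, u32UpdateIntervalNS, u32UpdateIntervalTSC);
}
#endif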
274
275
276/**
277 * Get the time when we're not running at 100%
278 *
279 * @returns The timestamp.
280 * @param pVM The VM handle.
281 */
282static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
283{
284 /*
285 * Recalculate the RTTimeNanoTS() value for the period where
286 * warp drive has been enabled.
287 */
288 uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
289 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
290 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
291 u64 /= 100;
292 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
293
294 /*
295 * Now we apply the virtual time offset.
296 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
297 * machine started, as if it had been running continuously without any suspends.)
298 */
299 u64 -= pVM->tm.s.u64VirtualOffset;
300 return u64;
301}
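
/*
 * A worked example of the warp drive scaling above, using hypothetical numbers:
 * with a 200% warp drive engaged at raw time 10s and the raw clock now reading
 * 13s, u64 becomes (13s - 10s) * 200 / 100 + 10s = 16s, i.e. 3 host seconds are
 * reported as 6 virtual seconds (before u64VirtualOffset is subtracted). The
 * helper below is a disabled sketch of just that scaling step; the name
 * tmVirtualSketchApplyWarpDrive is made up for illustration.
 */
#if 0 /* illustrative sketch only, not part of the build */
static uint64_t tmVirtualSketchApplyWarpDrive(uint64_t u64Raw, uint64_t u64WarpStart, uint32_t u32WarpPct)
{
    /* Only the time elapsed since the warp drive was engaged gets scaled. */
    return (u64Raw - u64WarpStart) * u32WarpPct / 100 + u64WarpStart;
}
#endif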
302
303
304/**
305 * Get the raw virtual time.
306 *
307 * @returns The current time stamp.
308 * @param pVM The VM handle.
309 */
310DECLINLINE(uint64_t) tmVirtualGetRaw(PVM pVM)
311{
312 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
313 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
314 return tmVirtualGetRawNonNormal(pVM);
315}
316
317
318/**
319 * Inlined version of tmVirtualGetEx.
320 */
321DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
322{
323 uint64_t u64;
324 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
325 {
326 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
327 u64 = tmVirtualGetRaw(pVM);
328
329 /*
330 * Use the chance to check for expired timers.
331 */
332 if (fCheckTimers)
333 {
334 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
335 if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
336 && !pVM->tm.s.fRunningQueues
337 && ( pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
338 || ( pVM->tm.s.fVirtualSyncTicking
339 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
340 )
341 )
342 && !pVM->tm.s.fRunningQueues
343 )
344 {
345 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
346 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
347 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
348#ifdef IN_RING3
349 REMR3NotifyTimerPending(pVM, pVCpuDst);
350 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
351#endif
352 }
353 }
354 }
355 else
356 u64 = pVM->tm.s.u64Virtual;
357 return u64;
358}
359
360
361/**
362 * Gets the current TMCLOCK_VIRTUAL time
363 *
364 * @returns The timestamp.
365 * @param pVM VM handle.
366 *
367 * @remark While the flow of time will never go backwards, the rate of
368 * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
369 * influenced by power saving (SpeedStep, PowerNow!), while the former
370 * makes use of TSC and kernel timers.
371 */
372VMMDECL(uint64_t) TMVirtualGet(PVM pVM)
373{
374 return tmVirtualGet(pVM, true /* check timers */);
375}
376
377
378/**
379 * Gets the current TMCLOCK_VIRTUAL time without checking
380 * timers or anything.
381 *
382 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
383 *
384 * @returns The timestamp.
385 * @param pVM VM handle.
386 *
387 * @remarks See TMVirtualGet.
388 */
389VMMDECL(uint64_t) TMVirtualGetNoCheck(PVM pVM)
390{
391 return tmVirtualGet(pVM, false /*fCheckTimers*/);
392}
393
394
395/**
396 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
397 *
398 * @returns The timestamp.
399 * @param pVM VM handle.
400 * @param u64 raw virtual time.
401 * @param off offVirtualSync.
402 */
403DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, uint64_t off)
404{
405 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
406
407 /*
408 * Don't make updates until we've checked the head timer's expire time below.
409 */
410 bool fUpdatePrev = true;
411 bool fUpdateOff = true;
412 bool fStop = false;
413 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
414 uint64_t u64Delta = u64 - u64Prev;
415 if (RT_LIKELY(!(u64Delta >> 32)))
416 {
417 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
418 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
419 {
420 off -= u64Sub;
421 Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
422 }
423 else
424 {
425 /* we've completely caught up. */
426 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
427 off = pVM->tm.s.offVirtualSyncGivenUp;
428 fStop = true;
429 Log4(("TM: %RU64/0: caught up\n", u64));
430 }
431 }
432 else
433 {
434 /* More than 4 seconds since last time (or negative), ignore it. */
435 fUpdateOff = false;
436 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
437 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
438 }
439
440 /*
441 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
442 * approach is to never pass the head timer: when we would, we stop the clock and
443 * set the timer pending flag instead.
444 */
445 u64 -= off;
446 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
447 if (u64 < u64Expire)
448 {
449 if (fUpdateOff)
450 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
451 if (fStop)
452 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
453 if (fUpdatePrev)
454 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
455 tmVirtualSyncUnlock(pVM);
456 }
457 else
458 {
459 u64 = u64Expire;
460 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
461 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
462
463 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
464 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
465 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
466 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
467 Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
468 tmVirtualSyncUnlock(pVM);
469
470#ifdef IN_RING3
471 REMR3NotifyTimerPending(pVM, pVCpuDst);
472 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
473#endif
474 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
475 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
476 }
477 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
478
479 return u64;
480}
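
/*
 * A minimal sketch of the catch-up step above, with hypothetical numbers: at a
 * catch-up percentage of 25 and 4ms of raw virtual time since the previous call,
 * u64Sub = 4ms * 25 / 100 = 1ms, so offVirtualSync shrinks by 1ms and the sync
 * clock effectively ticks at 125% speed until the offset reaches
 * offVirtualSyncGivenUp. The helper name tmVirtualSketchCatchUpStep is made up
 * for illustration.
 */
#if 0 /* illustrative sketch only, not part of the build */
static uint64_t tmVirtualSketchCatchUpStep(uint64_t off, uint64_t u64Elapsed, uint32_t u32Pct, uint64_t offGivenUp)
{
    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Elapsed, u32Pct, 100);
    if (off > u64Sub + offGivenUp)
        return off - u64Sub;        /* still behind, keep catching up */
    return offGivenUp;              /* caught up; never undershoot the given-up offset */
}
#endif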
481
482
483/**
484 * tmVirtualSyncGetEx worker for when we get the lock.
485 *
486 * @returns The timestamp.
487 * @param pVM The VM handle.
488 * @param u64 The virtual clock timestamp.
489 */
490DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVM pVM, uint64_t u64)
491{
492 /*
493 * Not ticking?
494 */
495 if (!pVM->tm.s.fVirtualSyncTicking)
496 {
497 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
498 tmVirtualSyncUnlock(pVM);
499 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
500 return u64;
501 }
502
503 /*
504 * Handle catch up in a separate function.
505 */
506 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
507 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
508 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off);
509
510 /*
511 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
512 * approach is to never pass the head timer: when we would, we stop the clock and
513 * set the timer pending flag instead.
514 */
515 u64 -= off;
516 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
517 if (u64 < u64Expire)
518 tmVirtualSyncUnlock(pVM);
519 else
520 {
521 u64 = u64Expire;
522 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
523 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
524
525 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
526 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
527 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
528 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
529 Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
530 tmVirtualSyncUnlock(pVM);
531
532#ifdef IN_RING3
533 REMR3NotifyTimerPending(pVM, pVCpuDst);
534 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
535#endif
536 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
537 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
538 }
539 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
540 return u64;
541}
542
543
544/**
545 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
546 *
547 * @returns The timestamp.
548 * @param pVM VM handle.
549 * @param fCheckTimers Check timers or not
550 * @thread EMT.
551 */
552DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
553{
554 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
555
556 if (!pVM->tm.s.fVirtualSyncTicking)
557 return pVM->tm.s.u64VirtualSync;
558
559 /*
560 * Query the virtual clock and do the usual expired timer check.
561 */
562 Assert(pVM->tm.s.cVirtualTicking);
563 uint64_t u64 = tmVirtualGetRaw(pVM);
564 if (fCheckTimers)
565 {
566 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
567 if ( !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)
568 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
569 {
570 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
571 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
572#ifdef IN_RING3
573 REMR3NotifyTimerPending(pVM, pVCpuDst);
574 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
575#endif
576 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
577 }
578 }
579
580 /*
581 * When the clock is ticking, not doing catch ups and not running into an
582 * expired time, we can get away without locking. Try this first.
583 */
584 uint64_t off;
585 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
586 {
587 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
588 {
589 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
590 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
591 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
592 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
593 {
594 off = u64 - off;
595 if (off < ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire))
596 {
597 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
598 return off;
599 }
600 }
601 }
602 }
603 else
604 {
605 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
606 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
607 {
608 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
609 return off;
610 }
611 }
612
613 /*
614 * Read the offset and adjust if we're playing catch-up.
615 *
616 * The catch-up adjustment works by decrementing the offset by a percentage of
617 * the time elapsed since the previous TMVirtualGetSync call.
618 *
619 * It's possible to get a very long or even negative interval between two reads
620 * for the following reasons:
621 * - Someone might have suspended the process execution, frequently the case when
622 * debugging the process.
623 * - We might be on a different CPU whose TSC isn't quite in sync with the
624 * other CPUs in the system.
625 * - Another thread is racing us and we might have been preempted while inside
626 * this function.
627 *
628 * Assuming nanosecond virtual time, we can simply ignore any interval that has
629 * any of the upper 32 bits set.
630 */
631 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
632 int cOuterTries = 42;
633 for (;; cOuterTries--)
634 {
635 /* Try grab the lock, things get simpler when owning the lock. */
636 int rcLock = tmVirtualSyncTryLock(pVM);
637 if (RT_SUCCESS_NP(rcLock))
638 return tmVirtualSyncGetLocked(pVM, u64);
639
640 /* Re-check the ticking flag. */
641 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
642 {
643 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
644 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
645 && cOuterTries > 0)
646 continue;
647 return off;
648 }
649
650 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
651 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
652 {
653 /* No changes allowed, try to get a consistent set of parameters. */
654 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
655 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
656 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
657 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
658 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
659 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
660 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
661 || cOuterTries <= 0)
662 {
663 uint64_t u64Delta = u64 - u64Prev;
664 if (RT_LIKELY(!(u64Delta >> 32)))
665 {
666 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
667 if (off > u64Sub + offGivenUp)
668 {
669 off -= u64Sub;
670 Log4(("TM: %RU64/%RU64: sub %RU32 (NoLock)\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
671 }
672 else
673 {
674 /* we've completely caught up. */
675 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
676 off = offGivenUp;
677 Log4(("TM: %RU64/0: caught up\n", u64));
678 }
679 }
680 else
681 /* More than 4 seconds since last time (or negative), ignore it. */
682 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
683
684 /* Check that we're still running and in catch up. */
685 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
686 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
687 break;
688 if (cOuterTries <= 0)
689 break; /* enough */
690 }
691 }
692 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
693 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
694 break; /* Got a consistent offset */
695 else if (cOuterTries <= 0)
696 break; /* enough */
697 }
698 if (cOuterTries <= 0)
699 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
700
701 /*
702 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
703 * approach is to never pass the head timer: when we would, we stop the clock and
704 * set the timer pending flag instead.
705 */
706 u64 -= off;
707 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
708 if (u64 >= u64Expire)
709 {
710 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
711 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
712 {
713 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
714 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
715 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
716#ifdef IN_RING3
717 REMR3NotifyTimerPending(pVM, pVCpuDst);
718 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
719#endif
720 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
721 Log4(("TM: %RU64/%RU64: exp tmr=>ff (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
722 }
723 else
724 Log4(("TM: %RU64/%RU64: exp tmr (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
725 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
726 }
727
728 return u64;
729}
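
/*
 * A minimal sketch of the lockless fast path used by tmVirtualSyncGetEx above.
 * The offset is only trusted if the ticking/catch-up state and the offset itself
 * read back unchanged after sampling; on success the caller still subtracts the
 * offset from the raw virtual time and checks it against the head timer. The
 * helper name tmVirtualSketchTryLocklessOffset is made up for illustration.
 */
#if 0 /* illustrative sketch only, not part of the build */
static bool tmVirtualSketchTryLocklessOffset(PVM pVM, uint64_t *poff)
{
    if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
        && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
    {
        uint64_t off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)   /* re-check: did anything change */
            && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)   /* while we sampled the offset?  */
            &&  off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync))
        {
            *poff = off;
            return true;
        }
    }
    return false; /* state changed under us; take the retry loop / locked path */
}
#endif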
730
731
732/**
733 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
734 *
735 * @returns The timestamp.
736 * @param pVM VM handle.
737 * @thread EMT.
738 * @remarks May set the timer and virtual sync FFs.
739 */
740VMMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
741{
742 return tmVirtualSyncGetEx(pVM, true /* check timers */);
743}
744
745
746/**
747 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
748 * TMCLOCK_VIRTUAL.
749 *
750 * @returns The timestamp.
751 * @param pVM VM handle.
752 * @thread EMT.
753 * @remarks May set the timer and virtual sync FFs.
754 */
755VMMDECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
756{
757 return tmVirtualSyncGetEx(pVM, false /* check timers */);
758}
759
760
761/**
762 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
763 *
764 * @returns The timestamp.
765 * @param pVM VM handle.
766 * @param fCheckTimers Check timers on the virtual clock or not.
767 * @thread EMT.
768 * @remarks May set the timer and virtual sync FFs.
769 */
770VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
771{
772 return tmVirtualSyncGetEx(pVM, fCheckTimers);
773}
774
775
776/**
777 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
778 *
779 * @return The current lag.
780 * @param pVM VM handle.
781 */
782VMMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
783{
784 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
785}
786
787
788/**
789 * Get the current catch-up percent.
790 *
791 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
792 * @param pVM VM handle.
793 */
794VMMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
795{
796 if (pVM->tm.s.fVirtualSyncCatchUp)
797 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
798 return 0;
799}
800
801
802/**
803 * Gets the current TMCLOCK_VIRTUAL frequency.
804 *
805 * @returns The frequency.
806 * @param pVM VM handle.
807 */
808VMMDECL(uint64_t) TMVirtualGetFreq(PVM pVM)
809{
810 return TMCLOCK_FREQ_VIRTUAL;
811}
812
813
814/**
815 * Worker for TMR3PauseClocks.
816 *
817 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
818 * @param pVM The VM handle.
819 */
820int tmVirtualPauseLocked(PVM pVM)
821{
822 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
823 AssertMsgReturn(c < pVM->cCPUs, ("%u vs %u\n", c, pVM->cCPUs), VERR_INTERNAL_ERROR);
824 if (c == 0)
825 {
826 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
827 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
828 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
829 }
830 return VINF_SUCCESS;
831}
832
833
834/**
835 * Worker for TMR3ResumeClocks.
836 *
837 * @returns VINF_SUCCESS or VERR_INTERNAL_ERROR (asserted).
838 * @param pVM The VM handle.
839 */
840int tmVirtualResumeLocked(PVM pVM)
841{
842 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
843 AssertMsgReturn(c <= pVM->cCPUs, ("%u vs %u\n", c, pVM->cCPUs), VERR_INTERNAL_ERROR);
844 if (c == 1)
845 {
846 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
847 pVM->tm.s.u64VirtualRawPrev = 0;
848 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
849 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
850 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
851 }
852 return VINF_SUCCESS;
853}
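
/*
 * A worked example of the pause/resume bookkeeping above, using hypothetical
 * numbers and assuming no warp drive: if the last VCPU stops the clock while
 * the virtual time reads 5s, u64Virtual = 5s is latched. If the clocks are
 * resumed when the raw NanoTS reads 160s, u64VirtualOffset becomes
 * 160s - 5s = 155s, so the next tmVirtualGetRaw() returns about 160s - 155s = 5s
 * and the host time spent suspended never shows up in TMCLOCK_VIRTUAL.
 */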
854
855
856/**
857 * Converts from virtual ticks to nanoseconds.
858 *
859 * @returns nanoseconds.
860 * @param pVM The VM handle.
861 * @param u64VirtualTicks The virtual ticks to convert.
862 * @remark There could be rounding errors here. We just do a simple integer divide
863 * without any adjustments.
864 */
865VMMDECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
866{
867 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
868 return u64VirtualTicks;
869}
870
871
872/**
873 * Converts from virtual ticks to microseconds.
874 *
875 * @returns microseconds.
876 * @param pVM The VM handle.
877 * @param u64VirtualTicks The virtual ticks to convert.
878 * @remark There could be rounding errors here. We just do a simple integer divide
879 * without any adjustments.
880 */
881VMMDECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
882{
883 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
884 return u64VirtualTicks / 1000;
885}
886
887
888/**
889 * Converts from virtual ticks to milliseconds.
890 *
891 * @returns milliseconds.
892 * @param pVM The VM handle.
893 * @param u64VirtualTicks The virtual ticks to convert.
894 * @remark There could be rounding errors here. We just do a simple integer divide
895 * without any adjustments.
896 */
897VMMDECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
898{
899 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
900 return u64VirtualTicks / 1000000;
901}
902
903
904/**
905 * Converts from nanoseconds to virtual ticks.
906 *
907 * @returns virtual ticks.
908 * @param pVM The VM handle.
909 * @param u64NanoTS The nanosecond value to convert.
910 * @remark There could be rounding and overflow errors here.
911 */
912VMMDECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
913{
914 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
915 return u64NanoTS;
916}
917
918
919/**
920 * Converts from microseconds to virtual ticks.
921 *
922 * @returns virtual ticks.
923 * @param pVM The VM handle.
924 * @param u64MicroTS The microsecond value to convert.
925 * @remark There could be rounding and overflow errors here.
926 */
927VMMDECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
928{
929 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
930 return u64MicroTS * 1000;
931}
932
933
934/**
935 * Converts from milliseconds to virtual ticks.
936 *
937 * @returns virtual ticks.
938 * @param pVM The VM handle.
939 * @param u64MilliTS The millisecond value to convert.
940 * @remark There could be rounding and overflow errors here.
941 */
942VMMDECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
943{
944 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
945 return u64MilliTS * 1000000;
946}
947