VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@2878

Last change on this file since 2878 was 2869, checked in by vboxsync, 18 years ago

Create a specialized version of the RTTimeNanoTS code in timesup.cpp for calculating the virtual time. I hope this will eliminate the w32_2 trouble and related issues seen on the black box and my laptop.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 22.4 KB
/* $Id: TMAllVirtual.cpp 2869 2007-05-25 13:15:39Z vboxsync $ */
/** @file
 * TM - Timeout Manager, Virtual Time, All Contexts.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/tm.h>
#ifdef IN_RING3
# include <VBox/rem.h>
# include <iprt/thread.h>
#endif
#include "TMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>

#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) tmVirtualSetWarpDrive(PVM pVM, uint32_t u32Percent);


/**
 * This is (mostly) the same as rtTimeNanoTSInternal() except
 * for the two globals which live in TM.
 *
 * @returns Nanosecond timestamp.
 * @param   pVM     The VM handle.
 */
static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
{
    uint64_t    u64Delta;
    uint32_t    u32NanoTSFactor0;
    uint64_t    u64TSC;
    uint64_t    u64NanoTS;
    uint32_t    u32UpdateIntervalTSC;

    /*
     * Read the GIP data.
     */
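    /*
     * The GIP is updated seqlock-style: the transaction id is incremented
     * before and after each update, so an odd value, or a value that changes
     * between the first read and the re-check after ASMReadTSC(), means the
     * data was (or is being) updated and must be re-read.
     */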
    for (;;)
    {
        uint32_t u32TransactionId;
        PCSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
#ifdef IN_RING3
        if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
            return RTTimeSystemNanoTS();
#endif

        if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
        {
            u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#ifdef __L4__
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
            u64NanoTS = pGip->aCPUs[0].u64NanoTS;
            u64TSC = pGip->aCPUs[0].u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            if (RT_UNLIKELY(    pGip->aCPUs[0].u32TransactionId != u32TransactionId
                            ||  (u32TransactionId & 1)))
                continue;
        }
        else
        {
            /* SUPGIPMODE_ASYNC_TSC */
            PCSUPGIPCPU pGipCpu;

            uint8_t u8ApicId = ASMGetApicId();
            if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
                pGipCpu = &pGip->aCPUs[u8ApicId];
            else
            {
                AssertMsgFailed(("%x\n", u8ApicId));
                pGipCpu = &pGip->aCPUs[0];
            }

            u32TransactionId = pGipCpu->u32TransactionId;
#ifdef __L4__
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
            u64NanoTS = pGipCpu->u64NanoTS;
            u64TSC = pGipCpu->u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
                continue;
            if (RT_UNLIKELY(    pGipCpu->u32TransactionId != u32TransactionId
                            ||  (u32TransactionId & 1)))
                continue;
        }
        break;
    }

    /*
     * Calc NanoTS delta.
     */
    u64Delta -= u64TSC;
    if (u64Delta > u32UpdateIntervalTSC)
    {
        /*
         * We've expired the interval, cap it. If we're here for the 2nd
         * time without any GIP update in between, the checks against
         * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
         */
        u64Delta = u32UpdateIntervalTSC;
    }
#if !defined(_MSC_VER) || defined(__AMD64__) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
    u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
    u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
#else
    __asm
    {
        mov     eax, dword ptr [u64Delta]
        mul     dword ptr [u32NanoTSFactor0]
        div     dword ptr [u32UpdateIntervalTSC]
        mov     dword ptr [u64Delta], eax
        xor     edx, edx
        mov     dword ptr [u64Delta + 4], edx
    }
#endif
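
    /*
     * Illustration of the conversion above (invented numbers, not from any
     * real GIP): with u32NanoTSFactor0 = 1 000 000 ns (a 1 ms update
     * interval) and u32UpdateIntervalTSC = 2 000 000 (a 2 GHz TSC), a raw
     * delta of 500 000 TSC ticks becomes
     *      500 000 * 1 000 000 / 2 000 000 = 250 000 ns.
     */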

    /*
     * Calculate the time and compare it with the previously returned value.
     *
     * Since this function is called *very* frequently when the VM is running
     * and then mostly on EMT, we can restrict the valid range of the delta
     * (-1s to 2*GipUpdates) and simplify/optimize the default path.
     */
    u64NanoTS += u64Delta;
    uint64_t u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
    uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
    if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
        /* frequent - less than 1s since last call. */;
    else if (   (int64_t)u64DeltaPrev < 0
             && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
    {
        /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
        ASMAtomicIncU32(&pVM->tm.s.c1nsVirtualRawSteps);
        u64NanoTS = u64PrevNanoTS + 1;
    }
    else if (u64PrevNanoTS)
    {
        /* Something has gone bust; if the offset is negative it's really bad. */
        ASMAtomicIncU32(&pVM->tm.s.cVirtualRawBadRawPrev);
        if ((int64_t)u64DeltaPrev < 0)
            LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
                    u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
        else
            Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
                 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
#ifdef DEBUG_bird
        AssertMsgFailed(("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
                         u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
#endif
    }
    /* else: We're resuming (see TMVirtualResume). */
    if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
        return u64NanoTS;

    /*
     * Attempt updating the previous value, provided we're still ahead of it.
     *
     * There is no point in recalculating u64NanoTS because we got preempted
     * or raced somebody while the GIP was updated, since these are events
     * that might occur at any point in the return path as well.
     */
    for (int cTries = 100;;)
    {
        u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
        if (u64PrevNanoTS >= u64NanoTS)
            break;
        if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
            break;
        AssertBreak(--cTries > 0, );
    }

    return u64NanoTS;
}
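
/*
 * Example of the 1ns stepping above (illustrative values): if the previous
 * return value was 1 000 000 000 and a reader racing on another CPU computes
 * 999 999 998, i.e. 2ns in the 'past' but within two GIP update intervals,
 * the occasional path returns 1 000 000 001 and bumps c1nsVirtualRawSteps,
 * keeping the raw clock monotonic.
 */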


/**
 * Get the time when we're not running at 100%.
 *
 * @returns The timestamp.
 * @param   pVM     The VM handle.
 */
static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
{
    /*
     * Recalculate the RTTimeNanoTS() value for the period where
     * warp drive has been enabled.
     */
    uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
    u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
    u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
    u64 /= 100;
    u64 += pVM->tm.s.u64VirtualWarpDriveStart;

    /*
     * Now we apply the virtual time offset.
     * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
     * machine started if it had been running continuously without any suspends.)
     */
    u64 -= pVM->tm.s.u64VirtualOffset;
    return u64;
}
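
/*
 * Warp drive example (illustrative values): with a warp percentage of 50 and
 * u64VirtualWarpDriveStart = 10s (in ns), a raw reading of 14s is rescaled
 * above to
 *      10s + (14s - 10s) * 50 / 100 = 12s,
 * i.e. time advances at half speed from the moment the warp drive was
 * engaged; the virtual offset is then subtracted as usual.
 */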


/**
 * Get the raw virtual time.
 *
 * @returns The current time stamp.
 * @param   pVM     The VM handle.
 */
DECLINLINE(uint64_t) tmVirtualGetRaw(PVM pVM)
{
    if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
        return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
    return tmVirtualGetRawNonNormal(pVM);
}


/**
 * Inlined version of tmVirtualGetEx.
 */
DECLINLINE(uint64_t) tmVirtualGet(PVM pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (RT_LIKELY(pVM->tm.s.fVirtualTicking))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
        u64 = tmVirtualGetRaw(pVM);

        /*
         * Use the chance to check for expired timers.
         */
        if (    fCheckTimers
            &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
            &&  (   pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
                 || (   pVM->tm.s.fVirtualSyncTicking
                     && pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
                    )
                )
           )
        {
            VM_FF_SET(pVM, VM_FF_TIMER);
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM);
            VMR3NotifyFF(pVM, true);
#endif
        }
    }
    else
        u64 = pVM->tm.s.u64Virtual;
    return u64;
}


/**
 * Gets the current TMCLOCK_VIRTUAL time.
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 *
 * @remark  While the flow of time will never go backwards, the speed of its
 *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter
 *          can be influenced by power saving (SpeedStep, PowerNow!), while the
 *          former makes use of TSC and kernel timers.
 */
TMDECL(uint64_t) TMVirtualGet(PVM pVM)
{
    return TMVirtualGetEx(pVM, true /* check timers */);
}


/**
 * Gets the current TMCLOCK_VIRTUAL time.
 *
 * @returns The timestamp.
 * @param   pVM             VM handle.
 * @param   fCheckTimers    Check timers or not.
 *
 * @remark  While the flow of time will never go backwards, the speed of its
 *          progress varies due to inaccurate RTTimeNanoTS and TSC. The latter
 *          can be influenced by power saving (SpeedStep, PowerNow!), while the
 *          former makes use of TSC and kernel timers.
 */
TMDECL(uint64_t) TMVirtualGetEx(PVM pVM, bool fCheckTimers)
{
    return tmVirtualGet(pVM, fCheckTimers);
}


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM             VM handle.
 * @param   fCheckTimers    Check timers or not.
 * @thread  EMT.
 */
TMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
{
    VM_ASSERT_EMT(pVM);

    uint64_t u64;
    if (pVM->tm.s.fVirtualSyncTicking)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSync);

        /*
         * Query the virtual clock and do the usual expired timer check.
         */
        Assert(pVM->tm.s.fVirtualTicking);
        u64 = tmVirtualGetRaw(pVM);
        const uint64_t u64VirtualNow = u64;
        if (    fCheckTimers
            &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
            &&  pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
        {
            VM_FF_SET(pVM, VM_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM);
            VMR3NotifyFF(pVM, true);
#endif
            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
        }

        /*
         * Read the offset and adjust if we're playing catch-up.
         *
         * The catch-up adjustment works by decrementing the offset by a
         * percentage of the time elapsed since the previous TMVirtualGetSync
         * call.
         *
         * It's possible to get a very long or even negative interval between
         * two reads for the following reasons:
         *  - Someone might have suspended the process execution, frequently
         *    the case when debugging the process.
         *  - We might be on a different CPU whose TSC isn't quite in sync
         *    with the other CPUs in the system.
         *  - Another thread is racing us and we might have been preempted
         *    while inside this function.
         *
         * Assuming nanosecond virtual time, we can simply ignore any interval
         * which has any of the upper 32 bits set.
         */
        AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
        uint64_t off = pVM->tm.s.offVirtualSync;
        if (pVM->tm.s.fVirtualSyncCatchUp)
        {
            const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
            uint64_t u64Delta = u64 - u64Prev;
            if (RT_LIKELY(!(u64Delta >> 32)))
            {
                uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
                if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
                {
                    off -= u64Sub;
                    ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                    Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
                }
                else
                {
                    /* we've completely caught up. */
                    STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
                    off = pVM->tm.s.offVirtualSyncGivenUp;
                    ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
                    ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                    Log4(("TM: %RU64/0: caught up\n", u64));
                }
            }
            else
            {
                /* More than 4 seconds since last time (or negative), ignore it. */
                if (!(u64Delta & RT_BIT_64(63)))
                    pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
                Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
            }
        }

        /*
         * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time.
         * The current approach is to never pass the head timer; so when we
         * would, we stop the clock and set the timer-pending flag instead.
         */
        u64 -= off;
        const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
        if (u64 >= u64Expire)
        {
            u64 = u64Expire;
            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
            pVM->tm.s.u64VirtualSyncStoppedTS = u64VirtualNow;
            if (    fCheckTimers
                &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
            {
                VM_FF_SET(pVM, VM_FF_TIMER);
#ifdef IN_RING3
                REMR3NotifyTimerPending(pVM);
                VMR3NotifyFF(pVM, true);
#endif
                STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF);
                Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
            }
            else
                Log4(("TM: %RU64/%RU64: exp tmr\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
        }
    }
    else
        u64 = pVM->tm.s.u64VirtualSync;
    return u64;
}
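
/*
 * Catch-up example (illustrative values): with offVirtualSync = 100ms,
 * offVirtualSyncGivenUp = 0, u32VirtualSyncCatchUpPercentage = 25 and 10ms
 * of virtual time elapsed since the previous call, the code above subtracts
 *      u64Sub = 10ms * 25 / 100 = 2.5ms
 * from the offset, so the synchronous clock runs at 125% speed until the lag
 * is gone (or until it hits the head timer and the clock is stopped).
 */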


/**
 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
 *
 * @returns The timestamp.
 * @param   pVM     VM handle.
 * @thread  EMT.
 */
TMDECL(uint64_t) TMVirtualSyncGet(PVM pVM)
{
    return TMVirtualSyncGetEx(pVM, true /* check timers */);
}


/**
 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
 *
 * @return  The current lag.
 * @param   pVM     VM handle.
 */
TMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM)
{
    return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
}


/**
 * Get the current catch-up percent.
 *
 * @return  The current catch-up percent. 0 means running at the same speed as the virtual clock.
 * @param   pVM     VM handle.
 */
TMDECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVM pVM)
{
    if (pVM->tm.s.fVirtualSyncCatchUp)
        return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
    return 0;
}


/**
 * Gets the current TMCLOCK_VIRTUAL frequency.
 *
 * @returns The frequency.
 * @param   pVM     VM handle.
 */
TMDECL(uint64_t) TMVirtualGetFreq(PVM pVM)
{
    return TMCLOCK_FREQ_VIRTUAL;
}


/**
 * Resumes the virtual clock.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INTERNAL_ERROR and VBOX_STRICT assertion if called out of order.
 * @param   pVM     VM handle.
 */
TMDECL(int) TMVirtualResume(PVM pVM)
{
    if (!pVM->tm.s.fVirtualTicking)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
        pVM->tm.s.u64VirtualRawPrev = 0;
        pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
        pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
        pVM->tm.s.fVirtualTicking = true;
        pVM->tm.s.fVirtualSyncTicking = true;
        return VINF_SUCCESS;
    }

    AssertFailed();
    return VERR_INTERNAL_ERROR;
}
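
/*
 * Offset bookkeeping example (illustrative values): if the VM was paused with
 * u64Virtual = 5s and the raw nano timestamp reads 100s at resume time, the
 * code above sets u64VirtualOffset = 95s; a later raw reading of 101s then
 * yields 101s - 95s = 6s in tmVirtualGetRaw, so the clock continues
 * seamlessly from where it was paused.
 */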


/**
 * Pauses the virtual clock.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_INTERNAL_ERROR and VBOX_STRICT assertion if called out of order.
 * @param   pVM     VM handle.
 */
TMDECL(int) TMVirtualPause(PVM pVM)
{
    if (pVM->tm.s.fVirtualTicking)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
        pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
        pVM->tm.s.fVirtualSyncTicking = false;
        pVM->tm.s.fVirtualTicking = false;
        return VINF_SUCCESS;
    }

    AssertFailed();
    return VERR_INTERNAL_ERROR;
}


/**
 * Gets the current warp drive percent.
 *
 * @returns The warp drive percent.
 * @param   pVM     The VM handle.
 */
TMDECL(uint32_t) TMVirtualGetWarpDrive(PVM pVM)
{
    return pVM->tm.s.u32VirtualWarpDrivePercentage;
}


/**
 * Sets the warp drive percent of the virtual time.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   u32Percent  The new percentage. 100 means normal operation.
 */
TMDECL(int) TMVirtualSetWarpDrive(PVM pVM, uint32_t u32Percent)
{
/** @todo This isn't a feature specific to virtual time, move to TM level. (It
 *        should affect TMR3UCTNow as well!) */
#ifdef IN_RING3
    PVMREQ pReq;
    int rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)tmVirtualSetWarpDrive, 2, pVM, u32Percent);
    if (VBOX_SUCCESS(rc))
        rc = pReq->iStatus;
    VMR3ReqFree(pReq);
    return rc;
#else
    return tmVirtualSetWarpDrive(pVM, u32Percent);
#endif
}
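
/*
 * Usage sketch (hypothetical caller, not part of this file): doubling the
 * speed of the guest clocks from ring-3 could look like
 *
 *      int rc = TMVirtualSetWarpDrive(pVM, 200);   // 200% = twice real time
 *      AssertRC(rc);
 *
 * while TMVirtualSetWarpDrive(pVM, 100) restores normal operation; values
 * outside the [2, 20000] range are rejected with VERR_INVALID_PARAMETER by
 * the worker below.
 */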


/**
 * EMT worker for TMVirtualSetWarpDrive.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   u32Percent  See TMVirtualSetWarpDrive().
 * @internal
 */
static DECLCALLBACK(int) tmVirtualSetWarpDrive(PVM pVM, uint32_t u32Percent)
{
    /*
     * Validate it.
     */
    AssertMsgReturn(u32Percent >= 2 && u32Percent <= 20000,
                    ("%RX32 is not between 2 and 20000 (inclusive).\n", u32Percent),
                    VERR_INVALID_PARAMETER);

    /*
     * If the time is running we'll have to pause it before we can change
     * the warp drive settings.
     */
    bool fPaused = pVM->tm.s.fVirtualTicking;
    if (fPaused)
    {
        int rc = TMVirtualPause(pVM);
        AssertRCReturn(rc, rc);
        rc = TMCpuTickPause(pVM);
        AssertRCReturn(rc, rc);
    }

    pVM->tm.s.u32VirtualWarpDrivePercentage = u32Percent;
    pVM->tm.s.fVirtualWarpDrive = u32Percent != 100;
    LogRel(("TM: u32VirtualWarpDrivePercentage=%RI32 fVirtualWarpDrive=%RTbool\n",
            pVM->tm.s.u32VirtualWarpDrivePercentage, pVM->tm.s.fVirtualWarpDrive));

    if (fPaused)
    {
        int rc = TMVirtualResume(pVM);
        AssertRCReturn(rc, rc);
        rc = TMCpuTickResume(pVM);
        AssertRCReturn(rc, rc);
    }

    return VINF_SUCCESS;
}


/**
 * Converts from virtual ticks to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pVM             The VM handle.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          divide without any adjustments.
 */
TMDECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks;
}


/**
 * Converts from virtual ticks to microseconds.
 *
 * @returns microseconds.
 * @param   pVM             The VM handle.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          divide without any adjustments.
 */
TMDECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks / 1000;
}


/**
 * Converts from virtual ticks to milliseconds.
 *
 * @returns milliseconds.
 * @param   pVM             The VM handle.
 * @param   u64VirtualTicks The virtual ticks to convert.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          divide without any adjustments.
 */
TMDECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64VirtualTicks / 1000000;
}


/**
 * Converts from nanoseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle.
 * @param   u64NanoTS   The nanosecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
TMDECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64NanoTS;
}


/**
 * Converts from microseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle.
 * @param   u64MicroTS  The microsecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
TMDECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64MicroTS * 1000;
}


/**
 * Converts from milliseconds to virtual ticks.
 *
 * @returns virtual ticks.
 * @param   pVM         The VM handle.
 * @param   u64MilliTS  The millisecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
TMDECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    return u64MilliTS * 1000000;
}
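
/*
 * Worked example: since TMCLOCK_FREQ_VIRTUAL is 1 000 000 000 Hz, virtual
 * ticks are nanoseconds. TMVirtualFromMilli(pVM, 10) therefore returns
 * 10 000 000 ticks, TMVirtualToMilli(pVM, 10000000) returns 10 again, and
 * TMVirtualToMicro(pVM, 1500) truncates to 1 (plain integer division).
 */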