VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 20590

Last change on this file since 20590 was 20120, checked in by vboxsync, 16 years ago

TMAll.cpp: Better assertion.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 59.8 KB
Line 
1/* $Id: TMAll.cpp 20120 2009-05-28 13:59:29Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_TM
27#include <VBox/tm.h>
28#include <VBox/mm.h>
29#ifdef IN_RING3
30# include <VBox/rem.h>
31#endif
32#include "TMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#ifdef IN_RING3
43# include <iprt/thread.h>
44#endif
45
46
47/*******************************************************************************
48* Defined Constants And Macros *
49*******************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section associated with the timer,
 * if one is associated ((pTimer)->pCritSect != NULL).  Expands to nothing in
 * non-strict builds. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            /* pCritSect is stored as an R3 pointer; convert to the current context before use. */ \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(pCritSect && PDMCritSectIsOwner(pCritSect), \
                      ("pTimer=%p (%s) pCritSect=%p\n", pTimer, R3STRING(pTimer->pszDesc), (pTimer)->pCritSect)); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif
66
67
68#ifndef tmLock
69
70/**
71 * Try take the EMT/TM lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
72 *
73 * @retval VINF_SUCCESS on success (always in ring-3).
74 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
75 *
76 * @param pVM The VM handle.
77 */
78int tmLock(PVM pVM)
79{
80 VM_ASSERT_EMT(pVM);
81 int rc = PDMCritSectEnter(&pVM->tm.s.EmtLock, VERR_SEM_BUSY);
82 return rc;
83}
84
85
86/**
87 * Try take the EMT/TM lock, no waiting.
88 *
89 * @retval VINF_SUCCESS on success.
90 * @retval VERR_SEM_BUSY if busy.
91 *
92 * @param pVM The VM handle.
93 */
94int tmTryLock(PVM pVM)
95{
96 VM_ASSERT_EMT(pVM);
97 int rc = PDMCritSectTryEnter(&pVM->tm.s.EmtLock);
98 return rc;
99}
100
101
102/**
103 * Release the EMT/TM lock.
104 *
105 * @param pVM The VM handle.
106 */
107void tmUnlock(PVM pVM)
108{
109 PDMCritSectLeave(&pVM->tm.s.EmtLock);
110}
111
112
113/**
114 * Try take the VirtualSync lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
115 *
116 * @retval VINF_SUCCESS on success (always in ring-3).
117 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
118 *
119 * @param pVM The VM handle.
120 */
121int tmVirtualSyncLock(PVM pVM)
122{
123 VM_ASSERT_EMT(pVM);
124 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
125 return rc;
126}
127
128
129/**
130 * Try take the VirtualSync lock, no waiting.
131 *
132 * @retval VINF_SUCCESS on success.
133 * @retval VERR_SEM_BUSY if busy.
134 *
135 * @param pVM The VM handle.
136 */
137int tmVirtualSyncTryLock(PVM pVM)
138{
139 VM_ASSERT_EMT(pVM);
140 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
141 return rc;
142}
143
144
145/**
146 * Release the VirtualSync lock.
147 *
148 * @param pVM The VM handle.
149 */
150void tmVirtualSyncUnlock(PVM pVM)
151{
152 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
153}
154
155#endif /* ! macros */
156
157/**
158 * Notification that execution is about to start.
159 *
160 * This call must always be paired with a TMNotifyEndOfExecution call.
161 *
162 * The function may, depending on the configuration, resume the TSC and future
163 * clocks that only ticks when we're executing guest code.
164 *
165 * @param pVCpu The VMCPU to operate on.
166 */
167VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
168{
169 PVM pVM = pVCpu->CTX_SUFF(pVM);
170
171 if (pVM->tm.s.fTSCTiedToExecution)
172 tmCpuTickResume(pVM, pVCpu);
173}
174
175
176/**
177 * Notification that execution is about to start.
178 *
179 * This call must always be paired with a TMNotifyStartOfExecution call.
180 *
181 * The function may, depending on the configuration, suspend the TSC and future
182 * clocks that only ticks when we're executing guest code.
183 *
184 * @param pVCpu The VMCPU to operate on.
185 */
186VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
187{
188 PVM pVM = pVCpu->CTX_SUFF(pVM);
189
190 if (pVM->tm.s.fTSCTiedToExecution)
191 tmCpuTickPause(pVM, pVCpu);
192}
193
194
195/**
196 * Notification that the cpu is entering the halt state
197 *
198 * This call must always be paired with a TMNotifyEndOfExecution call.
199 *
200 * The function may, depending on the configuration, resume the TSC and future
201 * clocks that only ticks when we're halted.
202 *
203 * @param pVCpu The VMCPU to operate on.
204 */
205VMMDECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
206{
207 PVM pVM = pVCpu->CTX_SUFF(pVM);
208
209 if ( pVM->tm.s.fTSCTiedToExecution
210 && !pVM->tm.s.fTSCNotTiedToHalt)
211 tmCpuTickResume(pVM, pVCpu);
212}
213
214
215/**
216 * Notification that the cpu is leaving the halt state
217 *
218 * This call must always be paired with a TMNotifyStartOfHalt call.
219 *
220 * The function may, depending on the configuration, suspend the TSC and future
221 * clocks that only ticks when we're halted.
222 *
223 * @param pVCpu The VMCPU to operate on.
224 */
225VMMDECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
226{
227 PVM pVM = pVCpu->CTX_SUFF(pVM);
228
229 if ( pVM->tm.s.fTSCTiedToExecution
230 && !pVM->tm.s.fTSCNotTiedToHalt)
231 tmCpuTickPause(pVM, pVCpu);
232}
233
234
235/**
236 * Raise the timer force action flag and notify the dedicated timer EMT.
237 *
238 * @param pVM The VM handle.
239 */
240DECLINLINE(void) tmScheduleNotify(PVM pVM)
241{
242 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
243 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
244 {
245 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
246 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
247#ifdef IN_RING3
248 REMR3NotifyTimerPending(pVM, pVCpuDst);
249 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
250#endif
251 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
252 }
253}
254
255
256/**
257 * Schedule the queue which was changed.
258 */
259DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
260{
261 PVM pVM = pTimer->CTX_SUFF(pVM);
262 if ( VM_IS_EMT(pVM)
263 && RT_SUCCESS(tmTryLock(pVM)))
264 {
265 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
266 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
267 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
268#ifdef VBOX_STRICT
269 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
270#endif
271 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
272 tmUnlock(pVM);
273 }
274 else
275 {
276 TMTIMERSTATE enmState = pTimer->enmState;
277 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
278 tmScheduleNotify(pVM);
279 }
280}
281
282
/**
 * Try change the state to enmStateNew from enmStateOld.
 *
 * Note: unlike tmTimerTryWithLink, this does NOT link the timer into the
 * scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     * TM_TRY_SET_STATE sets fRc to indicate whether the (presumably atomic)
     * old->new transition succeeded -- see TMInternal.h for the macro.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}
301
302
/**
 * Links the timer onto the scheduling queue.
 *
 * Lockless: pushes the timer onto the head of the queue's singly linked
 * schedule list using a compare-and-swap loop.  All links are stored as
 * self-relative offsets so they are valid in every context (R3/R0/RC).
 *
 * @param   pQueue  The timer queue the timer belongs to.
 * @param   pTimer  The timer.
 *
 * @todo FIXME: Look into potential race with the thread running the queues
 *       and stuff.
 */
DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext); /* must not already be on the list */
    /* Offset of this timer relative to the queue - the new list head value. */
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t offHead;
    do
    {
        /* Snapshot the current head and point our next-link at it (or 0 when empty). */
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead)); /* retry if the head moved */
}
326
327
328/**
329 * Try change the state to enmStateNew from enmStateOld
330 * and link the timer into the scheduling queue.
331 *
332 * @returns Success indicator.
333 * @param pTimer Timer in question.
334 * @param enmStateNew The new timer state.
335 * @param enmStateOld The old timer state.
336 */
337DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
338{
339 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
340 {
341 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
342 return true;
343 }
344 return false;
345}
346
347
348#ifdef VBOX_HIGH_RES_TIMERS_HACK
349
/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63))); /* the delta must be a positive signed value */

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset; /* convert virtual time to GIP time */
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        /* Scale both the target time and the delta back to real (GIP) rate. */
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        /* Only the portion inside the warp period gets scaled. */
        u64Delta -= u64GipTime;
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}
395
396
397/**
398 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
399 * than the one dedicated to timer work.
400 *
401 * @returns See tmTimerPollInternal.
402 * @param pVM Pointer to the shared VM structure.
403 * @param u64Now Current virtual clock timestamp.
404 * @param pu64Delta Where to return the delta.
405 */
406DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
407{
408 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
409 *pu64Delta = s_u64OtherRet;
410 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
411}
412
413
414/**
415 * Worker for tmTimerPollInternal.
416 *
417 * @returns See tmTimerPollInternal.
418 * @param pVM Pointer to the shared VM structure.
419 * @param pVCpu Pointer to the shared VMCPU structure of the
420 * caller.
421 * @param pVCpuDst Pointer to the shared VMCPU structure of the
422 * dedicated timer EMT.
423 * @param u64Now Current virtual clock timestamp.
424 * @param pu64Delta Where to return the delta.
425 * @param pCounter The statistics counter to update.
426 */
427DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
428 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
429{
430 STAM_COUNTER_INC(pCounter);
431 if (pVCpuDst != pVCpu)
432 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
433 *pu64Delta = 0;
434 return 0;
435}
436
/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t i64Delta1 = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        /* Expired: raise the FF on the dedicated timer EMT and report a hit. */
        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if in a catch-up, not only do
     * we have to adjust the 'now' but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            /* Simple case: ticking and not catching up.  Read the offset and
               re-validate all the inputs to be sure nothing changed meanwhile. */
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                /* offVirtualSync is the lag; virtual-sync now = virtual now - lag. */
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    /* Neither clock has expired: report the nearer of the two deltas. */
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                /* Virtual-sync timer expired: raise the FF and report a hit. */
                if (    !pVM->tm.s.fRunningQueues
                    &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        /* Virtual-sync clock stopped: treat as a hit so the timers get run. */
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     * Loop until a consistent set of catch-up parameters has been observed.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                || cOuterTries <= 0)
            {
                /* Simulate the catch-up the same way tmVirtualSyncGet* would:
                   reduce the lag by u32Pct% of the time elapsed since u64Prev. */
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        /* While catching up the virtual-sync clock runs faster; scale the
           delta back to virtual clock (and thus GIP) rate. */
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}
651
652
653/**
654 * Set FF if we've passed the next virtual event.
655 *
656 * This function is called before FFs are checked in the inner execution EM loops.
657 *
658 * @returns true if timers are pending, false if not.
659 *
660 * @param pVM Pointer to the shared VM structure.
661 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
662 * @thread The emulation thread.
663 */
664VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
665{
666 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
667 uint64_t off = 0;
668 tmTimerPollInternal(pVM, pVCpu, &off);
669 return off == 0;
670}
671
672
673/**
674 * Set FF if we've passed the next virtual event.
675 *
676 * This function is called before FFs are checked in the inner execution EM loops.
677 *
678 * @param pVM Pointer to the shared VM structure.
679 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
680 * @thread The emulation thread.
681 */
682VMMDECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
683{
684 uint64_t off;
685 tmTimerPollInternal(pVM, pVCpu, &off);
686}
687
688
689/**
690 * Set FF if we've passed the next virtual event.
691 *
692 * This function is called before FFs are checked in the inner execution EM loops.
693 *
694 * @returns The GIP timestamp of the next event.
695 * 0 if the next event has already expired.
696 * @param pVM Pointer to the shared VM structure.
697 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
698 * @param pu64Delta Where to store the delta.
699 * @thread The emulation thread.
700 */
701VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
702{
703 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
704}
705
706#endif /* VBOX_HIGH_RES_TIMERS_HACK */
707
708/**
709 * Gets the host context ring-3 pointer of the timer.
710 *
711 * @returns HC R3 pointer.
712 * @param pTimer Timer handle as returned by one of the create functions.
713 */
714VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
715{
716 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
717}
718
719
720/**
721 * Gets the host context ring-0 pointer of the timer.
722 *
723 * @returns HC R0 pointer.
724 * @param pTimer Timer handle as returned by one of the create functions.
725 */
726VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
727{
728 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
729}
730
731
732/**
733 * Gets the RC pointer of the timer.
734 *
735 * @returns RC pointer.
736 * @param pTimer Timer handle as returned by one of the create functions.
737 */
738VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
739{
740 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
741}
742
743
/**
 * Arm a timer with a (new) expire time.
 *
 * Lock-free: loops until the timer is observed in a stable state and the
 * atomic transition into the matching SET_EXPIRE state succeeds, then moves
 * it on to PENDING_SCHEDULE / PENDING_RESCHEDULE and kicks the scheduler.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Expire       New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    /** @todo find the most frequently used paths and make them skip tmSchedule and tmTimerTryWithLink. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            /* Not on the active list: schedule it (linking onto the schedule queue). */
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    /* Warn about arming virtual-sync timers in the past while that clock is ticking. */
                    AssertMsg(      pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
                              ||    pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
                              ||    u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
                              ("%'RU64 < %'RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Already queued for scheduling: just update the expire time. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            /* On the active list: must be re-scheduled (linking onto the schedule queue). */
            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Already queued for rescheduling/stopping: just update the expire time. */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            /* Transient states owned by another thread: back off and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_INTERNAL_ERROR;
}
851
852
853/**
854 * Arm a timer with a (new) expire time relative to current time.
855 *
856 * @returns VBox status.
857 * @param pTimer Timer handle as returned by one of the create functions.
858 * @param cMilliesToNext Number of millieseconds to the next tick.
859 */
860VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
861{
862 PVM pVM = pTimer->CTX_SUFF(pVM);
863 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
864
865 switch (pTimer->enmClock)
866 {
867 case TMCLOCK_VIRTUAL:
868 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualGet(pVM));
869 case TMCLOCK_VIRTUAL_SYNC:
870 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualSyncGet(pVM));
871 case TMCLOCK_REAL:
872 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
873 return TMTimerSet(pTimer, cMilliesToNext + TMRealGet(pVM));
874 case TMCLOCK_TSC:
875 return TMTimerSet(pTimer, cMilliesToNext * pVM->tm.s.cTSCTicksPerSecond / 1000 + TMCpuTickGet(pVCpu));
876
877 default:
878 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
879 return VERR_INTERNAL_ERROR;
880 }
881}
882
883
884/**
885 * Arm a timer with a (new) expire time relative to current time.
886 *
887 * @returns VBox status.
888 * @param pTimer Timer handle as returned by one of the create functions.
889 * @param cMicrosToNext Number of microseconds to the next tick.
890 */
891VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
892{
893 PVM pVM = pTimer->CTX_SUFF(pVM);
894 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
895
896 switch (pTimer->enmClock)
897 {
898 case TMCLOCK_VIRTUAL:
899 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
900 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualGet(pVM));
901
902 case TMCLOCK_VIRTUAL_SYNC:
903 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
904 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualSyncGet(pVM));
905
906 case TMCLOCK_REAL:
907 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
908 return TMTimerSet(pTimer, cMicrosToNext / 1000 + TMRealGet(pVM));
909
910 case TMCLOCK_TSC:
911 return TMTimerSet(pTimer, TMTimerFromMicro(pTimer, cMicrosToNext) + TMCpuTickGet(pVCpu));
912
913 default:
914 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
915 return VERR_INTERNAL_ERROR;
916 }
917}
918
919
920/**
921 * Arm a timer with a (new) expire time relative to current time.
922 *
923 * @returns VBox status.
924 * @param pTimer Timer handle as returned by one of the create functions.
925 * @param cNanosToNext Number of nanoseconds to the next tick.
926 */
927VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
928{
929 PVM pVM = pTimer->CTX_SUFF(pVM);
930 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
931
932 switch (pTimer->enmClock)
933 {
934 case TMCLOCK_VIRTUAL:
935 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
936 return TMTimerSet(pTimer, cNanosToNext + TMVirtualGet(pVM));
937
938 case TMCLOCK_VIRTUAL_SYNC:
939 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
940 return TMTimerSet(pTimer, cNanosToNext + TMVirtualSyncGet(pVM));
941
942 case TMCLOCK_REAL:
943 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
944 return TMTimerSet(pTimer, cNanosToNext / 1000000 + TMRealGet(pVM));
945
946 case TMCLOCK_TSC:
947 return TMTimerSet(pTimer, TMTimerFromNano(pTimer, cNanosToNext) + TMCpuTickGet(pVCpu));
948
949 default:
950 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
951 return VERR_INTERNAL_ERROR;
952 }
953}
954
955
/**
 * Stop the timer.
 * Use TMR3TimerArm() to "un-stop" the timer.
 *
 * @returns VBox status.
 * @retval  VINF_SUCCESS if the timer was stopped or already in a stopped /
 *          pending-stop state.
 * @retval  VERR_INVALID_PARAMETER if the timer has expired and is pending
 *          delivery.
 * @retval  VERR_INTERNAL_ERROR if no stable state was reached within the
 *          retry budget.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 *
 * @remarks Lock-free: performs compare-and-swap state transitions via
 *          tmTimerTry and retries on races with other threads/EMTs.
 */
VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
        switch (enmState)
        {
            /* Already expired and awaiting delivery: stopping is refused. */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            /* Already (being) stopped: nothing to do. */
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            /* Not yet on the active list: convert the pending schedule into a pending stop. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                /* fall thru - NOTE(review): on a lost race this drops into the
                   RESCHEDULE case; its tmTimerTry still compares against the old
                   enmState and so fails, landing in the retry loop. Presumably
                   intentional (matches later VBox revisions) — confirm. */

            /* On the active list with a pending reschedule: turn it into a pending stop. */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* On the active list: mark for stopping (also clears the links, hence WithLink). */
            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Transitional states owned by another thread: yield and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/**@todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_INTERNAL_ERROR;
}
1044
1045
1046/**
1047 * Get the current clock time.
1048 * Handy for calculating the new expire time.
1049 *
1050 * @returns Current clock time.
1051 * @param pTimer Timer handle as returned by one of the create functions.
1052 */
1053VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1054{
1055 uint64_t u64;
1056 PVM pVM = pTimer->CTX_SUFF(pVM);
1057
1058 switch (pTimer->enmClock)
1059 {
1060 case TMCLOCK_VIRTUAL:
1061 u64 = TMVirtualGet(pVM);
1062 break;
1063 case TMCLOCK_VIRTUAL_SYNC:
1064 u64 = TMVirtualSyncGet(pVM);
1065 break;
1066 case TMCLOCK_REAL:
1067 u64 = TMRealGet(pVM);
1068 break;
1069 case TMCLOCK_TSC:
1070 {
1071 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1072 u64 = TMCpuTickGet(pVCpu);
1073 break;
1074 }
1075 default:
1076 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1077 return ~(uint64_t)0;
1078 }
1079 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1080 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1081 return u64;
1082}
1083
1084
1085/**
1086 * Get the freqency of the timer clock.
1087 *
1088 * @returns Clock frequency (as Hz of course).
1089 * @param pTimer Timer handle as returned by one of the create functions.
1090 */
1091VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1092{
1093 switch (pTimer->enmClock)
1094 {
1095 case TMCLOCK_VIRTUAL:
1096 case TMCLOCK_VIRTUAL_SYNC:
1097 return TMCLOCK_FREQ_VIRTUAL;
1098
1099 case TMCLOCK_REAL:
1100 return TMCLOCK_FREQ_REAL;
1101
1102 case TMCLOCK_TSC:
1103 return TMCpuTicksPerSecond(pTimer->CTX_SUFF(pVM));
1104
1105 default:
1106 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1107 return 0;
1108 }
1109}
1110
1111
1112/**
1113 * Get the current clock time as nanoseconds.
1114 *
1115 * @returns The timer clock as nanoseconds.
1116 * @param pTimer Timer handle as returned by one of the create functions.
1117 */
1118VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1119{
1120 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1121}
1122
1123
1124/**
1125 * Get the current clock time as microseconds.
1126 *
1127 * @returns The timer clock as microseconds.
1128 * @param pTimer Timer handle as returned by one of the create functions.
1129 */
1130VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1131{
1132 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1133}
1134
1135
1136/**
1137 * Get the current clock time as milliseconds.
1138 *
1139 * @returns The timer clock as milliseconds.
1140 * @param pTimer Timer handle as returned by one of the create functions.
1141 */
1142VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1143{
1144 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1145}
1146
1147
1148/**
1149 * Converts the specified timer clock time to nanoseconds.
1150 *
1151 * @returns nanoseconds.
1152 * @param pTimer Timer handle as returned by one of the create functions.
1153 * @param u64Ticks The clock ticks.
1154 * @remark There could be rounding errors here. We just do a simple integere divide
1155 * without any adjustments.
1156 */
1157VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1158{
1159 switch (pTimer->enmClock)
1160 {
1161 case TMCLOCK_VIRTUAL:
1162 case TMCLOCK_VIRTUAL_SYNC:
1163 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1164 return u64Ticks;
1165
1166 case TMCLOCK_REAL:
1167 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1168 return u64Ticks * 1000000;
1169
1170 case TMCLOCK_TSC:
1171 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1172 return 0;
1173
1174 default:
1175 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1176 return 0;
1177 }
1178}
1179
1180
1181/**
1182 * Converts the specified timer clock time to microseconds.
1183 *
1184 * @returns microseconds.
1185 * @param pTimer Timer handle as returned by one of the create functions.
1186 * @param u64Ticks The clock ticks.
1187 * @remark There could be rounding errors here. We just do a simple integere divide
1188 * without any adjustments.
1189 */
1190VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1191{
1192 switch (pTimer->enmClock)
1193 {
1194 case TMCLOCK_VIRTUAL:
1195 case TMCLOCK_VIRTUAL_SYNC:
1196 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1197 return u64Ticks / 1000;
1198
1199 case TMCLOCK_REAL:
1200 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1201 return u64Ticks * 1000;
1202
1203 case TMCLOCK_TSC:
1204 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1205 return 0;
1206
1207 default:
1208 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1209 return 0;
1210 }
1211}
1212
1213
1214/**
1215 * Converts the specified timer clock time to milliseconds.
1216 *
1217 * @returns milliseconds.
1218 * @param pTimer Timer handle as returned by one of the create functions.
1219 * @param u64Ticks The clock ticks.
1220 * @remark There could be rounding errors here. We just do a simple integere divide
1221 * without any adjustments.
1222 */
1223VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1224{
1225 switch (pTimer->enmClock)
1226 {
1227 case TMCLOCK_VIRTUAL:
1228 case TMCLOCK_VIRTUAL_SYNC:
1229 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1230 return u64Ticks / 1000000;
1231
1232 case TMCLOCK_REAL:
1233 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1234 return u64Ticks;
1235
1236 case TMCLOCK_TSC:
1237 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1238 return 0;
1239
1240 default:
1241 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1242 return 0;
1243 }
1244}
1245
1246
1247/**
1248 * Converts the specified nanosecond timestamp to timer clock ticks.
1249 *
1250 * @returns timer clock ticks.
1251 * @param pTimer Timer handle as returned by one of the create functions.
1252 * @param u64NanoTS The nanosecond value ticks to convert.
1253 * @remark There could be rounding and overflow errors here.
1254 */
1255VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1256{
1257 switch (pTimer->enmClock)
1258 {
1259 case TMCLOCK_VIRTUAL:
1260 case TMCLOCK_VIRTUAL_SYNC:
1261 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1262 return u64NanoTS;
1263
1264 case TMCLOCK_REAL:
1265 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1266 return u64NanoTS / 1000000;
1267
1268 case TMCLOCK_TSC:
1269 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1270 return 0;
1271
1272 default:
1273 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1274 return 0;
1275 }
1276}
1277
1278
1279/**
1280 * Converts the specified microsecond timestamp to timer clock ticks.
1281 *
1282 * @returns timer clock ticks.
1283 * @param pTimer Timer handle as returned by one of the create functions.
1284 * @param u64MicroTS The microsecond value ticks to convert.
1285 * @remark There could be rounding and overflow errors here.
1286 */
1287VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1288{
1289 switch (pTimer->enmClock)
1290 {
1291 case TMCLOCK_VIRTUAL:
1292 case TMCLOCK_VIRTUAL_SYNC:
1293 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1294 return u64MicroTS * 1000;
1295
1296 case TMCLOCK_REAL:
1297 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1298 return u64MicroTS / 1000;
1299
1300 case TMCLOCK_TSC:
1301 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1302 return 0;
1303
1304 default:
1305 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1306 return 0;
1307 }
1308}
1309
1310
1311/**
1312 * Converts the specified millisecond timestamp to timer clock ticks.
1313 *
1314 * @returns timer clock ticks.
1315 * @param pTimer Timer handle as returned by one of the create functions.
1316 * @param u64MilliTS The millisecond value ticks to convert.
1317 * @remark There could be rounding and overflow errors here.
1318 */
1319VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1320{
1321 switch (pTimer->enmClock)
1322 {
1323 case TMCLOCK_VIRTUAL:
1324 case TMCLOCK_VIRTUAL_SYNC:
1325 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1326 return u64MilliTS * 1000000;
1327
1328 case TMCLOCK_REAL:
1329 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1330 return u64MilliTS;
1331
1332 case TMCLOCK_TSC:
1333 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1334 return 0;
1335
1336 default:
1337 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1338 return 0;
1339 }
1340}
1341
1342
/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer; ~(uint64_t)0 when the timer isn't active
 *          (stopped or expired) or is in an invalid/unstable state.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 *
 * @remarks The state may be changed concurrently by other threads; spins until
 *          a stable state is observed or the retry budget runs out.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Not on the active list: there is no meaningful expire time. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            /* Active or about to become active: u64Expire is valid. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            /* Another thread is currently updating u64Expire: yield and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}
1403
1404
/**
 * Checks if a timer is active or not.
 *
 * @returns True if active (on or headed for the active list).
 * @returns False if not active (stopped, expired, or in an invalid state).
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
{
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        /* Stopped or expired: not active. */
        case TMTIMERSTATE_STOPPED:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;

        /* Active, or transitioning towards the active list: report active. */
        case TMTIMERSTATE_ACTIVE:
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return true;

        /*
         * Invalid states.
         */
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;
        default:
            AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
            return false;
    }
}
1449
1450
/**
 * Convert state to string.
 *
 * @returns Readonly status name (static string, never NULL).
 * @param   enmState    State.
 */
const char *tmTimerState(TMTIMERSTATE enmState)
{
    switch (enmState)
    {
        /* The CASE macro also compile-time asserts that the enum value matches
           the expected number embedded in the returned "num-STATE" string. */
#define CASE(num, state) \
        case TMTIMERSTATE_##state: \
            AssertCompile(TMTIMERSTATE_##state == (num)); \
            return #num "-" #state
        CASE( 1,STOPPED);
        CASE( 2,ACTIVE);
        CASE( 3,EXPIRED_GET_UNLINK);
        CASE( 4,EXPIRED_DELIVER);
        CASE( 5,PENDING_STOP);
        CASE( 6,PENDING_STOP_SCHEDULE);
        CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
        CASE( 8,PENDING_SCHEDULE);
        CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
        CASE(10,PENDING_RESCHEDULE);
        CASE(11,DESTROY);
        CASE(12,FREE);
        default:
            AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
            return "Invalid state!";
#undef CASE
    }
}
1483
1484
/**
 * Schedules the given timer on the given queue.
 *
 * Applies the timer's pending state transition to the queue's active list:
 * inserting, removing or re-inserting the timer as dictated by its state.
 *
 * @param   pQueue  The timer queue.
 * @param   pTimer  The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */

                /* Unlink from the active list; the SCHEDULE case below re-inserts it. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    /* Was the head: update the queue's cached next-expire time. */
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            {
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */

                /* The active list is kept sorted by ascending expire time;
                   walk it and insert before the first later-expiring timer. */
                PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
                if (pCur)
                {
                    const uint64_t u64Expire = pTimer->u64Expire;
                    for (;; pCur = TMTIMER_GET_NEXT(pCur))
                    {
                        if (pCur->u64Expire > u64Expire)
                        {
                            const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                            TMTIMER_SET_NEXT(pTimer, pCur);
                            TMTIMER_SET_PREV(pTimer, pPrev);
                            if (pPrev)
                                TMTIMER_SET_NEXT(pPrev, pTimer);
                            else
                            {
                                /* New head: it now defines the queue's expire time. */
                                TMTIMER_SET_HEAD(pQueue, pTimer);
                                pQueue->u64Expire = u64Expire;
                            }
                            TMTIMER_SET_PREV(pCur, pTimer);
                            return;
                        }
                        if (!pCur->offNext)
                        {
                            /* Reached the tail: append. */
                            TMTIMER_SET_NEXT(pCur, pTimer);
                            TMTIMER_SET_PREV(pTimer, pCur);
                            return;
                        }
                    }
                }
                else
                {
                    /* Empty list: the timer becomes the head. */
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    pQueue->u64Expire = pTimer->u64Expire;
                }
                return;
            }

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */

                /* Unlink from the active list; the STOP_SCHEDULE case finishes up. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLink(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
1638
1639
/**
 * Schedules the specified timer queue.
 *
 * Atomically detaches the queue's pending-schedule list and processes each
 * timer on it via tmTimerQueueScheduleOne.
 *
 * @param   pVM     The VM to run the timers for.
 * @param   pQueue  The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_EMT_LOCK(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    /* The schedule list is a singly linked list of self-relative offsets;
       xchg(0) detaches the whole batch in one atomic operation. */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}
1679
1680
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * Strict-build only: asserts the consistency of the per-clock active lists
 * and (in ring-3) cross-checks them against the big created-timers list.
 *
 * @param   pVM         VM handle.
 * @param   pszWhere    Caller identification string used in assertion messages.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_EMT_LOCK(pVM);

    /*
     * Check the linking of the active lists.
     */
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            /* Every node must belong to this clock and have a consistent back-link. */
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* An ACTIVE timer must not also be on the schedule list. */
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            /* These states imply the timer IS on its clock's active list. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            {
                PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                Assert(pCur->offPrev || pCur == pCurAct);
                while (pCurAct && pCurAct != pCur)
                    pCurAct = TMTIMER_GET_NEXT(pCurAct);
                Assert(pCurAct == pCur);
                break;
            }

            /* These states imply the timer is NOT on any active list. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            {
                Assert(!pCur->offNext);
                Assert(!pCur->offPrev);
                for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                     pCurAct;
                     pCurAct = TMTIMER_GET_NEXT(pCurAct))
                {
                    Assert(pCurAct != pCur);
                    Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                    Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                }
                break;
            }

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */
}
#endif /* VBOX_STRICT */
1784
1785
1786/**
1787 * Gets the current warp drive percent.
1788 *
1789 * @returns The warp drive percent.
1790 * @param pVM The VM handle.
1791 */
1792VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
1793{
1794 return pVM->tm.s.u32VirtualWarpDrivePercentage;
1795}
1796
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette