VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@26278

Last change on this file since 26278 was 25247, checked in by vboxsync, 15 years ago

TMAll.cpp: -Wshadow

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 74.1 KB
/* $Id: TMAll.cpp 25247 2009-12-08 13:58:56Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/tm.h>
#include <VBox/mm.h>
#ifdef IN_RING3
# include <VBox/rem.h>
#endif
#include "TMInternal.h"
#include <VBox/vm.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(pCritSect && PDMCritSectIsOwner(pCritSect), \
                      ("pTimer=%p (%s) pCritSect=%p\n", pTimer, R3STRING(pTimer->pszDesc), (pTimer)->pCritSect)); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif

#ifndef tmTimerLock

/**
 * Try to take the timer lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
 *
 * @retval  VINF_SUCCESS on success (always in ring-3).
 * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
 *
 * @param   pVM         The VM handle.
 *
 * @thread  EMTs for the time being.
 */
int tmTimerLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
    return rc;
}


/**
 * Try to take the timer lock, no waiting.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if busy.
 *
 * @param   pVM         The VM handle.
 */
int tmTimerTryLock(PVM pVM)
{
    int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
    return rc;
}


/**
 * Release the EMT/TM lock.
 *
 * @param   pVM         The VM handle.
 */
void tmTimerUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
}


/**
 * Try to take the VirtualSync lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
 *
 * @retval  VINF_SUCCESS on success (always in ring-3).
 * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
 *
 * @param   pVM         The VM handle.
 */
int tmVirtualSyncLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
    return rc;
}


/**
 * Try to take the VirtualSync lock, no waiting.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if busy.
 *
 * @param   pVM         The VM handle.
 */
int tmVirtualSyncTryLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Release the VirtualSync lock.
 *
 * @param   pVM         The VM handle.
 */
void tmVirtualSyncUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}

#endif /* ! macros */
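
/* Illustrative sketch (not part of the original source): a caller that runs
   in all three contexts typically falls back to ring-3 when the R0/RC lock
   attempt reports VERR_SEM_BUSY instead of blocking; the exact fallback
   status used below is hypothetical. */
#if 0
    int rc = tmTimerLock(pVM);
    if (rc == VERR_SEM_BUSY)
        return VINF_EM_RAW_TO_R3;   /* hypothetical: redo the work in ring-3 */
    AssertRC(rc);
    /* ... touch the timer queues ... */
    tmTimerUnlock(pVM);
#endif
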
/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVM, pVCpu);
}


/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVM, pVCpu);
}
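
/* Illustrative sketch (not part of the original source): the expected
   pairing of these notifications around a burst of guest execution.
   exampleRunGuest() is hypothetical and stands in for whatever actually
   executes guest code. */
#if 0
    TMNotifyStartOfExecution(pVCpu);
    rc = exampleRunGuest(pVM, pVCpu);   /* hypothetical executor */
    TMNotifyEndOfExecution(pVCpu);
#endif
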

/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM         The VM handle.
 */
DECLINLINE(void) tmScheduleNotify(PVM pVM)
{
    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        REMR3NotifyTimerPending(pVM, pVCpuDst);
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}


/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    if (    VM_IS_EMT(pVM)
        &&  RT_SUCCESS(tmTimerTryLock(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        tmTimerUnlock(pVM);
    }
    else
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}


/**
 * Try to change the state to enmStateNew from enmStateOld.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}


/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueue      The timer queue the timer belongs to.
 * @param   pTimer      The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}
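
/* Illustrative sketch (not part of the original source): tmTimerLink above
   is an offset-based variant of the classic lock-free LIFO push.  With
   plain pointers the same compare-and-swap loop looks like this; the node
   type and helper are hypothetical. */
#if 0
typedef struct EXAMPLENODE { struct EXAMPLENODE *pNext; } EXAMPLENODE;

static void examplePush(EXAMPLENODE * volatile *ppHead, EXAMPLENODE *pNode)
{
    EXAMPLENODE *pHead;
    do
    {
        pHead = *ppHead;        /* sample the current head */
        pNode->pNext = pHead;   /* chain the new node in front of it */
    } while (!ASMAtomicCmpXchgPtr((void * volatile *)ppHead, pNode, pHead));
}
#endif
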

/**
 * Try to change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}

#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta   = ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}

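/* Illustrative sketch (not part of the original source): the semantics of
   ASMMultU64ByU32DivByU32 are "multiply a 64-bit value by a 32-bit value,
   then divide by a 32-bit value, without losing the wide intermediate".
   A portable approximation, assuming compiler support for the non-standard
   unsigned __int128 type, might look like this. */
#if 0
static uint64_t exampleMulDiv(uint64_t u64A, uint32_t u32B, uint32_t u32C)
{
    /* 64x32 -> 96-bit product held in a 128-bit intermediate, then divide. */
    return (uint64_t)(((unsigned __int128)u64A * u32B) / u32C);
}
#endif
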

/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the
 *                      caller.
 * @param   pVCpuDst    Pointer to the shared VMCPU structure of the
 *                      dedicated timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}

/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    PVMCPU         pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    const uint64_t u64Now   = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t  i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if we're in a catch-up: not only
     * do we have to adjust the 'now', we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (    !pVM->tm.s.fRunningQueues
                    &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try to get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}
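
/* Illustrative sketch (not part of the original source): with a catch-up
   percentage of 25, the virtual-sync clock runs at 125% of real speed, so
   a virtual-sync delta has to be scaled by 100/(100+25) to get the GIP
   nanoseconds to wait, exactly as done above.  For instance: */
#if 0
    uint64_t cNsVirtSync = 1250;    /* ticks left on the virtual-sync clock */
    uint32_t u32Pct      = 25;      /* u32VirtualSyncCatchUpPercentage */
    uint64_t cNsGip      = ASMMultU64ByU32DivByU32(cNsVirtSync, 100, u32Pct + 100);
    /* cNsGip == 1000: the event is 1000 GIP nanoseconds away. */
#endif
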

/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

#endif /* VBOX_HIGH_RES_TIMERS_HACK */
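
/* Illustrative sketch (not part of the original source): an EM inner loop
   would typically call TMTimerPollGIP right before checking force-action
   flags and use the returned delta to bound how long it may halt.  The
   halt helper shown is hypothetical. */
#if 0
    uint64_t u64Delta;
    uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
    if (!u64GipTime)
        /* next event already expired; VMCPU_FF_TIMER has been set, service it */;
    else
        exampleHaltUpToNs(pVCpu, u64Delta); /* hypothetical: block at most u64Delta ns */
#endif
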

/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * @returns HC R3 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the host context ring-0 pointer of the timer.
 *
 * @returns HC R0 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the RC pointer of the timer.
 *
 * @returns RC pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}

/**
 * Links a timer into the active list of a timer queue.
 *
 * The caller must have taken the TM semaphore before calling this function.
 *
 * @param   pQueue      The queue.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 */
DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    pQueue->u64Expire = u64Expire;
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                return;
            }
        }
    }
    else
    {
        TMTIMER_SET_HEAD(pQueue, pTimer);
        pQueue->u64Expire = u64Expire;
    }
}

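/* Illustrative sketch (not part of the original source): the point of
   keeping pQueue->u64Expire in sync with the list head is that pollers
   only need a single atomic read to learn the next expiration, e.g.: */
#if 0
    const uint64_t u64Expire = ASMAtomicReadU64(&pQueue->u64Expire);
    if (u64Expire <= u64Now)
        /* ... at least one timer on this queue has expired ... */;
#endif
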

/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The new expire time.
 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

    /*
     * Link the timer into the active list.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    tmTimerUnlock(pVM);
    return VINF_SUCCESS;
}

/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Expire       New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

#ifdef VBOX_WITH_STATISTICS
    /* Gather optimization info. */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try to take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            tmTimerUnlock(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    AssertMsg(      pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
                              ||    pVM->tm.s.fVirtualSyncTicking
                              ||    u64Expire >= pVM->tm.s.u64VirtualSync,
                              ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_INTERNAL_ERROR;
}

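/* Illustrative sketch (not part of the original source): a device callback
   typically re-arms its timer by reading the timer clock and adding the
   interval; the interval value below is hypothetical. */
#if 0
    uint64_t const cNsInterval = 1000000; /* hypothetical 1 ms period */
    int rc = TMTimerSet(pTimer, TMTimerGet(pTimer) + TMTimerFromNano(pTimer, cNsInterval));
    AssertRC(rc);
#endif
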

/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM         The VM handle.
 * @param   enmClock    The clock to query.
 * @param   pu64Now     Optional pointer where to store the return time.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const  enmClock  = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    pTimer->u64Expire        = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    tmTimerUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Arm a timer with an expire time relative to the current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);
    PVM pVM = pTimer->CTX_SUFF(pVM);
    int rc;

#ifdef VBOX_WITH_STATISTICS
    /* Gather optimization info. */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling, and we can also be 100% sure that we're not racing the
     * running of the timer queues.  As an additional restraint we require
     * the timer to have a critical section associated with it, to be 100%
     * sure there aren't concurrent operations on the timer.  (This latter
     * isn't necessary any longer as this isn't supported for any timers,
     * critsect or not.)
     *
     * Note! Lock ordering doesn't apply when we only try to
     *       get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    bool fOwnVirtSyncLock;
    fOwnVirtSyncLock = !fOwnTMLock
                    && enmClock == TMCLOCK_VIRTUAL_SYNC
                    && RT_SUCCESS(tmVirtualSyncTryLock(pVM));
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *        Figure out a safe way of activating this timer while the queue is
                     *        being run.
                     *        (99.9% sure that the assertion is caused by DevAPIC.cpp
                     *        restarting the timer in response to an initial_count write.) */
                }
                /* fall thru */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_INTERNAL_ERROR;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
        {
            fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
            if (    !fOwnTMLock
                &&  enmClock == TMCLOCK_VIRTUAL_SYNC
                &&  !fOwnVirtSyncLock)
                fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM));
        }

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnVirtSyncLock)
        tmVirtualSyncUnlock(pVM);
    if (fOwnTMLock)
        tmTimerUnlock(pVM);

    if (    !fOwnTMLock
        &&  !fOwnVirtSyncLock
        &&  enmClock == TMCLOCK_VIRTUAL_SYNC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}


/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMilliesToNext  Number of milliseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
{
    PVM    pVM   = pTimer->CTX_SUFF(pVM);
    PVMCPU pVCpu = &pVM->aCpus[0];  /* just take the first VCPU */

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_INTERNAL_ERROR;
    }
}
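
/* Illustrative sketch (not part of the original source): the relative
   helpers only differ in the unit conversion, so arming a 10 ms tick can
   be written either way: */
#if 0
    TMTimerSetMillies(pTimer, 10);
    TMTimerSetNano(pTimer, UINT64_C(10000000)); /* same deadline */
#endif
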

/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMicrosToNext   Number of microseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
{
    PVM    pVM   = pTimer->CTX_SUFF(pVM);
    PVMCPU pVCpu = &pVM->aCpus[0];  /* just take the first VCPU */

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_INTERNAL_ERROR;
    }
}


/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cNanosToNext    Number of nanoseconds to the next tick.
 */
VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
{
    PVM    pVM   = pTimer->CTX_SUFF(pVM);
    PVMCPU pVCpu = &pVM->aCpus[0];  /* just take the first VCPU */

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_INTERNAL_ERROR;
    }
}


/**
 * Stop the timer.
 * Use TMR3TimerArm() to "un-stop" the timer.
 *
 * @returns VBox status.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_INTERNAL_ERROR;
}


/**
 * Get the current clock time.
 * Handy for calculating the new expire time.
 *
 * @returns Current clock time.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
{
    uint64_t u64;
    PVM      pVM = pTimer->CTX_SUFF(pVM);

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            u64 = TMVirtualGet(pVM);
            break;
        case TMCLOCK_VIRTUAL_SYNC:
            u64 = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64 = TMRealGet(pVM);
            break;
        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return ~(uint64_t)0;
    }
    //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
    //      u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return u64;
}


/**
 * Get the frequency of the timer clock.
 *
 * @returns Clock frequency (as Hz of course).
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            return TMCLOCK_FREQ_VIRTUAL;

        case TMCLOCK_REAL:
            return TMCLOCK_FREQ_REAL;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Get the current clock time as nanoseconds.
 *
 * @returns The timer clock as nanoseconds.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
{
    return TMTimerToNano(pTimer, TMTimerGet(pTimer));
}


/**
 * Get the current clock time as microseconds.
 *
 * @returns The timer clock as microseconds.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
{
    return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
}


/**
 * Get the current clock time as milliseconds.
 *
 * @returns The timer clock as milliseconds.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
{
    return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
}


/**
 * Converts the specified timer clock time to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64Ticks    The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          division without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to microseconds.
 *
 * @returns microseconds.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64Ticks    The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          division without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks / 1000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to milliseconds.
 *
 * @returns milliseconds.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64Ticks    The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          division without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks / 1000000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified nanosecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64NanoTS   The nanosecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64NanoTS;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64NanoTS / 1000000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified microsecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64MicroTS  The microsecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64MicroTS * 1000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64MicroTS / 1000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified millisecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 * @param   u64MilliTS  The millisecond value to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64MilliTS * 1000000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64MilliTS;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}
1730
1731/**
1732 * Get the expire time of the timer.
1733 * Only valid for active timers.
1734 *
1735 * @returns Expire time of the timer.
1736 * @param pTimer Timer handle as returned by one of the create functions.
1737 */
1738VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1739{
1740 TMTIMER_ASSERT_CRITSECT(pTimer);
1741 int cRetries = 1000;
1742 do
1743 {
1744 TMTIMERSTATE enmState = pTimer->enmState;
1745 switch (enmState)
1746 {
1747 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1748 case TMTIMERSTATE_EXPIRED_DELIVER:
1749 case TMTIMERSTATE_STOPPED:
1750 case TMTIMERSTATE_PENDING_STOP:
1751 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1752 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1753 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1754 return ~(uint64_t)0;
1755
1756 case TMTIMERSTATE_ACTIVE:
1757 case TMTIMERSTATE_PENDING_RESCHEDULE:
1758 case TMTIMERSTATE_PENDING_SCHEDULE:
1759 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1760 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1761 return pTimer->u64Expire;
1762
1763 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1764 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1765#ifdef IN_RING3
1766 if (!RTThreadYield())
1767 RTThreadSleep(1);
1768#endif
1769 break;
1770
1771 /*
1772 * Invalid states.
1773 */
1774 case TMTIMERSTATE_DESTROY:
1775 case TMTIMERSTATE_FREE:
1776 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1777 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1778 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1779 return ~(uint64_t)0;
1780 default:
1781 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1782 return ~(uint64_t)0;
1783 }
1784 } while (cRetries-- > 0);
1785
1786 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1787 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1788 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1789 return ~(uint64_t)0;
1790}
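

/*
 * Example (editor's addition): computing the time left before an active timer
 * fires, using the ~0 convention documented above for inactive timers.
 * TMTimerGet() is assumed to return the current time on the timer's clock;
 * the helper name is made up.
 */
#if 0 /* illustrative sketch only */
static uint64_t tmExampleTicksUntilExpire(PTMTIMER pTimer)
{
    uint64_t const u64Expire = TMTimerGetExpire(pTimer);
    if (u64Expire == ~(uint64_t)0)
        return ~(uint64_t)0;                    /* not active */
    uint64_t const u64Now = TMTimerGet(pTimer);
    return u64Expire > u64Now ? u64Expire - u64Now : 0;
}
#endif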


/**
 * Checks if a timer is active or not.
 *
 * @returns True if active.
 * @returns False if not active.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
{
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_STOPPED:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;

        case TMTIMERSTATE_ACTIVE:
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return true;

        /*
         * Invalid states.
         */
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;
        default:
            AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
            return false;
    }
}
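

/*
 * Example (editor's addition): a common pattern around TMTimerIsActive() is
 * restarting a one-shot timer -- stop it if it is still pending, then arm a
 * new expire time.  TMTimerStop(), TMTimerSet() and TMTimerGet() are assumed
 * to have their usual TM signatures; the helper name is made up.
 */
#if 0 /* illustrative sketch only */
static int tmExampleRestart(PTMTIMER pTimer, uint64_t cTicksFromNow)
{
    if (TMTimerIsActive(pTimer))
        TMTimerStop(pTimer);
    return TMTimerSet(pTimer, TMTimerGet(pTimer) + cTicksFromNow);
}
#endif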


/**
 * Convert state to string.
 *
 * @returns Read-only status name.
 * @param   enmState        State.
 */
const char *tmTimerState(TMTIMERSTATE enmState)
{
    switch (enmState)
    {
#define CASE(num, state) \
        case TMTIMERSTATE_##state: \
            AssertCompile(TMTIMERSTATE_##state == (num)); \
            return #num "-" #state
        CASE( 1,STOPPED);
        CASE( 2,ACTIVE);
        CASE( 3,EXPIRED_GET_UNLINK);
        CASE( 4,EXPIRED_DELIVER);
        CASE( 5,PENDING_STOP);
        CASE( 6,PENDING_STOP_SCHEDULE);
        CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
        CASE( 8,PENDING_SCHEDULE);
        CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
        CASE(10,PENDING_RESCHEDULE);
        CASE(11,DESTROY);
        CASE(12,FREE);
        default:
            AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
            return "Invalid state!";
#undef CASE
    }
}
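

/*
 * Example (editor's addition): tmTimerState() exists for tracing, as the Log2
 * statements throughout this file show.  A hedged sketch of such a trace
 * helper (the name is made up):
 */
#if 0 /* illustrative sketch only */
static void tmExampleLogTimerState(PTMTIMER pTimer)
{
    Log(("TM: timer '%s' is in state %s\n",
         R3STRING(pTimer->pszDesc), tmTimerState(pTimer->enmState)));
}
#endif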


/**
 * Schedules the given timer on the given queue.
 *
 * @param   pQueue          The timer queue.
 * @param   pTimer          The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */

                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            {
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */

                PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
                if (pCur)
                {
                    const uint64_t u64Expire = pTimer->u64Expire;
                    for (;; pCur = TMTIMER_GET_NEXT(pCur))
                    {
                        if (pCur->u64Expire > u64Expire)
                        {
                            const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                            TMTIMER_SET_NEXT(pTimer, pCur);
                            TMTIMER_SET_PREV(pTimer, pPrev);
                            if (pPrev)
                                TMTIMER_SET_NEXT(pPrev, pTimer);
                            else
                            {
                                TMTIMER_SET_HEAD(pQueue, pTimer);
                                pQueue->u64Expire = u64Expire;
                            }
                            TMTIMER_SET_PREV(pCur, pTimer);
                            return;
                        }
                        if (!pCur->offNext)
                        {
                            TMTIMER_SET_NEXT(pCur, pTimer);
                            TMTIMER_SET_PREV(pTimer, pCur);
                            return;
                        }
                    }
                }
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    pQueue->u64Expire = pTimer->u64Expire;
                }
                return;
            }

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */

                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLink(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
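

/*
 * Example (editor's addition): the PENDING_SCHEDULE path above keeps the
 * active list sorted by ascending expire time, so the queue head is always
 * the next timer to fire.  A standalone model of that sorted insert, with the
 * context-neutral offsets replaced by plain pointers (all names made up):
 */
#if 0 /* illustrative model only */
typedef struct EXAMPLETIMER
{
    uint64_t             u64Expire;
    struct EXAMPLETIMER *pNext;
    struct EXAMPLETIMER *pPrev;
} EXAMPLETIMER;

static void exampleSortedInsert(EXAMPLETIMER **ppHead, EXAMPLETIMER *pTimer)
{
    EXAMPLETIMER *pPrev = NULL;
    EXAMPLETIMER *pCur  = *ppHead;
    /* Walk past everything expiring at or before the new timer. */
    while (pCur && pCur->u64Expire <= pTimer->u64Expire)
    {
        pPrev = pCur;
        pCur  = pCur->pNext;
    }
    pTimer->pNext = pCur;
    pTimer->pPrev = pPrev;
    if (pPrev)
        pPrev->pNext = pTimer;
    else
        *ppHead = pTimer;       /* new head, i.e. the next timer to fire */
    if (pCur)
        pCur->pPrev = pTimer;
}
#endif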


/**
 * Schedules the specified timer queue.
 *
 * @param   pVM             The VM to run the timers for.
 * @param   pQueue          The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expire=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expire=%'RU64\n", pQueue->u64Expire));
}
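

/*
 * Example (editor's addition): the schedule list is linked with self-relative
 * offsets (offSchedule/offScheduleNext) rather than pointers, so the same
 * structures decode correctly in ring-3, ring-0 and raw-mode context, where
 * the data is mapped at different addresses.  A sketch of the decoding idiom
 * used above (the helper name is made up):
 */
#if 0 /* illustrative sketch only */
DECLINLINE(PTMTIMER) tmExampleFromOffset(void *pvBase, int32_t off)
{
    /* A zero offset is the list terminator, mirroring the NULL pointer. */
    return off ? (PTMTIMER)((intptr_t)pvBase + off) : NULL;
}
#endif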


#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM             VM handle.
 * @param   pszWhere        Caller location for the assertion messages.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Check the linking of the active lists.
     */
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            {
                PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                Assert(pCur->offPrev || pCur == pCurAct);
                while (pCurAct && pCurAct != pCur)
                    pCurAct = TMTIMER_GET_NEXT(pCurAct);
                Assert(pCurAct == pCur);
                break;
            }

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            {
                Assert(!pCur->offNext);
                Assert(!pCur->offPrev);
                for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                     pCurAct;
                     pCurAct = TMTIMER_GET_NEXT(pCurAct))
                {
                    Assert(pCurAct != pCur);
                    Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                    Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                }
                break;
            }

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */
}
#endif /* VBOX_STRICT */
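

/*
 * Example (editor's addition): in strict builds, callers sprinkle these
 * sanity checks around queue manipulation, passing a location tag that shows
 * up in the assertion messages.  A hedged sketch of such a call site:
 */
#if 0 /* illustrative sketch only */
# ifdef VBOX_STRICT
    tmTimerQueuesSanityChecks(pVM, "after scheduling");
# endif
#endif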


/**
 * Gets the current warp drive percent.
 *
 * @returns The warp drive percent.
 * @param   pVM             The VM handle.
 */
VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
{
    return pVM->tm.s.u32VirtualWarpDrivePercentage;
}
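

/*
 * Example (editor's addition): the warp drive percentage scales virtual time
 * relative to real time, with 100 meaning normal speed.  A hedged sketch of
 * that scaling, assuming the straightforward value * percentage / 100
 * relationship (the helper name is made up):
 */
#if 0 /* illustrative sketch only */
static uint64_t tmExampleGuestNsFromHostNs(PVM pVM, uint64_t cHostNanoSecs)
{
    /* At 200%, one host second covers two seconds of guest virtual time. */
    return cHostNanoSecs * TMGetWarpDrive(pVM) / 100;
}
#endif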