VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 29450

Last change on this file since 29450 was 29250, checked in by vboxsync, 15 years ago

iprt/asm*.h: split out asm-math.h, don't include asm-*.h from asm.h, don't include asm.h from sup.h. Fixed a couple file headers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 74.0 KB
/* $Id: TMAll.cpp 29250 2010-05-09 17:53:58Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                              *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/tm.h>
#include <VBox/mm.h>
#ifdef IN_RING3
# include <VBox/rem.h>
#endif
#include "TMInternal.h"
#include <VBox/vm.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                              *
*******************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(pCritSect && PDMCritSectIsOwner(pCritSect), \
                      ("pTimer=%p (%s) pCritSect=%p\n", pTimer, R3STRING(pTimer->pszDesc), (pTimer)->pCritSect)); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif


#ifndef tmTimerLock

/**
 * Try to take the timer lock; waits in ring-3, returns VERR_SEM_BUSY in R0/RC.
 *
 * @retval  VINF_SUCCESS on success (always in ring-3).
 * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
 *
 * @param   pVM         The VM handle.
 *
 * @thread  EMTs for the time being.
 */
int tmTimerLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
    return rc;
}


/**
 * Try to take the timer lock, no waiting.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if busy.
 *
 * @param   pVM         The VM handle.
 */
int tmTimerTryLock(PVM pVM)
{
    int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
    return rc;
}


/**
 * Release the EMT/TM lock.
 *
 * @param   pVM         The VM handle.
 */
void tmTimerUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
}


/**
 * Try to take the VirtualSync lock; waits in ring-3, returns VERR_SEM_BUSY in R0/RC.
 *
 * @retval  VINF_SUCCESS on success (always in ring-3).
 * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
 *
 * @param   pVM         The VM handle.
 */
int tmVirtualSyncLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
    return rc;
}


/**
 * Try to take the VirtualSync lock, no waiting.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if busy.
 *
 * @param   pVM         The VM handle.
 */
int tmVirtualSyncTryLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Release the VirtualSync lock.
 *
 * @param   pVM         The VM handle.
 */
void tmVirtualSyncUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}

#endif /* ! macros */
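
/*
 * Illustrative sketch (not part of the original file): how the lock helpers
 * above are typically used.  In ring-3 tmTimerLock() blocks until the lock is
 * taken, while in R0/RC it can fail with VERR_SEM_BUSY, so callers must be
 * prepared to back off and retry.  tmExampleLockedWork is a hypothetical
 * caller made up for this example.
 */
#if 0 /* example only */
static int tmExampleLockedWork(PVM pVM)
{
    int rc = tmTimerLock(pVM);          /* VINF_SUCCESS in R3; may be VERR_SEM_BUSY in R0/RC. */
    if (RT_FAILURE(rc))
        return rc;                      /* defer the work, typically by raising a force flag. */
    /* ... manipulate the timer queues here ... */
    tmTimerUnlock(pVM);
    return VINF_SUCCESS;
}
#endif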

/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVM, pVCpu);
}


/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVM, pVCpu);
}
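

/*
 * Illustrative sketch (not part of the original file): the expected pairing of
 * the four notification calls in an EMT inner loop when fTSCTiedToExecution is
 * set.  tmExampleEmtLoopIteration is hypothetical, made up for this example.
 */
#if 0 /* example only */
static void tmExampleEmtLoopIteration(PVMCPU pVCpu, bool fHalt)
{
    if (!fHalt)
    {
        TMNotifyStartOfExecution(pVCpu);    /* TSC resumes ticking here... */
        /* ... run guest code ... */
        TMNotifyEndOfExecution(pVCpu);      /* ... and pauses again here. */
    }
    else
    {
        TMNotifyStartOfHalt(pVCpu);         /* ticks during halt unless fTSCNotTiedToHalt. */
        /* ... block waiting for work ... */
        TMNotifyEndOfHalt(pVCpu);
    }
}
#endif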


/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM         The VM handle.
 */
DECLINLINE(void) tmScheduleNotify(PVM pVM)
{
    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        REMR3NotifyTimerPending(pVM, pVCpuDst);
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}


/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    if (    VM_IS_EMT(pVM)
        &&  RT_SUCCESS(tmTimerTryLock(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        tmTimerUnlock(pVM);
    }
    else
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}


/**
 * Try to change the state to enmStateNew from enmStateOld.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}
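

/*
 * Illustrative sketch (not part of the original file): TM_TRY_SET_STATE is
 * defined in TMInternal.h and its exact definition is not shown here, but its
 * semantics amount to an atomic compare-and-exchange on the state field,
 * roughly like the hypothetical helper below.
 */
#if 0 /* example only */
static bool tmExampleTrySetState(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /* Succeeds only if nobody changed the state since enmStateOld was sampled. */
    return ASMAtomicCmpXchgU32((uint32_t volatile *)&pTimer->enmState,
                               (uint32_t)enmStateNew, (uint32_t)enmStateOld);
}
#endif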


/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueue      The timer queue the timer belongs to.
 * @param   pTimer      The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}
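

/*
 * Illustrative sketch (not part of the original file): tmTimerLink above is a
 * lockless LIFO push.  Offsets relative to pQueue/pTimer are stored instead of
 * raw pointers so the same list works in ring-3, ring-0 and raw-mode contexts,
 * where the structures are mapped at different addresses.  With plain pointers
 * the same pattern would look like this hypothetical helper:
 */
#if 0 /* example only */
typedef struct EXAMPLENODE
{
    struct EXAMPLENODE *volatile pNext;
} EXAMPLENODE;

static void exampleLocklessPush(EXAMPLENODE *volatile *ppHead, EXAMPLENODE *pNode)
{
    EXAMPLENODE *pHead;
    do
    {
        pHead = *ppHead;            /* sample the current head... */
        pNode->pNext = pHead;       /* ...point the new node at it... */
    } while (!ASMAtomicCmpXchgPtr((void * volatile *)ppHead, pNode, pHead));
    /* ...and retry if someone else pushed in between. */
}
#endif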


/**
 * Try to change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
329 *
330 * @returns Success indicator.
331 * @param pTimer Timer in question.
332 * @param enmStateNew The new timer state.
333 * @param enmStateOld The old timer state.
334 */
335DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
336{
337 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
338 {
339 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
340 return true;
341 }
342 return false;
343}
344
345
346#ifdef VBOX_HIGH_RES_TIMERS_HACK
347
348/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        /* Scale both values from virtual time back to GIP time. */
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta   = ASMMultU64ByU32DivByU32(u64Delta,   100, u32Pct);
    }
    else
    {
        /* Only the part of the delta that lies after the warp start gets scaled. */
        u64Delta  -= u64GipTime;
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta  += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}
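

/*
 * Illustrative sketch (not part of the original file): with a warp drive
 * percentage of 200 (virtual time runs twice as fast as GIP time), a virtual
 * delta of 1000000 ticks corresponds to only 500000 GIP ticks, i.e.
 * delta_gip = delta_virtual * 100 / u32Pct.  tmExampleUnwarpDelta is a
 * hypothetical helper showing just that scaling step.
 */
#if 0 /* example only */
static uint64_t tmExampleUnwarpDelta(uint64_t u64VirtualDelta, uint32_t u32Pct)
{
    /* E.g. tmExampleUnwarpDelta(1000000, 200) == 500000. */
    return ASMMultU64ByU32DivByU32(u64VirtualDelta, 100, u32Pct);
}
#endif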


/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the
 *                      caller.
 * @param   pVCpuDst    Pointer to the shared VMCPU structure of the
 *                      dedicated timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}

/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    PVMCPU         pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    const uint64_t u64Now   = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t  u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t   i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if we're in a catch-up: not only do
     * we have to adjust 'now', we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (    !pVM->tm.s.fRunningQueues
                    &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
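    /*
     * Added commentary: the loop below retries until it has read a mutually
     * consistent snapshot of the catch-up state (previous timestamp, given-up
     * offset, percentage, offset and expire time), since these fields can
     * change concurrently.  While catching up, the virtual sync clock runs
     * faster than the virtual clock, so the lag is reduced by roughly
     * (u64Now - u64Prev) * u32Pct / 100 before computing 'now' as u64Now - off.
     */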
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset. */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

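/*
 * Illustrative sketch (not part of the original file): how an EM loop might
 * consume TMTimerPollGIP.  The returned delta is the number of nanoseconds the
 * caller may sleep before the next timer event; 0 means the TIMER FF has been
 * (or will be) set and the timer queues should be serviced now.
 * tmExamplePollAndWait is hypothetical, made up for this example.
 */
#if 0 /* example only */
static void tmExamplePollAndWait(PVM pVM, PVMCPU pVCpu)
{
    uint64_t cNsDelta;
    uint64_t u64GipNext = TMTimerPollGIP(pVM, pVCpu, &cNsDelta);
    NOREF(u64GipNext);
    if (cNsDelta == 0)
        return;                     /* timers pending - go run the queues. */
    /* ... arm a host one-shot timer / halt for up to cNsDelta ns ... */
}
#endif
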
#endif /* VBOX_HIGH_RES_TIMERS_HACK */

/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * @returns HC R3 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the host context ring-0 pointer of the timer.
 *
 * @returns HC R0 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the RC pointer of the timer.
 *
 * @returns RC pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Links a timer into the active list of a timer queue.
 *
 * The caller must have taken the TM semaphore before calling this function.
 *
 * @param   pQueue          The queue.
 * @param   pTimer          The timer.
 * @param   u64Expire       The timer expiration time.
 */
DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    pQueue->u64Expire = u64Expire;
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                return;
            }
        }
    }
    else
    {
        TMTIMER_SET_HEAD(pQueue, pTimer);
        pQueue->u64Expire = u64Expire;
    }
}
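

/*
 * Illustrative sketch (not part of the original file): tmTimerActiveLink keeps
 * the active list sorted by ascending u64Expire, with the queue's u64Expire
 * mirroring the head element.  A debug walk verifying that invariant could
 * look like this hypothetical helper (the real sanity checking lives in
 * tmTimerQueuesSanityChecks).
 */
#if 0 /* example only */
static void tmExampleCheckActiveListSorted(PTMTIMERQUEUE pQueue)
{
    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    Assert(!pCur || pQueue->u64Expire == pCur->u64Expire);
    while (pCur)
    {
        PTMTIMER pNext = TMTIMER_GET_NEXT(pCur);
        Assert(!pNext || pCur->u64Expire <= pNext->u64Expire);
        pCur = pNext;
    }
}
#endif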


/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pTimer          The timer handle.
 * @param   u64Expire       The new expire time.
 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

    /*
     * Link the timer into the active list.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    tmTimerUnlock(pVM);
    return VINF_SUCCESS;
}




/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Expire       New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

#ifdef VBOX_WITH_STATISTICS
    /* Gather optimization info. */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try to take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            tmTimerUnlock(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    AssertMsg(      pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
                              ||    pVM->tm.s.fVirtualSyncTicking
                              ||    u64Expire >= pVM->tm.s.u64VirtualSync,
                              ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_INTERNAL_ERROR;
}


/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM         The VM handle.
 * @param   enmClock    The clock to query.
 * @param   pu64Now     Optional pointer where to store the return time.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const  enmClock  = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    pTimer->u64Expire        = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    tmTimerUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Arm a timer with an expire time relative to the current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);
    PVM pVM = pTimer->CTX_SUFF(pVM);
    int rc;

#ifdef VBOX_WITH_STATISTICS
    /* Gather optimization info. */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues. As an additional restraint we require the
     * timer to have a critical section associated with it, so we can be 100%
     * sure there aren't concurrent operations on the timer. (This latter isn't
     * necessary any longer as this isn't supported for any timers, critsect or
     * not.)
     *
     * Note! Lock ordering doesn't apply when we only try to
     * get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    bool          fOwnVirtSyncLock;
    fOwnVirtSyncLock = !fOwnTMLock
                    && enmClock == TMCLOCK_VIRTUAL_SYNC
                    && RT_SUCCESS(tmVirtualSyncTryLock(pVM));
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *        Figure a safe way of activating this timer while the queue is
                     *        being run.
                     *        (99.9% sure the assertion is caused by DevAPIC.cpp
                     *        re-starting the timer in response to an initial_count write.) */
                }
                /* fall thru */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_INTERNAL_ERROR;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
        {
            fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
            if (    !fOwnTMLock
                &&  enmClock == TMCLOCK_VIRTUAL_SYNC
                &&  !fOwnVirtSyncLock)
                fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM));
        }

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnVirtSyncLock)
        tmVirtualSyncUnlock(pVM);
    if (fOwnTMLock)
        tmTimerUnlock(pVM);

    if (    !fOwnTMLock
        &&  !fOwnVirtSyncLock
        &&  enmClock == TMCLOCK_VIRTUAL_SYNC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}


/**
 * Arm a timer with a (new) expire time relative to the current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMilliesToNext  Number of milliseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
{
    PVM    pVM   = pTimer->CTX_SUFF(pVM);
    PVMCPU pVCpu = &pVM->aCpus[0];  /* just take the first VCPU */

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_INTERNAL_ERROR;
    }
}


/**
 * Arm a timer with a (new) expire time relative to the current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMicrosToNext   Number of microseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
{
    PVM    pVM   = pTimer->CTX_SUFF(pVM);
    PVMCPU pVCpu = &pVM->aCpus[0];  /* just take the first VCPU */

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_INTERNAL_ERROR;
    }
}


/**
 * Arm a timer with a (new) expire time relative to the current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cNanosToNext    Number of nanoseconds to the next tick.
 */
VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
{
    PVM    pVM   = pTimer->CTX_SUFF(pVM);
    PVMCPU pVCpu = &pVM->aCpus[0];  /* just take the first VCPU */

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_INTERNAL_ERROR;
    }
}
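

/*
 * Illustrative sketch (not part of the original file): the three wrappers
 * above only differ in how they convert the caller's unit to the ticks of the
 * timer's clock (1 GHz for the virtual clocks, 1 kHz for the real clock).
 * E.g. re-arming a hypothetical 10 ms periodic tick from its callback:
 */
#if 0 /* example only */
static DECLCALLBACK(void) tmExampleTimerCallback(PVM pVM, PTMTIMER pTimer, void *pvUser)
{
    NOREF(pVM); NOREF(pvUser);
    /* On a TMCLOCK_VIRTUAL timer this becomes TMTimerSetRelative(pTimer, 10 * 1000000, NULL). */
    int rc = TMTimerSetMillies(pTimer, 10);
    AssertRC(rc);
}
#endif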


/**
 * Stop the timer.
 * Use TMR3TimerArm() to "un-stop" the timer.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break; /* retry; falling into the next case would attempt a wrong transition. */

            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_INTERNAL_ERROR;
}


/**
 * Get the current clock time.
 * Handy for calculating the new expire time.
 *
 * @returns Current clock time.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
{
    uint64_t u64;
    PVM      pVM = pTimer->CTX_SUFF(pVM);

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            u64 = TMVirtualGet(pVM);
            break;
        case TMCLOCK_VIRTUAL_SYNC:
            u64 = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64 = TMRealGet(pVM);
            break;
        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return ~(uint64_t)0;
    }
    //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
    //      u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return u64;
}


/**
 * Get the frequency of the timer clock.
 *
 * @returns Clock frequency (as Hz of course).
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            return TMCLOCK_FREQ_VIRTUAL;

        case TMCLOCK_REAL:
            return TMCLOCK_FREQ_REAL;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Get the current clock time as nanoseconds.
 *
 * @returns The timer clock as nanoseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
{
    return TMTimerToNano(pTimer, TMTimerGet(pTimer));
}


/**
 * Get the current clock time as microseconds.
 *
 * @returns The timer clock as microseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
{
    return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
}


/**
 * Get the current clock time as milliseconds.
 *
 * @returns The timer clock as milliseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
{
    return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
}


/**
 * Converts the specified timer clock time to nanoseconds.
 *
 * @returns nanoseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Ticks        The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          divide without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to microseconds.
 *
 * @returns microseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Ticks        The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          divide without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks / 1000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks * 1000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified timer clock time to milliseconds.
 *
 * @returns milliseconds.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Ticks        The clock ticks.
 * @remark  There could be rounding errors here. We just do a simple integer
 *          divide without any adjustments.
 */
VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64Ticks / 1000000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64Ticks;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}
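

/*
 * Illustrative sketch (not part of the original file): the To/From conversions
 * truncate, so round-tripping through a coarser unit loses the remainder, as
 * the @remark notes above warn.  On a virtual (1 GHz) clock, for instance:
 */
#if 0 /* example only */
static void tmExampleConversionLoss(PTMTIMER pTimer)
{
    /* Assuming pTimer uses TMCLOCK_VIRTUAL (1 tick = 1 ns): */
    uint64_t cTicks = 1999999;                              /* 1.999999 ms */
    uint64_t cMs    = TMTimerToMilli(pTimer, cTicks);       /* == 1, remainder dropped */
    uint64_t cBack  = TMTimerFromMilli(pTimer, cMs);        /* == 1000000 ticks, not 1999999 */
    NOREF(cBack);
}
#endif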


/**
 * Converts the specified nanosecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64NanoTS       The nanosecond timestamp to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64NanoTS;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64NanoTS / 1000000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified microsecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64MicroTS      The microsecond timestamp to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64MicroTS * 1000;

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64MicroTS / 1000;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}


/**
 * Converts the specified millisecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64MilliTS      The millisecond timestamp to convert.
1706 * @remark There could be rounding and overflow errors here.
1707 */
1708VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1709{
1710 switch (pTimer->enmClock)
1711 {
1712 case TMCLOCK_VIRTUAL:
1713 case TMCLOCK_VIRTUAL_SYNC:
1714 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1715 return u64MilliTS * 1000000;
1716
1717 case TMCLOCK_REAL:
1718 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1719 return u64MilliTS;
1720
1721 default:
1722 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1723 return 0;
1724 }
1725}
1726
1727
1728/**
1729 * Get the expire time of the timer.
1730 * Only valid for active timers.
1731 *
1732 * @returns Expire time of the timer.
1733 * @param pTimer Timer handle as returned by one of the create functions.
1734 */
1735VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1736{
1737 TMTIMER_ASSERT_CRITSECT(pTimer);
1738 int cRetries = 1000;
1739 do
1740 {
1741 TMTIMERSTATE enmState = pTimer->enmState;
1742 switch (enmState)
1743 {
1744 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1745 case TMTIMERSTATE_EXPIRED_DELIVER:
1746 case TMTIMERSTATE_STOPPED:
1747 case TMTIMERSTATE_PENDING_STOP:
1748 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1749 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1750 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1751 return ~(uint64_t)0;
1752
1753 case TMTIMERSTATE_ACTIVE:
1754 case TMTIMERSTATE_PENDING_RESCHEDULE:
1755 case TMTIMERSTATE_PENDING_SCHEDULE:
1756 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1757 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1758 return pTimer->u64Expire;
1759
1760 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1761 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1762#ifdef IN_RING3
1763 if (!RTThreadYield())
1764 RTThreadSleep(1);
1765#endif
1766 break;
1767
1768 /*
1769 * Invalid states.
1770 */
1771 case TMTIMERSTATE_DESTROY:
1772 case TMTIMERSTATE_FREE:
1773 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1774 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1775 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1776 return ~(uint64_t)0;
1777 default:
1778 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1779 return ~(uint64_t)0;
1780 }
1781 } while (cRetries-- > 0);
1782
1783 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1784 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1785 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1786 return ~(uint64_t)0;
1787}
1788
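/* Illustrative sketch, not part of the original file: turning the expire
 * timestamp into a "ticks until it fires" value, treating the ~0 return as
 * "not active". The tmExampleTicksUntilExpire helper is hypothetical and
 * assumes the caller honours the critical-section rule checked by
 * TMTIMER_ASSERT_CRITSECT.
 */
#if 0 /* example only */
static uint64_t tmExampleTicksUntilExpire(PTMTIMER pTimer)
{
    uint64_t const u64Expire = TMTimerGetExpire(pTimer);
    if (u64Expire == ~(uint64_t)0)
        return 0;                               /* stopped, expired or being destroyed */
    uint64_t const u64Now = TMTimerGet(pTimer); /* current time on the timer's clock */
    return u64Expire > u64Now ? u64Expire - u64Now : 0;
}
#endif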
1789
1790/**
1791 * Checks if a timer is active or not.
1792 *
1793 * @returns True if active.
1794 * @returns False if not active.
1795 * @param pTimer Timer handle as returned by one of the create functions.
1796 */
1797VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1798{
1799 TMTIMERSTATE enmState = pTimer->enmState;
1800 switch (enmState)
1801 {
1802 case TMTIMERSTATE_STOPPED:
1803 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1804 case TMTIMERSTATE_EXPIRED_DELIVER:
1805 case TMTIMERSTATE_PENDING_STOP:
1806 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1807 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1808 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1809 return false;
1810
1811 case TMTIMERSTATE_ACTIVE:
1812 case TMTIMERSTATE_PENDING_RESCHEDULE:
1813 case TMTIMERSTATE_PENDING_SCHEDULE:
1814 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1815 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1816 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1817 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1818 return true;
1819
1820 /*
1821 * Invalid states.
1822 */
1823 case TMTIMERSTATE_DESTROY:
1824 case TMTIMERSTATE_FREE:
1825 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1826 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1827 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1828 return false;
1829 default:
1830 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1831 return false;
1832 }
1833}
1834
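/* Illustrative sketch, not part of the original file: the usual
 * stop-if-armed idiom. The state can still change between the query and the
 * TMTimerStop call, which is harmless since TMTimerStop copes with timers
 * that are already stopped.
 */
#if 0 /* example only */
    if (TMTimerIsActive(pTimer))
        TMTimerStop(pTimer);
#endif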
1835
1836/**
1837 * Converts the specified timer state to a string.
1838 *
1839 * @returns Read-only state name.
1840 * @param enmState State.
1841 */
1842const char *tmTimerState(TMTIMERSTATE enmState)
1843{
1844 switch (enmState)
1845 {
1846#define CASE(num, state) \
1847 case TMTIMERSTATE_##state: \
1848 AssertCompile(TMTIMERSTATE_##state == (num)); \
1849 return #num "-" #state
1850 CASE( 1,STOPPED);
1851 CASE( 2,ACTIVE);
1852 CASE( 3,EXPIRED_GET_UNLINK);
1853 CASE( 4,EXPIRED_DELIVER);
1854 CASE( 5,PENDING_STOP);
1855 CASE( 6,PENDING_STOP_SCHEDULE);
1856 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
1857 CASE( 8,PENDING_SCHEDULE);
1858 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
1859 CASE(10,PENDING_RESCHEDULE);
1860 CASE(11,DESTROY);
1861 CASE(12,FREE);
1862 default:
1863 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
1864 return "Invalid state!";
1865#undef CASE
1866 }
1867}
1868
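/* For reference, not part of the original file: CASE(2,ACTIVE) above
 * expands to
 *     case TMTIMERSTATE_ACTIVE:
 *         AssertCompile(TMTIMERSTATE_ACTIVE == (2));
 *         return "2" "-" "ACTIVE";
 * i.e. the string "2-ACTIVE", with the AssertCompile guaranteeing at build
 * time that the printed ordinal matches the real enum value.
 */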
1869
1870/**
1871 * Schedules the given timer on the given queue.
1872 *
1873 * @param pQueue The timer queue.
1874 * @param pTimer The timer that needs scheduling.
1875 *
1876 * @remarks Called while owning the lock.
1877 */
1878DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
1879{
1880 /*
1881 * Processing.
1882 */
1883 unsigned cRetries = 2;
1884 do
1885 {
1886 TMTIMERSTATE enmState = pTimer->enmState;
1887 switch (enmState)
1888 {
1889 /*
1890 * Reschedule timer (in the active list).
1891 */
1892 case TMTIMERSTATE_PENDING_RESCHEDULE:
1893 {
1894 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
1895 break; /* retry */
1896
1897 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1898 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1899 if (pPrev)
1900 TMTIMER_SET_NEXT(pPrev, pNext);
1901 else
1902 {
1903 TMTIMER_SET_HEAD(pQueue, pNext);
1904 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1905 }
1906 if (pNext)
1907 TMTIMER_SET_PREV(pNext, pPrev);
1908 pTimer->offNext = 0;
1909 pTimer->offPrev = 0;
1910 /* fall thru */
1911 }
1912
1913 /*
1914 * Schedule timer (insert into the active list).
1915 */
1916 case TMTIMERSTATE_PENDING_SCHEDULE:
1917 {
1918 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
1919 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
1920 break; /* retry */
1921
1922 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
1923 if (pCur)
1924 {
1925 const uint64_t u64Expire = pTimer->u64Expire;
1926 for (;; pCur = TMTIMER_GET_NEXT(pCur))
1927 {
1928 if (pCur->u64Expire > u64Expire)
1929 {
1930 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
1931 TMTIMER_SET_NEXT(pTimer, pCur);
1932 TMTIMER_SET_PREV(pTimer, pPrev);
1933 if (pPrev)
1934 TMTIMER_SET_NEXT(pPrev, pTimer);
1935 else
1936 {
1937 TMTIMER_SET_HEAD(pQueue, pTimer);
1938 pQueue->u64Expire = u64Expire;
1939 }
1940 TMTIMER_SET_PREV(pCur, pTimer);
1941 return;
1942 }
1943 if (!pCur->offNext)
1944 {
1945 TMTIMER_SET_NEXT(pCur, pTimer);
1946 TMTIMER_SET_PREV(pTimer, pCur);
1947 return;
1948 }
1949 }
1950 }
1951 else
1952 {
1953 TMTIMER_SET_HEAD(pQueue, pTimer);
1954 pQueue->u64Expire = pTimer->u64Expire;
1955 }
1956 return;
1957 }
1958
1959 /*
1960 * Stop the timer in active list.
1961 */
1962 case TMTIMERSTATE_PENDING_STOP:
1963 {
1964 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
1965 break; /* retry */
1966
1967 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1968 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1969 if (pPrev)
1970 TMTIMER_SET_NEXT(pPrev, pNext);
1971 else
1972 {
1973 TMTIMER_SET_HEAD(pQueue, pNext);
1974 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1975 }
1976 if (pNext)
1977 TMTIMER_SET_PREV(pNext, pPrev);
1978 pTimer->offNext = 0;
1979 pTimer->offPrev = 0;
1980 /* fall thru */
1981 }
1982
1983 /*
1984 * Stop the timer (not on the active list).
1985 */
1986 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1987 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
1988 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
1989 break;
1990 return;
1991
1992 /*
1993 * The timer is pending destruction by TMR3TimerDestroy, our caller.
1994 * Nothing to do here.
1995 */
1996 case TMTIMERSTATE_DESTROY:
1997 break;
1998
1999 /*
2000 * Postpone these until they get into the right state.
2001 */
2002 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2003 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2004 tmTimerLink(pQueue, pTimer);
2005 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
2006 return;
2007
2008 /*
2009 * None of these can be in the schedule.
2010 */
2011 case TMTIMERSTATE_FREE:
2012 case TMTIMERSTATE_STOPPED:
2013 case TMTIMERSTATE_ACTIVE:
2014 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2015 case TMTIMERSTATE_EXPIRED_DELIVER:
2016 default:
2017 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
2018 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
2019 return;
2020 }
2021 } while (cRetries-- > 0);
2022}
2023
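/* Illustrative sketch, not part of the original file: the PENDING_SCHEDULE
 * case above performs an insertion sort into a doubly linked list ordered by
 * u64Expire. This simplified version uses plain pointers where the real list
 * stores self-relative offsets behind the TMTIMER_GET/SET_* macros; the
 * EXAMPLETIMER type and exampleSortedInsert function are hypothetical.
 */
#if 0 /* example only */
typedef struct EXAMPLETIMER
{
    struct EXAMPLETIMER *pNext, *pPrev;
    uint64_t             u64Expire;
} EXAMPLETIMER;

static void exampleSortedInsert(EXAMPLETIMER **ppHead, EXAMPLETIMER *pTimer)
{
    EXAMPLETIMER *pPrev = NULL;
    EXAMPLETIMER *pCur  = *ppHead;
    while (pCur && pCur->u64Expire <= pTimer->u64Expire) /* equal expiries keep FIFO order */
    {
        pPrev = pCur;
        pCur  = pCur->pNext;
    }
    pTimer->pNext = pCur;
    pTimer->pPrev = pPrev;
    if (pPrev)
        pPrev->pNext = pTimer;
    else
        *ppHead = pTimer;   /* new head; the real code also refreshes the queue's cached u64Expire */
    if (pCur)
        pCur->pPrev = pTimer;
}
#endif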
2024
2025/**
2026 * Schedules the specified timer queue.
2027 *
2028 * @param pVM The VM to run the timers for.
2029 * @param pQueue The queue to schedule.
2030 *
2031 * @remarks Called while owning the lock.
2032 */
2033void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
2034{
2035 TM_ASSERT_LOCK(pVM);
2036
2037 /*
2038 * Dequeue the scheduling list and iterate it.
2039 */
2040 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
2041 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expire=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
2042 if (!offNext)
2043 return;
2044 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
2045 while (pNext)
2046 {
2047 /*
2048 * Unlink the head timer and find the next one.
2049 */
2050 PTMTIMER pTimer = pNext;
2051 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
2052 pTimer->offScheduleNext = 0;
2053
2054 /*
2055 * Do the scheduling.
2056 */
2057 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
2058 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
2059 tmTimerQueueScheduleOne(pQueue, pTimer);
2060 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
2061 } /* foreach timer in current schedule batch. */
2062 Log2(("tmTimerQueueSchedule: u64Expire=%'RU64\n", pQueue->u64Expire));
2063}
2064
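/* For reference, not part of the original file: the schedule list is linked
 * with self-relative byte offsets rather than pointers, so the links remain
 * valid in ring-3, ring-0 and raw-mode even though the structures are mapped
 * at different addresses in each context. Following a link is plain pointer
 * arithmetic, as in this sketch of detaching the list and taking one step:
 */
#if 0 /* example only */
    int32_t  offHead = ASMAtomicXchgS32(&pQueue->offSchedule, 0);  /* atomically detach the list */
    PTMTIMER pHead   = offHead ? (PTMTIMER)((intptr_t)pQueue + offHead) : NULL;
    PTMTIMER pSecond = pHead && pHead->offScheduleNext
                     ? (PTMTIMER)((intptr_t)pHead + pHead->offScheduleNext)
                     : NULL;                                       /* end of list */
#endif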
2065
2066#ifdef VBOX_STRICT
2067/**
2068 * Checks that the timer queues are sane.
2069 *
2070 * @param pVM The VM handle.
2071 * @param pszWhere Caller location given in the assertion messages.
2072 * @remarks Called while owning the lock.
2073 */
2074void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
2075{
2076 TM_ASSERT_LOCK(pVM);
2077
2078 /*
2079 * Check the linking of the active lists.
2080 */
2081 for (int i = 0; i < TMCLOCK_MAX; i++)
2082 {
2083 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2084 Assert((int)pQueue->enmClock == i);
2085 PTMTIMER pPrev = NULL;
2086 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
2087 {
2088 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
2089 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
2090 TMTIMERSTATE enmState = pCur->enmState;
2091 switch (enmState)
2092 {
2093 case TMTIMERSTATE_ACTIVE:
2094 AssertMsg( !pCur->offScheduleNext
2095 || pCur->enmState != TMTIMERSTATE_ACTIVE,
2096 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
2097 break;
2098 case TMTIMERSTATE_PENDING_STOP:
2099 case TMTIMERSTATE_PENDING_RESCHEDULE:
2100 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2101 break;
2102 default:
2103 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
2104 break;
2105 }
2106 }
2107 }
2108
2109
2110# ifdef IN_RING3
2111 /*
2112 * Do the big list and check that active timers all are in the active lists.
2113 */
2114 PTMTIMERR3 pPrev = NULL;
2115 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
2116 {
2117 Assert(pCur->pBigPrev == pPrev);
2118 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
2119
2120 TMTIMERSTATE enmState = pCur->enmState;
2121 switch (enmState)
2122 {
2123 case TMTIMERSTATE_ACTIVE:
2124 case TMTIMERSTATE_PENDING_STOP:
2125 case TMTIMERSTATE_PENDING_RESCHEDULE:
2126 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2127 {
2128 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
2129 Assert(pCur->offPrev || pCur == pCurAct);
2130 while (pCurAct && pCurAct != pCur)
2131 pCurAct = TMTIMER_GET_NEXT(pCurAct);
2132 Assert(pCurAct == pCur);
2133 break;
2134 }
2135
2136 case TMTIMERSTATE_PENDING_SCHEDULE:
2137 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2138 case TMTIMERSTATE_STOPPED:
2139 case TMTIMERSTATE_EXPIRED_DELIVER:
2140 {
2141 Assert(!pCur->offNext);
2142 Assert(!pCur->offPrev);
2143 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
2144 pCurAct;
2145 pCurAct = TMTIMER_GET_NEXT(pCurAct))
2146 {
2147 Assert(pCurAct != pCur);
2148 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
2149 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
2150 }
2151 break;
2152 }
2153
2154 /* ignore */
2155 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2156 break;
2157
2158 /* shouldn't get here! */
2159 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2160 case TMTIMERSTATE_DESTROY:
2161 default:
2162 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
2163 break;
2164 }
2165 }
2166# endif /* IN_RING3 */
2167}
2168#endif /* VBOX_STRICT */
2169
2170
2171/**
2172 * Gets the current warp drive percent.
2173 *
2174 * @returns The warp drive percent.
2175 * @param pVM The VM handle.
2176 */
2177VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
2178{
2179 return pVM->tm.s.u32VirtualWarpDrivePercentage;
2180}
2181