VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@37419

Last change on this file since 37419 was 37414, checked in by vboxsync, 14 years ago

TM: Added TMTimerLock, TMTimerUnlock and TMTimerIsLockOwner for locking the virtual sync clock to avoid races.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 84.4 KB
1/* $Id: TMAll.cpp 37414 2011-06-10 15:53:59Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/mm.h>
25#ifdef IN_RING3
26# include <VBox/vmm/rem.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vmm/vm.h>
30
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <VBox/log.h>
34#include <VBox/sup.h>
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** @def TMTIMER_ASSERT_CRITSECT
48 * Checks that the caller owns the critical section if one is associated with
49 * the timer. */
50#ifdef VBOX_STRICT
51# define TMTIMER_ASSERT_CRITSECT(pTimer) \
52 do { \
53 if ((pTimer)->pCritSect) \
54 { \
55 VMSTATE enmState; \
56 PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
57 AssertMsg( pCritSect \
58 && ( PDMCritSectIsOwner(pCritSect) \
59 || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
60 || enmState == VMSTATE_RESETTING \
61 || enmState == VMSTATE_RESETTING_LS ),\
62 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
63 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
64 } \
65 } while (0)
66#else
67# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
68#endif
69
70
71#ifndef tmTimerLock
72
73/**
74 * Try to take the timer lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
75 *
76 * @retval VINF_SUCCESS on success (always in ring-3).
77 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
78 *
79 * @param pVM The VM handle.
80 *
81 * @thread EMTs for the time being.
82 */
83int tmTimerLock(PVM pVM)
84{
85 VM_ASSERT_EMT(pVM);
86 int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
87 return rc;
88}
89
90
91/**
92 * Try to take the timer lock without waiting.
93 *
94 * @retval VINF_SUCCESS on success.
95 * @retval VERR_SEM_BUSY if busy.
96 *
97 * @param pVM The VM handle.
98 */
99int tmTimerTryLock(PVM pVM)
100{
101 int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
102 return rc;
103}
104
105
106/**
107 * Release the EMT/TM lock.
108 *
109 * @param pVM The VM handle.
110 */
111void tmTimerUnlock(PVM pVM)
112{
113 PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
114}
115
116
117/**
118 * Try to take the VirtualSync lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
119 *
120 * @retval VINF_SUCCESS on success (always in ring-3).
121 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
122 *
123 * @param pVM The VM handle.
124 */
125int tmVirtualSyncLock(PVM pVM)
126{
127 VM_ASSERT_EMT(pVM);
128 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
129 return rc;
130}
131
132
133/**
134 * Try to take the VirtualSync lock without waiting.
135 *
136 * @retval VINF_SUCCESS on success.
137 * @retval VERR_SEM_BUSY if busy.
138 *
139 * @param pVM The VM handle.
140 */
141int tmVirtualSyncTryLock(PVM pVM)
142{
143 VM_ASSERT_EMT(pVM);
144 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
145 return rc;
146}
147
148
149/**
150 * Release the VirtualSync lock.
151 *
152 * @param pVM The VM handle.
153 */
154void tmVirtualSyncUnlock(PVM pVM)
155{
156 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
157}
158
159#endif /* ! macros */
160
161/**
162 * Notification that execution is about to start.
163 *
164 * This call must always be paired with a TMNotifyEndOfExecution call.
165 *
166 * The function may, depending on the configuration, resume the TSC and future
167 * clocks that only tick when we're executing guest code.
168 *
169 * @param pVCpu The VMCPU to operate on.
170 */
171VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
172{
173 PVM pVM = pVCpu->CTX_SUFF(pVM);
174
175#ifndef VBOX_WITHOUT_NS_ACCOUNTING
176 pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
177#endif
178 if (pVM->tm.s.fTSCTiedToExecution)
179 tmCpuTickResume(pVM, pVCpu);
180}
181
182
183/**
184 * Notification that execution has ended.
185 *
186 * This call must always be paired with a TMNotifyStartOfExecution call.
187 *
188 * The function may, depending on the configuration, suspend the TSC and future
189 * clocks that only tick when we're executing guest code.
190 *
191 * @param pVCpu The VMCPU to operate on.
192 */
193VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
194{
195 PVM pVM = pVCpu->CTX_SUFF(pVM);
196
197 if (pVM->tm.s.fTSCTiedToExecution)
198 tmCpuTickPause(pVM, pVCpu);
199
200#ifndef VBOX_WITHOUT_NS_ACCOUNTING
201 uint64_t const u64NsTs = RTTimeNanoTS();
202 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
203 uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
204 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
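 /* "Other" (overhead) time is whatever remains: cNsTotal = cNsExecuting + cNsHalted + cNsOther. */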
205 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
206
207# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
208 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
209 if (cNsExecutingDelta < 5000)
210 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
211 else if (cNsExecutingDelta < 50000)
212 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
213 else
214 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
215 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
216 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
217 if (cNsOtherNewDelta > 0)
218 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
219# endif
220
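 /* Publish the new values seqlock-style: the generation count is odd while the
    update is in progress, so lockless readers can detect a torn read and retry. */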
221 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
222 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
223 pVCpu->tm.s.cNsTotal = cNsTotalNew;
224 pVCpu->tm.s.cNsOther = cNsOtherNew;
225 pVCpu->tm.s.cPeriodsExecuting++;
226 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
227#endif
228}
229
230
231/**
232 * Notification that the CPU is entering the halt state.
233 *
234 * This call must always be paired with a TMNotifyEndOfHalt call.
235 *
236 * The function may, depending on the configuration, resume the TSC and future
237 * clocks that only tick when we're halted.
238 *
239 * @param pVCpu The VMCPU to operate on.
240 */
241VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
242{
243 PVM pVM = pVCpu->CTX_SUFF(pVM);
244
245#ifndef VBOX_WITHOUT_NS_ACCOUNTING
246 pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
247#endif
248
249 if ( pVM->tm.s.fTSCTiedToExecution
250 && !pVM->tm.s.fTSCNotTiedToHalt)
251 tmCpuTickResume(pVM, pVCpu);
252}
253
254
255/**
256 * Notification that the CPU is leaving the halt state.
257 *
258 * This call must always be paired with a TMNotifyStartOfHalt call.
259 *
260 * The function may, depending on the configuration, suspend the TSC and future
261 * clocks that only tick when we're halted.
262 *
263 * @param pVCpu The VMCPU to operate on.
264 */
265VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
266{
267 PVM pVM = pVCpu->CTX_SUFF(pVM);
268
269 if ( pVM->tm.s.fTSCTiedToExecution
270 && !pVM->tm.s.fTSCNotTiedToHalt)
271 tmCpuTickPause(pVM, pVCpu);
272
273#ifndef VBOX_WITHOUT_NS_ACCOUNTING
274 uint64_t const u64NsTs = RTTimeNanoTS();
275 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
276 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
277 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
278 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
279
280# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
281 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
282 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
283 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
284 if (cNsOtherNewDelta > 0)
285 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
286# endif
287
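 /* Publish seqlock-style, same scheme as in TMNotifyEndOfExecution. */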
288 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
289 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
290 pVCpu->tm.s.cNsTotal = cNsTotalNew;
291 pVCpu->tm.s.cNsOther = cNsOtherNew;
292 pVCpu->tm.s.cPeriodsHalted++;
293 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
294#endif
295}
296
297
298/**
299 * Raise the timer force action flag and notify the dedicated timer EMT.
300 *
301 * @param pVM The VM handle.
302 */
303DECLINLINE(void) tmScheduleNotify(PVM pVM)
304{
305 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
306 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
307 {
308 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
309 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
310#ifdef IN_RING3
311 REMR3NotifyTimerPending(pVM, pVCpuDst);
312 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
313#endif
314 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
315 }
316}
317
318
319/**
320 * Schedule the queue which was changed.
321 */
322DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
323{
324 PVM pVM = pTimer->CTX_SUFF(pVM);
325 if ( VM_IS_EMT(pVM)
326 && RT_SUCCESS(tmTimerTryLock(pVM)))
327 {
328 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
329 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
330 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
331#ifdef VBOX_STRICT
332 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
333#endif
334 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
335 tmTimerUnlock(pVM);
336 }
337 else
338 {
339 TMTIMERSTATE enmState = pTimer->enmState;
340 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
341 tmScheduleNotify(pVM);
342 }
343}
344
345
346/**
347 * Try to change the state to enmStateNew from enmStateOld. Unlike
348 * tmTimerTryWithLink, this does not touch the scheduling queue.
349 *
350 * @returns Success indicator.
351 * @param pTimer Timer in question.
352 * @param enmStateNew The new timer state.
353 * @param enmStateOld The old timer state.
354 */
355DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
356{
357 /*
358 * Attempt state change.
359 */
360 bool fRc;
361 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
362 return fRc;
363}
364
365
366/**
367 * Links the timer onto the scheduling queue.
368 *
369 * @param pQueue The timer queue the timer belongs to.
370 * @param pTimer The timer.
371 *
372 * @todo FIXME: Look into the potential race with the thread that runs
373 * the scheduling queues.
374 */
375DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
376{
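 /* Lock-free LIFO push onto the schedule list; self-relative offsets rather
    than pointers keep the links valid in all contexts (R3/R0/RC). */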
377 Assert(!pTimer->offScheduleNext);
378 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
379 int32_t offHead;
380 do
381 {
382 offHead = pQueue->offSchedule;
383 if (offHead)
384 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
385 else
386 pTimer->offScheduleNext = 0;
387 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
388}
389
390
391/**
392 * Try to change the state to enmStateNew from enmStateOld
393 * and, on success, link the timer onto the scheduling queue.
394 *
395 * @returns Success indicator.
396 * @param pTimer Timer in question.
397 * @param enmStateNew The new timer state.
398 * @param enmStateOld The old timer state.
399 */
400DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
401{
402 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
403 {
404 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
405 return true;
406 }
407 return false;
408}
409
410
411#ifdef VBOX_HIGH_RES_TIMERS_HACK
412
413/**
414 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
415 * EMT is polling.
416 *
417 * @returns See tmTimerPollInternal.
418 * @param pVM Pointer to the shared VM structure.
419 * @param u64Now Current virtual clock timestamp.
420 * @param u64Delta The delta to the next event in ticks of the
421 * virtual clock.
422 * @param pu64Delta Where to return the delta.
424 */
425DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
426{
427 Assert(!(u64Delta & RT_BIT_64(63)));
428
429 if (!pVM->tm.s.fVirtualWarpDrive)
430 {
431 *pu64Delta = u64Delta;
432 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
433 }
434
435 /*
436 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
437 */
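 /* Reverse the warp-drive scaling: virtual deltas were scaled by u32Pct/100 in
    tmVirtualGetRaw, so map back to GIP time by multiplying by 100 / u32Pct. */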
438 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
439 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
440
441 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
442 u64GipTime -= u64Start; /* the start is GIP time. */
443 if (u64GipTime >= u64Delta)
444 {
445 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
446 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
447 }
448 else
449 {
450 u64Delta -= u64GipTime;
451 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
452 u64Delta += u64GipTime;
453 }
454 *pu64Delta = u64Delta;
455 u64GipTime += u64Start;
456 return u64GipTime;
457}
458
459
460/**
461 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
462 * than the one dedicated to timer work.
463 *
464 * @returns See tmTimerPollInternal.
465 * @param pVM Pointer to the shared VM structure.
466 * @param u64Now Current virtual clock timestamp.
467 * @param pu64Delta Where to return the delta.
468 */
469DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
470{
471 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
472 *pu64Delta = s_u64OtherRet;
473 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
474}
475
476
477/**
478 * Worker for tmTimerPollInternal.
479 *
480 * @returns See tmTimerPollInternal.
481 * @param pVM Pointer to the shared VM structure.
482 * @param pVCpu Pointer to the shared VMCPU structure of the
483 * caller.
484 * @param pVCpuDst Pointer to the shared VMCPU structure of the
485 * dedicated timer EMT.
486 * @param u64Now Current virtual clock timestamp.
487 * @param pu64Delta Where to return the delta.
488 * @param pCounter The statistics counter to update.
489 */
490DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
491 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
492{
493 STAM_COUNTER_INC(pCounter);
494 if (pVCpuDst != pVCpu)
495 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
496 *pu64Delta = 0;
497 return 0;
498}
499
500/**
501 * Common worker for TMTimerPollGIP and TMTimerPoll.
502 *
503 * This function is called before FFs are checked in the inner execution EM loops.
504 *
505 * @returns The GIP timestamp of the next event.
506 * 0 if the next event has already expired.
507 *
508 * @param pVM Pointer to the shared VM structure.
509 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
510 * @param pu64Delta Where to store the delta.
511 *
512 * @thread The emulation thread.
513 *
514 * @remarks GIP uses ns ticks.
515 */
516DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
517{
518 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
519 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
520 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
521
522 /*
523 * Return straight away if the timer FF is already set ...
524 */
525 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
526 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
527
528 /*
529 * ... or if timers are being run.
530 */
531 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
532 {
533 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
534 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
535 }
536
537 /*
538 * Check for TMCLOCK_VIRTUAL expiration.
539 */
540 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
541 const int64_t i64Delta1 = u64Expire1 - u64Now;
542 if (i64Delta1 <= 0)
543 {
544 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
545 {
546 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
547 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
548#ifdef IN_RING3
549 REMR3NotifyTimerPending(pVM, pVCpuDst);
550#endif
551 }
552 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
553 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
554 }
555
556 /*
557 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
558 * This isn't quite as straightforward when we're in a catch-up: not only
559 * do we have to adjust the 'now', we have to adjust the delta as well.
560 */
561
562 /*
563 * Optimistic lockless approach.
564 */
565 uint64_t u64VirtualSyncNow;
566 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
567 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
568 {
569 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
570 {
571 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
572 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
573 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
574 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
575 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
576 {
577 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
578 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
579 if (i64Delta2 > 0)
580 {
581 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
582 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
583
584 if (pVCpu == pVCpuDst)
585 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
586 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
587 }
588
589 if ( !pVM->tm.s.fRunningQueues
590 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
591 {
592 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
593 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
594#ifdef IN_RING3
595 REMR3NotifyTimerPending(pVM, pVCpuDst);
596#endif
597 }
598
599 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
600 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
601 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
602 }
603 }
604 }
605 else
606 {
607 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
608 LogFlow(("TMTimerPoll: stopped\n"));
609 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
610 }
611
612 /*
613 * Complicated lockless approach.
614 */
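 /* Take repeated snapshots of {off, u64Expire2, catch-up parameters} until one
    pass reads consistently, then apply the catch-up adjustment:
    off -= elapsed * u32Pct / 100, never dropping below offGivenUp. */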
615 uint64_t off;
616 uint32_t u32Pct = 0;
617 bool fCatchUp;
618 int cOuterTries = 42;
619 for (;; cOuterTries--)
620 {
621 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
622 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
623 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
624 if (fCatchUp)
625 {
626 /* No changes allowed, try get a consistent set of parameters. */
627 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
628 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
629 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
630 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
631 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
632 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
633 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
634 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
635 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
636 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
637 || cOuterTries <= 0)
638 {
639 uint64_t u64Delta = u64Now - u64Prev;
640 if (RT_LIKELY(!(u64Delta >> 32)))
641 {
642 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
643 if (off > u64Sub + offGivenUp)
644 off -= u64Sub;
645 else /* we've completely caught up. */
646 off = offGivenUp;
647 }
648 else
649 /* More than 4 seconds since last time (or negative), ignore it. */
650 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
651
652 /* Check that we're still running and in catch up. */
653 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
654 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
655 break;
656 }
657 }
658 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
659 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
660 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
661 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
662 break; /* Got a consistent offset */
663
664 /* Repeat the initial checks before iterating. */
665 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
666 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
667 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
668 {
669 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
670 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
671 }
672 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
673 {
674 LogFlow(("TMTimerPoll: stopped\n"));
675 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
676 }
677 if (cOuterTries <= 0)
678 break; /* that's enough */
679 }
680 if (cOuterTries <= 0)
681 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
682 u64VirtualSyncNow = u64Now - off;
683
684 /* Calc delta and see if we've got a virtual sync hit. */
685 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
686 if (i64Delta2 <= 0)
687 {
688 if ( !pVM->tm.s.fRunningQueues
689 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
690 {
691 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
692 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
693#ifdef IN_RING3
694 REMR3NotifyTimerPending(pVM, pVCpuDst);
695#endif
696 }
697 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
698 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
699 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
700 }
701
702 /*
703 * Return the time left to the next event.
704 */
705 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
706 if (pVCpu == pVCpuDst)
707 {
708 if (fCatchUp)
709 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
710 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
711 }
712 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
713}
714
715
716/**
717 * Set FF if we've passed the next virtual event.
718 *
719 * This function is called before FFs are checked in the inner execution EM loops.
720 *
721 * @returns true if timers are pending, false if not.
722 *
723 * @param pVM Pointer to the shared VM structure.
724 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
725 * @thread The emulation thread.
726 */
727VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
728{
729 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
730 uint64_t off = 0;
731 tmTimerPollInternal(pVM, pVCpu, &off);
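 /* A zero delta means the next event has already expired, i.e. timers are pending. */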
732 return off == 0;
733}
734
735
736/**
737 * Set FF if we've passed the next virtual event.
738 *
739 * This function is called before FFs are checked in the inner execution EM loops.
740 *
741 * @param pVM Pointer to the shared VM structure.
742 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
743 * @thread The emulation thread.
744 */
745VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
746{
747 uint64_t off;
748 tmTimerPollInternal(pVM, pVCpu, &off);
749}
750
751
752/**
753 * Set FF if we've passed the next virtual event.
754 *
755 * This function is called before FFs are checked in the inner execution EM loops.
756 *
757 * @returns The GIP timestamp of the next event.
758 * 0 if the next event has already expired.
759 * @param pVM Pointer to the shared VM structure.
760 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
761 * @param pu64Delta Where to store the delta.
762 * @thread The emulation thread.
763 */
764VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
765{
766 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
767}
768
769#endif /* VBOX_HIGH_RES_TIMERS_HACK */
770
771/**
772 * Gets the host context ring-3 pointer of the timer.
773 *
774 * @returns HC R3 pointer.
775 * @param pTimer Timer handle as returned by one of the create functions.
776 */
777VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
778{
779 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
780}
781
782
783/**
784 * Gets the host context ring-0 pointer of the timer.
785 *
786 * @returns HC R0 pointer.
787 * @param pTimer Timer handle as returned by one of the create functions.
788 */
789VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
790{
791 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
792}
793
794
795/**
796 * Gets the RC pointer of the timer.
797 *
798 * @returns RC pointer.
799 * @param pTimer Timer handle as returned by one of the create functions.
800 */
801VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
802{
803 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
804}
805
806
807/**
808 * Locks the timer clock.
809 *
810 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
811 * if the clock does not have a lock.
812 * @param pTimer The timer which clock lock we wish to take.
813 * @param rcBusy What to return in ring-0 and raw-mode context
814 * if the lock is busy.
815 *
816 * @remarks Currently only supported on timers using the virtual sync clock.
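 *
 * @par Example
 * A hypothetical device callback sketch (names are illustrative only):
 * @code
 *     int rc = TMTimerLock(pThis->pTimer, VERR_IGNORED);
 *     if (RT_SUCCESS(rc))
 *     {
 *         TMTimerSetMillies(pThis->pTimer, 10);
 *         TMTimerUnlock(pThis->pTimer);
 *     }
 * @endcode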
817 */
818VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
819{
820 AssertPtr(pTimer);
821 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
822 return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
823}
824
825
826/**
827 * Unlocks a timer clock locked by TMTimerLock.
828 *
829 * @param pTimer The timer which clock to unlock.
830 */
831VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
832{
833 AssertPtr(pTimer);
834 AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
835 PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
836}
837
838
839/**
840 * Checks if the current thread owns the timer clock lock.
841 *
842 * @returns @c true if it's the owner, @c false if not.
843 * @param pTimer The timer handle.
844 */
845VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
846{
847 AssertPtr(pTimer);
848 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
849 return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
850}
851
852
853/**
854 * Links a timer into the active list of a timer queue.
855 *
856 * The caller must have taken the TM semaphore before calling this function.
857 *
858 * @param pQueue The queue.
859 * @param pTimer The timer.
860 * @param u64Expire The timer expiration time.
861 */
862DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
863{
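 /* The active list is kept sorted by expire time; insert before the first timer
    expiring later, or append at the tail. The head entry also supplies the
    queue's cached u64Expire. */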
864 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
865 if (pCur)
866 {
867 for (;; pCur = TMTIMER_GET_NEXT(pCur))
868 {
869 if (pCur->u64Expire > u64Expire)
870 {
871 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
872 TMTIMER_SET_NEXT(pTimer, pCur);
873 TMTIMER_SET_PREV(pTimer, pPrev);
874 if (pPrev)
875 TMTIMER_SET_NEXT(pPrev, pTimer);
876 else
877 {
878 TMTIMER_SET_HEAD(pQueue, pTimer);
879 pQueue->u64Expire = u64Expire;
880 }
881 TMTIMER_SET_PREV(pCur, pTimer);
882 return;
883 }
884 if (!pCur->offNext)
885 {
886 TMTIMER_SET_NEXT(pCur, pTimer);
887 TMTIMER_SET_PREV(pTimer, pCur);
888 return;
889 }
890 }
891 }
892 else
893 {
894 TMTIMER_SET_HEAD(pQueue, pTimer);
895 pQueue->u64Expire = u64Expire;
896 }
897}
898
899
900/**
901 * Optimized TMTimerSet code path for starting an inactive timer.
902 *
903 * @returns VBox status code.
904 *
905 * @param pVM The VM handle.
906 * @param pTimer The timer handle.
907 * @param u64Expire The new expire time.
908 */
909static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
910{
911 Assert(!pTimer->offPrev);
912 Assert(!pTimer->offNext);
913 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
914
915 /*
916 * Calculate and set the expiration time.
917 */
918 pTimer->u64Expire = u64Expire;
919 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
920
921 /*
922 * Link the timer into the active list.
923 */
924 TMCLOCK const enmClock = pTimer->enmClock;
925 tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
926
927 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
928 tmTimerUnlock(pVM);
929 return VINF_SUCCESS;
930}
931
932
933
934
935
936/**
937 * Arm a timer with a (new) expire time.
938 *
939 * @returns VBox status.
940 * @param pTimer Timer handle as returned by one of the create functions.
941 * @param u64Expire New expire time.
942 */
943VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
944{
945 PVM pVM = pTimer->CTX_SUFF(pVM);
946 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
947 TMTIMER_ASSERT_CRITSECT(pTimer);
948
949#ifdef VBOX_WITH_STATISTICS
950 /* Gather optimization info. */
951 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
952 TMTIMERSTATE enmOrgState = pTimer->enmState;
953 switch (enmOrgState)
954 {
955 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
956 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
957 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
958 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
959 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
960 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
961 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
962 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
963 }
964#endif
965
966 /*
967 * The most common case is setting the timer again during the callback.
968 * The second most common case is starting a timer at some other time.
969 */
970#if 1
971 TMTIMERSTATE enmState1 = pTimer->enmState;
972 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
973 || ( enmState1 == TMTIMERSTATE_STOPPED
974 && pTimer->pCritSect))
975 {
976 /* Try take the TM lock and check the state again. */
977 if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
978 {
979 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
980 {
981 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
982 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
983 return VINF_SUCCESS;
984 }
985 tmTimerUnlock(pVM);
986 }
987 }
988#endif
989
990 /*
991 * Unoptimized code path.
992 */
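 /* Lockless state machine: each case attempts an atomic state transition;
    transient states make us yield and retry until the state stabilizes. */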
993 int cRetries = 1000;
994 do
995 {
996 /*
997 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
998 */
999 TMTIMERSTATE enmState = pTimer->enmState;
1000 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
1001 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
1002 switch (enmState)
1003 {
1004 case TMTIMERSTATE_EXPIRED_DELIVER:
1005 case TMTIMERSTATE_STOPPED:
1006 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1007 {
1008 Assert(!pTimer->offPrev);
1009 Assert(!pTimer->offNext);
1010 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
1011 || pVM->tm.s.fVirtualSyncTicking
1012 || u64Expire >= pVM->tm.s.u64VirtualSync,
1013 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
1014 pTimer->u64Expire = u64Expire;
1015 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1016 tmSchedule(pTimer);
1017 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1018 return VINF_SUCCESS;
1019 }
1020 break;
1021
1022 case TMTIMERSTATE_PENDING_SCHEDULE:
1023 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1024 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1025 {
1026 pTimer->u64Expire = u64Expire;
1027 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1028 tmSchedule(pTimer);
1029 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1030 return VINF_SUCCESS;
1031 }
1032 break;
1033
1034
1035 case TMTIMERSTATE_ACTIVE:
1036 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1037 {
1038 pTimer->u64Expire = u64Expire;
1039 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1040 tmSchedule(pTimer);
1041 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1042 return VINF_SUCCESS;
1043 }
1044 break;
1045
1046 case TMTIMERSTATE_PENDING_RESCHEDULE:
1047 case TMTIMERSTATE_PENDING_STOP:
1048 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1049 {
1050 pTimer->u64Expire = u64Expire;
1051 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1052 tmSchedule(pTimer);
1053 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1054 return VINF_SUCCESS;
1055 }
1056 break;
1057
1058
1059 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1060 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1061 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1062#ifdef IN_RING3
1063 if (!RTThreadYield())
1064 RTThreadSleep(1);
1065#else
1066/** @todo call host context and yield after a couple of iterations */
1067#endif
1068 break;
1069
1070 /*
1071 * Invalid states.
1072 */
1073 case TMTIMERSTATE_DESTROY:
1074 case TMTIMERSTATE_FREE:
1075 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1076 return VERR_TM_INVALID_STATE;
1077 default:
1078 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1079 return VERR_TM_UNKNOWN_STATE;
1080 }
1081 } while (cRetries-- > 0);
1082
1083 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1084 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1085 return VERR_INTERNAL_ERROR;
1086}
1087
1088
1089/**
1090 * Return the current time for the specified clock, setting pu64Now if not NULL.
1091 *
1092 * @returns Current time.
1093 * @param pVM The VM handle.
1094 * @param enmClock The clock to query.
1096 * @param pu64Now Optional pointer where to store the return time.
1096 */
1097DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1098{
1099 uint64_t u64Now;
1100 switch (enmClock)
1101 {
1102 case TMCLOCK_VIRTUAL_SYNC:
1103 u64Now = TMVirtualSyncGet(pVM);
1104 break;
1105 case TMCLOCK_VIRTUAL:
1106 u64Now = TMVirtualGet(pVM);
1107 break;
1108 case TMCLOCK_REAL:
1109 u64Now = TMRealGet(pVM);
1110 break;
1111 default:
1112 AssertFatalMsgFailed(("%d\n", enmClock));
1113 }
1114
1115 if (pu64Now)
1116 *pu64Now = u64Now;
1117 return u64Now;
1118}
1119
1120
1121/**
1122 * Optimized TMTimerSetRelative code path.
1123 *
1124 * @returns VBox status code.
1125 *
1126 * @param pVM The VM handle.
1127 * @param pTimer The timer handle.
1128 * @param cTicksToNext Clock ticks until the next time expiration.
1129 * @param pu64Now Where to return the current time stamp used.
1130 * Optional.
1131 */
1132static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1133{
1134 Assert(!pTimer->offPrev);
1135 Assert(!pTimer->offNext);
1136 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1137
1138 /*
1139 * Calculate and set the expiration time.
1140 */
1141 TMCLOCK const enmClock = pTimer->enmClock;
1142 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1143 pTimer->u64Expire = u64Expire;
1144 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));
1145
1146 /*
1147 * Link the timer into the active list.
1148 */
1149 tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1150
1151 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1152 tmTimerUnlock(pVM);
1153 return VINF_SUCCESS;
1154}
1155
1156
1157/**
1158 * Arm a timer with an expire time relative to the current time.
1159 *
1160 * @returns VBox status.
1161 * @param pTimer Timer handle as returned by one of the create functions.
1162 * @param cTicksToNext Clock ticks until the next time expiration.
1163 * @param pu64Now Where to return the current time stamp used.
1164 * Optional.
1165 */
1166VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1167{
1168 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1169 TMTIMER_ASSERT_CRITSECT(pTimer);
1170 PVM pVM = pTimer->CTX_SUFF(pVM);
1171 int rc;
1172
1173#ifdef VBOX_WITH_STATISTICS
1174 /* Gather optimization info. */
1175 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1176 TMTIMERSTATE enmOrgState = pTimer->enmState;
1177 switch (enmOrgState)
1178 {
1179 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1180 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1181 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1182 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1183 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1184 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1185 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1186 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1187 }
1188#endif
1189
1190 /*
1191 * Try to take the TM lock and optimize the common cases.
1192 *
1193 * With the TM lock we can safely make optimizations like immediate
1194 * scheduling and we can also be 100% sure that we're not racing the
1195 * running of the timer queues. As an additional restraint we require the
1196 * timer to have a critical section associated with it to be 100% sure there aren't
1197 * concurrent operations on the timer. (This latter isn't necessary any
1198 * longer as this isn't supported for any timers, critsect or not.)
1199 *
1200 * Note! Lock ordering doesn't apply when we only try to
1201 * get the innermost locks.
1202 */
1203 bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
1204#if 1
1205 if ( fOwnTMLock
1206 && pTimer->pCritSect)
1207 {
1208 TMTIMERSTATE enmState = pTimer->enmState;
1209 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1210 || enmState == TMTIMERSTATE_STOPPED)
1211 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1212 {
1213 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
1214 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1215 return VINF_SUCCESS;
1216 }
1217
1218 /* Optimize other states when it becomes necessary. */
1219 }
1220#endif
1221
1222 /*
1223 * Unoptimized path.
1224 */
1225 TMCLOCK const enmClock = pTimer->enmClock;
1226 bool fOwnVirtSyncLock;
1227 fOwnVirtSyncLock = !fOwnTMLock
1228 && enmClock == TMCLOCK_VIRTUAL_SYNC
1229 && RT_SUCCESS(tmVirtualSyncTryLock(pVM));
1230 for (int cRetries = 1000; ; cRetries--)
1231 {
1232 /*
1233 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1234 */
1235 TMTIMERSTATE enmState = pTimer->enmState;
1236 switch (enmState)
1237 {
1238 case TMTIMERSTATE_STOPPED:
1239 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1240 {
1241 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1242 * Figure out a safe way of activating this timer while the queue is
1243 * being run.
1244 * (99.9% sure the assertion is caused by DevAPIC.cpp re-starting the
1245 * timer in response to an initial_count write.) */
1246 }
1247 /* fall thru */
1248 case TMTIMERSTATE_EXPIRED_DELIVER:
1249 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1250 {
1251 Assert(!pTimer->offPrev);
1252 Assert(!pTimer->offNext);
1253 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1254 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1255 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1256 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1257 tmSchedule(pTimer);
1258 rc = VINF_SUCCESS;
1259 break;
1260 }
1261 rc = VERR_TRY_AGAIN;
1262 break;
1263
1264 case TMTIMERSTATE_PENDING_SCHEDULE:
1265 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1266 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1267 {
1268 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1269 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1270 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1271 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1272 tmSchedule(pTimer);
1273 rc = VINF_SUCCESS;
1274 break;
1275 }
1276 rc = VERR_TRY_AGAIN;
1277 break;
1278
1279
1280 case TMTIMERSTATE_ACTIVE:
1281 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1282 {
1283 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1284 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1285 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1286 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1287 tmSchedule(pTimer);
1288 rc = VINF_SUCCESS;
1289 break;
1290 }
1291 rc = VERR_TRY_AGAIN;
1292 break;
1293
1294 case TMTIMERSTATE_PENDING_RESCHEDULE:
1295 case TMTIMERSTATE_PENDING_STOP:
1296 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1297 {
1298 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1299 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1300 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1301 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1302 tmSchedule(pTimer);
1303 rc = VINF_SUCCESS;
1304 break;
1305 }
1306 rc = VERR_TRY_AGAIN;
1307 break;
1308
1309
1310 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1311 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1312 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1313#ifdef IN_RING3
1314 if (!RTThreadYield())
1315 RTThreadSleep(1);
1316#else
1317/** @todo call host context and yield after a couple of iterations */
1318#endif
1319 rc = VERR_TRY_AGAIN;
1320 break;
1321
1322 /*
1323 * Invalid states.
1324 */
1325 case TMTIMERSTATE_DESTROY:
1326 case TMTIMERSTATE_FREE:
1327 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1328 rc = VERR_TM_INVALID_STATE;
1329 break;
1330
1331 default:
1332 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1333 rc = VERR_TM_UNKNOWN_STATE;
1334 break;
1335 }
1336
1337 /* switch + loop is tedious to break out of. */
1338 if (rc == VINF_SUCCESS)
1339 break;
1340
1341 if (rc != VERR_TRY_AGAIN)
1342 {
1343 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1344 break;
1345 }
1346 if (cRetries <= 0)
1347 {
1348 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1349 rc = VERR_INTERNAL_ERROR;
1350 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1351 break;
1352 }
1353
1354 /*
1355 * Retry to gain locks.
1356 */
1357 if (!fOwnTMLock)
1358 {
1359 fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
1360 if ( !fOwnTMLock
1361 && enmClock == TMCLOCK_VIRTUAL_SYNC
1362 && !fOwnVirtSyncLock)
1363 fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM));
1364 }
1365
1366 } /* for (;;) */
1367
1368 /*
1369 * Clean up and return.
1370 */
1371 if (fOwnVirtSyncLock)
1372 tmVirtualSyncUnlock(pVM);
1373 if (fOwnTMLock)
1374 tmTimerUnlock(pVM);
1375
1376 if ( !fOwnTMLock
1377 && !fOwnVirtSyncLock
1378 && enmClock == TMCLOCK_VIRTUAL_SYNC)
1379 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync);
1380
1381 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1382 return rc;
1383}
1384
1385
1386/**
1387 * Arm a timer with a (new) expire time relative to current time.
1388 *
1389 * @returns VBox status.
1390 * @param pTimer Timer handle as returned by one of the create functions.
1391 * @param cMilliesToNext Number of milliseconds to the next tick.
1392 */
1393VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
1394{
1395 PVM pVM = pTimer->CTX_SUFF(pVM);
1396 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1397
1398 switch (pTimer->enmClock)
1399 {
1400 case TMCLOCK_VIRTUAL:
1401 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1402 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1403
1404 case TMCLOCK_VIRTUAL_SYNC:
1405 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1406 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1407
1408 case TMCLOCK_REAL:
1409 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1410 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
1411
1412 default:
1413 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1414 return VERR_INTERNAL_ERROR;
1415 }
1416}
1417
1418
1419/**
1420 * Arm a timer with a (new) expire time relative to current time.
1421 *
1422 * @returns VBox status.
1423 * @param pTimer Timer handle as returned by one of the create functions.
1424 * @param cMicrosToNext Number of microseconds to the next tick.
1425 */
1426VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
1427{
1428 PVM pVM = pTimer->CTX_SUFF(pVM);
1429 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1430
1431 switch (pTimer->enmClock)
1432 {
1433 case TMCLOCK_VIRTUAL:
1434 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1435 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1436
1437 case TMCLOCK_VIRTUAL_SYNC:
1438 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1439 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1440
1441 case TMCLOCK_REAL:
1442 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1443 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
1444
1445 default:
1446 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1447 return VERR_INTERNAL_ERROR;
1448 }
1449}
1450
1451
1452/**
1453 * Arm a timer with a (new) expire time relative to current time.
1454 *
1455 * @returns VBox status.
1456 * @param pTimer Timer handle as returned by one of the create functions.
1457 * @param cNanosToNext Number of nanoseconds to the next tick.
1458 */
1459VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
1460{
1461 PVM pVM = pTimer->CTX_SUFF(pVM);
1462 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1463
1464 switch (pTimer->enmClock)
1465 {
1466 case TMCLOCK_VIRTUAL:
1467 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1468 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1469
1470 case TMCLOCK_VIRTUAL_SYNC:
1471 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1472 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1473
1474 case TMCLOCK_REAL:
1475 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1476 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
1477
1478 default:
1479 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1480 return VERR_INTERNAL_ERROR;
1481 }
1482}
1483
1484
1485/**
1486 * Drops a hint about the frequency of the timer.
1487 *
1488 * This is used by TM and the VMM to calculate how often guest execution needs
1489 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1490 *
1491 * @returns VBox status code.
1492 * @param pTimer Timer handle as returned by one of the create
1493 * functions.
1494 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1495 *
1496 * @remarks We're using an integer hertz value here since anything above 1 Hz
1497 * is not going to be any trouble to satisfy scheduling wise. The
1498 * range where the hint makes sense is >= 100 Hz.
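 *
 * @par Example
 * A hypothetical periodic device tick (names are illustrative only):
 * @code
 *     TMTimerSetFrequencyHint(pThis->pTimer, 1000); // ~1 kHz tick
 *     TMTimerSetMillies(pThis->pTimer, 1);
 * @endcode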
1499 */
1500VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1501{
1502 TMTIMER_ASSERT_CRITSECT(pTimer);
1503
1504 uint32_t const uHzOldHint = pTimer->uHzHint;
1505 pTimer->uHzHint = uHzHint;
1506
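 /* Only flag a recalculation when the new or the old hint could change the global maximum. */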
1507 PVM pVM = pTimer->CTX_SUFF(pVM);
1508 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1509 if ( uHzHint > uMaxHzHint
1510 || uHzOldHint >= uMaxHzHint)
1511 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1512
1513 return VINF_SUCCESS;
1514}
1515
1516
1517/**
1518 * Stop the timer.
1519 * Use TMR3TimerArm() to "un-stop" the timer.
1520 *
1521 * @returns VBox status.
1522 * @param pTimer Timer handle as returned by one of the create functions.
1523 */
1524VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1525{
1526 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1527 TMTIMER_ASSERT_CRITSECT(pTimer);
1528
1529 /* Reset the HZ hint. */
1530 if (pTimer->uHzHint)
1531 {
1532 PVM pVM = pTimer->CTX_SUFF(pVM);
1533 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1534 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1535 pTimer->uHzHint = 0;
1536 }
1537
1538 /** @todo see if this function needs optimizing. */
1539 int cRetries = 1000;
1540 do
1541 {
1542 /*
1543 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1544 */
1545 TMTIMERSTATE enmState = pTimer->enmState;
1546 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1547 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1548 switch (enmState)
1549 {
1550 case TMTIMERSTATE_EXPIRED_DELIVER:
1551 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1552 return VERR_INVALID_PARAMETER;
1553
1554 case TMTIMERSTATE_STOPPED:
1555 case TMTIMERSTATE_PENDING_STOP:
1556 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1557 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1558 return VINF_SUCCESS;
1559
1560 case TMTIMERSTATE_PENDING_SCHEDULE:
1561 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1562 {
1563 tmSchedule(pTimer);
1564 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1565 return VINF_SUCCESS;
1566 }
1567
1568 case TMTIMERSTATE_PENDING_RESCHEDULE:
1569 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1570 {
1571 tmSchedule(pTimer);
1572 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1573 return VINF_SUCCESS;
1574 }
1575 break;
1576
1577 case TMTIMERSTATE_ACTIVE:
1578 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1579 {
1580 tmSchedule(pTimer);
1581 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1582 return VINF_SUCCESS;
1583 }
1584 break;
1585
1586 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1587 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1588 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1589#ifdef IN_RING3
1590 if (!RTThreadYield())
1591 RTThreadSleep(1);
1592#else
1593/**@todo call host and yield cpu after a while. */
1594#endif
1595 break;
1596
1597 /*
1598 * Invalid states.
1599 */
1600 case TMTIMERSTATE_DESTROY:
1601 case TMTIMERSTATE_FREE:
1602 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1603 return VERR_TM_INVALID_STATE;
1604 default:
1605 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1606 return VERR_TM_UNKNOWN_STATE;
1607 }
1608 } while (cRetries-- > 0);
1609
1610 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1611 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1612 return VERR_INTERNAL_ERROR;
1613}
1614
1615
1616/**
1617 * Get the current clock time.
1618 * Handy for calculating the new expire time.
1619 *
1620 * @returns Current clock time.
1621 * @param pTimer Timer handle as returned by one of the create functions.
1622 */
1623VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1624{
1625 uint64_t u64;
1626 PVM pVM = pTimer->CTX_SUFF(pVM);
1627
1628 switch (pTimer->enmClock)
1629 {
1630 case TMCLOCK_VIRTUAL:
1631 u64 = TMVirtualGet(pVM);
1632 break;
1633 case TMCLOCK_VIRTUAL_SYNC:
1634 u64 = TMVirtualSyncGet(pVM);
1635 break;
1636 case TMCLOCK_REAL:
1637 u64 = TMRealGet(pVM);
1638 break;
1639 default:
1640 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1641 return ~(uint64_t)0;
1642 }
1643 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1644 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1645 return u64;
1646}
1647
1648
1649/**
1650 * Get the frequency of the timer clock.
1651 *
1652 * @returns Clock frequency (as Hz of course).
1653 * @param pTimer Timer handle as returned by one of the create functions.
1654 */
1655VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1656{
1657 switch (pTimer->enmClock)
1658 {
1659 case TMCLOCK_VIRTUAL:
1660 case TMCLOCK_VIRTUAL_SYNC:
1661 return TMCLOCK_FREQ_VIRTUAL;
1662
1663 case TMCLOCK_REAL:
1664 return TMCLOCK_FREQ_REAL;
1665
1666 default:
1667 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1668 return 0;
1669 }
1670}
1671
1672
1673/**
1674 * Get the current clock time as nanoseconds.
1675 *
1676 * @returns The timer clock as nanoseconds.
1677 * @param pTimer Timer handle as returned by one of the create functions.
1678 */
1679VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1680{
1681 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1682}
1683
1684
1685/**
1686 * Get the current clock time as microseconds.
1687 *
1688 * @returns The timer clock as microseconds.
1689 * @param pTimer Timer handle as returned by one of the create functions.
1690 */
1691VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1692{
1693 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1694}
1695
1696
1697/**
1698 * Get the current clock time as milliseconds.
1699 *
1700 * @returns The timer clock as milliseconds.
1701 * @param pTimer Timer handle as returned by one of the create functions.
1702 */
1703VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1704{
1705 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1706}
1707
1708
1709/**
1710 * Converts the specified timer clock time to nanoseconds.
1711 *
1712 * @returns nanoseconds.
1713 * @param pTimer Timer handle as returned by one of the create functions.
1714 * @param u64Ticks The clock ticks.
1715 * @remark There could be rounding errors here. We just do a simple integer divide
1716 * without any adjustments.
1717 */
1718VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1719{
1720 switch (pTimer->enmClock)
1721 {
1722 case TMCLOCK_VIRTUAL:
1723 case TMCLOCK_VIRTUAL_SYNC:
1724 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1725 return u64Ticks;
1726
1727 case TMCLOCK_REAL:
1728 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1729 return u64Ticks * 1000000;
1730
1731 default:
1732 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1733 return 0;
1734 }
1735}
1736
1737
1738/**
1739 * Converts the specified timer clock time to microseconds.
1740 *
1741 * @returns microseconds.
1742 * @param pTimer Timer handle as returned by one of the create functions.
1743 * @param u64Ticks The clock ticks.
1744 * @remark There could be rounding errors here. We just do a simple integer divide
1745 * without any adjustments.
1746 */
1747VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1748{
1749 switch (pTimer->enmClock)
1750 {
1751 case TMCLOCK_VIRTUAL:
1752 case TMCLOCK_VIRTUAL_SYNC:
1753 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1754 return u64Ticks / 1000;
1755
1756 case TMCLOCK_REAL:
1757 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1758 return u64Ticks * 1000;
1759
1760 default:
1761 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1762 return 0;
1763 }
1764}
1765
1766
1767/**
1768 * Converts the specified timer clock time to milliseconds.
1769 *
1770 * @returns milliseconds.
1771 * @param pTimer Timer handle as returned by one of the create functions.
1772 * @param u64Ticks The clock ticks.
1773 * @remark There could be rounding errors here. We just do a simple integer divide
1774 * without any adjustments.
1775 */
1776VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1777{
1778 switch (pTimer->enmClock)
1779 {
1780 case TMCLOCK_VIRTUAL:
1781 case TMCLOCK_VIRTUAL_SYNC:
1782 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1783 return u64Ticks / 1000000;
1784
1785 case TMCLOCK_REAL:
1786 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1787 return u64Ticks;
1788
1789 default:
1790 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1791 return 0;
1792 }
1793}
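
/*
 * Worked example for the To* converters above (hypothetical helper): on a
 * virtual clock, 1999999 ticks is just short of 2 ms, but the plain
 * integer divide maps it to 1, exactly the rounding behaviour the @remark
 * notes warn about.
 */
#if 0 /* sketch only, not compiled */
static void exampleTruncation(PTMTIMER pTimer)
{
    uint64_t cMillies = TMTimerToMilli(pTimer, 1999999); /* 1 on a virtual clock, not 2 */
    NOREF(cMillies);
}
#endif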
1794
1795
1796/**
1797 * Converts the specified nanosecond timestamp to timer clock ticks.
1798 *
1799 * @returns timer clock ticks.
1800 * @param pTimer Timer handle as returned by one of the create functions.
1801 * @param u64NanoTS The nanosecond value to convert.
1802 * @remark There could be rounding and overflow errors here.
1803 */
1804VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1805{
1806 switch (pTimer->enmClock)
1807 {
1808 case TMCLOCK_VIRTUAL:
1809 case TMCLOCK_VIRTUAL_SYNC:
1810 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1811 return u64NanoTS;
1812
1813 case TMCLOCK_REAL:
1814 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1815 return u64NanoTS / 1000000;
1816
1817 default:
1818 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1819 return 0;
1820 }
1821}
1822
1823
1824/**
1825 * Converts the specified microsecond timestamp to timer clock ticks.
1826 *
1827 * @returns timer clock ticks.
1828 * @param pTimer Timer handle as returned by one of the create functions.
1829 * @param u64MicroTS The microsecond value to convert.
1830 * @remark There could be rounding and overflow errors here.
1831 */
1832VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1833{
1834 switch (pTimer->enmClock)
1835 {
1836 case TMCLOCK_VIRTUAL:
1837 case TMCLOCK_VIRTUAL_SYNC:
1838 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1839 return u64MicroTS * 1000;
1840
1841 case TMCLOCK_REAL:
1842 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1843 return u64MicroTS / 1000;
1844
1845 default:
1846 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1847 return 0;
1848 }
1849}
1850
1851
1852/**
1853 * Converts the specified millisecond timestamp to timer clock ticks.
1854 *
1855 * @returns timer clock ticks.
1856 * @param pTimer Timer handle as returned by one of the create functions.
1857 * @param u64MilliTS The millisecond value to convert.
1858 * @remark There could be rounding and overflow errors here.
1859 */
1860VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1861{
1862 switch (pTimer->enmClock)
1863 {
1864 case TMCLOCK_VIRTUAL:
1865 case TMCLOCK_VIRTUAL_SYNC:
1866 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1867 return u64MilliTS * 1000000;
1868
1869 case TMCLOCK_REAL:
1870 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1871 return u64MilliTS;
1872
1873 default:
1874 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1875 return 0;
1876 }
1877}
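
/*
 * Usage sketch combining the From* converters with TMTimerGet(): arming a
 * timer 5 ms from now regardless of which clock backs it.  TMTimerSet()
 * is assumed to be the arming API; the helper is made up.  Note that on a
 * TMCLOCK_REAL timer TMTimerFromNano() divides by 1000000, so
 * sub-millisecond deltas truncate to zero there.
 */
#if 0 /* sketch only, not compiled */
static void exampleArmIn5Ms(PTMTIMER pTimer)
{
    TMTimerSet(pTimer, TMTimerGet(pTimer) + TMTimerFromMilli(pTimer, 5));
}
#endif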
1878
1879
1880/**
1881 * Get the expire time of the timer.
1882 * Only valid for active timers.
1883 *
1884 * @returns Expire time of the timer.
1885 * @param pTimer Timer handle as returned by one of the create functions.
1886 */
1887VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1888{
1889 TMTIMER_ASSERT_CRITSECT(pTimer);
1890 int cRetries = 1000;
1891 do
1892 {
1893 TMTIMERSTATE enmState = pTimer->enmState;
1894 switch (enmState)
1895 {
1896 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1897 case TMTIMERSTATE_EXPIRED_DELIVER:
1898 case TMTIMERSTATE_STOPPED:
1899 case TMTIMERSTATE_PENDING_STOP:
1900 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1901 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1902 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1903 return ~(uint64_t)0;
1904
1905 case TMTIMERSTATE_ACTIVE:
1906 case TMTIMERSTATE_PENDING_RESCHEDULE:
1907 case TMTIMERSTATE_PENDING_SCHEDULE:
1908 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1909 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1910 return pTimer->u64Expire;
1911
1912 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1913 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1914#ifdef IN_RING3
1915 if (!RTThreadYield())
1916 RTThreadSleep(1);
1917#endif
1918 break;
1919
1920 /*
1921 * Invalid states.
1922 */
1923 case TMTIMERSTATE_DESTROY:
1924 case TMTIMERSTATE_FREE:
1925 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1926 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1927 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1928 return ~(uint64_t)0;
1929 default:
1930 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1931 return ~(uint64_t)0;
1932 }
1933 } while (cRetries-- > 0);
1934
1935 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1936 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1937 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1938 return ~(uint64_t)0;
1939}
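
/*
 * Usage sketch (hypothetical helper): computing how long until an active
 * timer fires.  TMTimerGetExpire() returns ~0 for inactive timers, so the
 * sentinel must be checked before doing arithmetic with the result.
 */
#if 0 /* sketch only, not compiled */
static uint64_t exampleTicksUntilExpire(PTMTIMER pTimer)
{
    uint64_t const u64Expire = TMTimerGetExpire(pTimer);
    if (u64Expire == ~(uint64_t)0)                  /* stopped, expired or being changed */
        return 0;
    uint64_t const u64Now = TMTimerGet(pTimer);
    return u64Expire > u64Now ? u64Expire - u64Now : 0;
}
#endif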
1940
1941
1942/**
1943 * Checks if a timer is active or not.
1944 *
1945 * @returns True if active.
1946 * @returns False if not active.
1947 * @param pTimer Timer handle as returned by one of the create functions.
1948 */
1949VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1950{
1951 TMTIMERSTATE enmState = pTimer->enmState;
1952 switch (enmState)
1953 {
1954 case TMTIMERSTATE_STOPPED:
1955 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1956 case TMTIMERSTATE_EXPIRED_DELIVER:
1957 case TMTIMERSTATE_PENDING_STOP:
1958 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1959 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1960 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1961 return false;
1962
1963 case TMTIMERSTATE_ACTIVE:
1964 case TMTIMERSTATE_PENDING_RESCHEDULE:
1965 case TMTIMERSTATE_PENDING_SCHEDULE:
1966 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1967 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1968 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1969 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1970 return true;
1971
1972 /*
1973 * Invalid states.
1974 */
1975 case TMTIMERSTATE_DESTROY:
1976 case TMTIMERSTATE_FREE:
1977 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1978 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1979 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1980 return false;
1981 default:
1982 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1983 return false;
1984 }
1985}
1986
1987
1988/**
1989 * Convert state to string.
1990 *
1991 * @returns Read-only state name.
1992 * @param enmState State.
1993 */
1994const char *tmTimerState(TMTIMERSTATE enmState)
1995{
1996 switch (enmState)
1997 {
1998#define CASE(num, state) \
1999 case TMTIMERSTATE_##state: \
2000 AssertCompile(TMTIMERSTATE_##state == (num)); \
2001 return #num "-" #state
2002 CASE( 1,STOPPED);
2003 CASE( 2,ACTIVE);
2004 CASE( 3,EXPIRED_GET_UNLINK);
2005 CASE( 4,EXPIRED_DELIVER);
2006 CASE( 5,PENDING_STOP);
2007 CASE( 6,PENDING_STOP_SCHEDULE);
2008 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2009 CASE( 8,PENDING_SCHEDULE);
2010 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2011 CASE(10,PENDING_RESCHEDULE);
2012 CASE(11,DESTROY);
2013 CASE(12,FREE);
2014 default:
2015 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2016 return "Invalid state!";
2017#undef CASE
2018 }
2019}
2020
2021
2022/**
2023 * Schedules the given timer on the given queue.
2024 *
2025 * @param pQueue The timer queue.
2026 * @param pTimer The timer that needs scheduling.
2027 *
2028 * @remarks Called while owning the lock.
2029 */
2030DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
2031{
2032 /*
2033 * Processing.
2034 */
2035 unsigned cRetries = 2;
2036 do
2037 {
2038 TMTIMERSTATE enmState = pTimer->enmState;
2039 switch (enmState)
2040 {
2041 /*
2042 * Reschedule timer (in the active list).
2043 */
2044 case TMTIMERSTATE_PENDING_RESCHEDULE:
2045 {
2046 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
2047 break; /* retry */
2048
2049 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
2050 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
2051 if (pPrev)
2052 TMTIMER_SET_NEXT(pPrev, pNext);
2053 else
2054 {
2055 TMTIMER_SET_HEAD(pQueue, pNext);
2056 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
2057 }
2058 if (pNext)
2059 TMTIMER_SET_PREV(pNext, pPrev);
2060 pTimer->offNext = 0;
2061 pTimer->offPrev = 0;
2062 /* fall thru */
2063 }
2064
2065 /*
2066 * Schedule timer (insert into the active list).
2067 */
2068 case TMTIMERSTATE_PENDING_SCHEDULE:
2069 {
2070 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
2071 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
2072 break; /* retry */
2073
2074 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
2075 if (pCur)
2076 {
2077 const uint64_t u64Expire = pTimer->u64Expire;
2078 for (;; pCur = TMTIMER_GET_NEXT(pCur))
2079 {
2080 if (pCur->u64Expire > u64Expire)
2081 {
2082 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
2083 TMTIMER_SET_NEXT(pTimer, pCur);
2084 TMTIMER_SET_PREV(pTimer, pPrev);
2085 if (pPrev)
2086 TMTIMER_SET_NEXT(pPrev, pTimer);
2087 else
2088 {
2089 TMTIMER_SET_HEAD(pQueue, pTimer);
2090 pQueue->u64Expire = u64Expire;
2091 }
2092 TMTIMER_SET_PREV(pCur, pTimer);
2093 return;
2094 }
2095 if (!pCur->offNext)
2096 {
2097 TMTIMER_SET_NEXT(pCur, pTimer);
2098 TMTIMER_SET_PREV(pTimer, pCur);
2099 return;
2100 }
2101 }
2102 }
2103 else
2104 {
2105 TMTIMER_SET_HEAD(pQueue, pTimer);
2106 pQueue->u64Expire = pTimer->u64Expire;
2107 }
2108 return;
2109 }
2110
2111 /*
2112 * Stop the timer in active list.
2113 */
2114 case TMTIMERSTATE_PENDING_STOP:
2115 {
2116 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
2117 break; /* retry */
2118
2119 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
2120 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
2121 if (pPrev)
2122 TMTIMER_SET_NEXT(pPrev, pNext);
2123 else
2124 {
2125 TMTIMER_SET_HEAD(pQueue, pNext);
2126 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
2127 }
2128 if (pNext)
2129 TMTIMER_SET_PREV(pNext, pPrev);
2130 pTimer->offNext = 0;
2131 pTimer->offPrev = 0;
2132 /* fall thru */
2133 }
2134
2135 /*
2136 * Stop the timer (not on the active list).
2137 */
2138 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2139 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
2140 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
2141 break;
2142 return;
2143
2144 /*
2145 * The timer is pending destruction by TMR3TimerDestroy, our caller.
2146 * Nothing to do here.
2147 */
2148 case TMTIMERSTATE_DESTROY:
2149 break;
2150
2151 /*
2152 * Postpone these until they get into the right state.
2153 */
2154 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2155 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2156 tmTimerLink(pQueue, pTimer);
2157 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
2158 return;
2159
2160 /*
2161 * None of these can be in the schedule.
2162 */
2163 case TMTIMERSTATE_FREE:
2164 case TMTIMERSTATE_STOPPED:
2165 case TMTIMERSTATE_ACTIVE:
2166 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2167 case TMTIMERSTATE_EXPIRED_DELIVER:
2168 default:
2169 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
2170 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
2171 return;
2172 }
2173 } while (cRetries-- > 0);
2174}
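
/*
 * Background sketch: the TMTIMER_GET/SET macros used above link timers by
 * self-relative byte offsets rather than raw pointers, presumably so the
 * same queue can be walked from ring-3, ring-0 and raw-mode where the
 * structures are mapped at different addresses.  A minimal rendition of
 * the idea with hypothetical types (compare the offScheduleNext walk in
 * tmTimerQueueSchedule() below):
 */
#if 0 /* sketch only, not compiled */
typedef struct EXAMPLENODE
{
    int32_t offNext; /* 0 = end of list, otherwise byte offset from this node */
} EXAMPLENODE;

DECLINLINE(EXAMPLENODE *) exampleGetNext(EXAMPLENODE *pNode)
{
    return pNode->offNext ? (EXAMPLENODE *)((intptr_t)pNode + pNode->offNext) : NULL;
}
#endif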
2175
2176
2177/**
2178 * Schedules the specified timer queue.
2179 *
2180 * @param pVM The VM to run the timers for.
2181 * @param pQueue The queue to schedule.
2182 *
2183 * @remarks Called while owning the lock.
2184 */
2185void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
2186{
2187 TM_ASSERT_LOCK(pVM);
2188
2189 /*
2190 * Dequeue the scheduling list and iterate it.
2191 */
2192 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
2193 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
2194 if (!offNext)
2195 return;
2196 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
2197 while (pNext)
2198 {
2199 /*
2200 * Unlink the head timer and find the next one.
2201 */
2202 PTMTIMER pTimer = pNext;
2203 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
2204 pTimer->offScheduleNext = 0;
2205
2206 /*
2207 * Do the scheduling.
2208 */
2209 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
2210 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
2211 tmTimerQueueScheduleOne(pQueue, pTimer);
2212 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
2213 } /* foreach timer in current schedule batch. */
2214 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
2215}
2216
2217
2218#ifdef VBOX_STRICT
2219/**
2220 * Checks that the timer queues are sane.
2221 *
2222 * @param pVM VM handle.
 * @param pszWhere Caller name used in the assertion messages.
2223 *
2224 * @remarks Called while owning the lock.
2225 */
2226void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
2227{
2228 TM_ASSERT_LOCK(pVM);
2229
2230 /*
2231 * Check the linking of the active lists.
2232 */
2233 for (int i = 0; i < TMCLOCK_MAX; i++)
2234 {
2235 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2236 Assert((int)pQueue->enmClock == i);
2237 PTMTIMER pPrev = NULL;
2238 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
2239 {
2240 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
2241 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
2242 TMTIMERSTATE enmState = pCur->enmState;
2243 switch (enmState)
2244 {
2245 case TMTIMERSTATE_ACTIVE:
2246 AssertMsg( !pCur->offScheduleNext
2247 || pCur->enmState != TMTIMERSTATE_ACTIVE,
2248 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
2249 break;
2250 case TMTIMERSTATE_PENDING_STOP:
2251 case TMTIMERSTATE_PENDING_RESCHEDULE:
2252 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2253 break;
2254 default:
2255 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
2256 break;
2257 }
2258 }
2259 }
2260
2261
2262# ifdef IN_RING3
2263 /*
2264 * Do the big list and check that active timers all are in the active lists.
2265 */
2266 PTMTIMERR3 pPrev = NULL;
2267 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
2268 {
2269 Assert(pCur->pBigPrev == pPrev);
2270 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
2271
2272 TMTIMERSTATE enmState = pCur->enmState;
2273 switch (enmState)
2274 {
2275 case TMTIMERSTATE_ACTIVE:
2276 case TMTIMERSTATE_PENDING_STOP:
2277 case TMTIMERSTATE_PENDING_RESCHEDULE:
2278 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2279 {
2280 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
2281 Assert(pCur->offPrev || pCur == pCurAct);
2282 while (pCurAct && pCurAct != pCur)
2283 pCurAct = TMTIMER_GET_NEXT(pCurAct);
2284 Assert(pCurAct == pCur);
2285 break;
2286 }
2287
2288 case TMTIMERSTATE_PENDING_SCHEDULE:
2289 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2290 case TMTIMERSTATE_STOPPED:
2291 case TMTIMERSTATE_EXPIRED_DELIVER:
2292 {
2293 Assert(!pCur->offNext);
2294 Assert(!pCur->offPrev);
2295 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
2296 pCurAct;
2297 pCurAct = TMTIMER_GET_NEXT(pCurAct))
2298 {
2299 Assert(pCurAct != pCur);
2300 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
2301 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
2302 }
2303 break;
2304 }
2305
2306 /* ignore */
2307 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2308 break;
2309
2310 /* shouldn't get here! */
2311 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2312 case TMTIMERSTATE_DESTROY:
2313 default:
2314 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
2315 break;
2316 }
2317 }
2318# endif /* IN_RING3 */
2319}
2320#endif /* VBOX_STRICT */
2321
2322
2323/**
2324 * Gets the current warp drive percent.
2325 *
2326 * @returns The warp drive percent.
2327 * @param pVM The VM handle.
2328 */
2329VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
2330{
2331 return pVM->tm.s.u32VirtualWarpDrivePercentage;
2332}
2333
2334
2335/**
2336 * Gets the highest frequency hint for all the important timers.
2337 *
2338 * @returns The highest frequency. 0 if no timers care.
2339 * @param pVM The VM handle.
2340 */
2341static uint32_t tmGetFrequencyHint(PVM pVM)
2342{
2343 /*
2344 * Query the value, recalculate it if necessary.
2345 *
2346 * The "right" highest frequency value isn't so important that we'll block
2347 * waiting on the timer semaphore.
2348 */
2349 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2350 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2351 {
2352 if (RT_SUCCESS(tmTimerTryLock(pVM)))
2353 {
2354 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2355
2356 /*
2357 * Loop over the timers associated with each clock.
2358 */
2359 uMaxHzHint = 0;
2360 for (int i = 0; i < TMCLOCK_MAX; i++)
2361 {
2362 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2363 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2364 {
2365 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2366 if (uHzHint > uMaxHzHint)
2367 {
2368 switch (pCur->enmState)
2369 {
2370 case TMTIMERSTATE_ACTIVE:
2371 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2372 case TMTIMERSTATE_EXPIRED_DELIVER:
2373 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2374 case TMTIMERSTATE_PENDING_SCHEDULE:
2375 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2376 case TMTIMERSTATE_PENDING_RESCHEDULE:
2377 uMaxHzHint = uHzHint;
2378 break;
2379
2380 case TMTIMERSTATE_STOPPED:
2381 case TMTIMERSTATE_PENDING_STOP:
2382 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2383 case TMTIMERSTATE_DESTROY:
2384 case TMTIMERSTATE_FREE:
2385 break;
2386 /* no default, want gcc warnings when adding more states. */
2387 }
2388 }
2389 }
2390 }
2391 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2392 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2393 tmTimerUnlock(pVM);
2394 }
2395 }
2396 return uMaxHzHint;
2397}
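
/*
 * Pattern sketch (condensed restatement, hypothetical helper): the routine
 * above is a lazy, double-checked cache.  Readers take the possibly stale
 * value unless the dirty flag is set, and even then only recompute when
 * the try-lock succeeds, so no caller ever blocks on the timer lock.
 */
#if 0 /* sketch only, not compiled */
static uint32_t exampleLazyCache(PVM pVM)
{
    uint32_t uValue = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);  /* cheap unordered read */
    if (   ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)
        && RT_SUCCESS(tmTimerTryLock(pVM)))                       /* never block here */
    {
        ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
        /* ... recompute uValue from the timer queues ... */
        ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uValue);
        tmTimerUnlock(pVM);
    }
    return uValue;
}
#endif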
2398
2399
2400/**
2401 * Calculates a host timer frequency that would be suitable for the current
2402 * timer load.
2403 *
2404 * This will take the highest timer frequency, adjust for catch-up and warp
2405 * drive, and finally add a little fudge factor. The caller (VMM) will use
2406 * the result to adjust the per-CPU preemption timer.
2407 *
2408 * @returns The highest frequency. 0 if no important timers around.
2409 * @param pVM The VM handle.
2410 * @param pVCpu The current CPU.
2411 */
2412VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
2413{
2414 uint32_t uHz = tmGetFrequencyHint(pVM);
2415
2416 /* Catch up, we have to be more aggressive than the % indicates at the
2417 beginning of the effort. */
2418 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2419 {
2420 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2421 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2422 {
2423 if (u32Pct <= 100)
2424 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2425 else if (u32Pct <= 200)
2426 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2427 else if (u32Pct <= 400)
2428 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2429 uHz *= u32Pct + 100;
2430 uHz /= 100;
2431 }
2432 }
2433
2434 /* Warp drive. */
2435 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2436 {
2437 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2438 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2439 {
2440 uHz *= u32Pct;
2441 uHz /= 100;
2442 }
2443 }
2444
2445 /* Fudge factor. */
2446 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2447 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2448 else
2449 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2450 uHz /= 100;
2451
2452 /* Make sure it isn't too high. */
2453 if (uHz > pVM->tm.s.cHostHzMax)
2454 uHz = pVM->tm.s.cHostHzMax;
2455
2456 return uHz;
2457}
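
/*
 * Worked example with illustrative numbers (the fudge factors are
 * configuration values; 300, 200 and 110 below are hypothetical): a hint
 * of 100 Hz with catch-up at u32Pct = 100 and a CatchUp100 factor of 300
 * gives u32Pct = 100 * 300 / 100 = 300, so uHz = 100 * (300 + 100) / 100
 * = 400 Hz.  A 200% warp drive doubles that to 800 Hz, a 110% per-CPU
 * fudge factor yields 880 Hz, and the result is clamped to cHostHzMax.
 */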