VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@37527

Last change on this file since 37527 was 37527, checked in by vboxsync, 13 years ago

TM: Virtual sync timer locking fixes and assertions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 89.7 KB
1/* $Id: TMAll.cpp 37527 2011-06-17 10:18:02Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/dbgftrace.h>
26#ifdef IN_RING3
27# include <VBox/vmm/rem.h>
28#endif
29#include "TMInternal.h"
30#include <VBox/vmm/vm.h>
31
32#include <VBox/param.h>
33#include <VBox/err.h>
34#include <VBox/log.h>
35#include <VBox/sup.h>
36#include <iprt/time.h>
37#include <iprt/assert.h>
38#include <iprt/asm.h>
39#include <iprt/asm-math.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44#include "TMInline.h"
45
46
47/*******************************************************************************
48* Defined Constants And Macros *
49*******************************************************************************/
50/** @def TMTIMER_ASSERT_CRITSECT
51 * Checks that the caller owns the critical section if one is associated with
52 * the timer. */
53#ifdef VBOX_STRICT
54# define TMTIMER_ASSERT_CRITSECT(pTimer) \
55 do { \
56 if ((pTimer)->pCritSect) \
57 { \
58 VMSTATE enmState; \
59 PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
60 AssertMsg( pCritSect \
61 && ( PDMCritSectIsOwner(pCritSect) \
62 || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
63 || enmState == VMSTATE_RESETTING \
64 || enmState == VMSTATE_RESETTING_LS ),\
65 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
66 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
67 } \
68 } while (0)
69#else
70# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
71#endif
72
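/*
 * Illustrative example (not part of the original file): a device typically
 * associates its own critical section with a timer once, during construction,
 * after which TMTIMER_ASSERT_CRITSECT above guards every timer operation.
 * The TMR3TimerSetCritSect call and the pThis fields are assumptions here.
 */
#if 0 /* example only */
    rc = TMR3TimerSetCritSect(pThis->pTimerR3, &pThis->CritSect);
    AssertRCReturn(rc, rc);
#endif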
73
74/**
75 * Gets the current warp drive percent.
76 *
77 * @returns The warp drive percent.
78 * @param pVM The VM handle.
79 */
80VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
81{
82 return pVM->tm.s.u32VirtualWarpDrivePercentage;
83}
84
85
86/**
87 * Notification that execution is about to start.
88 *
89 * This call must always be paired with a TMNotifyEndOfExecution call.
90 *
91 * The function may, depending on the configuration, resume the TSC and future
92 * clocks that only tick when we're executing guest code.
93 *
94 * @param pVCpu The VMCPU to operate on.
95 */
96VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
97{
98 PVM pVM = pVCpu->CTX_SUFF(pVM);
99
100#ifndef VBOX_WITHOUT_NS_ACCOUNTING
101 pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
102#endif
103 if (pVM->tm.s.fTSCTiedToExecution)
104 tmCpuTickResume(pVM, pVCpu);
105}
106
107
108/**
109 * Notification that execution has ended.
110 *
111 * This call must always be paired with a TMNotifyStartOfExecution call.
112 *
113 * The function may, depending on the configuration, suspend the TSC and future
114 * clocks that only tick when we're executing guest code.
115 *
116 * @param pVCpu The VMCPU to operate on.
117 */
118VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
119{
120 PVM pVM = pVCpu->CTX_SUFF(pVM);
121
122 if (pVM->tm.s.fTSCTiedToExecution)
123 tmCpuTickPause(pVM, pVCpu);
124
125#ifndef VBOX_WITHOUT_NS_ACCOUNTING
126 uint64_t const u64NsTs = RTTimeNanoTS();
127 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
128 uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
129 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
130 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
131
132# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
133 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
134 if (cNsExecutingDelta < 5000)
135 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
136 else if (cNsExecutingDelta < 50000)
137 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
138 else
139 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
140 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
141 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
142 if (cNsOtherNewDelta > 0)
143 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
144# endif
145
146 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
147 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
148 pVCpu->tm.s.cNsTotal = cNsTotalNew;
149 pVCpu->tm.s.cNsOther = cNsOtherNew;
150 pVCpu->tm.s.cPeriodsExecuting++;
151 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
152#endif
153}
154
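/*
 * Illustrative example (not part of the original file): the two notification
 * calls above must bracket every burst of guest execution, e.g. in an EM
 * loop; the executor name below is hypothetical.
 */
#if 0 /* example only */
    TMNotifyStartOfExecution(pVCpu);
    int rcExec = emExampleRunGuest(pVM, pVCpu); /* hypothetical executor */
    TMNotifyEndOfExecution(pVCpu);
#endif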
155
156/**
157 * Notification that the CPU is entering the halt state.
158 *
159 * This call must always be paired with a TMNotifyEndOfHalt call.
160 *
161 * The function may, depending on the configuration, resume the TSC and future
162 * clocks that only tick when we're halted.
163 *
164 * @param pVCpu The VMCPU to operate on.
165 */
166VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
167{
168 PVM pVM = pVCpu->CTX_SUFF(pVM);
169
170#ifndef VBOX_WITHOUT_NS_ACCOUNTING
171 pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
172#endif
173
174 if ( pVM->tm.s.fTSCTiedToExecution
175 && !pVM->tm.s.fTSCNotTiedToHalt)
176 tmCpuTickResume(pVM, pVCpu);
177}
178
179
180/**
181 * Notification that the CPU is leaving the halt state.
182 *
183 * This call must always be paired with a TMNotifyStartOfHalt call.
184 *
185 * The function may, depending on the configuration, suspend the TSC and future
186 * clocks that only tick when we're halted.
187 *
188 * @param pVCpu The VMCPU to operate on.
189 */
190VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
191{
192 PVM pVM = pVCpu->CTX_SUFF(pVM);
193
194 if ( pVM->tm.s.fTSCTiedToExecution
195 && !pVM->tm.s.fTSCNotTiedToHalt)
196 tmCpuTickPause(pVM, pVCpu);
197
198#ifndef VBOX_WITHOUT_NS_ACCOUNTING
199 uint64_t const u64NsTs = RTTimeNanoTS();
200 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
201 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
202 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
203 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
204
205# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
206 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
207 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
208 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
209 if (cNsOtherNewDelta > 0)
210 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
211# endif
212
213 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
214 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
215 pVCpu->tm.s.cNsTotal = cNsTotalNew;
216 pVCpu->tm.s.cNsOther = cNsOtherNew;
217 pVCpu->tm.s.cPeriodsHalted++;
218 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
219#endif
220}
221
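/*
 * Illustrative sketch (not part of the original file): the uTimesGen updates
 * above follow a seqlock-like pattern -- the generation count is odd while an
 * update is in progress and even when the accounting fields are stable. A
 * reader could take a consistent snapshot like this (the helper name is
 * hypothetical):
 */
#if 0 /* example only */
static void tmExampleReadNsAccounting(PVMCPU pVCpu, uint64_t *pcNsExecuting, uint64_t *pcNsHalted)
{
    for (;;)
    {
        uint32_t const uGen = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen);
        if (!(uGen & 1)) /* even: no update in progress */
        {
            uint64_t const cNsExecuting = pVCpu->tm.s.cNsExecuting;
            uint64_t const cNsHalted    = pVCpu->tm.s.cNsHalted;
            if (ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen) == uGen)
            {
                *pcNsExecuting = cNsExecuting;
                *pcNsHalted    = cNsHalted;
                return;
            }
        }
    }
}
#endif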
222
223/**
224 * Raise the timer force action flag and notify the dedicated timer EMT.
225 *
226 * @param pVM The VM handle.
227 */
228DECLINLINE(void) tmScheduleNotify(PVM pVM)
229{
230 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
231 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
232 {
233 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
234 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
235#ifdef IN_RING3
236 REMR3NotifyTimerPending(pVM, pVCpuDst);
237 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
238#endif
239 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
240 }
241}
242
243
244/**
245 * Schedule the queue which was changed.
246 */
247DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
248{
249 PVM pVM = pTimer->CTX_SUFF(pVM);
250 if ( VM_IS_EMT(pVM)
251 && RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
252 {
253 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
254 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
255 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
256#ifdef VBOX_STRICT
257 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
258#endif
259 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
260 TM_UNLOCK_TIMERS(pVM);
261 }
262 else
263 {
264 TMTIMERSTATE enmState = pTimer->enmState;
265 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
266 tmScheduleNotify(pVM);
267 }
268}
269
270
271/**
272 * Tries to change the timer state from enmStateOld to enmStateNew
273 * (unlike tmTimerTryWithLink, this does not touch the scheduling queue).
274 *
275 * @returns Success indicator.
276 * @param pTimer Timer in question.
277 * @param enmStateNew The new timer state.
278 * @param enmStateOld The old timer state.
279 */
280DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
281{
282 /*
283 * Attempt state change.
284 */
285 bool fRc;
286 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
287 return fRc;
288}
289
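/*
 * Illustrative note (assumption -- TM_TRY_SET_STATE is defined in
 * TMInternal.h and not shown here): the attempted transition boils down to a
 * single compare-exchange on the state field, roughly:
 */
#if 0 /* example only */
    fRc = ASMAtomicCmpXchgU32((uint32_t volatile *)&pTimer->enmState,
                              enmStateNew, enmStateOld);
#endif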
290
291/**
292 * Links the timer onto the scheduling queue.
293 *
294 * @param pQueue The timer queue the timer belongs to.
295 * @param pTimer The timer.
296 *
297 * @todo FIXME: Look into potential race with the thread running the queues
298 * and stuff.
299 */
300DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
301{
302 Assert(!pTimer->offScheduleNext);
303 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
304 int32_t offHead;
305 do
306 {
307 offHead = pQueue->offSchedule;
308 if (offHead)
309 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
310 else
311 pTimer->offScheduleNext = 0;
312 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
313}
314
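/*
 * Illustrative note (not part of the original file): the CmpXchg loop above
 * is a lock-free LIFO push using self-relative offsets, which stay valid in
 * ring-3, ring-0 and raw-mode context where the structures are mapped at
 * different addresses. The consumer detaches the whole batch with a single
 * atomic exchange, as tmTimerQueueSchedule does further down:
 */
#if 0 /* example only */
    int32_t offHead = ASMAtomicXchgS32(&pQueue->offSchedule, 0); /* detach all */
    PTMTIMER pHead  = offHead ? (PTMTIMER)((intptr_t)pQueue + offHead) : NULL;
#endif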
315
316/**
317 * Tries to change the state from enmStateOld to enmStateNew and, on
318 * success, links the timer onto the scheduling queue.
319 *
320 * @returns Success indicator.
321 * @param pTimer Timer in question.
322 * @param enmStateNew The new timer state.
323 * @param enmStateOld The old timer state.
324 */
325DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
326{
327 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
328 {
329 tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
330 return true;
331 }
332 return false;
333}
334
335
336/**
337 * Links a timer into the active list of a timer queue.
338 *
339 * @param pQueue The queue.
340 * @param pTimer The timer.
341 * @param u64Expire The timer expiration time.
342 *
343 * @remarks Called while owning the relevant queue lock.
344 */
345DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
346{
347 Assert(!pTimer->offNext);
348 Assert(!pTimer->offPrev);
349 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
350
351 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
352 if (pCur)
353 {
354 for (;; pCur = TMTIMER_GET_NEXT(pCur))
355 {
356 if (pCur->u64Expire > u64Expire)
357 {
358 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
359 TMTIMER_SET_NEXT(pTimer, pCur);
360 TMTIMER_SET_PREV(pTimer, pPrev);
361 if (pPrev)
362 TMTIMER_SET_NEXT(pPrev, pTimer);
363 else
364 {
365 TMTIMER_SET_HEAD(pQueue, pTimer);
366 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
367 DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
368 }
369 TMTIMER_SET_PREV(pCur, pTimer);
370 return;
371 }
372 if (!pCur->offNext)
373 {
374 TMTIMER_SET_NEXT(pCur, pTimer);
375 TMTIMER_SET_PREV(pTimer, pCur);
376 DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
377 return;
378 }
379 }
380 }
381 else
382 {
383 TMTIMER_SET_HEAD(pQueue, pTimer);
384 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
385 DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
386 }
387}
388
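/*
 * Note: the active list is kept sorted by u64Expire, with the head's expire
 * time mirrored into pQueue->u64Expire. That keeps the poll path O(1) at the
 * cost of the O(n) insertion scan above.
 */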
389
390
391/**
392 * Schedules the given timer on the given queue.
393 *
394 * @param pQueue The timer queue.
395 * @param pTimer The timer that needs scheduling.
396 *
397 * @remarks Called while owning the lock.
398 */
399DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
400{
401 Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
402
403 /*
404 * Processing.
405 */
406 unsigned cRetries = 2;
407 do
408 {
409 TMTIMERSTATE enmState = pTimer->enmState;
410 switch (enmState)
411 {
412 /*
413 * Reschedule timer (in the active list).
414 */
415 case TMTIMERSTATE_PENDING_RESCHEDULE:
416 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
417 break; /* retry */
418 tmTimerQueueUnlinkActive(pQueue, pTimer);
419 /* fall thru */
420
421 /*
422 * Schedule timer (insert into the active list).
423 */
424 case TMTIMERSTATE_PENDING_SCHEDULE:
425 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
426 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
427 break; /* retry */
428 tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
429 return;
430
431 /*
432 * Stop the timer in active list.
433 */
434 case TMTIMERSTATE_PENDING_STOP:
435 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
436 break; /* retry */
437 tmTimerQueueUnlinkActive(pQueue, pTimer);
438 /* fall thru */
439
440 /*
441 * Stop the timer (not on the active list).
442 */
443 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
444 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
445 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
446 break;
447 return;
448
449 /*
450 * The timer is pending destruction by TMR3TimerDestroy, our caller.
451 * Nothing to do here.
452 */
453 case TMTIMERSTATE_DESTROY:
454 break;
455
456 /*
457 * Postpone these until they get into the right state.
458 */
459 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
460 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
461 tmTimerLinkSchedule(pQueue, pTimer);
462 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
463 return;
464
465 /*
466 * None of these can be in the schedule.
467 */
468 case TMTIMERSTATE_FREE:
469 case TMTIMERSTATE_STOPPED:
470 case TMTIMERSTATE_ACTIVE:
471 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
472 case TMTIMERSTATE_EXPIRED_DELIVER:
473 default:
474 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
475 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
476 return;
477 }
478 } while (cRetries-- > 0);
479}
480
481
482/**
483 * Schedules the specified timer queue.
484 *
485 * @param pVM The VM to run the timers for.
486 * @param pQueue The queue to schedule.
487 *
488 * @remarks Called while owning the lock.
489 */
490void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
491{
492 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
493
494 /*
495 * Dequeue the scheduling list and iterate it.
496 */
497 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
498 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
499 if (!offNext)
500 return;
501 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
502 while (pNext)
503 {
504 /*
505 * Unlink the head timer and find the next one.
506 */
507 PTMTIMER pTimer = pNext;
508 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
509 pTimer->offScheduleNext = 0;
510
511 /*
512 * Do the scheduling.
513 */
514 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
515 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
516 tmTimerQueueScheduleOne(pQueue, pTimer);
517 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
518 } /* foreach timer in current schedule batch. */
519 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
520}
521
522
523#ifdef VBOX_STRICT
524/**
525 * Checks that the timer queues are sane.
526 *
527 * @param pVM VM handle.
528 * @param pszWhere Caller tag used in the assertion messages.
529 * @remarks Called while owning the lock.
530 */
531void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
532{
533 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
534
535 /*
536 * Check the linking of the active lists.
537 */
538 bool fHaveVirtualSyncLock = false;
539 for (int i = 0; i < TMCLOCK_MAX; i++)
540 {
541 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
542 Assert((int)pQueue->enmClock == i);
543 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
544 {
545 if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
546 continue;
547 fHaveVirtualSyncLock = true;
548 }
549 PTMTIMER pPrev = NULL;
550 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
551 {
552 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
553 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
554 TMTIMERSTATE enmState = pCur->enmState;
555 switch (enmState)
556 {
557 case TMTIMERSTATE_ACTIVE:
558 AssertMsg( !pCur->offScheduleNext
559 || pCur->enmState != TMTIMERSTATE_ACTIVE,
560 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
561 break;
562 case TMTIMERSTATE_PENDING_STOP:
563 case TMTIMERSTATE_PENDING_RESCHEDULE:
564 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
565 break;
566 default:
567 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
568 break;
569 }
570 }
571 }
572
573
574# ifdef IN_RING3
575 /*
576 * Do the big list and check that active timers all are in the active lists.
577 */
578 PTMTIMERR3 pPrev = NULL;
579 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
580 {
581 Assert(pCur->pBigPrev == pPrev);
582 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
583
584 TMTIMERSTATE enmState = pCur->enmState;
585 switch (enmState)
586 {
587 case TMTIMERSTATE_ACTIVE:
588 case TMTIMERSTATE_PENDING_STOP:
589 case TMTIMERSTATE_PENDING_RESCHEDULE:
590 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
591 if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
592 {
593 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
594 Assert(pCur->offPrev || pCur == pCurAct);
595 while (pCurAct && pCurAct != pCur)
596 pCurAct = TMTIMER_GET_NEXT(pCurAct);
597 Assert(pCurAct == pCur);
598 }
599 break;
600
601 case TMTIMERSTATE_PENDING_SCHEDULE:
602 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
603 case TMTIMERSTATE_STOPPED:
604 case TMTIMERSTATE_EXPIRED_DELIVER:
605 if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
606 {
607 Assert(!pCur->offNext);
608 Assert(!pCur->offPrev);
609 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
610 pCurAct;
611 pCurAct = TMTIMER_GET_NEXT(pCurAct))
612 {
613 Assert(pCurAct != pCur);
614 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
615 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
616 }
617 }
618 break;
619
620 /* ignore */
621 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
622 break;
623
624 /* shouldn't get here! */
625 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
626 case TMTIMERSTATE_DESTROY:
627 default:
628 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
629 break;
630 }
631 }
632# endif /* IN_RING3 */
633
634 if (fHaveVirtualSyncLock)
635 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
636}
637#endif /* VBOX_STRICT */
638
639#ifdef VBOX_HIGH_RES_TIMERS_HACK
640
641/**
642 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
643 * EMT is polling.
644 *
645 * @returns See tmTimerPollInternal.
646 * @param pVM Pointer to the shared VM structure.
647 * @param u64Now Current virtual clock timestamp.
648 * @param u64Delta The delta to the next event in ticks of the
649 * virtual clock.
650 * @param pu64Delta Where to return the delta.
651 *
652 */
653DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
654{
655 Assert(!(u64Delta & RT_BIT_64(63)));
656
657 if (!pVM->tm.s.fVirtualWarpDrive)
658 {
659 *pu64Delta = u64Delta;
660 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
661 }
662
663 /*
664 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
665 */
666 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
667 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
668
669 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
670 u64GipTime -= u64Start; /* the start is GIP time. */
671 if (u64GipTime >= u64Delta)
672 {
673 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
674 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
675 }
676 else
677 {
678 u64Delta -= u64GipTime;
679 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
680 u64Delta += u64GipTime;
681 }
682 *pu64Delta = u64Delta;
683 u64GipTime += u64Start;
684 return u64GipTime;
685}
686
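/*
 * Worked example (illustrative): with a 200% warp drive (u32Pct = 200) the
 * virtual clock runs twice as fast as the GIP clock, so a timer 10 000 000
 * virtual ticks away must fire after only 5 000 000 ns of GIP time.
 * ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct) performs exactly that
 * u64Delta * 100 / u32Pct scaling without intermediate 64-bit overflow.
 */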
687
688/**
689 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
690 * than the one dedicated to timer work.
691 *
692 * @returns See tmTimerPollInternal.
693 * @param pVM Pointer to the shared VM structure.
694 * @param u64Now Current virtual clock timestamp.
695 * @param pu64Delta Where to return the delta.
696 */
697DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
698{
699 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
700 *pu64Delta = s_u64OtherRet;
701 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
702}
703
704
705/**
706 * Worker for tmTimerPollInternal.
707 *
708 * @returns See tmTimerPollInternal.
709 * @param pVM Pointer to the shared VM structure.
710 * @param pVCpu Pointer to the shared VMCPU structure of the
711 * caller.
712 * @param pVCpuDst Pointer to the shared VMCPU structure of the
713 * dedicated timer EMT.
714 * @param u64Now Current virtual clock timestamp.
715 * @param pu64Delta Where to return the delta.
716 * @param pCounter The statistics counter to update.
717 */
718DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
719 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
720{
721 STAM_COUNTER_INC(pCounter);
722 if (pVCpuDst != pVCpu)
723 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
724 *pu64Delta = 0;
725 return 0;
726}
727
728/**
729 * Common worker for TMTimerPollGIP and TMTimerPoll.
730 *
731 * This function is called before FFs are checked in the inner execution EM loops.
732 *
733 * @returns The GIP timestamp of the next event.
734 * 0 if the next event has already expired.
735 *
736 * @param pVM Pointer to the shared VM structure.
737 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
738 * @param pu64Delta Where to store the delta.
739 *
740 * @thread The emulation thread.
741 *
742 * @remarks GIP uses ns ticks.
743 */
744DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
745{
746 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
747 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
748 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
749
750 /*
751 * Return straight away if the timer FF is already set ...
752 */
753 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
754 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
755
756 /*
757 * ... or if timers are being run.
758 */
759 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
760 {
761 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
762 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
763 }
764
765 /*
766 * Check for TMCLOCK_VIRTUAL expiration.
767 */
768 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
769 const int64_t i64Delta1 = u64Expire1 - u64Now;
770 if (i64Delta1 <= 0)
771 {
772 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
773 {
774 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
775 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
776#ifdef IN_RING3
777 REMR3NotifyTimerPending(pVM, pVCpuDst);
778#endif
779 }
780 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
781 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
782 }
783
784 /*
785 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
786 * This isn't quite as straightforward when in a catch-up: not only do
787 * we have to adjust the 'now', but we have to adjust the delta as well.
788 */
789
790 /*
791 * Optimistic lockless approach.
792 */
793 uint64_t u64VirtualSyncNow;
794 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
795 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
796 {
797 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
798 {
799 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
800 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
801 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
802 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
803 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
804 {
805 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
806 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
807 if (i64Delta2 > 0)
808 {
809 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
810 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
811
812 if (pVCpu == pVCpuDst)
813 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
814 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
815 }
816
817 if ( !pVM->tm.s.fRunningQueues
818 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
819 {
820 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
821 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
822#ifdef IN_RING3
823 REMR3NotifyTimerPending(pVM, pVCpuDst);
824#endif
825 }
826
827 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
828 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
829 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
830 }
831 }
832 }
833 else
834 {
835 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
836 LogFlow(("TMTimerPoll: stopped\n"));
837 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
838 }
839
840 /*
841 * Complicated lockless approach.
842 */
843 uint64_t off;
844 uint32_t u32Pct = 0;
845 bool fCatchUp;
846 int cOuterTries = 42;
847 for (;; cOuterTries--)
848 {
849 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
850 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
851 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
852 if (fCatchUp)
853 {
854 /* No changes allowed, try get a consistent set of parameters. */
855 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
856 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
857 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
858 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
859 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
860 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
861 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
862 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
863 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
864 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
865 || cOuterTries <= 0)
866 {
867 uint64_t u64Delta = u64Now - u64Prev;
868 if (RT_LIKELY(!(u64Delta >> 32)))
869 {
870 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
871 if (off > u64Sub + offGivenUp)
872 off -= u64Sub;
873 else /* we've completely caught up. */
874 off = offGivenUp;
875 }
876 else
877 /* More than 4 seconds since last time (or negative), ignore it. */
878 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
879
880 /* Check that we're still running and in catch up. */
881 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
882 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
883 break;
884 }
885 }
886 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
887 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
888 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
889 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
890 break; /* Got a consistent offset */
891
892 /* Repeat the initial checks before iterating. */
893 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
894 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
895 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
896 {
897 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
898 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
899 }
900 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
901 {
902 LogFlow(("TMTimerPoll: stopped\n"));
903 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
904 }
905 if (cOuterTries <= 0)
906 break; /* that's enough */
907 }
908 if (cOuterTries <= 0)
909 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
910 u64VirtualSyncNow = u64Now - off;
911
912 /* Calc delta and see if we've got a virtual sync hit. */
913 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
914 if (i64Delta2 <= 0)
915 {
916 if ( !pVM->tm.s.fRunningQueues
917 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
918 {
919 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
920 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
921#ifdef IN_RING3
922 REMR3NotifyTimerPending(pVM, pVCpuDst);
923#endif
924 }
925 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
926 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
927 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
928 }
929
930 /*
931 * Return the time left to the next event.
932 */
933 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
934 if (pVCpu == pVCpuDst)
935 {
936 if (fCatchUp)
937 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
938 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
939 }
940 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
941}
942
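/*
 * Worked example (illustrative): while catching up at u32Pct = 25 the
 * virtual sync clock advances at 125% speed, so a virtual-sync delta of
 * 1 000 000 ticks corresponds to only 1 000 000 * 100 / 125 = 800 000 ticks
 * of virtual (GIP) time -- which is what the final
 * ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100) adjustment above
 * computes before the miss is returned.
 */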
943
944/**
945 * Set FF if we've passed the next virtual event.
946 *
947 * This function is called before FFs are checked in the inner execution EM loops.
948 *
949 * @returns true if timers are pending, false if not.
950 *
951 * @param pVM Pointer to the shared VM structure.
952 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
953 * @thread The emulation thread.
954 */
955VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
956{
957 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
958 uint64_t off = 0;
959 tmTimerPollInternal(pVM, pVCpu, &off);
960 return off == 0;
961}
962
963
964/**
965 * Set FF if we've passed the next virtual event.
966 *
967 * This function is called before FFs are checked in the inner execution EM loops.
968 *
969 * @param pVM Pointer to the shared VM structure.
970 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
971 * @thread The emulation thread.
972 */
973VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
974{
975 uint64_t off;
976 tmTimerPollInternal(pVM, pVCpu, &off);
977}
978
979
980/**
981 * Set FF if we've passed the next virtual event.
982 *
983 * This function is called before FFs are checked in the inner execution EM loops.
984 *
985 * @returns The GIP timestamp of the next event.
986 * 0 if the next event has already expired.
987 * @param pVM Pointer to the shared VM structure.
988 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
989 * @param pu64Delta Where to store the delta.
990 * @thread The emulation thread.
991 */
992VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
993{
994 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
995}
996
997#endif /* VBOX_HIGH_RES_TIMERS_HACK */
998
999/**
1000 * Gets the host context ring-3 pointer of the timer.
1001 *
1002 * @returns HC R3 pointer.
1003 * @param pTimer Timer handle as returned by one of the create functions.
1004 */
1005VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
1006{
1007 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
1008}
1009
1010
1011/**
1012 * Gets the host context ring-0 pointer of the timer.
1013 *
1014 * @returns HC R0 pointer.
1015 * @param pTimer Timer handle as returned by one of the create functions.
1016 */
1017VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
1018{
1019 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
1020}
1021
1022
1023/**
1024 * Gets the RC pointer of the timer.
1025 *
1026 * @returns RC pointer.
1027 * @param pTimer Timer handle as returned by one of the create functions.
1028 */
1029VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
1030{
1031 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
1032}
1033
1034
1035/**
1036 * Locks the timer clock.
1037 *
1038 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1039 * if the clock does not have a lock.
1040 * @param pTimer The timer which clock lock we wish to take.
1041 * @param rcBusy What to return in ring-0 and raw-mode context
1042 * if the lock is busy. Pass VINF_SUCCESS to
1043 * acquire the critical section thru a ring-3
1044 * call if necessary.
1045 *
1046 * @remarks Currently only supported on timers using the virtual sync clock.
1047 */
1048VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
1049{
1050 AssertPtr(pTimer);
1051 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1052 return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
1053}
1054
1055
1056/**
1057 * Unlocks a timer clock locked by TMTimerLock.
1058 *
1059 * @param pTimer The timer which clock to unlock.
1060 */
1061VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
1062{
1063 AssertPtr(pTimer);
1064 AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
1065 PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1066}
1067
1068
1069/**
1070 * Checks if the current thread owns the timer clock lock.
1071 *
1072 * @returns @c true if it's the owner, @c false if not.
1073 * @param pTimer The timer handle.
1074 */
1075VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
1076{
1077 AssertPtr(pTimer);
1078 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
1079 return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1080}
1081
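/*
 * Illustrative example (not part of the original file): bracketing work on a
 * virtual sync timer with the clock lock; VERR_SEM_BUSY as rcBusy is just
 * one plausible choice and cTicksToNext is a hypothetical value.
 */
#if 0 /* example only */
    int rc = TMTimerLock(pTimer, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        rc = TMTimerSetRelative(pTimer, cTicksToNext, NULL /*pu64Now*/);
        TMTimerUnlock(pTimer);
    }
#endif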
1082
1083/**
1084 * Optimized TMTimerSet code path for starting an inactive timer.
1085 *
1086 * @returns VBox status code.
1087 *
1088 * @param pVM The VM handle.
1089 * @param pTimer The timer handle.
1090 * @param u64Expire The new expire time.
1091 */
1092static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
1093{
1094 Assert(!pTimer->offPrev);
1095 Assert(!pTimer->offNext);
1096 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1097
1098 TMCLOCK const enmClock = pTimer->enmClock;
1099
1100 /*
1101 * Calculate and set the expiration time.
1102 */
1103 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1104 {
1105 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1106 AssertMsgStmt(u64Expire >= u64Last,
1107 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1108 u64Expire = u64Last);
1109 }
1110 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1111 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
1112
1113 /*
1114 * Link the timer into the active list.
1115 */
1116 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1117
1118 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1119 TM_UNLOCK_TIMERS(pVM);
1120 return VINF_SUCCESS;
1121}
1122
1123
1124/**
1125 * TMTimerSet for the virtual sync timer queue.
1126 *
1127 * This employs a greatly simplified state machine by always acquiring the
1128 * queue lock and bypassing the scheduling list.
1129 *
1130 * @returns VBox status code
1131 * @param pVM The VM handle.
1132 * @param pTimer The timer handle.
1133 * @param u64Expire The expiration time.
1134 */
1135static int tmTimerVirtualSyncSet(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
1136{
1137 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1138 VM_ASSERT_EMT(pVM);
1139 Assert(PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock));
1140 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1141 AssertRCReturn(rc, rc);
1142
1143 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1144 TMTIMERSTATE enmState = pTimer->enmState;
1145 switch (enmState)
1146 {
1147 case TMTIMERSTATE_EXPIRED_DELIVER:
1148 case TMTIMERSTATE_STOPPED:
1149 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1150 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
1151 else
1152 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);
1153
1154 AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
1155 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
1156 pTimer->u64Expire = u64Expire;
1157 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1158 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1159 rc = VINF_SUCCESS;
1160 break;
1161
1162 case TMTIMERSTATE_ACTIVE:
1163 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
1164 tmTimerQueueUnlinkActive(pQueue, pTimer);
1165 pTimer->u64Expire = u64Expire;
1166 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1167 rc = VINF_SUCCESS;
1168 break;
1169
1170 case TMTIMERSTATE_PENDING_RESCHEDULE:
1171 case TMTIMERSTATE_PENDING_STOP:
1172 case TMTIMERSTATE_PENDING_SCHEDULE:
1173 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1174 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1175 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1176 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1177 case TMTIMERSTATE_DESTROY:
1178 case TMTIMERSTATE_FREE:
1179 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1180 rc = VERR_TM_INVALID_STATE;
1181 break;
1182
1183 default:
1184 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1185 rc = VERR_TM_UNKNOWN_STATE;
1186 break;
1187 }
1188
1189 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1190 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1191 return rc;
1192}
1193
1194
1195/**
1196 * Arm a timer with a (new) expire time.
1197 *
1198 * @returns VBox status.
1199 * @param pTimer Timer handle as returned by one of the create functions.
1200 * @param u64Expire New expire time.
1201 */
1202VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
1203{
1204 PVM pVM = pTimer->CTX_SUFF(pVM);
1205
1206 /* Treat virtual sync timers specially. */
1207 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1208 return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);
1209
1210 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1211 TMTIMER_ASSERT_CRITSECT(pTimer);
1212
1213 DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));
1214
1215#ifdef VBOX_WITH_STATISTICS
1216 /*
1217 * Gather optimization info.
1218 */
1219 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
1220 TMTIMERSTATE enmOrgState = pTimer->enmState;
1221 switch (enmOrgState)
1222 {
1223 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
1224 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
1225 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
1226 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
1227 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
1228 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
1229 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
1230 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
1231 }
1232#endif
1233
1234 /*
1235 * The most common case is setting the timer again during the callback.
1236 * The second most common case is starting a timer at some other time.
1237 */
1238#if 1
1239 TMTIMERSTATE enmState1 = pTimer->enmState;
1240 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
1241 || ( enmState1 == TMTIMERSTATE_STOPPED
1242 && pTimer->pCritSect))
1243 {
1244 /* Try take the TM lock and check the state again. */
1245 if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
1246 {
1247 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
1248 {
1249 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
1250 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1251 return VINF_SUCCESS;
1252 }
1253 TM_UNLOCK_TIMERS(pVM);
1254 }
1255 }
1256#endif
1257
1258 /*
1259 * Unoptimized code path.
1260 */
1261 int cRetries = 1000;
1262 do
1263 {
1264 /*
1265 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1266 */
1267 TMTIMERSTATE enmState = pTimer->enmState;
1268 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
1269 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
1270 switch (enmState)
1271 {
1272 case TMTIMERSTATE_EXPIRED_DELIVER:
1273 case TMTIMERSTATE_STOPPED:
1274 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1275 {
1276 Assert(!pTimer->offPrev);
1277 Assert(!pTimer->offNext);
1278 pTimer->u64Expire = u64Expire;
1279 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1280 tmSchedule(pTimer);
1281 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1282 return VINF_SUCCESS;
1283 }
1284 break;
1285
1286 case TMTIMERSTATE_PENDING_SCHEDULE:
1287 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1288 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1289 {
1290 pTimer->u64Expire = u64Expire;
1291 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1292 tmSchedule(pTimer);
1293 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1294 return VINF_SUCCESS;
1295 }
1296 break;
1297
1298
1299 case TMTIMERSTATE_ACTIVE:
1300 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1301 {
1302 pTimer->u64Expire = u64Expire;
1303 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1304 tmSchedule(pTimer);
1305 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1306 return VINF_SUCCESS;
1307 }
1308 break;
1309
1310 case TMTIMERSTATE_PENDING_RESCHEDULE:
1311 case TMTIMERSTATE_PENDING_STOP:
1312 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1313 {
1314 pTimer->u64Expire = u64Expire;
1315 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1316 tmSchedule(pTimer);
1317 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1318 return VINF_SUCCESS;
1319 }
1320 break;
1321
1322
1323 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1324 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1325 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1326#ifdef IN_RING3
1327 if (!RTThreadYield())
1328 RTThreadSleep(1);
1329#else
1330/** @todo call host context and yield after a couple of iterations */
1331#endif
1332 break;
1333
1334 /*
1335 * Invalid states.
1336 */
1337 case TMTIMERSTATE_DESTROY:
1338 case TMTIMERSTATE_FREE:
1339 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1340 return VERR_TM_INVALID_STATE;
1341 default:
1342 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1343 return VERR_TM_UNKNOWN_STATE;
1344 }
1345 } while (cRetries-- > 0);
1346
1347 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1348 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1349 return VERR_INTERNAL_ERROR;
1350}
1351
1352
1353/**
1354 * Return the current time for the specified clock, setting pu64Now if not NULL.
1355 *
1356 * @returns Current time.
1357 * @param pVM The VM handle.
1358 * @param enmClock The clock to query.
1359 * @param pu64Now Optional pointer where to store the returned time.
1360 */
1361DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1362{
1363 uint64_t u64Now;
1364 switch (enmClock)
1365 {
1366 case TMCLOCK_VIRTUAL_SYNC:
1367 u64Now = TMVirtualSyncGet(pVM);
1368 break;
1369 case TMCLOCK_VIRTUAL:
1370 u64Now = TMVirtualGet(pVM);
1371 break;
1372 case TMCLOCK_REAL:
1373 u64Now = TMRealGet(pVM);
1374 break;
1375 default:
1376 AssertFatalMsgFailed(("%d\n", enmClock));
1377 }
1378
1379 if (pu64Now)
1380 *pu64Now = u64Now;
1381 return u64Now;
1382}
1383
1384
1385/**
1386 * Optimized TMTimerSetRelative code path.
1387 *
1388 * @returns VBox status code.
1389 *
1390 * @param pVM The VM handle.
1391 * @param pTimer The timer handle.
1392 * @param cTicksToNext Clock ticks until the next time expiration.
1393 * @param pu64Now Where to return the current time stamp used.
1394 * Optional.
1395 */
1396static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1397{
1398 Assert(!pTimer->offPrev);
1399 Assert(!pTimer->offNext);
1400 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1401
1402 /*
1403 * Calculate and set the expiration time.
1404 */
1405 TMCLOCK const enmClock = pTimer->enmClock;
1406 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1407 pTimer->u64Expire = u64Expire;
1408 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));
1409
1410 /*
1411 * Link the timer into the active list.
1412 */
1413 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
1414 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1415
1416 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1417 TM_UNLOCK_TIMERS(pVM);
1418 return VINF_SUCCESS;
1419}
1420
1421
1422/**
1423 * TMTimerSetRelative for the virtual sync timer queue.
1424 *
1425 * This employs a greatly simplified state machine by always acquiring the
1426 * queue lock and bypassing the scheduling list.
1427 *
1428 * @returns VBox status code
1429 * @param pVM The VM handle.
1430 * @param cTicksToNext Clock ticks until the next time expiration.
1431 * @param pu64Now Where to return the current time stamp used.
1432 * Optional.
1433 */
1434static int tmTimerVirtualSyncSetRelative(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1435{
1436 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1437 VM_ASSERT_EMT(pVM);
1438 Assert(PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock));
1439 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1440 AssertRCReturn(rc, rc);
1441
1442 /* Calculate the expiration tick. */
1443 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1444 if (pu64Now)
1445 *pu64Now = u64Expire;
1446 u64Expire += cTicksToNext;
1447
1448 /* Update the timer. */
1449 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1450 TMTIMERSTATE enmState = pTimer->enmState;
1451 switch (enmState)
1452 {
1453 case TMTIMERSTATE_EXPIRED_DELIVER:
1454 case TMTIMERSTATE_STOPPED:
1455 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1456 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1457 else
1458 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1459 pTimer->u64Expire = u64Expire;
1460 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1461 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1462 rc = VINF_SUCCESS;
1463 break;
1464
1465 case TMTIMERSTATE_ACTIVE:
1466 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1467 tmTimerQueueUnlinkActive(pQueue, pTimer);
1468 pTimer->u64Expire = u64Expire;
1469 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1470 rc = VINF_SUCCESS;
1471 break;
1472
1473 case TMTIMERSTATE_PENDING_RESCHEDULE:
1474 case TMTIMERSTATE_PENDING_STOP:
1475 case TMTIMERSTATE_PENDING_SCHEDULE:
1476 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1477 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1478 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1479 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1480 case TMTIMERSTATE_DESTROY:
1481 case TMTIMERSTATE_FREE:
1482 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1483 rc = VERR_TM_INVALID_STATE;
1484 break;
1485
1486 default:
1487 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1488 rc = VERR_TM_UNKNOWN_STATE;
1489 break;
1490 }
1491
1492 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1493 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1494 return rc;
1495}
1496
1497
1498/**
1499 * Arm a timer with an expire time relative to the current time.
1500 *
1501 * @returns VBox status.
1502 * @param pTimer Timer handle as returned by one of the create functions.
1503 * @param cTicksToNext Clock ticks until the next time expiration.
1504 * @param pu64Now Where to return the current time stamp used.
1505 * Optional.
1506 */
1507VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1508{
1509 PVM pVM = pTimer->CTX_SUFF(pVM);
1510
1511 /* Treat virtual sync timers specially. */
1512 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1513 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1514
1515 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1516 TMTIMER_ASSERT_CRITSECT(pTimer);
1517
1518 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));
1519
1520#ifdef VBOX_WITH_STATISTICS
1521 /*
1522 * Gather optimization info.
1523 */
1524 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1525 TMTIMERSTATE enmOrgState = pTimer->enmState;
1526 switch (enmOrgState)
1527 {
1528 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1529 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1530 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1531 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1532 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1533 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1534 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1535 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1536 }
1537#endif
1538
1539 /*
1540 * Try to take the TM lock and optimize the common cases.
1541 *
1542 * With the TM lock we can safely make optimizations like immediate
1543 * scheduling and we can also be 100% sure that we're not racing the
1544 * running of the timer queues. As an additional restraint we require the
1545 * timer to have a critical section associated with it, to be 100% sure there
1546 * aren't concurrent operations on the timer. (This latter isn't necessary any
1547 * longer as concurrent operations aren't supported for any timers, critsect or not.)
1548 *
1549 * Note! Lock ordering doesn't apply when we only try to
1550 * get the innermost locks.
1551 */
1552 bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
1553#if 1
1554 if ( fOwnTMLock
1555 && pTimer->pCritSect)
1556 {
1557 TMTIMERSTATE enmState = pTimer->enmState;
1558 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1559 || enmState == TMTIMERSTATE_STOPPED)
1560 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1561 {
1562 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
1563 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1564 return VINF_SUCCESS;
1565 }
1566
1567 /* Optimize other states when it becomes necessary. */
1568 }
1569#endif
1570
1571 /*
1572 * Unoptimized path.
1573 */
1574 int rc;
1575 TMCLOCK const enmClock = pTimer->enmClock;
1576 for (int cRetries = 1000; ; cRetries--)
1577 {
1578 /*
1579 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1580 */
1581 TMTIMERSTATE enmState = pTimer->enmState;
1582 switch (enmState)
1583 {
1584 case TMTIMERSTATE_STOPPED:
1585 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1586 {
1587 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1588 * Figure out a safe way of activating this timer while the queue is
1589 * being run.
1590 * (99.9% sure that the assertion is caused by DevAPIC.cpp
1591 * re-starting the timer in response to an initial_count write.) */
1592 }
1593 /* fall thru */
1594 case TMTIMERSTATE_EXPIRED_DELIVER:
1595 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1596 {
1597 Assert(!pTimer->offPrev);
1598 Assert(!pTimer->offNext);
1599 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1600 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1601 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1602 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1603 tmSchedule(pTimer);
1604 rc = VINF_SUCCESS;
1605 break;
1606 }
1607 rc = VERR_TRY_AGAIN;
1608 break;
1609
1610 case TMTIMERSTATE_PENDING_SCHEDULE:
1611 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1612 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1613 {
1614 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1615 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1616 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1617 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1618 tmSchedule(pTimer);
1619 rc = VINF_SUCCESS;
1620 break;
1621 }
1622 rc = VERR_TRY_AGAIN;
1623 break;
1624
1625
1626 case TMTIMERSTATE_ACTIVE:
1627 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1628 {
1629 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1630 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1631 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1632 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1633 tmSchedule(pTimer);
1634 rc = VINF_SUCCESS;
1635 break;
1636 }
1637 rc = VERR_TRY_AGAIN;
1638 break;
1639
1640 case TMTIMERSTATE_PENDING_RESCHEDULE:
1641 case TMTIMERSTATE_PENDING_STOP:
1642 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1643 {
1644 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1645 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1646 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1647 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1648 tmSchedule(pTimer);
1649 rc = VINF_SUCCESS;
1650 break;
1651 }
1652 rc = VERR_TRY_AGAIN;
1653 break;
1654
1655
1656 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1657 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1658 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1659#ifdef IN_RING3
1660 if (!RTThreadYield())
1661 RTThreadSleep(1);
1662#else
1663/** @todo call host context and yield after a couple of iterations */
1664#endif
1665 rc = VERR_TRY_AGAIN;
1666 break;
1667
1668 /*
1669 * Invalid states.
1670 */
1671 case TMTIMERSTATE_DESTROY:
1672 case TMTIMERSTATE_FREE:
1673 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1674 rc = VERR_TM_INVALID_STATE;
1675 break;
1676
1677 default:
1678 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1679 rc = VERR_TM_UNKNOWN_STATE;
1680 break;
1681 }
1682
1683 /* switch + loop is tedious to break out of. */
1684 if (rc == VINF_SUCCESS)
1685 break;
1686
1687 if (rc != VERR_TRY_AGAIN)
1688 {
1689 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1690 break;
1691 }
1692 if (cRetries <= 0)
1693 {
1694 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1695 rc = VERR_INTERNAL_ERROR;
1696 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1697 break;
1698 }
1699
1700 /*
1701 * Retry to gain locks.
1702 */
1703 if (!fOwnTMLock)
1704 fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
1705
1706 } /* for (;;) */
1707
1708 /*
1709 * Clean up and return.
1710 */
1711 if (fOwnTMLock)
1712 TM_UNLOCK_TIMERS(pVM);
1713
1714 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1715 return rc;
1716}
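
/*
 * Editor's illustration -- not part of the original file. A minimal sketch of
 * the common TMTimerSetRelative usage pattern from a device timer callback;
 * the device callback name is hypothetical, but the TM calls match the
 * signatures in this file.
 *
 *     static DECLCALLBACK(void) myDevTimerCb(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
 *     {
 *         // Re-arm 10 ms out; the virtual clocks tick at 1 GHz, and we do
 *         // not need the current clock value, so pu64Now is NULL.
 *         int rc = TMTimerSetRelative(pTimer, UINT64_C(10000000), NULL);
 *         AssertRC(rc);
 *     }
 */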
1717
1718
1719/**
1720 * Drops a hint about the frequency of the timer.
1721 *
1722 * This is used by TM and the VMM to calculate how often guest execution needs
1723 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1724 *
1725 * @returns VBox status code.
1726 * @param pTimer Timer handle as returned by one of the create
1727 * functions.
1728 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1729 *
1730 * @remarks We're using an integer hertz value here since anything above 1 Hz
1731 * poses no trouble scheduling-wise. The range where the hint
1732 * really matters is >= 100 Hz.
1733 */
1734VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1735{
1736 TMTIMER_ASSERT_CRITSECT(pTimer);
1737
1738 uint32_t const uHzOldHint = pTimer->uHzHint;
1739 pTimer->uHzHint = uHzHint;
1740
1741 PVM pVM = pTimer->CTX_SUFF(pVM);
1742 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1743 if ( uHzHint > uMaxHzHint
1744 || uHzOldHint >= uMaxHzHint)
1745 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1746
1747 return VINF_SUCCESS;
1748}
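
/*
 * Editor's illustration -- not part of the original file. A sketch of pairing
 * the hint with a periodic timer for a guest-programmed ~1 kHz tick; pThis
 * and its timer member are hypothetical.
 *
 *     TMTimerSetFrequencyHint(pThis->pTimer, 1000);   // expect ~1000 ticks/sec
 *     TMTimerSetMillies(pThis->pTimer, 1);            // arm the first tick
 *
 * TMTimerStop clears the hint again, as noted above.
 */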
1749
1750
1751/**
1752 * TMTimerStop for the virtual sync timer queue.
1753 *
1754 * This employs a greatly simplified state machine by always acquiring the
1755 * queue lock and bypassing the scheduling list.
1756 *
1757 * @returns VBox status code
1758 * @param pVM The VM handle.
1759 * @param pTimer The timer handle.
1760 */
1761static int tmTimerVirtualSyncStop(PVM pVM, PTMTIMER pTimer)
1762{
1763 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1764 VM_ASSERT_EMT(pVM);
1765 Assert(PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock));
1766 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1767 AssertRCReturn(rc, rc);
1768
1769 /* Reset the HZ hint. */
1770 if (pTimer->uHzHint)
1771 {
1772 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1773 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1774 pTimer->uHzHint = 0;
1775 }
1776
1777 /* Update the timer state. */
1778 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1779 TMTIMERSTATE enmState = pTimer->enmState;
1780 switch (enmState)
1781 {
1782 case TMTIMERSTATE_ACTIVE:
1783 tmTimerQueueUnlinkActive(pQueue, pTimer);
1784 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1785 rc = VINF_SUCCESS;
1786 break;
1787
1788 case TMTIMERSTATE_EXPIRED_DELIVER:
1789 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1790 rc = VINF_SUCCESS;
1791 break;
1792
1793 case TMTIMERSTATE_STOPPED:
1794 rc = VINF_SUCCESS;
1795 break;
1796
1797 case TMTIMERSTATE_PENDING_RESCHEDULE:
1798 case TMTIMERSTATE_PENDING_STOP:
1799 case TMTIMERSTATE_PENDING_SCHEDULE:
1800 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1801 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1802 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1803 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1804 case TMTIMERSTATE_DESTROY:
1805 case TMTIMERSTATE_FREE:
1806 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1807 rc = VERR_TM_INVALID_STATE;
1808 break;
1809
1810 default:
1811 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1812 rc = VERR_TM_UNKNOWN_STATE;
1813 break;
1814 }
1815
1816 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1817 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1818 return rc;
1819}
1820
1821
1822/**
1823 * Stop the timer.
1824 * Use one of the TMTimerSet* APIs (e.g. TMTimerSetMillies) to "un-stop" the timer.
1825 *
1826 * @returns VBox status.
1827 * @param pTimer Timer handle as returned by one of the create functions.
1828 */
1829VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1830{
1831 PVM pVM = pTimer->CTX_SUFF(pVM);
1832
1833 /* Treat virtual sync timers specially. */
1834 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1835 return tmTimerVirtualSyncStop(pVM, pTimer);
1836
1837 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1838 TMTIMER_ASSERT_CRITSECT(pTimer);
1839
1840 /*
1841 * Reset the HZ hint.
1842 */
1843 if (pTimer->uHzHint)
1844 {
1845 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1846 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1847 pTimer->uHzHint = 0;
1848 }
1849
1850 /** @todo see if this function needs optimizing. */
1851 int cRetries = 1000;
1852 do
1853 {
1854 /*
1855 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1856 */
1857 TMTIMERSTATE enmState = pTimer->enmState;
1858 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1859 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1860 switch (enmState)
1861 {
1862 case TMTIMERSTATE_EXPIRED_DELIVER:
1863 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1864 return VERR_INVALID_PARAMETER;
1865
1866 case TMTIMERSTATE_STOPPED:
1867 case TMTIMERSTATE_PENDING_STOP:
1868 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1869 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1870 return VINF_SUCCESS;
1871
1872 case TMTIMERSTATE_PENDING_SCHEDULE:
1873 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1874 {
1875 tmSchedule(pTimer);
1876 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1877 return VINF_SUCCESS;
1878 }
1879 break;
1880 case TMTIMERSTATE_PENDING_RESCHEDULE:
1881 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1882 {
1883 tmSchedule(pTimer);
1884 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1885 return VINF_SUCCESS;
1886 }
1887 break;
1888
1889 case TMTIMERSTATE_ACTIVE:
1890 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1891 {
1892 tmSchedule(pTimer);
1893 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1894 return VINF_SUCCESS;
1895 }
1896 break;
1897
1898 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1899 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1900 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1901#ifdef IN_RING3
1902 if (!RTThreadYield())
1903 RTThreadSleep(1);
1904#else
1905/**@todo call host and yield cpu after a while. */
1906#endif
1907 break;
1908
1909 /*
1910 * Invalid states.
1911 */
1912 case TMTIMERSTATE_DESTROY:
1913 case TMTIMERSTATE_FREE:
1914 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1915 return VERR_TM_INVALID_STATE;
1916 default:
1917 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1918 return VERR_TM_UNKNOWN_STATE;
1919 }
1920 } while (cRetries-- > 0);
1921
1922 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1923 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1924 return VERR_INTERNAL_ERROR;
1925}
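
/*
 * Editor's illustration -- not part of the original file. Stopping an already
 * stopped timer returns VINF_SUCCESS, so reset handlers can stop timers
 * unconditionally; the device and member names are hypothetical:
 *
 *     static DECLCALLBACK(void) myDevReset(PPDMDEVINS pDevIns)
 *     {
 *         PMYDEV pThis = PDMINS_2_DATA(pDevIns, PMYDEV);
 *         TMTimerStop(pThis->pTimer);     // also clears the Hz hint
 *     }
 */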
1926
1927
1928/**
1929 * Get the current clock time.
1930 * Handy for calculating the new expire time.
1931 *
1932 * @returns Current clock time.
1933 * @param pTimer Timer handle as returned by one of the create functions.
1934 */
1935VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1936{
1937 PVM pVM = pTimer->CTX_SUFF(pVM);
1938
1939 uint64_t u64;
1940 switch (pTimer->enmClock)
1941 {
1942 case TMCLOCK_VIRTUAL:
1943 u64 = TMVirtualGet(pVM);
1944 break;
1945 case TMCLOCK_VIRTUAL_SYNC:
1946 u64 = TMVirtualSyncGet(pVM);
1947 break;
1948 case TMCLOCK_REAL:
1949 u64 = TMRealGet(pVM);
1950 break;
1951 default:
1952 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1953 return UINT64_MAX;
1954 }
1955 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1956 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1957 return u64;
1958}
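
/*
 * Editor's illustration -- not part of the original file. The typical pattern
 * for deriving an absolute expire time from TMTimerGet and passing it to
 * TMTimerSet (defined earlier in this file):
 *
 *     uint64_t const u64Now = TMTimerGet(pTimer);
 *     TMTimerSet(pTimer, u64Now + TMTimerFromMilli(pTimer, 10));   // +10 ms
 */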
1959
1960
1961/**
1962 * Get the frequency of the timer clock.
1963 *
1964 * @returns Clock frequency (as Hz of course).
1965 * @param pTimer Timer handle as returned by one of the create functions.
1966 */
1967VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1968{
1969 switch (pTimer->enmClock)
1970 {
1971 case TMCLOCK_VIRTUAL:
1972 case TMCLOCK_VIRTUAL_SYNC:
1973 return TMCLOCK_FREQ_VIRTUAL;
1974
1975 case TMCLOCK_REAL:
1976 return TMCLOCK_FREQ_REAL;
1977
1978 default:
1979 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1980 return 0;
1981 }
1982}
1983
1984
1985/**
1986 * Get the expire time of the timer.
1987 * Only valid for active timers.
1988 *
1989 * @returns Expire time of the timer.
1990 * @param pTimer Timer handle as returned by one of the create functions.
1991 */
1992VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1993{
1994 TMTIMER_ASSERT_CRITSECT(pTimer);
1995 int cRetries = 1000;
1996 do
1997 {
1998 TMTIMERSTATE enmState = pTimer->enmState;
1999 switch (enmState)
2000 {
2001 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2002 case TMTIMERSTATE_EXPIRED_DELIVER:
2003 case TMTIMERSTATE_STOPPED:
2004 case TMTIMERSTATE_PENDING_STOP:
2005 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2006 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2007 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2008 return ~(uint64_t)0;
2009
2010 case TMTIMERSTATE_ACTIVE:
2011 case TMTIMERSTATE_PENDING_RESCHEDULE:
2012 case TMTIMERSTATE_PENDING_SCHEDULE:
2013 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2014 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2015 return pTimer->u64Expire;
2016
2017 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2018 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2019#ifdef IN_RING3
2020 if (!RTThreadYield())
2021 RTThreadSleep(1);
2022#endif
2023 break;
2024
2025 /*
2026 * Invalid states.
2027 */
2028 case TMTIMERSTATE_DESTROY:
2029 case TMTIMERSTATE_FREE:
2030 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2031 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2032 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2033 return ~(uint64_t)0;
2034 default:
2035 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2036 return ~(uint64_t)0;
2037 }
2038 } while (cRetries-- > 0);
2039
2040 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
2041 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2042 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2043 return ~(uint64_t)0;
2044}
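
/*
 * Editor's illustration -- not part of the original file. Since inactive
 * timers yield ~0, callers wanting the remaining time should check for that
 * sentinel first:
 *
 *     uint64_t const u64Expire = TMTimerGetExpire(pTimer);
 *     uint64_t const u64Now    = TMTimerGet(pTimer);
 *     uint64_t const cLeft     = u64Expire == UINT64_MAX || u64Expire <= u64Now
 *                              ? 0 : u64Expire - u64Now;
 */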
2045
2046
2047/**
2048 * Checks if a timer is active or not.
2049 *
2050 * @returns True if active.
2051 * @returns False if not active.
2052 * @param pTimer Timer handle as returned by one of the create functions.
2053 */
2054VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
2055{
2056 TMTIMERSTATE enmState = pTimer->enmState;
2057 switch (enmState)
2058 {
2059 case TMTIMERSTATE_STOPPED:
2060 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2061 case TMTIMERSTATE_EXPIRED_DELIVER:
2062 case TMTIMERSTATE_PENDING_STOP:
2063 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2064 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2065 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2066 return false;
2067
2068 case TMTIMERSTATE_ACTIVE:
2069 case TMTIMERSTATE_PENDING_RESCHEDULE:
2070 case TMTIMERSTATE_PENDING_SCHEDULE:
2071 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2072 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2073 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2074 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2075 return true;
2076
2077 /*
2078 * Invalid states.
2079 */
2080 case TMTIMERSTATE_DESTROY:
2081 case TMTIMERSTATE_FREE:
2082 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
2083 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2084 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2085 return false;
2086 default:
2087 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2088 return false;
2089 }
2090}
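
/*
 * Editor's illustration -- not part of the original file. A sketch of how
 * saved-state code might use TMTimerIsActive, with hypothetical names; real
 * devices normally call TMR3TimerSave, which handles this internally:
 *
 *     if (TMTimerIsActive(pThis->pTimer))
 *         SSMR3PutU64(pSSM, TMTimerGetExpire(pThis->pTimer));
 *     else
 *         SSMR3PutU64(pSSM, UINT64_MAX);
 */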
2091
2092
2093/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2094
2095
2096/**
2097 * Arm a timer with a (new) expire time relative to current time.
2098 *
2099 * @returns VBox status.
2100 * @param pTimer Timer handle as returned by one of the create functions.
2101 * @param cMilliesToNext Number of milliseconds to the next tick.
2102 */
2103VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
2104{
2105 PVM pVM = pTimer->CTX_SUFF(pVM);
2106 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
2107
2108 switch (pTimer->enmClock)
2109 {
2110 case TMCLOCK_VIRTUAL:
2111 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2112 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2113
2114 case TMCLOCK_VIRTUAL_SYNC:
2115 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2116 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2117
2118 case TMCLOCK_REAL:
2119 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2120 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
2121
2122 default:
2123 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2124 return VERR_INTERNAL_ERROR;
2125 }
2126}
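
/*
 * Editor's note -- not part of the original file. Sanity check of the scaling
 * above: the virtual clocks run at TMCLOCK_FREQ_VIRTUAL (1 GHz), so e.g.
 * cMilliesToNext = 10 becomes 10 * 1000000 = 10000000 ticks, while for
 * TMCLOCK_REAL (1 kHz) the millisecond count is passed through unchanged.
 */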
2127
2128
2129/**
2130 * Arm a timer with a (new) expire time relative to current time.
2131 *
2132 * @returns VBox status.
2133 * @param pTimer Timer handle as returned by one of the create functions.
2134 * @param cMicrosToNext Number of microseconds to the next tick.
2135 */
2136VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
2137{
2138 PVM pVM = pTimer->CTX_SUFF(pVM);
2139 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
2140
2141 switch (pTimer->enmClock)
2142 {
2143 case TMCLOCK_VIRTUAL:
2144 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2145 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2146
2147 case TMCLOCK_VIRTUAL_SYNC:
2148 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2149 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2150
2151 case TMCLOCK_REAL:
2152 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2153 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
2154
2155 default:
2156 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2157 return VERR_INTERNAL_ERROR;
2158 }
2159}
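
/*
 * Editor's note -- not part of the original file. For TMCLOCK_REAL the
 * division truncates: TMTimerSetMicro(pTimer, 999) on a real-clock timer is
 * equivalent to TMTimerSetRelative(pTimer, 0, NULL) and expires immediately.
 */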
2160
2161
2162/**
2163 * Arm a timer with a (new) expire time relative to current time.
2164 *
2165 * @returns VBox status.
2166 * @param pTimer Timer handle as returned by one of the create functions.
2167 * @param cNanosToNext Number of nanoseconds to the next tick.
2168 */
2169VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
2170{
2171 PVM pVM = pTimer->CTX_SUFF(pVM);
2172 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
2173
2174 switch (pTimer->enmClock)
2175 {
2176 case TMCLOCK_VIRTUAL:
2177 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2178 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2179
2180 case TMCLOCK_VIRTUAL_SYNC:
2181 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2182 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2183
2184 case TMCLOCK_REAL:
2185 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2186 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
2187
2188 default:
2189 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2190 return VERR_INTERNAL_ERROR;
2191 }
2192}
2193
2194
2195/**
2196 * Get the current clock time as nanoseconds.
2197 *
2198 * @returns The timer clock as nanoseconds.
2199 * @param pTimer Timer handle as returned by one of the create functions.
2200 */
2201VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
2202{
2203 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
2204}
2205
2206
2207/**
2208 * Get the current clock time as microseconds.
2209 *
2210 * @returns The timer clock as microseconds.
2211 * @param pTimer Timer handle as returned by one of the create functions.
2212 */
2213VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
2214{
2215 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
2216}
2217
2218
2219/**
2220 * Get the current clock time as milliseconds.
2221 *
2222 * @returns The timer clock as milliseconds.
2223 * @param pTimer Timer handle as returned by one of the create functions.
2224 */
2225VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
2226{
2227 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
2228}
2229
2230
2231/**
2232 * Converts the specified timer clock time to nanoseconds.
2233 *
2234 * @returns nanoseconds.
2235 * @param pTimer Timer handle as returned by one of the create functions.
2236 * @param u64Ticks The clock ticks.
2237 * @remark There could be rounding errors here. We just do a simple integer divide
2238 * without any adjustments.
2239 */
2240VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
2241{
2242 switch (pTimer->enmClock)
2243 {
2244 case TMCLOCK_VIRTUAL:
2245 case TMCLOCK_VIRTUAL_SYNC:
2246 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2247 return u64Ticks;
2248
2249 case TMCLOCK_REAL:
2250 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2251 return u64Ticks * 1000000;
2252
2253 default:
2254 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2255 return 0;
2256 }
2257}
2258
2259
2260/**
2261 * Converts the specified timer clock time to microseconds.
2262 *
2263 * @returns microseconds.
2264 * @param pTimer Timer handle as returned by one of the create functions.
2265 * @param u64Ticks The clock ticks.
2266 * @remark There could be rounding errors here. We just do a simple integer divide
2267 * without any adjustments.
2268 */
2269VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
2270{
2271 switch (pTimer->enmClock)
2272 {
2273 case TMCLOCK_VIRTUAL:
2274 case TMCLOCK_VIRTUAL_SYNC:
2275 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2276 return u64Ticks / 1000;
2277
2278 case TMCLOCK_REAL:
2279 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2280 return u64Ticks * 1000;
2281
2282 default:
2283 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2284 return 0;
2285 }
2286}
2287
2288
2289/**
2290 * Converts the specified timer clock time to milliseconds.
2291 *
2292 * @returns milliseconds.
2293 * @param pTimer Timer handle as returned by one of the create functions.
2294 * @param u64Ticks The clock ticks.
2295 * @remark There could be rounding errors here. We just do a simple integer divide
2296 * without any adjustments.
2297 */
2298VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
2299{
2300 switch (pTimer->enmClock)
2301 {
2302 case TMCLOCK_VIRTUAL:
2303 case TMCLOCK_VIRTUAL_SYNC:
2304 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2305 return u64Ticks / 1000000;
2306
2307 case TMCLOCK_REAL:
2308 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2309 return u64Ticks;
2310
2311 default:
2312 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2313 return 0;
2314 }
2315}
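
/*
 * Editor's illustration -- not part of the original file. The To* conversions
 * above truncate, so converting down and back up loses precision; e.g. for a
 * virtual clock timer:
 *
 *     uint64_t cMs = TMTimerToMilli(pTimer, 1999999);   // 1999999 ns -> 1 ms
 *     uint64_t cNs = TMTimerFromMilli(pTimer, cMs);     // 1 ms -> 1000000 ns
 */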
2316
2317
2318/**
2319 * Converts the specified nanosecond timestamp to timer clock ticks.
2320 *
2321 * @returns timer clock ticks.
2322 * @param pTimer Timer handle as returned by one of the create functions.
2323 * @param cNanoSecs The nanosecond value to convert.
2324 * @remark There could be rounding and overflow errors here.
2325 */
2326VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
2327{
2328 switch (pTimer->enmClock)
2329 {
2330 case TMCLOCK_VIRTUAL:
2331 case TMCLOCK_VIRTUAL_SYNC:
2332 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2333 return cNanoSecs;
2334
2335 case TMCLOCK_REAL:
2336 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2337 return cNanoSecs / 1000000;
2338
2339 default:
2340 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2341 return 0;
2342 }
2343}
2344
2345
2346/**
2347 * Converts the specified microsecond timestamp to timer clock ticks.
2348 *
2349 * @returns timer clock ticks.
2350 * @param pTimer Timer handle as returned by one of the create functions.
2351 * @param cMicroSecs The microsecond value to convert.
2352 * @remark There could be rounding and overflow errors here.
2353 */
2354VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
2355{
2356 switch (pTimer->enmClock)
2357 {
2358 case TMCLOCK_VIRTUAL:
2359 case TMCLOCK_VIRTUAL_SYNC:
2360 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2361 return cMicroSecs * 1000;
2362
2363 case TMCLOCK_REAL:
2364 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2365 return cMicroSecs / 1000;
2366
2367 default:
2368 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2369 return 0;
2370 }
2371}
2372
2373
2374/**
2375 * Converts the specified millisecond timestamp to timer clock ticks.
2376 *
2377 * @returns timer clock ticks.
2378 * @param pTimer Timer handle as returned by one of the create functions.
2379 * @param cMilliSecs The millisecond value to convert.
2380 * @remark There could be rounding and overflow errors here.
2381 */
2382VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
2383{
2384 switch (pTimer->enmClock)
2385 {
2386 case TMCLOCK_VIRTUAL:
2387 case TMCLOCK_VIRTUAL_SYNC:
2388 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2389 return cMilliSecs * 1000000;
2390
2391 case TMCLOCK_REAL:
2392 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2393 return cMilliSecs;
2394
2395 default:
2396 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2397 return 0;
2398 }
2399}
2400
2401
2402/**
2403 * Convert state to string.
2404 *
2405 * @returns Readonly status name.
2406 * @param enmState State.
2407 */
2408const char *tmTimerState(TMTIMERSTATE enmState)
2409{
2410 switch (enmState)
2411 {
2412#define CASE(num, state) \
2413 case TMTIMERSTATE_##state: \
2414 AssertCompile(TMTIMERSTATE_##state == (num)); \
2415 return #num "-" #state
2416 CASE( 1,STOPPED);
2417 CASE( 2,ACTIVE);
2418 CASE( 3,EXPIRED_GET_UNLINK);
2419 CASE( 4,EXPIRED_DELIVER);
2420 CASE( 5,PENDING_STOP);
2421 CASE( 6,PENDING_STOP_SCHEDULE);
2422 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2423 CASE( 8,PENDING_SCHEDULE);
2424 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2425 CASE(10,PENDING_RESCHEDULE);
2426 CASE(11,DESTROY);
2427 CASE(12,FREE);
2428 default:
2429 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2430 return "Invalid state!";
2431#undef CASE
2432 }
2433}
2434
2435
2436/**
2437 * Gets the highest frequency hint for all the important timers.
2438 *
2439 * @returns The highest frequency. 0 if no timers care.
2440 * @param pVM The VM handle.
2441 */
2442static uint32_t tmGetFrequencyHint(PVM pVM)
2443{
2444 /*
2445 * Query the value, recalculate it if necessary.
2446 *
2447 * The "right" highest frequency value isn't so important that we'll block
2448 * waiting on the timer semaphore.
2449 */
2450 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2451 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2452 {
2453 if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
2454 {
2455 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2456
2457 /*
2458 * Loop over the timers associated with each clock.
2459 */
2460 uMaxHzHint = 0;
2461 for (int i = 0; i < TMCLOCK_MAX; i++)
2462 {
2463 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2464 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2465 {
2466 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2467 if (uHzHint > uMaxHzHint)
2468 {
2469 switch (pCur->enmState)
2470 {
2471 case TMTIMERSTATE_ACTIVE:
2472 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2473 case TMTIMERSTATE_EXPIRED_DELIVER:
2474 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2475 case TMTIMERSTATE_PENDING_SCHEDULE:
2476 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2477 case TMTIMERSTATE_PENDING_RESCHEDULE:
2478 uMaxHzHint = uHzHint;
2479 break;
2480
2481 case TMTIMERSTATE_STOPPED:
2482 case TMTIMERSTATE_PENDING_STOP:
2483 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2484 case TMTIMERSTATE_DESTROY:
2485 case TMTIMERSTATE_FREE:
2486 break;
2487 /* no default, want gcc warnings when adding more states. */
2488 }
2489 }
2490 }
2491 }
2492 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2493 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2494 TM_UNLOCK_TIMERS(pVM);
2495 }
2496 }
2497 return uMaxHzHint;
2498}
2499
2500
2501/**
2502 * Calculates a host timer frequency that would be suitable for the current
2503 * timer load.
2504 *
2505 * This will take the highest timer frequency, adjust for catch-up and warp
2506 * driver, and finally add a little fudge factor. The caller (VMM) will use
2507 * the result to adjust the per-cpu preemption timer.
2508 *
2509 * @returns The highest frequency. 0 if no important timers around.
2510 * @param pVM The VM handle.
2511 * @param pVCpu The current CPU.
2512 */
2513VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
2514{
2515 uint32_t uHz = tmGetFrequencyHint(pVM);
2516
2517 /* Catch-up: at the beginning of the effort we have to be more aggressive
2518 than the percentage alone indicates. */
2519 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2520 {
2521 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2522 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2523 {
2524 if (u32Pct <= 100)
2525 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2526 else if (u32Pct <= 200)
2527 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2528 else if (u32Pct <= 400)
2529 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2530 uHz *= u32Pct + 100;
2531 uHz /= 100;
2532 }
2533 }
2534
2535 /* Warp drive. */
2536 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2537 {
2538 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2539 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2540 {
2541 uHz *= u32Pct;
2542 uHz /= 100;
2543 }
2544 }
2545
2546 /* Fudge factor. */
2547 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2548 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2549 else
2550 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2551 uHz /= 100;
2552
2553 /* Make sure it isn't too high. */
2554 if (uHz > pVM->tm.s.cHostHzMax)
2555 uHz = pVM->tm.s.cHostHzMax;
2556
2557 return uHz;
2558}
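
/*
 * Editor's illustration -- not part of the original file. A worked example of
 * the arithmetic above with made-up numbers: highest hint 1000 Hz, catching up
 * at 150% with a hypothetical 250% fudge factor for the <=200% bracket, no
 * warp drive, and a 111% timer-CPU fudge factor:
 *
 *     u32Pct = 150 * 250 / 100          =  375
 *     uHz    = 1000 * (375 + 100) / 100 = 4750
 *     uHz    = 4750 * 111 / 100         = 5272
 *
 * The result is finally clamped to cHostHzMax.
 */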