VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@41829

Last change on this file since 41829 was 41801, checked in by vboxsync, 12 years ago

Doxygen.

/* $Id: TMAll.cpp 41801 2012-06-17 16:46:51Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/dbgftrace.h>
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
#  include <VBox/vmm/rem.h>
# endif
#endif
#include "TMInternal.h"
#include <VBox/vmm/vm.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

#include "TMInline.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ), \
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif


/**
 * Gets the current warp drive percent.
 *
 * @returns The warp drive percent.
 * @param   pVM         Pointer to the VM.
 */
VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
{
    return pVM->tm.s.u32VirtualWarpDrivePercentage;
}


/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs           = RTTimeNanoTS();
    uint64_t const cNsTotalNew       = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
    uint64_t const cNsExecutingNew   = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint64_t const cNsOtherNew       = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
# endif

    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cNsTotal     = cNsTotalNew;
    pVCpu->tm.s.cNsOther     = cNsOtherNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}
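
/*
 * Illustrative note (added editorially, not part of the original file): the
 * expected pairing of these notifications around a burst of guest execution,
 * with pVCpu and the run-loop call assumed from the caller's context:
 *
 *      TMNotifyStartOfExecution(pVCpu);    // resume a tied TSC, stamp start time
 *      rc = emR3RunGuest(pVM, pVCpu);      // hypothetical executor call
 *      TMNotifyEndOfExecution(pVCpu);      // pause a tied TSC, update ns accounting
 */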


/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
# endif

    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsHalted = cNsHaltedNew;
    pVCpu->tm.s.cNsTotal  = cNsTotalNew;
    pVCpu->tm.s.cNsOther  = cNsOtherNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}


/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM         Pointer to the VM.
 */
DECLINLINE(void) tmScheduleNotify(PVM pVM)
{
    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
# ifdef VBOX_WITH_REM
        REMR3NotifyTimerPending(pVM, pVCpuDst);
# endif
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}


/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    if (    VM_IS_EMT(pVM)
        &&  RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        TM_UNLOCK_TIMERS(pVM);
    }
    else
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}


/**
 * Try change the state to enmStateNew from enmStateOld.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}
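
/*
 * Editorial note: TM_TRY_SET_STATE comes from TMInternal.h.  A minimal sketch
 * of the compare-and-swap it is assumed to perform (the casts and field width
 * are guesses, not the actual macro):
 *
 *      fRc = ASMAtomicCmpXchgU32((uint32_t volatile *)&(pTimer)->enmState,
 *                                (uint32_t)(enmStateNew), (uint32_t)(enmStateOld));
 */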


/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueue      The timer queue the timer belongs to.
 * @param   pTimer      The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t       offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}
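
/*
 * Editorial note on the loop above: the schedule list is a lock-free LIFO
 * whose links are self-relative offsets rather than pointers (offsets stay
 * valid across the ring-3/ring-0/raw-mode mappings).  Expressed with plain
 * pointers the pattern is the classic CAS push, retried until no other
 * thread has pushed in between:
 *
 *      do
 *          pTimer->pNext = pHead = pQueue->pScheduleHead;
 *      while (!ASMAtomicCmpXchgPtr(&pQueue->pScheduleHead, pTimer, pHead));
 *
 * (pNext/pScheduleHead are illustrative names only; the real code keeps
 * offsets in offScheduleNext/offSchedule.)
 */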


/**
 * Try change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}


/**
 * Links a timer into the active list of a timer queue.
 *
 * @param   pQueue          The queue.
 * @param   pTimer          The timer.
 * @param   u64Expire       The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offNext);
    Assert(!pTimer->offPrev);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */

    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
                return;
            }
        }
    }
    else
    {
        TMTIMER_SET_HEAD(pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
    }
}



/**
 * Schedules the given timer on the given queue.
 *
 * @param   pQueue          The timer queue.
 * @param   pTimer          The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                /* fall thru */

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                /* fall thru */

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
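
/*
 * Editorial summary of the transitions handled above:
 *
 *      PENDING_RESCHEDULE    -> PENDING_SCHEDULE (unlink) -> ACTIVE (link)
 *      PENDING_SCHEDULE      -> ACTIVE (link)
 *      PENDING_STOP          -> PENDING_STOP_SCHEDULE (unlink) -> STOPPED
 *      PENDING_STOP_SCHEDULE -> STOPPED
 *      *_SET_EXPIRE          -> relinked onto the schedule list for a later pass
 *      DESTROY               -> left for TMR3TimerDestroy
 *
 * Each arrow is a tmTimerTry compare-and-swap; a failed swap falls back into
 * the retry loop, which gives up after two retries.
 */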


/**
 * Schedules the specified timer queue.
 *
 * @param   pVM             The VM to run the timers for.
 * @param   pQueue          The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}


#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM             Pointer to the VM.
 * @param   pszWhere        Caller location, used in the assertion messages.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);

    /*
     * Check the linking of the active lists.
     */
    bool fHaveVirtualSyncLock = false;
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
        {
            if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
                continue;
            fHaveVirtualSyncLock = true;
        }
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                    Assert(pCur->offPrev || pCur == pCurAct);
                    while (pCurAct && pCurAct != pCur)
                        pCurAct = TMTIMER_GET_NEXT(pCurAct);
                    Assert(pCurAct == pCur);
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    Assert(!pCur->offNext);
                    Assert(!pCur->offPrev);
                    for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                         pCurAct;
                         pCurAct = TMTIMER_GET_NEXT(pCurAct))
                    {
                        Assert(pCurAct != pCur);
                        Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                        Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                    }
                }
                break;

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */

    if (fHaveVirtualSyncLock)
        PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
#endif /* VBOX_STRICT */

#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the VM.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}
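
/*
 * Editorial worked example: ASMMultU64ByU32DivByU32(x, 100, u32Pct) computes
 * x * 100 / u32Pct.  With a 200% warp drive, virtual time runs twice as fast
 * as GIP time, so a virtual-clock span of 1,000,000 ticks corresponds to
 * 1,000,000 * 100 / 200 = 500,000 GIP ns.  The u64GipTime >= u64Delta split
 * ensures only the part of the interval after the warp start gets scaled.
 */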


/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the VM.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the
 *                      caller.
 * @param   pVCpuDst    Pointer to the shared VMCPU structure of the
 *                      dedicated timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}

/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    PVMCPU         pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    const uint64_t u64Now   = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t  i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if we're in a catch-up: not only do
     * we have to adjust the 'now', we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (    !pVM->tm.s.fRunningQueues
                    &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#if defined(IN_RING3) && defined(VBOX_WITH_REM)
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}
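
/*
 * Editorial illustration (assumed caller, not in this file): an inner
 * execution loop would typically use the boolean poll like this, bailing
 * out to run pending timers (the status code is an assumption):
 *
 *      if (TMTimerPollBool(pVM, pVCpu))
 *          return VINF_EM_RAW_TIMER_PENDING;
 */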


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

#endif /* VBOX_HIGH_RES_TIMERS_HACK */

/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * @returns HC R3 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the host context ring-0 pointer of the timer.
 *
 * @returns HC R0 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the RC pointer of the timer.
 *
 * @returns RC pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pTimer      The timer which clock lock we wish to take.
 * @param   rcBusy      What to return in ring-0 and raw-mode context if the
 *                      lock is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section thru a ring-3 call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
}


/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pTimer      The timer which clock to unlock.
 */
VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}


/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if it's the owner, @c false if not.
 * @param   pTimer      The timer handle.
 */
VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}
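
/*
 * Editorial illustration of the three lock APIs above, roughly as a device
 * might use them around virtual-sync timer manipulation (rcBusy choice and
 * surrounding code are assumptions):
 *
 *      int rc = TMTimerLock(pTimer, VERR_IGNORED);
 *      if (rc == VINF_SUCCESS)
 *      {
 *          Assert(TMTimerIsLockOwner(pTimer));
 *          // ... reprogram or stop the timer ...
 *          TMTimerUnlock(pTimer);
 *      }
 */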


/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM             Pointer to the VM.
 * @param   pTimer          The timer handle.
 * @param   u64Expire       The new expire time.
 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    TMCLOCK const enmClock = pTimer->enmClock;

    /*
     * Calculate and set the expiration time.
     */
    if (enmClock == TMCLOCK_VIRTUAL_SYNC)
    {
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}


/**
 * TMTimerSet for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             Pointer to the VM.
 * @param   pTimer          The timer handle.
 * @param   u64Expire       The expiration time.
 */
static int tmTimerVirtualSyncSet(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    Assert(PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock));
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE pQueue   = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE  enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Expire       New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            TM_UNLOCK_TIMERS(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
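
/*
 * Editorial illustration: arming an absolute deadline normally starts by
 * reading the timer's own clock (TMTimerGet is assumed to be the companion
 * getter; cTicksDelay is a caller-supplied value):
 *
 *      uint64_t const u64Now = TMTimerGet(pTimer);
 *      int rc = TMTimerSet(pTimer, u64Now + cTicksDelay);
 */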


/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM             Pointer to the VM.
 * @param   enmClock        The clock to query.
 * @param   pu64Now         Optional pointer where to store the return time.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             Pointer to the VM.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const  enmClock  = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    pTimer->u64Expire        = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}


/**
 * TMTimerSetRelative for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             Pointer to the VM.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerVirtualSyncSetRelative(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    VM_ASSERT_EMT(pVM);
    Assert(PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock));
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Calculate the expiration tick. */
    uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
    if (pu64Now)
        *pu64Now = u64Expire;
    u64Expire += cTicksToNext;

    /* Update the timer. */
    PTMTIMERQUEUE pQueue   = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE  enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with an expire time relative to the current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues.  As an additional restraint we require the
     * timer to have a critical section associated with it, so we're 100% sure
     * there aren't concurrent operations on the timer.  (This latter restraint
     * isn't strictly necessary any longer, as this isn't supported for any
     * timers, critsect or not.)
     *
     * Note! Lock ordering doesn't apply when we only try to
     *       get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    int rc;
    TMCLOCK const enmClock = pTimer->enmClock;
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *  Figure a safe way of activating this timer while the queue is
                     *  being run.
                     *  (99.9% sure this assertion is caused by DevAPIC.cpp
                     *  re-starting the timer in response to an initial_count write.) */
                }
                /* fall thru */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_TIMER_UNSTABLE_STATE;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
            fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnTMLock)
        TM_UNLOCK_TIMERS(pVM);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}
1721
1722
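/*
 * Illustrative sketch (not part of the original file): the retry loop above
 * relies on optimistic, lock-free state transitions. Assuming tmTimerTry()
 * wraps an atomic compare-exchange on the 32-bit state word, its core is:
 */
#if 0 /* example only */
static bool tmExampleTryTransition(volatile uint32_t *penmState,
                                   uint32_t enmStateNew, uint32_t enmStateOld)
{
    /* Succeeds only if the state is still enmStateOld, i.e. nobody raced us;
       on failure the caller re-samples the state and retries (or gives up
       after 1000 iterations as above). */
    return ASMAtomicCmpXchgU32(penmState, enmStateNew, enmStateOld);
}
#endif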
1723/**
1724 * Drops a hint about the frequency of the timer.
1725 *
1726 * This is used by TM and the VMM to calculate how often guest execution needs
1727 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1728 *
1729 * @returns VBox status code.
1730 * @param pTimer Timer handle as returned by one of the create
1731 * functions.
1732 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1733 *
1734 * @remarks We're using an integer hertz value here since anything above 1 Hz
1735 * will be no trouble to satisfy scheduling-wise. The range where
1736 * the hint makes sense is >= 100 Hz.
1737 */
1738VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1739{
1740 TMTIMER_ASSERT_CRITSECT(pTimer);
1741
1742 uint32_t const uHzOldHint = pTimer->uHzHint;
1743 pTimer->uHzHint = uHzHint;
1744
1745 PVM pVM = pTimer->CTX_SUFF(pVM);
1746 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1747 if ( uHzHint > uMaxHzHint
1748 || uHzOldHint >= uMaxHzHint)
1749 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1750
1751 return VINF_SUCCESS;
1752}
1753
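/*
 * Usage sketch (not part of the original file): a device arming a periodic
 * 100 Hz timer pairs the arming call with a matching hint so that
 * TMCalcHostTimerFrequency() below can size the preemption timer. The
 * device function name is hypothetical.
 */
#if 0 /* example only */
static int devExampleStartPeriodic(PTMTIMER pTimer)
{
    int rc = TMTimerSetMillies(pTimer, 10 /* ms, i.e. a 100 Hz period */);
    if (RT_SUCCESS(rc))
        rc = TMTimerSetFrequencyHint(pTimer, 100 /* Hz */);
    return rc; /* TMTimerStop() clears the hint again automatically. */
}
#endif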
1754
1755/**
1756 * TMTimerStop for the virtual sync timer queue.
1757 *
1758 * This employs a greatly simplified state machine by always acquiring the
1759 * queue lock and bypassing the scheduling list.
1760 *
1761 * @returns VBox status code
1762 * @param pVM Pointer to the VM.
1763 * @param pTimer The timer handle.
1764 */
1765static int tmTimerVirtualSyncStop(PVM pVM, PTMTIMER pTimer)
1766{
1767 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1768 VM_ASSERT_EMT(pVM);
1769 Assert(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
1770 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1771 AssertRCReturn(rc, rc);
1772
1773 /* Reset the HZ hint. */
1774 if (pTimer->uHzHint)
1775 {
1776 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1777 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1778 pTimer->uHzHint = 0;
1779 }
1780
1781 /* Update the timer state. */
1782 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1783 TMTIMERSTATE enmState = pTimer->enmState;
1784 switch (enmState)
1785 {
1786 case TMTIMERSTATE_ACTIVE:
1787 tmTimerQueueUnlinkActive(pQueue, pTimer);
1788 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1789 rc = VINF_SUCCESS;
1790 break;
1791
1792 case TMTIMERSTATE_EXPIRED_DELIVER:
1793 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1794 rc = VINF_SUCCESS;
1795 break;
1796
1797 case TMTIMERSTATE_STOPPED:
1798 rc = VINF_SUCCESS;
1799 break;
1800
1801 case TMTIMERSTATE_PENDING_RESCHEDULE:
1802 case TMTIMERSTATE_PENDING_STOP:
1803 case TMTIMERSTATE_PENDING_SCHEDULE:
1804 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1805 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1806 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1807 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1808 case TMTIMERSTATE_DESTROY:
1809 case TMTIMERSTATE_FREE:
1810 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1811 rc = VERR_TM_INVALID_STATE;
1812 break;
1813
1814 default:
1815 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1816 rc = VERR_TM_UNKNOWN_STATE;
1817 break;
1818 }
1819
1820 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1821 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1822 return rc;
1823}
1824
1825
1826/**
1827 * Stop the timer.
1828 * Use TMTimerSet() or one of the TMTimerSet* convenience APIs to re-arm it.
1829 *
1830 * @returns VBox status.
1831 * @param pTimer Timer handle as returned by one of the create functions.
1832 */
1833VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1834{
1835 PVM pVM = pTimer->CTX_SUFF(pVM);
1836
1837 /* Treat virtual sync timers specially. */
1838 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1839 return tmTimerVirtualSyncStop(pVM, pTimer);
1840
1841 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1842 TMTIMER_ASSERT_CRITSECT(pTimer);
1843
1844 /*
1845 * Reset the HZ hint.
1846 */
1847 if (pTimer->uHzHint)
1848 {
1849 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1850 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1851 pTimer->uHzHint = 0;
1852 }
1853
1854 /** @todo see if this function needs optimizing. */
1855 int cRetries = 1000;
1856 do
1857 {
1858 /*
1859 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1860 */
1861 TMTIMERSTATE enmState = pTimer->enmState;
1862 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1863 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1864 switch (enmState)
1865 {
1866 case TMTIMERSTATE_EXPIRED_DELIVER:
1867 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1868 return VERR_INVALID_PARAMETER;
1869
1870 case TMTIMERSTATE_STOPPED:
1871 case TMTIMERSTATE_PENDING_STOP:
1872 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1873 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1874 return VINF_SUCCESS;
1875
1876 case TMTIMERSTATE_PENDING_SCHEDULE:
1877 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1878 {
1879 tmSchedule(pTimer);
1880 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1881 return VINF_SUCCESS;
1882 }
break;
1883
1884 case TMTIMERSTATE_PENDING_RESCHEDULE:
1885 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1886 {
1887 tmSchedule(pTimer);
1888 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1889 return VINF_SUCCESS;
1890 }
1891 break;
1892
1893 case TMTIMERSTATE_ACTIVE:
1894 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1895 {
1896 tmSchedule(pTimer);
1897 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1898 return VINF_SUCCESS;
1899 }
1900 break;
1901
1902 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1903 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1904 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1905#ifdef IN_RING3
1906 if (!RTThreadYield())
1907 RTThreadSleep(1);
1908#else
1909/** @todo call host and yield cpu after a while. */
1910#endif
1911 break;
1912
1913 /*
1914 * Invalid states.
1915 */
1916 case TMTIMERSTATE_DESTROY:
1917 case TMTIMERSTATE_FREE:
1918 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1919 return VERR_TM_INVALID_STATE;
1920 default:
1921 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1922 return VERR_TM_UNKNOWN_STATE;
1923 }
1924 } while (cRetries-- > 0);
1925
1926 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1927 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1928 return VERR_TM_TIMER_UNSTABLE_STATE;
1929}
1930
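/*
 * Usage sketch (not part of the original file): stopping and re-arming a
 * timer from its device. Stopping an already stopped timer succeeds, while
 * stopping one in EXPIRED_DELIVER fails with VERR_INVALID_PARAMETER, so
 * callers racing the timer callback typically just re-arm. Hypothetical
 * device function.
 */
#if 0 /* example only */
static void devExampleRearm(PTMTIMER pTimer)
{
    TMTimerStop(pTimer);                   /* idempotent for stopped timers */
    TMTimerSetMillies(pTimer, 5 /* ms */); /* re-arm relative to now */
}
#endif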
1931
1932/**
1933 * Get the current clock time.
1934 * Handy for calculating the new expire time.
1935 *
1936 * @returns Current clock time.
1937 * @param pTimer Timer handle as returned by one of the create functions.
1938 */
1939VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1940{
1941 PVM pVM = pTimer->CTX_SUFF(pVM);
1942
1943 uint64_t u64;
1944 switch (pTimer->enmClock)
1945 {
1946 case TMCLOCK_VIRTUAL:
1947 u64 = TMVirtualGet(pVM);
1948 break;
1949 case TMCLOCK_VIRTUAL_SYNC:
1950 u64 = TMVirtualSyncGet(pVM);
1951 break;
1952 case TMCLOCK_REAL:
1953 u64 = TMRealGet(pVM);
1954 break;
1955 default:
1956 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1957 return UINT64_MAX;
1958 }
1959 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1960 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1961 return u64;
1962}
1963
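/*
 * Usage sketch (not part of the original file) of the "handy for calculating
 * the new expire time" pattern, using the absolute TMTimerSet() API defined
 * earlier in this file together with the tick conversion helpers below.
 */
#if 0 /* example only */
static int devExampleArmAbsolute(PTMTIMER pTimer)
{
    uint64_t const u64Now = TMTimerGet(pTimer); /* current clock, in ticks */
    return TMTimerSet(pTimer, u64Now + TMTimerFromMilli(pTimer, 10));
}
#endif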
1964
1965/**
1966 * Get the frequency of the timer clock.
1967 *
1968 * @returns Clock frequency (as Hz of course).
1969 * @param pTimer Timer handle as returned by one of the create functions.
1970 */
1971VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1972{
1973 switch (pTimer->enmClock)
1974 {
1975 case TMCLOCK_VIRTUAL:
1976 case TMCLOCK_VIRTUAL_SYNC:
1977 return TMCLOCK_FREQ_VIRTUAL;
1978
1979 case TMCLOCK_REAL:
1980 return TMCLOCK_FREQ_REAL;
1981
1982 default:
1983 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1984 return 0;
1985 }
1986}
1987
1988
1989/**
1990 * Get the expire time of the timer.
1991 * Only valid for active timers.
1992 *
1993 * @returns Expire time of the timer.
1994 * @param pTimer Timer handle as returned by one of the create functions.
1995 */
1996VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1997{
1998 TMTIMER_ASSERT_CRITSECT(pTimer);
1999 int cRetries = 1000;
2000 do
2001 {
2002 TMTIMERSTATE enmState = pTimer->enmState;
2003 switch (enmState)
2004 {
2005 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2006 case TMTIMERSTATE_EXPIRED_DELIVER:
2007 case TMTIMERSTATE_STOPPED:
2008 case TMTIMERSTATE_PENDING_STOP:
2009 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2010 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2011 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2012 return ~(uint64_t)0;
2013
2014 case TMTIMERSTATE_ACTIVE:
2015 case TMTIMERSTATE_PENDING_RESCHEDULE:
2016 case TMTIMERSTATE_PENDING_SCHEDULE:
2017 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2018 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2019 return pTimer->u64Expire;
2020
2021 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2022 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2023#ifdef IN_RING3
2024 if (!RTThreadYield())
2025 RTThreadSleep(1);
2026#endif
2027 break;
2028
2029 /*
2030 * Invalid states.
2031 */
2032 case TMTIMERSTATE_DESTROY:
2033 case TMTIMERSTATE_FREE:
2034 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2035 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2036 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2037 return ~(uint64_t)0;
2038 default:
2039 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2040 return ~(uint64_t)0;
2041 }
2042 } while (cRetries-- > 0);
2043
2044 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
2045 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2046 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2047 return ~(uint64_t)0;
2048}
2049
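/*
 * Usage sketch (not part of the original file): computing the time left on a
 * timer. TMTimerGetExpire() returns ~0 for inactive timers, so check for
 * that before doing arithmetic on the result.
 */
#if 0 /* example only */
static uint64_t devExampleNanosLeft(PTMTIMER pTimer)
{
    uint64_t const u64Expire = TMTimerGetExpire(pTimer);
    if (u64Expire == UINT64_MAX) /* inactive */
        return 0;
    uint64_t const u64Now = TMTimerGet(pTimer);
    return u64Expire > u64Now ? TMTimerToNano(pTimer, u64Expire - u64Now) : 0;
}
#endif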
2050
2051/**
2052 * Checks if a timer is active or not.
2053 *
2054 * @returns True if active.
2055 * @returns False if not active.
2056 * @param pTimer Timer handle as returned by one of the create functions.
2057 */
2058VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
2059{
2060 TMTIMERSTATE enmState = pTimer->enmState;
2061 switch (enmState)
2062 {
2063 case TMTIMERSTATE_STOPPED:
2064 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2065 case TMTIMERSTATE_EXPIRED_DELIVER:
2066 case TMTIMERSTATE_PENDING_STOP:
2067 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2068 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2069 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2070 return false;
2071
2072 case TMTIMERSTATE_ACTIVE:
2073 case TMTIMERSTATE_PENDING_RESCHEDULE:
2074 case TMTIMERSTATE_PENDING_SCHEDULE:
2075 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2076 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2077 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2078 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2079 return true;
2080
2081 /*
2082 * Invalid states.
2083 */
2084 case TMTIMERSTATE_DESTROY:
2085 case TMTIMERSTATE_FREE:
2086 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
2087 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2088 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2089 return false;
2090 default:
2091 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2092 return false;
2093 }
2094}
2095
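/*
 * Usage sketch (not part of the original file): arm the timer only if it is
 * not already ticking, e.g. when coalescing several trigger sources into one
 * deadline. Hypothetical device function.
 */
#if 0 /* example only */
static void devExampleArmOnce(PTMTIMER pTimer)
{
    if (!TMTimerIsActive(pTimer))
        TMTimerSetMillies(pTimer, 1 /* ms */);
}
#endif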
2096
2097/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2098
2099
2100/**
2101 * Arm a timer with a (new) expire time relative to current time.
2102 *
2103 * @returns VBox status.
2104 * @param pTimer Timer handle as returned by one of the create functions.
2105 * @param cMilliesToNext Number of milliseconds to the next tick.
2106 */
2107VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
2108{
2109 switch (pTimer->enmClock)
2110 {
2111 case TMCLOCK_VIRTUAL:
2112 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2113 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2114
2115 case TMCLOCK_VIRTUAL_SYNC:
2116 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2117 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2118
2119 case TMCLOCK_REAL:
2120 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2121 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
2122
2123 default:
2124 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2125 return VERR_TM_TIMER_BAD_CLOCK;
2126 }
2127}
2128
2129
2130/**
2131 * Arm a timer with a (new) expire time relative to current time.
2132 *
2133 * @returns VBox status.
2134 * @param pTimer Timer handle as returned by one of the create functions.
2135 * @param cMicrosToNext Number of microseconds to the next tick.
2136 */
2137VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
2138{
2139 switch (pTimer->enmClock)
2140 {
2141 case TMCLOCK_VIRTUAL:
2142 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2143 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2144
2145 case TMCLOCK_VIRTUAL_SYNC:
2146 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2147 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2148
2149 case TMCLOCK_REAL:
2150 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2151 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
2152
2153 default:
2154 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2155 return VERR_TM_TIMER_BAD_CLOCK;
2156 }
2157}
2158
2159
2160/**
2161 * Arm a timer with a (new) expire time relative to current time.
2162 *
2163 * @returns VBox status.
2164 * @param pTimer Timer handle as returned by one of the create functions.
2165 * @param cNanosToNext Number of nanoseconds to the next tick.
2166 */
2167VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
2168{
2169 switch (pTimer->enmClock)
2170 {
2171 case TMCLOCK_VIRTUAL:
2172 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2173 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2174
2175 case TMCLOCK_VIRTUAL_SYNC:
2176 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2177 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2178
2179 case TMCLOCK_REAL:
2180 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2181 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
2182
2183 default:
2184 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2185 return VERR_TM_TIMER_BAD_CLOCK;
2186 }
2187}
2188
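/*
 * Illustration (not part of the original file): on the two virtual clocks the
 * tick unit is 1 ns, so the three convenience setters agree exactly; on
 * TMCLOCK_REAL the tick is 1 ms and sub-millisecond amounts are truncated.
 */
#if 0 /* example only */
static void devExampleEquivalentArms(PTMTIMER pVirtualTimer)
{
    /* These three calls arm the same 10 ms deadline on a virtual clock: */
    TMTimerSetMillies(pVirtualTimer, 10);
    TMTimerSetMicro(pVirtualTimer, UINT64_C(10000));
    TMTimerSetNano(pVirtualTimer, UINT64_C(10000000));
}
#endif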
2189
2190/**
2191 * Get the current clock time as nanoseconds.
2192 *
2193 * @returns The timer clock as nanoseconds.
2194 * @param pTimer Timer handle as returned by one of the create functions.
2195 */
2196VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
2197{
2198 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
2199}
2200
2201
2202/**
2203 * Get the current clock time as microseconds.
2204 *
2205 * @returns The timer clock as microseconds.
2206 * @param pTimer Timer handle as returned by one of the create functions.
2207 */
2208VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
2209{
2210 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
2211}
2212
2213
2214/**
2215 * Get the current clock time as milliseconds.
2216 *
2217 * @returns The timer clock as milliseconds.
2218 * @param pTimer Timer handle as returned by one of the create functions.
2219 */
2220VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
2221{
2222 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
2223}
2224
2225
2226/**
2227 * Converts the specified timer clock time to nanoseconds.
2228 *
2229 * @returns nanoseconds.
2230 * @param pTimer Timer handle as returned by one of the create functions.
2231 * @param u64Ticks The clock ticks.
2232 * @remark There could be rounding errors here. We just do a simple integer divide
2233 * without any adjustments.
2234 */
2235VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
2236{
2237 switch (pTimer->enmClock)
2238 {
2239 case TMCLOCK_VIRTUAL:
2240 case TMCLOCK_VIRTUAL_SYNC:
2241 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2242 return u64Ticks;
2243
2244 case TMCLOCK_REAL:
2245 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2246 return u64Ticks * 1000000;
2247
2248 default:
2249 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2250 return 0;
2251 }
2252}
2253
2254
2255/**
2256 * Converts the specified timer clock time to microseconds.
2257 *
2258 * @returns microseconds.
2259 * @param pTimer Timer handle as returned by one of the create functions.
2260 * @param u64Ticks The clock ticks.
2261 * @remark There could be rounding errors here. We just do a simple integer divide
2262 * without any adjustments.
2263 */
2264VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
2265{
2266 switch (pTimer->enmClock)
2267 {
2268 case TMCLOCK_VIRTUAL:
2269 case TMCLOCK_VIRTUAL_SYNC:
2270 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2271 return u64Ticks / 1000;
2272
2273 case TMCLOCK_REAL:
2274 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2275 return u64Ticks * 1000;
2276
2277 default:
2278 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2279 return 0;
2280 }
2281}
2282
2283
2284/**
2285 * Converts the specified timer clock time to milliseconds.
2286 *
2287 * @returns milliseconds.
2288 * @param pTimer Timer handle as returned by one of the create functions.
2289 * @param u64Ticks The clock ticks.
2290 * @remark There could be rounding errors here. We just do a simple integer divide
2291 * without any adjustments.
2292 */
2293VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
2294{
2295 switch (pTimer->enmClock)
2296 {
2297 case TMCLOCK_VIRTUAL:
2298 case TMCLOCK_VIRTUAL_SYNC:
2299 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2300 return u64Ticks / 1000000;
2301
2302 case TMCLOCK_REAL:
2303 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2304 return u64Ticks;
2305
2306 default:
2307 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2308 return 0;
2309 }
2310}
2311
2312
2313/**
2314 * Converts the specified nanosecond timestamp to timer clock ticks.
2315 *
2316 * @returns timer clock ticks.
2317 * @param pTimer Timer handle as returned by one of the create functions.
2318 * @param cNanoSecs The nanosecond value to convert.
2319 * @remark There could be rounding and overflow errors here.
2320 */
2321VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
2322{
2323 switch (pTimer->enmClock)
2324 {
2325 case TMCLOCK_VIRTUAL:
2326 case TMCLOCK_VIRTUAL_SYNC:
2327 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2328 return cNanoSecs;
2329
2330 case TMCLOCK_REAL:
2331 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2332 return cNanoSecs / 1000000;
2333
2334 default:
2335 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2336 return 0;
2337 }
2338}
2339
2340
2341/**
2342 * Converts the specified microsecond timestamp to timer clock ticks.
2343 *
2344 * @returns timer clock ticks.
2345 * @param pTimer Timer handle as returned by one of the create functions.
2346 * @param cMicroSecs The microsecond value to convert.
2347 * @remark There could be rounding and overflow errors here.
2348 */
2349VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
2350{
2351 switch (pTimer->enmClock)
2352 {
2353 case TMCLOCK_VIRTUAL:
2354 case TMCLOCK_VIRTUAL_SYNC:
2355 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2356 return cMicroSecs * 1000;
2357
2358 case TMCLOCK_REAL:
2359 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2360 return cMicroSecs / 1000;
2361
2362 default:
2363 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2364 return 0;
2365 }
2366}
2367
2368
2369/**
2370 * Converts the specified millisecond timestamp to timer clock ticks.
2371 *
2372 * @returns timer clock ticks.
2373 * @param pTimer Timer handle as returned by one of the create functions.
2374 * @param cMilliSecs The millisecond value to convert.
2375 * @remark There could be rounding and overflow errors here.
2376 */
2377VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
2378{
2379 switch (pTimer->enmClock)
2380 {
2381 case TMCLOCK_VIRTUAL:
2382 case TMCLOCK_VIRTUAL_SYNC:
2383 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2384 return cMilliSecs * 1000000;
2385
2386 case TMCLOCK_REAL:
2387 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2388 return cMilliSecs;
2389
2390 default:
2391 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2392 return 0;
2393 }
2394}
2395
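/*
 * Illustration (not part of the original file) of the rounding remark above:
 * the conversions are plain integer multiplies/divides, so on a TMCLOCK_REAL
 * timer (1 kHz ticks) sub-millisecond amounts are silently truncated.
 */
#if 0 /* example only */
static void devExampleRounding(PTMTIMER pRealTimer)
{
    uint64_t cTicks = TMTimerFromMicro(pRealTimer, 1500); /* -> 1 tick (1 ms) */
    uint64_t cUs = TMTimerToMicro(pRealTimer, cTicks);    /* -> 1000, not 1500 */
    NOREF(cUs);
}
#endif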
2396
2397/**
2398 * Convert state to string.
2399 *
2400 * @returns Readonly status name.
2401 * @param enmState State.
2402 */
2403const char *tmTimerState(TMTIMERSTATE enmState)
2404{
2405 switch (enmState)
2406 {
2407#define CASE(num, state) \
2408 case TMTIMERSTATE_##state: \
2409 AssertCompile(TMTIMERSTATE_##state == (num)); \
2410 return #num "-" #state
2411 CASE( 1,STOPPED);
2412 CASE( 2,ACTIVE);
2413 CASE( 3,EXPIRED_GET_UNLINK);
2414 CASE( 4,EXPIRED_DELIVER);
2415 CASE( 5,PENDING_STOP);
2416 CASE( 6,PENDING_STOP_SCHEDULE);
2417 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2418 CASE( 8,PENDING_SCHEDULE);
2419 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2420 CASE(10,PENDING_RESCHEDULE);
2421 CASE(11,DESTROY);
2422 CASE(12,FREE);
2423 default:
2424 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2425 return "Invalid state!";
2426#undef CASE
2427 }
2428}
2429
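/*
 * Usage sketch (not part of the original file): the string form is intended
 * for logging and assertion messages, as used throughout this file.
 */
#if 0 /* example only */
static void devExampleLogState(PTMTIMER pTimer)
{
    Log(("devExample: timer state is %s\n", tmTimerState(pTimer->enmState)));
}
#endif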
2430
2431/**
2432 * Gets the highest frequency hint for all the important timers.
2433 *
2434 * @returns The highest frequency. 0 if no timers care.
2435 * @param pVM Pointer to the VM.
2436 */
2437static uint32_t tmGetFrequencyHint(PVM pVM)
2438{
2439 /*
2440 * Query the value, recalculate it if necessary.
2441 *
2442 * The "right" highest frequency value isn't so important that we'll block
2443 * waiting on the timer semaphore.
2444 */
2445 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2446 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2447 {
2448 if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
2449 {
2450 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2451
2452 /*
2453 * Loop over the timers associated with each clock.
2454 */
2455 uMaxHzHint = 0;
2456 for (int i = 0; i < TMCLOCK_MAX; i++)
2457 {
2458 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2459 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2460 {
2461 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2462 if (uHzHint > uMaxHzHint)
2463 {
2464 switch (pCur->enmState)
2465 {
2466 case TMTIMERSTATE_ACTIVE:
2467 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2468 case TMTIMERSTATE_EXPIRED_DELIVER:
2469 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2470 case TMTIMERSTATE_PENDING_SCHEDULE:
2471 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2472 case TMTIMERSTATE_PENDING_RESCHEDULE:
2473 uMaxHzHint = uHzHint;
2474 break;
2475
2476 case TMTIMERSTATE_STOPPED:
2477 case TMTIMERSTATE_PENDING_STOP:
2478 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2479 case TMTIMERSTATE_DESTROY:
2480 case TMTIMERSTATE_FREE:
2481 break;
2482 /* no default, want gcc warnings when adding more states. */
2483 }
2484 }
2485 }
2486 }
2487 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2488 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2489 TM_UNLOCK_TIMERS(pVM);
2490 }
2491 }
2492 return uMaxHzHint;
2493}
2494
2495
2496/**
2497 * Calculates a host timer frequency that would be suitable for the current
2498 * timer load.
2499 *
2500 * This will take the highest timer frequency, adjust for catch-up and warp
2501 * drive, and finally add a little fudge factor. The caller (VMM) will use
2502 * the result to adjust the per-cpu preemption timer.
2503 *
2504 * @returns The highest frequency. 0 if no important timers around.
2505 * @param pVM Pointer to the VM.
2506 * @param pVCpu The current CPU.
2507 */
2508VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
2509{
2510 uint32_t uHz = tmGetFrequencyHint(pVM);
2511
2512 /* Catch-up: we have to be more aggressive than the percentage indicates
2513 at the beginning of the effort. */
2514 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2515 {
2516 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2517 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2518 {
2519 if (u32Pct <= 100)
2520 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2521 else if (u32Pct <= 200)
2522 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2523 else if (u32Pct <= 400)
2524 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2525 uHz *= u32Pct + 100;
2526 uHz /= 100;
2527 }
2528 }
2529
2530 /* Warp drive. */
2531 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2532 {
2533 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2534 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2535 {
2536 uHz *= u32Pct;
2537 uHz /= 100;
2538 }
2539 }
2540
2541 /* Fudge factor. */
2542 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2543 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2544 else
2545 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2546 uHz /= 100;
2547
2548 /* Make sure it isn't too high. */
2549 if (uHz > pVM->tm.s.cHostHzMax)
2550 uHz = pVM->tm.s.cHostHzMax;
2551
2552 return uHz;
2553}
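/*
 * Worked example (not part of the original file), with hypothetical fudge
 * factors of 300% for the <= 100% catch-up band and 111% for the timer CPU:
 * a 1000 Hz hint during a 50% catch-up gives u32Pct = 50 * 300 / 100 = 150,
 * uHz = 1000 * (150 + 100) / 100 = 2500, and the CPU fudge then yields
 * 2500 * 111 / 100 = 2775 Hz, subject to the cHostHzMax clamp.
 */
#if 0 /* example only */
static uint32_t tmExampleScaleHz(uint32_t uHz, uint32_t uCatchUpPct,
                                 uint32_t cPctFudgeCatchUp, uint32_t cPctFudgeCpu)
{
    uint32_t u32Pct = uCatchUpPct * cPctFudgeCatchUp / 100; /* catch-up band */
    uHz = uHz * (u32Pct + 100) / 100; /* more aggressive while behind */
    return uHz * cPctFudgeCpu / 100;  /* per-CPU fudge factor */
}
#endif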