VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@87765

Last change on this file since 87765 was 87764, checked in by vboxsync, 4 years ago

VMM/TM: Experimenting with changing the signature of TMTimerFromMilli. Work in progress. bugref:9943

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 96.6 KB
1/* $Id: TMAll.cpp 87764 2021-02-15 23:49:16Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#ifdef DEBUG_bird
24# define DBGFTRACE_DISABLED /* annoying */
25#endif
26#include <VBox/vmm/tm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/dbgftrace.h>
29#ifdef IN_RING3
30#endif
31#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
32#include "TMInternal.h"
33#include <VBox/vmm/vmcc.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-math.h>
43#ifdef IN_RING3
44# include <iprt/thread.h>
45#endif
46
47#include "TMInline.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53#ifdef VBOX_STRICT
54/** @def TMTIMER_GET_CRITSECT
55 * Helper for safely resolving the critical section for a timer belonging to a
56 * device instance.
57 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
58# ifdef IN_RING3
59# define TMTIMER_GET_CRITSECT(pTimer) ((pTimer)->pCritSect)
60# else
61# define TMTIMER_GET_CRITSECT(pTimer) tmRZTimerGetCritSect(pTimer)
62# endif
63#endif
64
65/** @def TMTIMER_ASSERT_CRITSECT
66 * Checks that the caller owns the critical section if one is associated with
67 * the timer. */
68#ifdef VBOX_STRICT
69# define TMTIMER_ASSERT_CRITSECT(pTimer) \
70 do { \
71 if ((pTimer)->pCritSect) \
72 { \
73 VMSTATE enmState; \
74 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
75 AssertMsg( pCritSect \
76 && ( PDMCritSectIsOwner(pCritSect) \
77 || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
78 || enmState == VMSTATE_RESETTING \
79 || enmState == VMSTATE_RESETTING_LS ),\
80 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
81 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
82 } \
83 } while (0)
84#else
85# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
86#endif
87
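/*
 * Usage sketch: a device timer callback documenting its locking assumptions
 * with TMTIMER_ASSERT_CRITSECT.  In ring-3 the timer dispatcher enters the
 * timer's associated critical section before invoking the callback, so in
 * strict builds the assertion is expected to hold.  The callback below is
 * hypothetical, for illustration only.
 */
#if 0 /* illustration only */
static DECLCALLBACK(void) exampleDevTimerCallback(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    TMTIMER_ASSERT_CRITSECT(pTimer); /* we expect to own the device critsect here */
    RT_NOREF(pDevIns, pvUser);
}
#endif
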
88/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
89 * Checks for lock order trouble between the timer critsect and the critical
90 * section critsect. The virtual sync critsect must always be entered before
91 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
92 * isn't any critical section associated with the timer or if the calling thread
93 * doesn't own it, ASSUMING of course that the thread using this macro is going
94 * to enter the virtual sync critical section anyway.
95 *
96 * @remarks This is a slightly relaxed timer locking attitude compared to
97 * TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
98 * should know what it's doing if it's stopping or starting a timer
99 * without taking the device lock.
100 */
101#ifdef VBOX_STRICT
102# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
103 do { \
104 if ((pTimer)->pCritSect) \
105 { \
106 VMSTATE enmState; \
107 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
108 AssertMsg( pCritSect \
109 && ( !PDMCritSectIsOwner(pCritSect) \
110 || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
111 || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
112 || enmState == VMSTATE_RESETTING \
113 || enmState == VMSTATE_RESETTING_LS ),\
114 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
115 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
116 } \
117 } while (0)
118#else
119# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
120#endif
121
122
123#if defined(VBOX_STRICT) && defined(IN_RING0)
124/**
125 * Helper for TMTIMER_GET_CRITSECT
126 * @todo This needs a redo!
127 */
128DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PTMTIMER pTimer)
129{
130 if (pTimer->enmType == TMTIMERTYPE_DEV)
131 {
132 RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
133 PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
134 ASMSetFlags(fSavedFlags);
135 struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
136 if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
137 return pDevInsR0->pCritSectRoR0;
138 uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
139 if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
140 return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
141 }
142 return (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), pTimer->pCritSect);
143}
144#endif /* VBOX_STRICT && IN_RING0 */
145
146
147/**
148 * Notification that execution is about to start.
149 *
150 * This call must always be paired with a TMNotifyEndOfExecution call.
151 *
152 * The function may, depending on the configuration, resume the TSC and future
153 * clocks that only tick when we're executing guest code.
154 *
155 * @param pVM The cross context VM structure.
156 * @param pVCpu The cross context virtual CPU structure.
157 */
158VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
159{
160#ifndef VBOX_WITHOUT_NS_ACCOUNTING
161 pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
162 pVCpu->tm.s.fExecuting = true;
163#endif
164 if (pVM->tm.s.fTSCTiedToExecution)
165 tmCpuTickResume(pVM, pVCpu);
166}
167
168
169/**
170 * Notification that execution has ended.
171 *
172 * This call must always be paired with a TMNotifyStartOfExecution call.
173 *
174 * The function may, depending on the configuration, suspend the TSC and future
175 * clocks that only tick when we're executing guest code.
176 *
177 * @param pVM The cross context VM structure.
178 * @param pVCpu The cross context virtual CPU structure.
179 * @param uTsc TSC value when exiting guest context.
180 */
181VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
182{
183 if (pVM->tm.s.fTSCTiedToExecution)
184 tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */
185
186#ifndef VBOX_WITHOUT_NS_ACCOUNTING
187 /*
188 * Calculate the elapsed tick count and convert it to nanoseconds.
189 */
190# ifdef IN_RING3
191 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta();
192 uint64_t const uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
193# else
194 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
195 uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
196# endif
197 AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */
198
199 uint64_t cNsExecutingDelta;
200 if (uCpuHz < _4G)
201 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
202 else if (uCpuHz < 16*_1G64)
203 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
204 else
205 {
206 Assert(uCpuHz < 64 * _1G64);
207 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
208 }
209
210 /*
211 * Update the data.
212 *
213 * Note! We're not using strict memory ordering here to speed things up.
214 * The data is in a single cache line and this thread is the only
215 * one writing to that line, so I cannot quite imagine why we would
216 * need any strict ordering here.
217 */
218 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
219 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
220 ASMCompilerBarrier();
221 pVCpu->tm.s.fExecuting = false;
222 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
223 pVCpu->tm.s.cPeriodsExecuting++;
224 ASMCompilerBarrier();
225 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
226
227 /*
228 * Update stats.
229 */
230# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
231 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
232 if (cNsExecutingDelta < 5000)
233 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
234 else if (cNsExecutingDelta < 50000)
235 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
236 else
237 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
238# endif
239
240 /* The timer triggers occasional updating of the other and total stats: */
241 if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
242 { /*likely*/ }
243 else
244 {
245 pVCpu->tm.s.fUpdateStats = false;
246
247 uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
248 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
249
250# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
251 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
252 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
253 if (cNsOtherNewDelta > 0)
254 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
255# endif
256
257 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
258 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
259 }
260
261#endif
262}
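/*
 * A sketch of the overflow-avoidance trick used above: ASMMultU64ByU32DivByU32
 * needs a 32-bit divisor, so for CPU frequencies of 4 GHz and up both the tick
 * count and the frequency are pre-shifted by the same amount, preserving the
 * ratio at the cost of a couple of bits of precision.  The helper below is
 * hypothetical, written only to illustrate the scaling.
 */
#if 0 /* illustration only */
static uint64_t tmExampleTicksToNs(uint64_t cTicks, uint64_t uCpuHz)
{
    unsigned cShift = 0;
    while ((uCpuHz >> cShift) >= _4G) /* make the divisor fit in 32 bits */
        cShift += 2;
    return ASMMultU64ByU32DivByU32(cTicks >> cShift, RT_NS_1SEC, (uint32_t)(uCpuHz >> cShift));
}
#endif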
263
264
265/**
266 * Notification that the CPU is entering the halt state.
267 *
268 * This call must always be paired with a TMNotifyEndOfHalt call.
269 *
270 * The function may, depending on the configuration, resume the TSC and future
271 * clocks that only tick when we're halted.
272 *
273 * @param pVCpu The cross context virtual CPU structure.
274 */
275VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
276{
277 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
278
279#ifndef VBOX_WITHOUT_NS_ACCOUNTING
280 pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
281 pVCpu->tm.s.fHalting = true;
282#endif
283
284 if ( pVM->tm.s.fTSCTiedToExecution
285 && !pVM->tm.s.fTSCNotTiedToHalt)
286 tmCpuTickResume(pVM, pVCpu);
287}
288
289
290/**
291 * Notification that the CPU is leaving the halt state.
292 *
293 * This call must always be paired with a TMNotifyStartOfHalt call.
294 *
295 * The function may, depending on the configuration, suspend the TSC and future
296 * clocks that only tick when we're halted.
297 *
298 * @param pVCpu The cross context virtual CPU structure.
299 */
300VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
301{
302 PVM pVM = pVCpu->CTX_SUFF(pVM);
303
304 if ( pVM->tm.s.fTSCTiedToExecution
305 && !pVM->tm.s.fTSCNotTiedToHalt)
306 tmCpuTickPause(pVCpu);
307
308#ifndef VBOX_WITHOUT_NS_ACCOUNTING
309 uint64_t const u64NsTs = RTTimeNanoTS();
310 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.nsStartTotal;
311 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
312 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
313 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
314
315 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
316 ASMCompilerBarrier();
317 pVCpu->tm.s.fHalting = false;
318 pVCpu->tm.s.fUpdateStats = false;
319 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
320 pVCpu->tm.s.cPeriodsHalted++;
321 ASMCompilerBarrier();
322 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
323
324# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
325 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
326 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
327 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
328 if (cNsOtherNewDelta > 0)
329 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
330# endif
331 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
332 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
333#endif
334}
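/*
 * Reader side of the uTimesGen protocol used by the two updaters above (a
 * seqlock-style scheme): the writer makes the generation odd while the fields
 * are inconsistent, so a reader on another thread retries until it sees the
 * same even value before and after copying.  Hypothetical sketch:
 */
#if 0 /* illustration only */
static uint64_t tmExampleReadNsExecuting(PVMCPUCC pVCpu)
{
    uint64_t cNsExecuting;
    uint32_t uGen;
    do
    {
        uGen = ASMAtomicUoReadU32(&pVCpu->tm.s.uTimesGen);
        ASMCompilerBarrier();
        cNsExecuting = pVCpu->tm.s.cNsExecuting;
        ASMCompilerBarrier();
    } while (   (uGen & 1)
             || uGen != ASMAtomicUoReadU32(&pVCpu->tm.s.uTimesGen));
    return cNsExecuting;
}
#endif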
335
336
337/**
338 * Raise the timer force action flag and notify the dedicated timer EMT.
339 *
340 * @param pVM The cross context VM structure.
341 */
342DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
343{
344 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
345 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
346 {
347 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
348 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
349#ifdef IN_RING3
350 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
351#endif
352 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
353 }
354}
355
356
357/**
358 * Schedule the queue which was changed.
359 */
360DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
361{
362 PVMCC pVM = pTimer->CTX_SUFF(pVM);
363 if ( VM_IS_EMT(pVM)
364 && RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
365 {
366 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
367 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
368 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
369#ifdef VBOX_STRICT
370 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
371#endif
372 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
373 TM_UNLOCK_TIMERS(pVM);
374 }
375 else
376 {
377 TMTIMERSTATE enmState = pTimer->enmState;
378 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
379 tmScheduleNotify(pVM);
380 }
381}
382
383
384/**
385 * Try to change the state to enmStateNew from enmStateOld,
386 * without linking the timer into the scheduling queue.
387 *
388 * @returns Success indicator.
389 * @param pTimer Timer in question.
390 * @param enmStateNew The new timer state.
391 * @param enmStateOld The old timer state.
392 */
393DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
394{
395 /*
396 * Attempt state change.
397 */
398 bool fRc;
399 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
400 return fRc;
401}
402
403
404/**
405 * Links the timer onto the scheduling queue.
406 *
407 * @param pQueue The timer queue the timer belongs to.
408 * @param pTimer The timer.
409 *
410 * @todo FIXME: Look into potential race with the thread running the queues
411 * and stuff.
412 */
413DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
414{
415 Assert(!pTimer->offScheduleNext);
416 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
417 int32_t offHead;
418 do
419 {
420 offHead = pQueue->offSchedule;
421 if (offHead)
422 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
423 else
424 pTimer->offScheduleNext = 0;
425 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
426}
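/*
 * The loop above is a standard lock-free LIFO push, just with self-relative
 * offsets instead of pointers so the list stays valid across contexts with
 * different mappings.  In plain pointer terms the equivalent would be roughly
 * as follows (hypothetical field names):
 */
#if 0 /* illustration only */
PTMTIMER pHead;
do
{
    pHead = (PTMTIMER)ASMAtomicReadPtr((void * volatile *)&pQueue->pScheduleHead);
    pTimer->pScheduleNext = pHead;          /* point the new node at the current head (may be NULL) */
} while (!ASMAtomicCmpXchgPtr(&pQueue->pScheduleHead, pTimer, pHead));
#endif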
427
428
429/**
430 * Try to change the state to enmStateNew from enmStateOld
431 * and link the timer into the scheduling queue.
432 *
433 * @returns Success indicator.
434 * @param pTimer Timer in question.
435 * @param enmStateNew The new timer state.
436 * @param enmStateOld The old timer state.
437 */
438DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
439{
440 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
441 {
442 tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
443 return true;
444 }
445 return false;
446}
447
448
449/**
450 * Links a timer into the active list of a timer queue.
451 *
452 * @param pQueue The queue.
453 * @param pTimer The timer.
454 * @param u64Expire The timer expiration time.
455 *
456 * @remarks Called while owning the relevant queue lock.
457 */
458DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
459{
460 Assert(!pTimer->offNext);
461 Assert(!pTimer->offPrev);
462 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
463
464 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
465 if (pCur)
466 {
467 for (;; pCur = TMTIMER_GET_NEXT(pCur))
468 {
469 if (pCur->u64Expire > u64Expire)
470 {
471 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
472 TMTIMER_SET_NEXT(pTimer, pCur);
473 TMTIMER_SET_PREV(pTimer, pPrev);
474 if (pPrev)
475 TMTIMER_SET_NEXT(pPrev, pTimer);
476 else
477 {
478 TMTIMER_SET_HEAD(pQueue, pTimer);
479 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
480 DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
481 }
482 TMTIMER_SET_PREV(pCur, pTimer);
483 return;
484 }
485 if (!pCur->offNext)
486 {
487 TMTIMER_SET_NEXT(pCur, pTimer);
488 TMTIMER_SET_PREV(pTimer, pCur);
489 DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
490 return;
491 }
492 }
493 }
494 else
495 {
496 TMTIMER_SET_HEAD(pQueue, pTimer);
497 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
498 DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
499 }
500}
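/*
 * Note on the insertion above: the active list is kept sorted by expiration
 * time, so the queue's u64Expire mirror only needs refreshing when the head
 * changes; tmTimerPollInternal reads that mirror locklessly, which is why it
 * is updated with ASMAtomicWriteU64.
 */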
501
502
503
504/**
505 * Schedules the given timer on the given queue.
506 *
507 * @param pQueue The timer queue.
508 * @param pTimer The timer that needs scheduling.
509 *
510 * @remarks Called while owning the lock.
511 */
512DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
513{
514 Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
515
516 /*
517 * Processing.
518 */
519 unsigned cRetries = 2;
520 do
521 {
522 TMTIMERSTATE enmState = pTimer->enmState;
523 switch (enmState)
524 {
525 /*
526 * Reschedule timer (in the active list).
527 */
528 case TMTIMERSTATE_PENDING_RESCHEDULE:
529 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
530 break; /* retry */
531 tmTimerQueueUnlinkActive(pQueue, pTimer);
532 RT_FALL_THRU();
533
534 /*
535 * Schedule timer (insert into the active list).
536 */
537 case TMTIMERSTATE_PENDING_SCHEDULE:
538 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
539 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
540 break; /* retry */
541 tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
542 return;
543
544 /*
545 * Stop the timer in active list.
546 */
547 case TMTIMERSTATE_PENDING_STOP:
548 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
549 break; /* retry */
550 tmTimerQueueUnlinkActive(pQueue, pTimer);
551 RT_FALL_THRU();
552
553 /*
554 * Stop the timer (not on the active list).
555 */
556 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
557 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
558 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
559 break;
560 return;
561
562 /*
563 * The timer is pending destruction by TMR3TimerDestroy, our caller.
564 * Nothing to do here.
565 */
566 case TMTIMERSTATE_DESTROY:
567 break;
568
569 /*
570 * Postpone these until they get into the right state.
571 */
572 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
573 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
574 tmTimerLinkSchedule(pQueue, pTimer);
575 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
576 return;
577
578 /*
579 * None of these can be in the schedule.
580 */
581 case TMTIMERSTATE_FREE:
582 case TMTIMERSTATE_STOPPED:
583 case TMTIMERSTATE_ACTIVE:
584 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
585 case TMTIMERSTATE_EXPIRED_DELIVER:
586 default:
587 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
588 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
589 return;
590 }
591 } while (cRetries-- > 0);
592}
593
594
595/**
596 * Schedules the specified timer queue.
597 *
598 * @param pVM The cross context VM structure.
599 * @param pQueue The queue to schedule.
600 *
601 * @remarks Called while owning the lock.
602 */
603void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
604{
605 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
606 NOREF(pVM);
607
608 /*
609 * Dequeue the scheduling list and iterate it.
610 */
611 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
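    /* (The xchg above atomically detaches the entire batch pushed by
        tmTimerLinkSchedule; concurrent pushes from here on simply start
        a new batch.) */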
612 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
613 if (!offNext)
614 return;
615 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
616 while (pNext)
617 {
618 /*
619 * Unlink the head timer and find the next one.
620 */
621 PTMTIMER pTimer = pNext;
622 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
623 pTimer->offScheduleNext = 0;
624
625 /*
626 * Do the scheduling.
627 */
628 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
629 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
630 tmTimerQueueScheduleOne(pQueue, pTimer);
631 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
632 } /* foreach timer in current schedule batch. */
633 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
634}
635
636
637#ifdef VBOX_STRICT
638/**
639 * Checks that the timer queues are sane.
640 *
641 * @param pVM The cross context VM structure.
642 * @param pszWhere Caller location clue.
643 *
644 * @remarks Called while owning the lock.
645 */
646void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
647{
648 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
649
650 /*
651 * Check the linking of the active lists.
652 */
653 bool fHaveVirtualSyncLock = false;
654 for (int i = 0; i < TMCLOCK_MAX; i++)
655 {
656 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
657 Assert((int)pQueue->enmClock == i);
658 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
659 {
660 if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
661 continue;
662 fHaveVirtualSyncLock = true;
663 }
664 PTMTIMER pPrev = NULL;
665 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
666 {
667 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
668 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
669 TMTIMERSTATE enmState = pCur->enmState;
670 switch (enmState)
671 {
672 case TMTIMERSTATE_ACTIVE:
673 AssertMsg( !pCur->offScheduleNext
674 || pCur->enmState != TMTIMERSTATE_ACTIVE,
675 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
676 break;
677 case TMTIMERSTATE_PENDING_STOP:
678 case TMTIMERSTATE_PENDING_RESCHEDULE:
679 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
680 break;
681 default:
682 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
683 break;
684 }
685 }
686 }
687
688
689# ifdef IN_RING3
690 /*
691 * Do the big list and check that active timers all are in the active lists.
692 */
693 PTMTIMERR3 pPrev = NULL;
694 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
695 {
696 Assert(pCur->pBigPrev == pPrev);
697 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
698
699 TMTIMERSTATE enmState = pCur->enmState;
700 switch (enmState)
701 {
702 case TMTIMERSTATE_ACTIVE:
703 case TMTIMERSTATE_PENDING_STOP:
704 case TMTIMERSTATE_PENDING_RESCHEDULE:
705 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
706 if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
707 {
708 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
709 Assert(pCur->offPrev || pCur == pCurAct);
710 while (pCurAct && pCurAct != pCur)
711 pCurAct = TMTIMER_GET_NEXT(pCurAct);
712 Assert(pCurAct == pCur);
713 }
714 break;
715
716 case TMTIMERSTATE_PENDING_SCHEDULE:
717 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
718 case TMTIMERSTATE_STOPPED:
719 case TMTIMERSTATE_EXPIRED_DELIVER:
720 if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
721 {
722 Assert(!pCur->offNext);
723 Assert(!pCur->offPrev);
724 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
725 pCurAct;
726 pCurAct = TMTIMER_GET_NEXT(pCurAct))
727 {
728 Assert(pCurAct != pCur);
729 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
730 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
731 }
732 }
733 break;
734
735 /* ignore */
736 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
737 break;
738
739 /* shouldn't get here! */
740 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
741 case TMTIMERSTATE_DESTROY:
742 default:
743 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
744 break;
745 }
746 }
747# endif /* IN_RING3 */
748
749 if (fHaveVirtualSyncLock)
750 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
751}
752#endif /* VBOX_STRICT */
753
754#ifdef VBOX_HIGH_RES_TIMERS_HACK
755
756/**
757 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
758 * EMT is polling.
759 *
760 * @returns See tmTimerPollInternal.
761 * @param pVM The cross context VM structure.
762 * @param u64Now Current virtual clock timestamp.
763 * @param u64Delta The delta to the next event in ticks of the
764 * virtual clock.
765 * @param pu64Delta Where to return the delta.
766 */
767DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
768{
769 Assert(!(u64Delta & RT_BIT_64(63)));
770
771 if (!pVM->tm.s.fVirtualWarpDrive)
772 {
773 *pu64Delta = u64Delta;
774 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
775 }
776
777 /*
778 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
779 */
780 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
781 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
782
783 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
784 u64GipTime -= u64Start; /* the start is GIP time. */
785 if (u64GipTime >= u64Delta)
786 {
787 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
788 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
789 }
790 else
791 {
792 u64Delta -= u64GipTime;
793 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
794 u64Delta += u64GipTime;
795 }
796 *pu64Delta = u64Delta;
797 u64GipTime += u64Start;
798 return u64GipTime;
799}
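/*
 * Worked example of the warp adjustment above (illustrative numbers): with a
 * warp drive percentage of 200 the virtual clock advances twice as fast as
 * GIP time, so a 10ms delta on the virtual clock corresponds to only 5ms of
 * GIP time, i.e. delta * 100 / u32Pct -- the reverse of the u32Pct / 100
 * scaling that tmVirtualGetRaw applies in the forward direction.
 */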
800
801
802/**
803 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
804 * than the one dedicated to timer work.
805 *
806 * @returns See tmTimerPollInternal.
807 * @param pVM The cross context VM structure.
808 * @param u64Now Current virtual clock timestamp.
809 * @param pu64Delta Where to return the delta.
810 */
811DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
812{
813 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
814 *pu64Delta = s_u64OtherRet;
815 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
816}
817
818
819/**
820 * Worker for tmTimerPollInternal.
821 *
822 * @returns See tmTimerPollInternal.
823 * @param pVM The cross context VM structure.
824 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
825 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
826 * timer EMT.
827 * @param u64Now Current virtual clock timestamp.
828 * @param pu64Delta Where to return the delta.
829 * @param pCounter The statistics counter to update.
830 */
831DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
832 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
833{
834 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
835 if (pVCpuDst != pVCpu)
836 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
837 *pu64Delta = 0;
838 return 0;
839}
840
841/**
842 * Common worker for TMTimerPollGIP and TMTimerPoll.
843 *
844 * This function is called before FFs are checked in the inner execution EM loops.
845 *
846 * @returns The GIP timestamp of the next event.
847 * 0 if the next event has already expired.
848 *
849 * @param pVM The cross context VM structure.
850 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
851 * @param pu64Delta Where to store the delta.
852 *
853 * @thread The emulation thread.
854 *
855 * @remarks GIP uses ns ticks.
856 */
857DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
858{
859 PVMCPU pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
860 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
861 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
862
863 /*
864 * Return straight away if the timer FF is already set ...
865 */
866 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
867 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
868
869 /*
870 * ... or if timers are being run.
871 */
872 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
873 {
874 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
875 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
876 }
877
878 /*
879 * Check for TMCLOCK_VIRTUAL expiration.
880 */
881 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
882 const int64_t i64Delta1 = u64Expire1 - u64Now;
883 if (i64Delta1 <= 0)
884 {
885 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
886 {
887 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
888 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
889 }
890 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
891 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
892 }
893
894 /*
895 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
896 * This isn't quite as straightforward if in a catch-up; not only do
897 * we have to adjust the 'now', but we have to adjust the delta as well.
898 */
899
900 /*
901 * Optimistic lockless approach.
902 */
903 uint64_t u64VirtualSyncNow;
904 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
905 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
906 {
907 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
908 {
909 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
910 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
911 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
912 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
913 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
914 {
915 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
916 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
917 if (i64Delta2 > 0)
918 {
919 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
920 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
921
922 if (pVCpu == pVCpuDst)
923 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
924 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
925 }
926
927 if ( !pVM->tm.s.fRunningQueues
928 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
929 {
930 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
931 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
932 }
933
934 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
935 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
936 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
937 }
938 }
939 }
940 else
941 {
942 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
943 LogFlow(("TMTimerPoll: stopped\n"));
944 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
945 }
946
947 /*
948 * Complicated lockless approach.
949 */
950 uint64_t off;
951 uint32_t u32Pct = 0;
952 bool fCatchUp;
953 int cOuterTries = 42;
954 for (;; cOuterTries--)
955 {
956 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
957 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
958 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
959 if (fCatchUp)
960 {
961 /* No changes allowed, try to get a consistent set of parameters. */
962 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
963 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
964 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
965 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
966 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
967 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
968 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
969 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
970 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
971 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
972 || cOuterTries <= 0)
973 {
974 uint64_t u64Delta = u64Now - u64Prev;
975 if (RT_LIKELY(!(u64Delta >> 32)))
976 {
977 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
978 if (off > u64Sub + offGivenUp)
979 off -= u64Sub;
980 else /* we've completely caught up. */
981 off = offGivenUp;
982 }
983 else
984 /* More than 4 seconds since last time (or negative), ignore it. */
985 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
986
987 /* Check that we're still running and in catch up. */
988 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
989 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
990 break;
991 }
992 }
993 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
994 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
995 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
996 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
997 break; /* Got a consistent offset */
998
999 /* Repeat the initial checks before iterating. */
1000 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1001 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
1002 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
1003 {
1004 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
1005 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1006 }
1007 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
1008 {
1009 LogFlow(("TMTimerPoll: stopped\n"));
1010 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1011 }
1012 if (cOuterTries <= 0)
1013 break; /* that's enough */
1014 }
1015 if (cOuterTries <= 0)
1016 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
1017 u64VirtualSyncNow = u64Now - off;
1018
1019 /* Calc delta and see if we've got a virtual sync hit. */
1020 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
1021 if (i64Delta2 <= 0)
1022 {
1023 if ( !pVM->tm.s.fRunningQueues
1024 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1025 {
1026 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
1027 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
1028 }
1029 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
1030 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
1031 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1032 }
1033
1034 /*
1035 * Return the time left to the next event.
1036 */
1037 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
1038 if (pVCpu == pVCpuDst)
1039 {
1040 if (fCatchUp)
1041 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
1042 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
1043 }
1044 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1045}
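/*
 * Worked example of the catch-up arithmetic above (illustrative numbers):
 * with u32Pct = 25 the virtual sync clock runs at 125% speed, so 8ms of
 * elapsed virtual time consumes 8 * 25 / 100 = 2ms of the backlog ('off').
 * Likewise the final delta conversion, i64Delta2 * 100 / (u32Pct + 100),
 * turns a 10ms virtual sync delta into the 8ms of virtual clock time it
 * will take to reach it while catching up.
 */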
1046
1047
1048/**
1049 * Set FF if we've passed the next virtual event.
1050 *
1051 * This function is called before FFs are checked in the inner execution EM loops.
1052 *
1053 * @returns true if timers are pending, false if not.
1054 *
1055 * @param pVM The cross context VM structure.
1056 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1057 * @thread The emulation thread.
1058 */
1059VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1060{
1061 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1062 uint64_t off = 0;
1063 tmTimerPollInternal(pVM, pVCpu, &off);
1064 return off == 0;
1065}
1066
1067
1068/**
1069 * Set FF if we've passed the next virtual event.
1070 *
1071 * This function is called before FFs are checked in the inner execution EM loops.
1072 *
1073 * @param pVM The cross context VM structure.
1074 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1075 * @thread The emulation thread.
1076 */
1077VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1078{
1079 uint64_t off;
1080 tmTimerPollInternal(pVM, pVCpu, &off);
1081}
1082
1083
1084/**
1085 * Set FF if we've passed the next virtual event.
1086 *
1087 * This function is called before FFs are checked in the inner execution EM loops.
1088 *
1089 * @returns The GIP timestamp of the next event.
1090 * 0 if the next event has already expired.
1091 * @param pVM The cross context VM structure.
1092 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1093 * @param pu64Delta Where to store the delta.
1094 * @thread The emulation thread.
1095 */
1096VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1097{
1098 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
1099}
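/*
 * Usage sketch for the poll API (hypothetical caller): an execution loop asks
 * how long it may run before the next timer event and programs its preemption
 * timeout accordingly.
 */
#if 0 /* illustration only */
uint64_t cNsDelta;
uint64_t const u64GipDeadline = TMTimerPollGIP(pVM, pVCpu, &cNsDelta);
if (!cNsDelta)
{
    /* Already expired: VMCPU_FF_TIMER has been raised, go run the timer queues. */
}
else
{
    /* Safe to execute guest code for up to cNsDelta ns (deadline in GIP time: u64GipDeadline). */
}
#endif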
1100
1101#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1102
1103/**
1104 * Gets the host context ring-3 pointer of the timer.
1105 *
1106 * @returns HC R3 pointer.
1107 * @param pTimer Timer handle as returned by one of the create functions.
1108 */
1109VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
1110{
1111#ifdef IN_RING0
1112 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1113#endif
1114 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
1115}
1116
1117
1118/**
1119 * Gets the host context ring-0 pointer of the timer.
1120 *
1121 * @returns HC R0 pointer.
1122 * @param pTimer Timer handle as returned by one of the create functions.
1123 */
1124VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
1125{
1126#ifdef IN_RING0
1127 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1128#endif
1129 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
1130}
1131
1132
1133/**
1134 * Gets the RC pointer of the timer.
1135 *
1136 * @returns RC pointer.
1137 * @param pTimer Timer handle as returned by one of the create functions.
1138 */
1139VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
1140{
1141#ifdef IN_RING0
1142 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1143#endif
1144 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
1145}
1146
1147
1148/**
1149 * Locks the timer clock.
1150 *
1151 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1152 * if the clock does not have a lock.
1153 * @param pTimer The timer whose clock lock we wish to take.
1154 * @param rcBusy What to return in ring-0 and raw-mode context
1155 * if the lock is busy. Pass VINF_SUCCESS to
1156 * acquire the critical section thru a ring-3
1157 * call if necessary.
1158 *
1159 * @remarks Currently only supported on timers using the virtual sync clock.
1160 */
1161VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
1162{
1163#ifdef IN_RING0
1164 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1165#endif
1166 AssertPtr(pTimer);
1167 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1168 return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
1169}
1170
1171
1172/**
1173 * Unlocks a timer clock locked by TMTimerLock.
1174 *
1175 * @param pTimer The timer which clock to unlock.
1176 */
1177VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
1178{
1179#ifdef IN_RING0
1180 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1181#endif
1182 AssertPtr(pTimer);
1183 AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
1184 PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1185}
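/*
 * Usage sketch (hypothetical caller): virtual sync timers are re-armed under
 * the clock lock so the new expiration stays consistent with the clock
 * reading; cTicksInterval is a hypothetical interval in clock ticks.
 */
#if 0 /* illustration only */
int rc = TMTimerLock(pTimer, VERR_IGNORED);
if (RT_SUCCESS(rc))
{
    TMTimerSet(pTimer, TMTimerGet(pTimer) + cTicksInterval);
    TMTimerUnlock(pTimer);
}
#endif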
1186
1187
1188/**
1189 * Checks if the current thread owns the timer clock lock.
1190 *
1191 * @returns @c true if it's the owner, @c false if not.
1192 * @param pTimer The timer handle.
1193 */
1194VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
1195{
1196#ifdef IN_RING0
1197 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1198#endif
1199 AssertPtr(pTimer);
1200 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
1201 return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1202}
1203
1204
1205/**
1206 * Optimized TMTimerSet code path for starting an inactive timer.
1207 *
1208 * @returns VBox status code.
1209 *
1210 * @param pVM The cross context VM structure.
1211 * @param pTimer The timer handle.
1212 * @param u64Expire The new expire time.
1213 */
1214static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
1215{
1216 Assert(!pTimer->offPrev);
1217 Assert(!pTimer->offNext);
1218 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1219
1220 TMCLOCK const enmClock = pTimer->enmClock;
1221
1222 /*
1223 * Calculate and set the expiration time.
1224 */
1225 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1226 {
1227 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1228 AssertMsgStmt(u64Expire >= u64Last,
1229 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1230 u64Expire = u64Last);
1231 }
1232 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1233 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
1234
1235 /*
1236 * Link the timer into the active list.
1237 */
1238 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1239
1240 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1241 TM_UNLOCK_TIMERS(pVM);
1242 return VINF_SUCCESS;
1243}
1244
1245
1246/**
1247 * TMTimerSet for the virtual sync timer queue.
1248 *
1249 * This employs a greatly simplified state machine by always acquiring the
1250 * queue lock and bypassing the scheduling list.
1251 *
1252 * @returns VBox status code
1253 * @param pVM The cross context VM structure.
1254 * @param pTimer The timer handle.
1255 * @param u64Expire The expiration time.
1256 */
1257static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
1258{
1259 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1260 VM_ASSERT_EMT(pVM);
1261 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1262 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1263 AssertRCReturn(rc, rc);
1264
1265 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1266 TMTIMERSTATE enmState = pTimer->enmState;
1267 switch (enmState)
1268 {
1269 case TMTIMERSTATE_EXPIRED_DELIVER:
1270 case TMTIMERSTATE_STOPPED:
1271 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1272 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
1273 else
1274 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);
1275
1276 AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
1277 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
1278 pTimer->u64Expire = u64Expire;
1279 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1280 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1281 rc = VINF_SUCCESS;
1282 break;
1283
1284 case TMTIMERSTATE_ACTIVE:
1285 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
1286 tmTimerQueueUnlinkActive(pQueue, pTimer);
1287 pTimer->u64Expire = u64Expire;
1288 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1289 rc = VINF_SUCCESS;
1290 break;
1291
1292 case TMTIMERSTATE_PENDING_RESCHEDULE:
1293 case TMTIMERSTATE_PENDING_STOP:
1294 case TMTIMERSTATE_PENDING_SCHEDULE:
1295 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1296 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1297 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1298 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1299 case TMTIMERSTATE_DESTROY:
1300 case TMTIMERSTATE_FREE:
1301 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1302 rc = VERR_TM_INVALID_STATE;
1303 break;
1304
1305 default:
1306 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1307 rc = VERR_TM_UNKNOWN_STATE;
1308 break;
1309 }
1310
1311 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1312 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1313 return rc;
1314}
1315
1316
1317/**
1318 * Arm a timer with a (new) expire time.
1319 *
1320 * @returns VBox status code.
1321 * @param pTimer Timer handle as returned by one of the create functions.
1322 * @param u64Expire New expire time.
1323 */
1324VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
1325{
1326#ifdef IN_RING0
1327 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1328#endif
1329 PVMCC pVM = pTimer->CTX_SUFF(pVM);
1330 STAM_COUNTER_INC(&pTimer->StatSetAbsolute);
1331
1332 /* Treat virtual sync timers specially. */
1333 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1334 return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);
1335
1336 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1337 TMTIMER_ASSERT_CRITSECT(pTimer);
1338
1339 DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));
1340
1341#ifdef VBOX_WITH_STATISTICS
1342 /*
1343 * Gather optimization info.
1344 */
1345 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
1346 TMTIMERSTATE enmOrgState = pTimer->enmState;
1347 switch (enmOrgState)
1348 {
1349 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
1350 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
1351 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
1352 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
1353 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
1354 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
1355 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
1356 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
1357 }
1358#endif
1359
1360 /*
1361 * The most common case is setting the timer again during the callback.
1362 * The second most common case is starting a timer at some other time.
1363 */
1364#if 1
1365 TMTIMERSTATE enmState1 = pTimer->enmState;
1366 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
1367 || ( enmState1 == TMTIMERSTATE_STOPPED
1368 && pTimer->pCritSect))
1369 {
1370 /* Try take the TM lock and check the state again. */
1371 if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
1372 {
1373 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
1374 {
1375 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
1376 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1377 return VINF_SUCCESS;
1378 }
1379 TM_UNLOCK_TIMERS(pVM);
1380 }
1381 }
1382#endif
1383
1384 /*
1385 * Unoptimized code path.
1386 */
1387 int cRetries = 1000;
1388 do
1389 {
1390 /*
1391 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1392 */
1393 TMTIMERSTATE enmState = pTimer->enmState;
1394 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
1395 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
1396 switch (enmState)
1397 {
1398 case TMTIMERSTATE_EXPIRED_DELIVER:
1399 case TMTIMERSTATE_STOPPED:
1400 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1401 {
1402 Assert(!pTimer->offPrev);
1403 Assert(!pTimer->offNext);
1404 pTimer->u64Expire = u64Expire;
1405 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1406 tmSchedule(pTimer);
1407 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1408 return VINF_SUCCESS;
1409 }
1410 break;
1411
1412 case TMTIMERSTATE_PENDING_SCHEDULE:
1413 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1414 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1415 {
1416 pTimer->u64Expire = u64Expire;
1417 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1418 tmSchedule(pTimer);
1419 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1420 return VINF_SUCCESS;
1421 }
1422 break;
1423
1424
1425 case TMTIMERSTATE_ACTIVE:
1426 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1427 {
1428 pTimer->u64Expire = u64Expire;
1429 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1430 tmSchedule(pTimer);
1431 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1432 return VINF_SUCCESS;
1433 }
1434 break;
1435
1436 case TMTIMERSTATE_PENDING_RESCHEDULE:
1437 case TMTIMERSTATE_PENDING_STOP:
1438 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1439 {
1440 pTimer->u64Expire = u64Expire;
1441 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1442 tmSchedule(pTimer);
1443 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1444 return VINF_SUCCESS;
1445 }
1446 break;
1447
1448
1449 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1450 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1451 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1452#ifdef IN_RING3
1453 if (!RTThreadYield())
1454 RTThreadSleep(1);
1455#else
1456/** @todo call host context and yield after a couple of iterations */
1457#endif
1458 break;
1459
1460 /*
1461 * Invalid states.
1462 */
1463 case TMTIMERSTATE_DESTROY:
1464 case TMTIMERSTATE_FREE:
1465 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1466 return VERR_TM_INVALID_STATE;
1467 default:
1468 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1469 return VERR_TM_UNKNOWN_STATE;
1470 }
1471 } while (cRetries-- > 0);
1472
1473 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1474 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1475 return VERR_TM_TIMER_UNSTABLE_STATE;
1476}
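/*
 * State flow of the unoptimized path above, for reference:
 *   STOPPED / EXPIRED_DELIVER          -> PENDING_SCHEDULE_SET_EXPIRE   -> PENDING_SCHEDULE
 *   ACTIVE / PENDING_RESCHEDULE / STOP -> PENDING_RESCHEDULE_SET_EXPIRE -> PENDING_RESCHEDULE
 * after which tmSchedule() either runs the scheduling queue directly (EMT
 * holding the timer lock) or raises VMCPU_FF_TIMER for the dedicated timer EMT.
 */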
1477
1478
1479/**
1480 * Return the current time for the specified clock, setting pu64Now if not NULL.
1481 *
1482 * @returns Current time.
1483 * @param pVM The cross context VM structure.
1484 * @param enmClock The clock to query.
1485 * @param pu64Now Optional pointer where to store the return time
1486 */
1487DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1488{
1489 uint64_t u64Now;
1490 switch (enmClock)
1491 {
1492 case TMCLOCK_VIRTUAL_SYNC:
1493 u64Now = TMVirtualSyncGet(pVM);
1494 break;
1495 case TMCLOCK_VIRTUAL:
1496 u64Now = TMVirtualGet(pVM);
1497 break;
1498 case TMCLOCK_REAL:
1499 u64Now = TMRealGet(pVM);
1500 break;
1501 default:
1502 AssertFatalMsgFailed(("%d\n", enmClock));
1503 }
1504
1505 if (pu64Now)
1506 *pu64Now = u64Now;
1507 return u64Now;
1508}
1509
1510
1511/**
1512 * Optimized TMTimerSetRelative code path.
1513 *
1514 * @returns VBox status code.
1515 *
1516 * @param pVM The cross context VM structure.
1517 * @param pTimer The timer handle.
1518 * @param cTicksToNext Clock ticks until the next time expiration.
1519 * @param pu64Now Where to return the current time stamp used.
1520 * Optional.
1521 */
1522static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1523{
1524 Assert(!pTimer->offPrev);
1525 Assert(!pTimer->offNext);
1526 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1527
1528 /*
1529 * Calculate and set the expiration time.
1530 */
1531 TMCLOCK const enmClock = pTimer->enmClock;
1532 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1533 pTimer->u64Expire = u64Expire;
1534 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));
1535
1536 /*
1537 * Link the timer into the active list.
1538 */
1539 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
1540 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1541
1542 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1543 TM_UNLOCK_TIMERS(pVM);
1544 return VINF_SUCCESS;
1545}
1546
1547
1548/**
1549 * TMTimerSetRelative for the virtual sync timer queue.
1550 *
1551 * This employs a greatly simplified state machine by always acquiring the
1552 * queue lock and bypassing the scheduling list.
1553 *
1554 * @returns VBox status code
1555 * @param pVM The cross context VM structure.
1556 * @param pTimer The timer to (re-)arm.
1557 * @param cTicksToNext Clock ticks until the next time expiration.
1558 * @param pu64Now Where to return the current time stamp used.
1559 * Optional.
1560 */
1561static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1562{
1563 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1564 VM_ASSERT_EMT(pVM);
1565 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1566 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1567 AssertRCReturn(rc, rc);
1568
1569 /* Calculate the expiration tick. */
1570 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1571 if (pu64Now)
1572 *pu64Now = u64Expire;
1573 u64Expire += cTicksToNext;
1574
1575 /* Update the timer. */
1576 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1577 TMTIMERSTATE enmState = pTimer->enmState;
1578 switch (enmState)
1579 {
1580 case TMTIMERSTATE_EXPIRED_DELIVER:
1581 case TMTIMERSTATE_STOPPED:
1582 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1583 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1584 else
1585 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1586 pTimer->u64Expire = u64Expire;
1587 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1588 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1589 rc = VINF_SUCCESS;
1590 break;
1591
1592 case TMTIMERSTATE_ACTIVE:
1593 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1594 tmTimerQueueUnlinkActive(pQueue, pTimer);
1595 pTimer->u64Expire = u64Expire;
1596 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1597 rc = VINF_SUCCESS;
1598 break;
1599
1600 case TMTIMERSTATE_PENDING_RESCHEDULE:
1601 case TMTIMERSTATE_PENDING_STOP:
1602 case TMTIMERSTATE_PENDING_SCHEDULE:
1603 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1604 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1605 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1606 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1607 case TMTIMERSTATE_DESTROY:
1608 case TMTIMERSTATE_FREE:
1609 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1610 rc = VERR_TM_INVALID_STATE;
1611 break;
1612
1613 default:
1614 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1615 rc = VERR_TM_UNKNOWN_STATE;
1616 break;
1617 }
1618
1619 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1620 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1621 return rc;
1622}
1623
1624
1625/**
1626 * Arm a timer with an expire time relative to the current time.
1627 *
1628 * @returns VBox status code.
1629 * @param pTimer Timer handle as returned by one of the create functions.
1630 * @param cTicksToNext Clock ticks until the next time expiration.
1631 * @param pu64Now Where to return the current time stamp used.
1632 * Optional.
1633 */
1634VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1635{
1636#ifdef IN_RING0
1637 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1638#endif
1639 PVMCC pVM = pTimer->CTX_SUFF(pVM);
1640 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1641
1642 /* Treat virtual sync timers specially. */
1643 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1644 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1645
1646 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1647 TMTIMER_ASSERT_CRITSECT(pTimer);
1648
1649 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));
1650
1651#ifdef VBOX_WITH_STATISTICS
1652 /*
1653 * Gather optimization info.
1654 */
1655 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1656 TMTIMERSTATE enmOrgState = pTimer->enmState;
1657 switch (enmOrgState)
1658 {
1659 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1660 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1661 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1662 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1663 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1664 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1665 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1666 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1667 }
1668#endif
1669
1670 /*
1671 * Try to take the TM lock and optimize the common cases.
1672 *
1673 * With the TM lock we can safely make optimizations like immediate
1674 * scheduling and we can also be 100% sure that we're not racing the
1675 * running of the timer queues. As an additional restraint we require the
1676 * timer to have a critical section associated with it, so we can be 100%
1677 * sure there are no concurrent operations on the timer. (The latter is no
1678 * longer strictly necessary, as concurrent operations aren't supported
1679 * for any timers, critsect or not.)
1680 *
1681 * Note! Lock ordering doesn't apply when we only try to get the innermost locks.
1682 */
1683 bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
1684#if 1
1685 if ( fOwnTMLock
1686 && pTimer->pCritSect)
1687 {
1688 TMTIMERSTATE enmState = pTimer->enmState;
1689 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1690 || enmState == TMTIMERSTATE_STOPPED)
1691 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1692 {
1693 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
1694 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1695 return VINF_SUCCESS;
1696 }
1697
1698 /* Optimize other states when it becomes necessary. */
1699 }
1700#endif
1701
1702 /*
1703 * Unoptimized path.
1704 */
1705 int rc;
1706 TMCLOCK const enmClock = pTimer->enmClock;
1707 for (int cRetries = 1000; ; cRetries--)
1708 {
1709 /*
1710 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1711 */
1712 TMTIMERSTATE enmState = pTimer->enmState;
1713 switch (enmState)
1714 {
1715 case TMTIMERSTATE_STOPPED:
1716 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1717 {
1718 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1719 * Figure out a safe way of activating this timer while the queue is
1720 * being run.
1721 * (99.9% sure the assertion is caused by DevAPIC.cpp re-starting the
1722 * timer in response to an initial_count write.) */
1723 }
1724 RT_FALL_THRU();
1725 case TMTIMERSTATE_EXPIRED_DELIVER:
1726 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1727 {
1728 Assert(!pTimer->offPrev);
1729 Assert(!pTimer->offNext);
1730 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1731 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1732 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1733 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1734 tmSchedule(pTimer);
1735 rc = VINF_SUCCESS;
1736 break;
1737 }
1738 rc = VERR_TRY_AGAIN;
1739 break;
1740
1741 case TMTIMERSTATE_PENDING_SCHEDULE:
1742 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1743 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1744 {
1745 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1746 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1747 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1748 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1749 tmSchedule(pTimer);
1750 rc = VINF_SUCCESS;
1751 break;
1752 }
1753 rc = VERR_TRY_AGAIN;
1754 break;
1755
1756
1757 case TMTIMERSTATE_ACTIVE:
1758 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1759 {
1760 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1761 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1762 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1763 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1764 tmSchedule(pTimer);
1765 rc = VINF_SUCCESS;
1766 break;
1767 }
1768 rc = VERR_TRY_AGAIN;
1769 break;
1770
1771 case TMTIMERSTATE_PENDING_RESCHEDULE:
1772 case TMTIMERSTATE_PENDING_STOP:
1773 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1774 {
1775 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1776 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1777 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1778 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1779 tmSchedule(pTimer);
1780 rc = VINF_SUCCESS;
1781 break;
1782 }
1783 rc = VERR_TRY_AGAIN;
1784 break;
1785
1786
1787 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1788 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1789 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1790#ifdef IN_RING3
1791 if (!RTThreadYield())
1792 RTThreadSleep(1);
1793#else
1794/** @todo call host context and yield after a couple of iterations */
1795#endif
1796 rc = VERR_TRY_AGAIN;
1797 break;
1798
1799 /*
1800 * Invalid states.
1801 */
1802 case TMTIMERSTATE_DESTROY:
1803 case TMTIMERSTATE_FREE:
1804 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1805 rc = VERR_TM_INVALID_STATE;
1806 break;
1807
1808 default:
1809 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1810 rc = VERR_TM_UNKNOWN_STATE;
1811 break;
1812 }
1813
1814 /* switch + loop is tedious to break out of. */
1815 if (rc == VINF_SUCCESS)
1816 break;
1817
1818 if (rc != VERR_TRY_AGAIN)
1819 {
1820 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1821 break;
1822 }
1823 if (cRetries <= 0)
1824 {
1825 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1826 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1827 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1828 break;
1829 }
1830
1831 /*
1832 * Retry to gain locks.
1833 */
1834 if (!fOwnTMLock)
1835 fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
1836
1837 } /* for (;;) */
1838
1839 /*
1840 * Clean up and return.
1841 */
1842 if (fOwnTMLock)
1843 TM_UNLOCK_TIMERS(pVM);
1844
1845 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1846 return rc;
1847}
1848
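/*
 * A minimal usage sketch, assuming the ring-3 FNTMTIMERDEV callback shape and
 * an illustrative 10 ms period: the common pattern of re-arming a periodic
 * timer from its own expiration callback via TMTimerSetRelative.
 */
static DECLCALLBACK(void) tmExamplePeriodicCallback(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    RT_NOREF(pDevIns, pvUser);
    uint64_t u64Now = 0;
    /* Re-arm 10 ms (converted to timer clock ticks) from the current time;
       u64Now receives the time stamp the new expire time is based on. */
    int rc = TMTimerSetRelative(pTimer, TMTimerFromMicro(pTimer, 10000), &u64Now);
    AssertRC(rc);
}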
1849
1850/**
1851 * Drops a hint about the frequency of the timer.
1852 *
1853 * This is used by TM and the VMM to calculate how often guest execution needs
1854 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1855 *
1856 * @returns VBox status code.
1857 * @param pTimer Timer handle as returned by one of the create
1858 * functions.
1859 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1860 *
1861 * @remarks We're using an integer hertz value here since anything at 1 Hz or
1862 * above is not going to be any trouble to satisfy scheduling-wise.
1863 * The range where the hint makes sense is >= 100 Hz.
1864 */
1865VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1866{
1867#ifdef IN_RING0
1868 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1869#endif
1870 TMTIMER_ASSERT_CRITSECT(pTimer);
1871
1872 uint32_t const uHzOldHint = pTimer->uHzHint;
1873 pTimer->uHzHint = uHzHint;
1874
1875 PVM pVM = pTimer->CTX_SUFF(pVM);
1876 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1877 if ( uHzHint > uMaxHzHint
1878 || uHzOldHint >= uMaxHzHint)
1879 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1880
1881 return VINF_SUCCESS;
1882}
1883
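/*
 * A minimal sketch, assuming a device that has just been programmed to a
 * 1000 Hz rate: pair the frequency hint with the actual (re-)arming so that
 * tmGetFrequencyHint/TMCalcHostTimerFrequency further down can size the host
 * preemption timer. The handle and the rate are illustrative.
 */
static void tmExampleHintAndArm(PTMTIMER pTimer)
{
    TMTimerSetFrequencyHint(pTimer, 1000 /*Hz*/);
    int rc = TMTimerSetRelative(pTimer, TMTimerFromMicro(pTimer, 1000), NULL /*pu64Now*/);
    AssertRC(rc);
}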
1884
1885/**
1886 * TMTimerStop for the virtual sync timer queue.
1887 *
1888 * This employs a greatly simplified state machine by always acquiring the
1889 * queue lock and bypassing the scheduling list.
1890 *
1891 * @returns VBox status code
1892 * @param pVM The cross context VM structure.
1893 * @param pTimer The timer handle.
1894 */
1895static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1896{
1897 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1898 VM_ASSERT_EMT(pVM);
1899 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1900 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1901 AssertRCReturn(rc, rc);
1902
1903 /* Reset the HZ hint. */
1904 if (pTimer->uHzHint)
1905 {
1906 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1907 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1908 pTimer->uHzHint = 0;
1909 }
1910
1911 /* Update the timer state. */
1912 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1913 TMTIMERSTATE enmState = pTimer->enmState;
1914 switch (enmState)
1915 {
1916 case TMTIMERSTATE_ACTIVE:
1917 tmTimerQueueUnlinkActive(pQueue, pTimer);
1918 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1919 rc = VINF_SUCCESS;
1920 break;
1921
1922 case TMTIMERSTATE_EXPIRED_DELIVER:
1923 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1924 rc = VINF_SUCCESS;
1925 break;
1926
1927 case TMTIMERSTATE_STOPPED:
1928 rc = VINF_SUCCESS;
1929 break;
1930
1931 case TMTIMERSTATE_PENDING_RESCHEDULE:
1932 case TMTIMERSTATE_PENDING_STOP:
1933 case TMTIMERSTATE_PENDING_SCHEDULE:
1934 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1935 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1936 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1937 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1938 case TMTIMERSTATE_DESTROY:
1939 case TMTIMERSTATE_FREE:
1940 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1941 rc = VERR_TM_INVALID_STATE;
1942 break;
1943
1944 default:
1945 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1946 rc = VERR_TM_UNKNOWN_STATE;
1947 break;
1948 }
1949
1950 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1951 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1952 return rc;
1953}
1954
1955
1956/**
1957 * Stop the timer.
1958 * Use one of the TMTimerSet*() functions to re-arm it.
1959 *
1960 * @returns VBox status code.
1961 * @param pTimer Timer handle as returned by one of the create functions.
1962 */
1963VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1964{
1965#ifdef IN_RING0
1966 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
1967#endif
1968 PVMCC pVM = pTimer->CTX_SUFF(pVM);
1969 STAM_COUNTER_INC(&pTimer->StatStop);
1970
1971 /* Treat virtual sync timers specially. */
1972 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
1973 return tmTimerVirtualSyncStop(pVM, pTimer);
1974
1975 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1976 TMTIMER_ASSERT_CRITSECT(pTimer);
1977
1978 /*
1979 * Reset the HZ hint.
1980 */
1981 if (pTimer->uHzHint)
1982 {
1983 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1984 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1985 pTimer->uHzHint = 0;
1986 }
1987
1988 /** @todo see if this function needs optimizing. */
1989 int cRetries = 1000;
1990 do
1991 {
1992 /*
1993 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1994 */
1995 TMTIMERSTATE enmState = pTimer->enmState;
1996 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1997 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1998 switch (enmState)
1999 {
2000 case TMTIMERSTATE_EXPIRED_DELIVER:
2001 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
2002 return VERR_INVALID_PARAMETER;
2003
2004 case TMTIMERSTATE_STOPPED:
2005 case TMTIMERSTATE_PENDING_STOP:
2006 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2007 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2008 return VINF_SUCCESS;
2009
2010 case TMTIMERSTATE_PENDING_SCHEDULE:
2011 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2012 {
2013 tmSchedule(pTimer);
2014 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2015 return VINF_SUCCESS;
2016 }
2017 break;
2018
2019 case TMTIMERSTATE_PENDING_RESCHEDULE:
2020 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2021 {
2022 tmSchedule(pTimer);
2023 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2024 return VINF_SUCCESS;
2025 }
2026 break;
2027
2028 case TMTIMERSTATE_ACTIVE:
2029 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2030 {
2031 tmSchedule(pTimer);
2032 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2033 return VINF_SUCCESS;
2034 }
2035 break;
2036
2037 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2038 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2039 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2040#ifdef IN_RING3
2041 if (!RTThreadYield())
2042 RTThreadSleep(1);
2043#else
2044/** @todo call host and yield cpu after a while. */
2045#endif
2046 break;
2047
2048 /*
2049 * Invalid states.
2050 */
2051 case TMTIMERSTATE_DESTROY:
2052 case TMTIMERSTATE_FREE:
2053 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2054 return VERR_TM_INVALID_STATE;
2055 default:
2056 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2057 return VERR_TM_UNKNOWN_STATE;
2058 }
2059 } while (cRetries-- > 0);
2060
2061 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
2062 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2063 return VERR_TM_TIMER_UNSTABLE_STATE;
2064}
2065
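/*
 * A minimal sketch, assuming a device reset handler: stopping the timer also
 * clears its frequency hint (see above), so no separate
 * TMTimerSetFrequencyHint(pTimer, 0) call is needed.
 */
static void tmExampleResetTimer(PTMTIMER pTimer)
{
    int rc = TMTimerStop(pTimer);
    /* TMTimerStop refuses EXPIRED_DELIVER with VERR_INVALID_PARAMETER. */
    Assert(RT_SUCCESS(rc) || rc == VERR_INVALID_PARAMETER); RT_NOREF(rc);
}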
2066
2067/**
2068 * Get the current clock time.
2069 * Handy for calculating the new expire time.
2070 *
2071 * @returns Current clock time.
2072 * @param pTimer Timer handle as returned by one of the create functions.
2073 */
2074VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
2075{
2076#ifdef IN_RING0
2077 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2078#endif
2079 PVMCC pVM = pTimer->CTX_SUFF(pVM);
2080 STAM_COUNTER_INC(&pTimer->StatGet);
2081
2082 uint64_t u64;
2083 switch (pTimer->enmClock)
2084 {
2085 case TMCLOCK_VIRTUAL:
2086 u64 = TMVirtualGet(pVM);
2087 break;
2088 case TMCLOCK_VIRTUAL_SYNC:
2089 u64 = TMVirtualSyncGet(pVM);
2090 break;
2091 case TMCLOCK_REAL:
2092 u64 = TMRealGet(pVM);
2093 break;
2094 default:
2095 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2096 return UINT64_MAX;
2097 }
2098 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2099 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2100 return u64;
2101}
2102
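/*
 * A minimal sketch combining TMTimerGet with TMTimerGetExpire (further down)
 * to compute the ticks remaining until an active timer fires; illustrative
 * only, and subject to the usual critsect rules.
 */
static uint64_t tmExampleTicksUntilExpire(PTMTIMER pTimer)
{
    uint64_t const u64Expire = TMTimerGetExpire(pTimer); /* ~0 if not active. */
    uint64_t const u64Now    = TMTimerGet(pTimer);
    return u64Expire > u64Now ? u64Expire - u64Now : 0;
}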
2103
2104/**
2105 * Get the frequency of the timer clock.
2106 *
2107 * @returns Clock frequency (as Hz of course).
2108 * @param pTimer Timer handle as returned by one of the create functions.
2109 */
2110VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
2111{
2112#ifdef IN_RING0
2113 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2114#endif
2115 switch (pTimer->enmClock)
2116 {
2117 case TMCLOCK_VIRTUAL:
2118 case TMCLOCK_VIRTUAL_SYNC:
2119 return TMCLOCK_FREQ_VIRTUAL;
2120
2121 case TMCLOCK_REAL:
2122 return TMCLOCK_FREQ_REAL;
2123
2124 default:
2125 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2126 return 0;
2127 }
2128}
2129
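/*
 * A minimal sketch of clock-agnostic conversion using TMTimerGetFreq, which is
 * what the TMTimerToXxx/TMTimerFromXxx helpers further down hardcode for the
 * known clocks. The rate parameter is illustrative.
 */
static uint64_t tmExampleTicksPerPeriod(PTMTIMER pTimer, uint32_t uHz)
{
    /* One period at uHz in timer clock ticks (integer division truncates). */
    return TMTimerGetFreq(pTimer) / RT_MAX(uHz, 1);
}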
2130
2131/**
2132 * Get the expire time of the timer.
2133 * Only valid for active timers.
2134 *
2135 * @returns Expire time of the timer.
2136 * @param pTimer Timer handle as returned by one of the create functions.
2137 */
2138VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
2139{
2140#ifdef IN_RING0
2141 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2142#endif
2143 TMTIMER_ASSERT_CRITSECT(pTimer);
2144 int cRetries = 1000;
2145 do
2146 {
2147 TMTIMERSTATE enmState = pTimer->enmState;
2148 switch (enmState)
2149 {
2150 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2151 case TMTIMERSTATE_EXPIRED_DELIVER:
2152 case TMTIMERSTATE_STOPPED:
2153 case TMTIMERSTATE_PENDING_STOP:
2154 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2155 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2156 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2157 return ~(uint64_t)0;
2158
2159 case TMTIMERSTATE_ACTIVE:
2160 case TMTIMERSTATE_PENDING_RESCHEDULE:
2161 case TMTIMERSTATE_PENDING_SCHEDULE:
2162 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2163 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2164 return pTimer->u64Expire;
2165
2166 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2167 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2168#ifdef IN_RING3
2169 if (!RTThreadYield())
2170 RTThreadSleep(1);
2171#endif
2172 break;
2173
2174 /*
2175 * Invalid states.
2176 */
2177 case TMTIMERSTATE_DESTROY:
2178 case TMTIMERSTATE_FREE:
2179 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2180 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2181 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2182 return ~(uint64_t)0;
2183 default:
2184 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2185 return ~(uint64_t)0;
2186 }
2187 } while (cRetries-- > 0);
2188
2189 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
2190 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2191 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2192 return ~(uint64_t)0;
2193}
2194
2195
2196/**
2197 * Checks if a timer is active or not.
2198 *
2199 * @returns True if active.
2200 * @returns False if not active.
2201 * @param pTimer Timer handle as returned by one of the create functions.
2202 */
2203VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
2204{
2205#ifdef IN_RING0
2206 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2207#endif
2208 TMTIMERSTATE enmState = pTimer->enmState;
2209 switch (enmState)
2210 {
2211 case TMTIMERSTATE_STOPPED:
2212 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2213 case TMTIMERSTATE_EXPIRED_DELIVER:
2214 case TMTIMERSTATE_PENDING_STOP:
2215 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2216 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2217 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2218 return false;
2219
2220 case TMTIMERSTATE_ACTIVE:
2221 case TMTIMERSTATE_PENDING_RESCHEDULE:
2222 case TMTIMERSTATE_PENDING_SCHEDULE:
2223 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2224 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2225 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2226 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2227 return true;
2228
2229 /*
2230 * Invalid states.
2231 */
2232 case TMTIMERSTATE_DESTROY:
2233 case TMTIMERSTATE_FREE:
2234 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
2235 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2236 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2237 return false;
2238 default:
2239 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2240 return false;
2241 }
2242}
2243
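/*
 * A minimal sketch, assuming a guest register write that must start the timer
 * only when it isn't already running; the 100 ms period is illustrative.
 * TMTimerSetMillies is one of the convenience wrappers defined below.
 */
static void tmExampleStartIfIdle(PTMTIMER pTimer)
{
    if (!TMTimerIsActive(pTimer))
    {
        int rc = TMTimerSetMillies(pTimer, 100);
        AssertRC(rc);
    }
}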
2244
2245/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2246
2247
2248/**
2249 * Arm a timer with a (new) expire time relative to current time.
2250 *
2251 * @returns VBox status code.
2252 * @param pTimer Timer handle as returned by one of the create functions.
2253 * @param cMilliesToNext Number of milliseconds to the next tick.
2254 */
2255VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
2256{
2257 switch (pTimer->enmClock)
2258 {
2259 case TMCLOCK_VIRTUAL:
2260 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2261 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2262
2263 case TMCLOCK_VIRTUAL_SYNC:
2264 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2265 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2266
2267 case TMCLOCK_REAL:
2268 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2269 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
2270
2271 default:
2272 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2273 return VERR_TM_TIMER_BAD_CLOCK;
2274 }
2275}
2276
2277
2278/**
2279 * Arm a timer with a (new) expire time relative to current time.
2280 *
2281 * @returns VBox status code.
2282 * @param pTimer Timer handle as returned by one of the create functions.
2283 * @param cMicrosToNext Number of microseconds to the next tick.
2284 */
2285VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
2286{
2287 switch (pTimer->enmClock)
2288 {
2289 case TMCLOCK_VIRTUAL:
2290 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2291 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2292
2293 case TMCLOCK_VIRTUAL_SYNC:
2294 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2295 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2296
2297 case TMCLOCK_REAL:
2298 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2299 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
2300
2301 default:
2302 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2303 return VERR_TM_TIMER_BAD_CLOCK;
2304 }
2305}
2306
2307
2308/**
2309 * Arm a timer with a (new) expire time relative to current time.
2310 *
2311 * @returns VBox status code.
2312 * @param pTimer Timer handle as returned by one of the create functions.
2313 * @param cNanosToNext Number of nanoseconds to the next tick.
2314 */
2315VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
2316{
2317 switch (pTimer->enmClock)
2318 {
2319 case TMCLOCK_VIRTUAL:
2320 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2321 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2322
2323 case TMCLOCK_VIRTUAL_SYNC:
2324 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2325 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2326
2327 case TMCLOCK_REAL:
2328 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2329 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
2330
2331 default:
2332 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2333 return VERR_TM_TIMER_BAD_CLOCK;
2334 }
2335}
2336
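/*
 * A minimal sketch: the three convenience setters above all scale into the
 * timer's clock ticks before calling TMTimerSetRelative, so on a virtual
 * clock timer these three calls each arm the same 5 ms deadline (on a
 * TMCLOCK_REAL timer the micro/nano variants additionally truncate).
 */
static void tmExampleEquivalentArming(PTMTIMER pTimer)
{
    TMTimerSetMillies(pTimer, 5);       /* 5 ms           */
    TMTimerSetMicro(pTimer, 5000);      /* 5 000 us       */
    TMTimerSetNano(pTimer, 5000000);    /* 5 000 000 ns   */
}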
2337
2338/**
2339 * Get the current clock time as nanoseconds.
2340 *
2341 * @returns The timer clock as nanoseconds.
2342 * @param pTimer Timer handle as returned by one of the create functions.
2343 */
2344VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
2345{
2346 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
2347}
2348
2349
2350/**
2351 * Get the current clock time as microseconds.
2352 *
2353 * @returns The timer clock as microseconds.
2354 * @param pTimer Timer handle as returned by one of the create functions.
2355 */
2356VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
2357{
2358 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
2359}
2360
2361
2362/**
2363 * Get the current clock time as milliseconds.
2364 *
2365 * @returns The timer clock as milliseconds.
2366 * @param pTimer Timer handle as returned by one of the create functions.
2367 */
2368VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
2369{
2370 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
2371}
2372
2373
2374/**
2375 * Converts the specified timer clock time to nanoseconds.
2376 *
2377 * @returns nanoseconds.
2378 * @param pTimer Timer handle as returned by one of the create functions.
2379 * @param u64Ticks The clock ticks.
2380 * @remark There could be rounding errors here. We just do a simple integer divide
2381 * without any adjustments.
2382 */
2383VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
2384{
2385#ifdef IN_RING0
2386 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2387#endif
2388 switch (pTimer->enmClock)
2389 {
2390 case TMCLOCK_VIRTUAL:
2391 case TMCLOCK_VIRTUAL_SYNC:
2392 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2393 return u64Ticks;
2394
2395 case TMCLOCK_REAL:
2396 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2397 return u64Ticks * 1000000;
2398
2399 default:
2400 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2401 return 0;
2402 }
2403}
2404
2405
2406/**
2407 * Converts the specified timer clock time to microseconds.
2408 *
2409 * @returns microseconds.
2410 * @param pTimer Timer handle as returned by one of the create functions.
2411 * @param u64Ticks The clock ticks.
2412 * @remark There could be rounding errors here. We just do a simple integer divide
2413 * without any adjustments.
2414 */
2415VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
2416{
2417#ifdef IN_RING0
2418 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2419#endif
2420 switch (pTimer->enmClock)
2421 {
2422 case TMCLOCK_VIRTUAL:
2423 case TMCLOCK_VIRTUAL_SYNC:
2424 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2425 return u64Ticks / 1000;
2426
2427 case TMCLOCK_REAL:
2428 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2429 return u64Ticks * 1000;
2430
2431 default:
2432 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2433 return 0;
2434 }
2435}
2436
2437
2438/**
2439 * Converts the specified timer clock time to milliseconds.
2440 *
2441 * @returns milliseconds.
2442 * @param pTimer Timer handle as returned by one of the create functions.
2443 * @param u64Ticks The clock ticks.
2444 * @remark There could be rounding errors here. We just do a simple integer divide
2445 * without any adjustments.
2446 */
2447VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
2448{
2449#ifdef IN_RING0
2450 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2451#endif
2452 switch (pTimer->enmClock)
2453 {
2454 case TMCLOCK_VIRTUAL:
2455 case TMCLOCK_VIRTUAL_SYNC:
2456 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2457 return u64Ticks / 1000000;
2458
2459 case TMCLOCK_REAL:
2460 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2461 return u64Ticks;
2462
2463 default:
2464 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2465 return 0;
2466 }
2467}
2468
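/*
 * A minimal sketch measuring an elapsed interval with the converters above.
 * Per the remarks, the division truncates: 999999 virtual-clock ticks (ns)
 * convert to 0 ms.
 */
static uint64_t tmExampleElapsedMilli(PTMTIMER pTimer, uint64_t u64StartTicks)
{
    return TMTimerToMilli(pTimer, TMTimerGet(pTimer) - u64StartTicks);
}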
2469
2470/**
2471 * Converts the specified nanosecond timestamp to timer clock ticks.
2472 *
2473 * @returns timer clock ticks.
2474 * @param pTimer Timer handle as returned by one of the create functions.
2475 * @param cNanoSecs The nanosecond value to convert.
2476 * @remark There could be rounding and overflow errors here.
2477 */
2478VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
2479{
2480#ifdef IN_RING0
2481 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2482#endif
2483 switch (pTimer->enmClock)
2484 {
2485 case TMCLOCK_VIRTUAL:
2486 case TMCLOCK_VIRTUAL_SYNC:
2487 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2488 return cNanoSecs;
2489
2490 case TMCLOCK_REAL:
2491 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2492 return cNanoSecs / 1000000;
2493
2494 default:
2495 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2496 return 0;
2497 }
2498}
2499
2500
2501/**
2502 * Converts the specified microsecond timestamp to timer clock ticks.
2503 *
2504 * @returns timer clock ticks.
2505 * @param pTimer Timer handle as returned by one of the create functions.
2506 * @param cMicroSecs The microsecond value to convert.
2507 * @remark There could be rounding and overflow errors here.
2508 */
2509VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
2510{
2511#ifdef IN_RING0
2512 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2513#endif
2514 switch (pTimer->enmClock)
2515 {
2516 case TMCLOCK_VIRTUAL:
2517 case TMCLOCK_VIRTUAL_SYNC:
2518 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2519 return cMicroSecs * 1000;
2520
2521 case TMCLOCK_REAL:
2522 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2523 return cMicroSecs / 1000;
2524
2525 default:
2526 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2527 return 0;
2528 }
2529}
2530
2531
2532/**
2533 * Converts the specified millisecond timestamp to timer clock ticks.
2534 *
2535 * @returns timer clock ticks.
2536 * @param pVM The cross context VM structure.
2537 * @param pTimer Timer handle as returned by one of the create functions.
2538 * @param cMilliSecs The millisecond value to convert.
2539 * @remark There could be rounding and overflow errors here.
2540 */
2541VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, PTMTIMER pTimer, uint64_t cMilliSecs)
2542{
2543 RT_NOREF(pVM);
2544 Assert(pVM == pTimer->CTX_SUFF(pVM));
2545#ifdef IN_RING0
2546 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0);
2547#endif
2548 switch (pTimer->enmClock)
2549 {
2550 case TMCLOCK_VIRTUAL:
2551 case TMCLOCK_VIRTUAL_SYNC:
2552 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2553 return cMilliSecs * 1000000;
2554
2555 case TMCLOCK_REAL:
2556 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2557 return cMilliSecs;
2558
2559 default:
2560 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2561 return 0;
2562 }
2563}
2564
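/*
 * A minimal sketch, assuming a guest-programmed millisecond timeout: note that
 * in this revision TMTimerFromMilli also takes the cross context VM structure,
 * unlike its nano/micro siblings above.
 */
static int tmExampleArmFromGuestMillis(PVMCC pVM, PTMTIMER pTimer, uint64_t cMsTimeout)
{
    uint64_t const cTicks = TMTimerFromMilli(pVM, pTimer, cMsTimeout);
    return TMTimerSetRelative(pTimer, cTicks, NULL /*pu64Now*/);
}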
2565
2566/**
2567 * Convert state to string.
2568 *
2569 * @returns Read-only state name.
2570 * @param enmState State.
2571 */
2572const char *tmTimerState(TMTIMERSTATE enmState)
2573{
2574 switch (enmState)
2575 {
2576#define CASE(num, state) \
2577 case TMTIMERSTATE_##state: \
2578 AssertCompile(TMTIMERSTATE_##state == (num)); \
2579 return #num "-" #state
2580 CASE( 1,STOPPED);
2581 CASE( 2,ACTIVE);
2582 CASE( 3,EXPIRED_GET_UNLINK);
2583 CASE( 4,EXPIRED_DELIVER);
2584 CASE( 5,PENDING_STOP);
2585 CASE( 6,PENDING_STOP_SCHEDULE);
2586 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2587 CASE( 8,PENDING_SCHEDULE);
2588 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2589 CASE(10,PENDING_RESCHEDULE);
2590 CASE(11,DESTROY);
2591 CASE(12,FREE);
2592 default:
2593 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2594 return "Invalid state!";
2595#undef CASE
2596 }
2597}
2598
2599
2600/**
2601 * Gets the highest frequency hint for all the important timers.
2602 *
2603 * @returns The highest frequency. 0 if no timers care.
2604 * @param pVM The cross context VM structure.
2605 */
2606static uint32_t tmGetFrequencyHint(PVM pVM)
2607{
2608 /*
2609 * Query the value, recalculate it if necessary.
2610 *
2611 * The "right" highest frequency value isn't so important that we'll block
2612 * waiting on the timer semaphore.
2613 */
2614 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2615 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2616 {
2617 if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
2618 {
2619 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2620
2621 /*
2622 * Loop over the timers associated with each clock.
2623 */
2624 uMaxHzHint = 0;
2625 for (int i = 0; i < TMCLOCK_MAX; i++)
2626 {
2627 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2628 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2629 {
2630 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2631 if (uHzHint > uMaxHzHint)
2632 {
2633 switch (pCur->enmState)
2634 {
2635 case TMTIMERSTATE_ACTIVE:
2636 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2637 case TMTIMERSTATE_EXPIRED_DELIVER:
2638 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2639 case TMTIMERSTATE_PENDING_SCHEDULE:
2640 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2641 case TMTIMERSTATE_PENDING_RESCHEDULE:
2642 uMaxHzHint = uHzHint;
2643 break;
2644
2645 case TMTIMERSTATE_STOPPED:
2646 case TMTIMERSTATE_PENDING_STOP:
2647 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2648 case TMTIMERSTATE_DESTROY:
2649 case TMTIMERSTATE_FREE:
2650 break;
2651 /* no default, want gcc warnings when adding more states. */
2652 }
2653 }
2654 }
2655 }
2656 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2657 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2658 TM_UNLOCK_TIMERS(pVM);
2659 }
2660 }
2661 return uMaxHzHint;
2662}
2663
2664
2665/**
2666 * Calculates a host timer frequency that would be suitable for the current
2667 * timer load.
2668 *
2669 * This will take the highest timer frequency, adjust for catch-up and warp
2670 * driver, and finally add a little fudge factor. The caller (VMM) will use
2671 * the result to adjust the per-cpu preemption timer.
2672 *
2673 * @returns The highest frequency. 0 if no important timers around.
2674 * @param pVM The cross context VM structure.
2675 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2676 */
2677VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2678{
2679 uint32_t uHz = tmGetFrequencyHint(pVM);
2680
2681 /* Catch-up: we have to be more aggressive than the percentage indicates at
2682 the beginning of the effort. */
2683 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2684 {
2685 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2686 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2687 {
2688 if (u32Pct <= 100)
2689 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2690 else if (u32Pct <= 200)
2691 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2692 else if (u32Pct <= 400)
2693 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2694 uHz *= u32Pct + 100;
2695 uHz /= 100;
2696 }
2697 }
2698
2699 /* Warp drive. */
2700 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2701 {
2702 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2703 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2704 {
2705 uHz *= u32Pct;
2706 uHz /= 100;
2707 }
2708 }
2709
2710 /* Fudge factor. */
2711 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2712 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2713 else
2714 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2715 uHz /= 100;
2716
2717 /* Make sure it isn't too high. */
2718 if (uHz > pVM->tm.s.cHostHzMax)
2719 uHz = pVM->tm.s.cHostHzMax;
2720
2721 return uHz;
2722}
2723
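/*
 * A worked sketch of the catch-up scaling above with assumed inputs: a
 * 1000 Hz hint during a 150% catch-up whose fudge table entry is taken to be
 * 100% gives 1000 * (150 + 100) / 100 = 2500 Hz, before the per-CPU fudge
 * factor and the cHostHzMax clamp are applied.
 */
static uint32_t tmExampleCatchUpScaling(uint32_t uHz, uint32_t u32Pct)
{
    /* Mirrors the "uHz *= u32Pct + 100; uHz /= 100;" step above. */
    return uHz * (u32Pct + 100) / 100;
}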
2724
2725/**
2726 * Whether the guest virtual clock is ticking.
2727 *
2728 * @returns true if ticking, false otherwise.
2729 * @param pVM The cross context VM structure.
2730 */
2731VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2732{
2733 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2734}
2735