VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@93133

Last change on this file since 93133 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

/* $Id: TMAll.cpp 93115 2022-01-01 11:31:46Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#ifdef DEBUG_bird
# define DBGFTRACE_DISABLED /* annoying */
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/dbgftrace.h>
#ifdef IN_RING3
#endif
#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
#include "TMInternal.h"
#include <VBox/vmm/vmcc.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#include <iprt/string.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

#include "TMInline.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef VBOX_STRICT
/** @def TMTIMER_GET_CRITSECT
 * Helper for safely resolving the critical section for a timer belonging to a
 * device instance.
 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
# ifdef IN_RING3
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer)  ((a_pTimer)->pCritSect)
# else
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer)  tmRZTimerGetCritSect(a_pVM, a_pTimer)
# endif
#endif

/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
    do { \
        if ((a_pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner((a_pVM), pCritSect) \
                          || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ), \
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
                       (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
#endif

/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the virtual
 * sync critsect. The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner((pVM), pCritSect) \
                          || PDMCritSectIsOwner((pVM), &(pVM)->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ), \
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif


#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
        PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        ASMSetFlags(fSavedFlags);
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    return (PPDMCRITSECT)MMHyperR3ToCC(pVM, pTimer->pCritSect);
}
#endif /* VBOX_STRICT && IN_RING0 */


/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
    pVCpu->tm.s.fExecuting         = true;
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uTsc    TSC value when exiting guest context.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /*
     * Calculate the elapsed tick count and convert it to nanoseconds.
     */
# ifdef IN_RING3
    PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
    uint64_t       cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta(pGip);
    uint64_t const uCpuHz = pGip ? SUPGetCpuHzFromGip(pGip) : pVM->tm.s.cTSCTicksPerSecondHost;
# else
    uint64_t       cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
    uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
# endif
    AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */

    uint64_t cNsExecutingDelta;
    if (uCpuHz < _4G)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
    else if (uCpuHz < 16*_1G64)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
    else
    {
        Assert(uCpuHz < 64 * _1G64);
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
    }

    /*
     * Update the data.
     *
     * Note! We're not using strict memory ordering here to speed things up.
     *       The data is in a single cache line and this thread is the only
     *       one writing to that line, so I cannot quite imagine why we would
     *       need any strict ordering here.
     */
    uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fExecuting   = false;
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

    /*
     * Update stats.
     */
# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
# endif

    /* The timer triggers occasional updating of the other and total stats: */
    if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
    { /*likely*/ }
    else
    {
        pVCpu->tm.s.fUpdateStats = false;

        uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
        uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
        int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
        if (cNsOtherNewDelta > 0)
            STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif

        pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
        pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
    }

#endif
}
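
/*
 * Note: the uTimesGen updates above follow a seqlock-style pattern: the
 * generation is bumped to an odd value before the statistics fields are
 * modified and to an even value afterwards.  A hypothetical lock-free reader
 * (a sketch only, not an actual TM API) would retry while the generation is
 * odd or changes under it, along these lines:
 *
 * @code
 *  uint64_t cNsExecuting;
 *  uint32_t uGen;
 *  do
 *  {
 *      uGen = ASMAtomicUoReadU32(&pVCpu->tm.s.uTimesGen);  // even => stable
 *      ASMCompilerBarrier();
 *      cNsExecuting = pVCpu->tm.s.cNsExecuting;
 *      ASMCompilerBarrier();
 *  } while ((uGen & 1) || uGen != ASMAtomicUoReadU32(&pVCpu->tm.s.uTimesGen));
 * @endcode
 */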


/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
    pVCpu->tm.s.fHalting       = true;
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.nsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fHalting     = false;
    pVCpu->tm.s.fUpdateStats = false;
    pVCpu->tm.s.cNsHalted    = cNsHaltedNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
    if (cNsOtherNewDelta > 0)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif
    pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
    pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
#endif
}


/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
{
    VMCPUID idCpu = pVM->tm.s.idTimerCpu;
    AssertReturnVoid(idCpu < pVM->cCpus);
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);

    if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}


/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
    if (RT_SUCCESS_NP(rc))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        PDMCritSectLeave(pVM, &pQueue->TimerLock);
        return;
    }

    TMTIMERSTATE enmState = pTimer->enmState;
    if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
        tmScheduleNotify(pVM);
}


/**
 * Try to change the state to enmStateNew from enmStateOld.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}


/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pTimer->idxScheduleNext == UINT32_MAX);
    const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0];
    AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);

    uint32_t idxHead;
    do
    {
        idxHead = pQueue->idxSchedule;
        Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
        pTimer->idxScheduleNext = idxHead;
    } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
}
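
/*
 * Note: the loop above is a lock-free LIFO push (a Treiber-stack style
 * compare-and-swap on the list head).  The single consumer drains the whole
 * list with one atomic exchange; a sketch of that consumer pattern, which is
 * what tmTimerQueueSchedule below actually does, looks like:
 *
 * @code
 *  uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
 *  while (idxNext != UINT32_MAX)
 *  {
 *      PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
 *      idxNext = pTimer->idxScheduleNext;
 *      pTimer->idxScheduleNext = UINT32_MAX;
 *      // ... process pTimer ...
 *  }
 * @endcode
 */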


/**
 * Try to change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pQueueCC        The current context queue (same as @a pQueue for
 *                          ring-3).
 * @param   pQueue          The shared queue data.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer,
                                    TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
        return true;
    }
    return false;
}


/**
 * Links a timer into the active list of a timer queue.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
                                               PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
    RT_NOREF(pVM);

    PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
    if (pCur)
    {
        for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
                tmTimerSetNext(pQueueCC, pTimer, pCur);
                tmTimerSetPrev(pQueueCC, pTimer, pPrev);
                if (pPrev)
                    tmTimerSetNext(pQueueCC, pPrev, pTimer);
                else
                {
                    tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
                }
                tmTimerSetPrev(pQueueCC, pCur, pTimer);
                return;
            }
            if (pCur->idxNext == UINT32_MAX)
            {
                tmTimerSetNext(pQueueCC, pCur, pTimer);
                tmTimerSetPrev(pQueueCC, pTimer, pCur);
                DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
                return;
            }
        }
    }
    else
    {
        tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
    }
}
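
/*
 * Worked example: with an active list ordered by expire time, say
 * [T1=100, T2=200, T3=300], linking a timer with u64Expire=250 walks the list
 * until it hits T3 (300 > 250) and inserts before it, giving
 * [T1=100, T2=200, Tnew=250, T3=300].  Only an insertion at the head (e.g.
 * u64Expire=50) updates pQueue->u64Expire, since that field caches the
 * earliest expiration for the polling code.
 */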


/**
 * Schedules the given timer on the given queue.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
    RT_NOREF(pVM);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
                STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
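
/*
 * Summary of the transitions performed above (retried a couple of times if a
 * concurrent state change wins the race):
 *
 *   PENDING_RESCHEDULE    -> PENDING_SCHEDULE      (unlink from active list)
 *   PENDING_SCHEDULE      -> ACTIVE                (link into active list)
 *   PENDING_STOP          -> PENDING_STOP_SCHEDULE (unlink from active list)
 *   PENDING_STOP_SCHEDULE -> STOPPED
 *   *_SET_EXPIRE          -> (relinked onto the schedule list and postponed)
 */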


/**
 * Schedules the specified timer queue.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3) data of the queue to schedule.
 * @param   pQueue      The shared queue data of the queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    Assert(PDMCritSectIsOwner(pVM, &pQueue->TimerLock));

    /*
     * Dequeue the scheduling list and iterate it.
     */
    uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
    while (idxNext != UINT32_MAX)
    {
        AssertBreak(idxNext < pQueueCC->cTimersAlloc);

        /*
         * Unlink the head timer and take down the index of the next one.
         */
        PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
        idxNext = pTimer->idxScheduleNext;
        pTimer->idxScheduleNext = UINT32_MAX;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->szName));
        tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    }
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}


#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 */
void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
{
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[idxQueue];
        PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
        Assert(pQueue->enmClock == (TMCLOCK)idxQueue);

        int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
        if (RT_SUCCESS(rc))
        {
            if (   pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
                || PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
            {
                /* Check the linking of the active lists. */
                PTMTIMER pPrev = NULL;
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
                    TMTIMERSTATE enmState = pCur->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_ACTIVE:
                            AssertMsg(   pCur->idxScheduleNext == UINT32_MAX
                                      || pCur->enmState != TMTIMERSTATE_ACTIVE,
                                      ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
                            break;
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            break;
                        default:
                            AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                            break;
                    }
                }

# ifdef IN_RING3
                /* Go thru all the timers and check that the active ones all are in the active lists. */
                uint32_t idxTimer = pQueue->cTimersAlloc;
                uint32_t cFree    = 0;
                while (idxTimer-- > 0)
                {
                    PTMTIMER const     pTimer   = &pQueue->paTimers[idxTimer];
                    TMTIMERSTATE const enmState = pTimer->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_FREE:
                            cFree++;
                            break;

                        case TMTIMERSTATE_ACTIVE:
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                        {
                            PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                            Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
                            while (pCurAct && pCurAct != pTimer)
                                pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
                            Assert(pCurAct == pTimer);
                            break;
                        }

                        case TMTIMERSTATE_PENDING_SCHEDULE:
                        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                        case TMTIMERSTATE_STOPPED:
                        case TMTIMERSTATE_EXPIRED_DELIVER:
                        {
                            Assert(pTimer->idxNext == UINT32_MAX);
                            Assert(pTimer->idxPrev == UINT32_MAX);
                            for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                                 pCurAct;
                                 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
                            {
                                Assert(pCurAct != pTimer);
                                Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
                                Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
                            }
                            break;
                        }

                        /* ignore */
                        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            break;

                        case TMTIMERSTATE_INVALID:
                            Assert(idxTimer == 0);
                            break;

                        /* shouldn't get here! */
                        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                        case TMTIMERSTATE_DESTROY:
                        default:
                            AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                            break;
                    }

                    /* Check the handle value. */
                    if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
                    {
                        Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
                        Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
                    }
                }
                Assert(cFree == pQueue->cTimersFree);
# endif /* IN_RING3 */

                if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
                    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
            }
            PDMCritSectLeave(pVM, &pQueue->TimerLock);
        }
    }
}
#endif /* VBOX_STRICT */

#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}
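
/*
 * Worked example of the warp drive mapping above (the reverse of what
 * tmVirtualGetRaw applies): with u32VirtualWarpDrivePercentage = 200 the
 * virtual clock runs at twice GIP speed, so a miss that is 1 000 000 000
 * virtual ns away is intended to map to roughly
 * ASMMultU64ByU32DivByU32(1000000000, 100, 200) = 500 000 000 ns of GIP time.
 */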


/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pVCpuDst    The cross context virtual CPU structure of the dedicated
 *                      timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter); NOREF(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}


/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    VMCPUID idCpu = pVM->tm.s.idTimerCpu;
    AssertReturn(idCpu < pVM->cCpus, 0);
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);

    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t  i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if we're in a catch-up: not only do
     * we have to adjust the 'now', we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (   !pVM->tm.s.fRunningQueues
                    && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (   !pVM->tm.s.fRunningQueues
            && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}
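
/*
 * A minimal sketch of how an execution loop might use the poll API (the
 * actual EM/HM loops are more involved than this):
 *
 * @code
 *  for (;;)
 *  {
 *      if (TMTimerPollBool(pVM, pVCpu))    // sets VMCPU_FF_TIMER when due
 *          break;                          // timers pending, go service them
 *      // ... execute guest code for a while ...
 *  }
 * @endcode
 */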

#endif /* VBOX_HIGH_RES_TIMERS_HACK */

/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 * @param   rcBusy      What to return in ring-0 and raw-mode context if the
 *                      lock is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section thru a ring-3 call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, rcBusy);
}


/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN_VOID(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    AssertReturnVoid(idxQueue == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
}


/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if it's the owner, @c false if not.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(pVM, &pVM->tm.s.VirtualSyncLock);
}
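
/*
 * Typical usage sketch for the clock lock around virtual sync timer work
 * (hTimer is assumed to be a timer created on the virtual sync clock):
 *
 * @code
 *  int rc = TMTimerLock(pVM, hTimer, VERR_IGNORED);
 *  if (RT_SUCCESS(rc))
 *  {
 *      // ... read and (re-)arm the timer consistently with the clock ...
 *      TMTimerUnlock(pVM, hTimer);
 *  }
 * @endcode
 */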


/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The new expire time.
 * @param   pQueue      Pointer to the shared timer queue data.
 * @param   idxQueue    The queue index.
 */
static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, uint32_t idxQueue)
{
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
    {
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue), pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    return VINF_SUCCESS;
}


/**
 * TMTimerSet for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   u64Expire       The expiration time.
 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
    PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
    TMTIMERSTATE const    enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   hTimer          Timer handle as returned by one of the create functions.
 * @param   u64Expire       New expire time.
 */
VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    STAM_COUNTER_INC(&pTimer->StatSetAbsolute);

    /* Treat virtual sync timers specially. */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pVM, pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

#if 1
    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        || (   enmState1 == TMTIMERSTATE_STOPPED
            && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
        if (RT_SUCCESS_NP(rc))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, idxQueue);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                PDMCritSectLeave(pVM, &pQueue->TimerLock);
                return VINF_SUCCESS;
            }
            PDMCritSectLeave(pVM, &pQueue->TimerLock);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(pTimer->idxPrev == UINT32_MAX);
                    Assert(pTimer->idxNext == UINT32_MAX);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
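
/*
 * A short usage sketch for (re-)arming a timer with an absolute expire time,
 * e.g. about one millisecond from now (hTimer is a hypothetical timer handle;
 * pVM as elsewhere in this file):
 *
 * @code
 *  uint64_t const uNow  = TMTimerGet(pVM, hTimer);        // current clock time in ticks
 *  uint64_t const cFreq = TMTimerGetFreq(pVM, hTimer);    // ticks per second for the clock
 *  int rc = TMTimerSet(pVM, hTimer, uNow + cFreq / 1000); // fire ~1 ms from now
 *  AssertRC(rc);
 * @endcode
 */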


/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM         The cross context VM structure.
 * @param   enmClock    The clock to query.
 * @param   pu64Now     Optional pointer where to store the return time.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 * @param   pQueueCC        The context specific queue data (same as @a pQueue
 *                          for ring-3).
 * @param   pQueue          The shared queue data.
 */
static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
                                            PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
    tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    return VINF_SUCCESS;
}
1527
1528
1529/**
1530 * TMTimerSetRelative for the virtual sync timer queue.
1531 *
1532 * This employs a greatly simplified state machine by always acquiring the
1533 * queue lock and bypassing the scheduling list.
1534 *
1535 * @returns VBox status code
1536 * @param pVM The cross context VM structure.
1537 * @param pTimer The timer to (re-)arm.
1538 * @param cTicksToNext Clock ticks until the next time expiration.
1539 * @param pu64Now Where to return the current time stamp used.
1540 * Optional.
1541 */
1542static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1543{
1544 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1545 VM_ASSERT_EMT(pVM);
1546 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1547 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1548 AssertRCReturn(rc, rc);
1549
1550 /* Calculate the expiration tick. */
1551 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1552 if (pu64Now)
1553 *pu64Now = u64Expire;
1554 u64Expire += cTicksToNext;
1555
1556 /* Update the timer. */
1557 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1558 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1559 TMTIMERSTATE const enmState = pTimer->enmState;
1560 switch (enmState)
1561 {
1562 case TMTIMERSTATE_EXPIRED_DELIVER:
1563 case TMTIMERSTATE_STOPPED:
1564 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1565 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1566 else
1567 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1568 pTimer->u64Expire = u64Expire;
1569 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1570 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1571 rc = VINF_SUCCESS;
1572 break;
1573
1574 case TMTIMERSTATE_ACTIVE:
1575 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1576 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1577 pTimer->u64Expire = u64Expire;
1578 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1579 rc = VINF_SUCCESS;
1580 break;
1581
1582 case TMTIMERSTATE_PENDING_RESCHEDULE:
1583 case TMTIMERSTATE_PENDING_STOP:
1584 case TMTIMERSTATE_PENDING_SCHEDULE:
1585 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1586 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1587 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1588 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1589 case TMTIMERSTATE_DESTROY:
1590 case TMTIMERSTATE_FREE:
1591 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1592 rc = VERR_TM_INVALID_STATE;
1593 break;
1594
1595 default:
1596 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1597 rc = VERR_TM_UNKNOWN_STATE;
1598 break;
1599 }
1600
1601 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1602 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1603 return rc;
1604}
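
/*
 * Illustrative sketch, assuming the current trunk device timer callback shape
 * (FNTMTIMERDEV) and the PDMDevHlpTimerSetMicro helper; 'devFooTimer' is a
 * hypothetical name.  A periodic virtual-sync timer can simply re-arm itself
 * from its callback: for virtual-sync timers the call lands in
 * tmTimerVirtualSyncSetRelative above, which handles the EXPIRED_DELIVER,
 * STOPPED and ACTIVE states under the VirtualSyncLock.
 *
 *     static DECLCALLBACK(void) devFooTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
 *     {
 *         RT_NOREF(pvUser);
 *         int rc = PDMDevHlpTimerSetMicro(pDevIns, hTimer, 100); // fire again in 100 us
 *         AssertRC(rc);
 *     }
 */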
1605
1606
1607/**
1608 * Arm a timer with an expire time relative to the current time.
1609 *
1610 * @returns VBox status code.
1611 * @param pVM The cross context VM structure.
1612 * @param pTimer The timer to arm.
1613 * @param cTicksToNext Clock ticks until the next time expiration.
1614 * @param pu64Now Where to return the current time stamp used.
1615 * Optional.
1616 * @param pQueueCC The context specific queue data (same as @a pQueue
1617 * for ring-3).
1618 * @param pQueue The shared queue data.
1619 */
1620static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1621 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1622{
1623 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1624
1625 /* Treat virtual sync timers specially. */
1626 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1627 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1628
1629 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1630 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1631
1632 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1633
1634#ifdef VBOX_WITH_STATISTICS
1635 /*
1636 * Gather optimization info.
1637 */
1638 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1639 TMTIMERSTATE enmOrgState = pTimer->enmState;
1640 switch (enmOrgState)
1641 {
1642 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1643 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1644 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1645 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1646 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1647 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1648 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1649 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1650 }
1651#endif
1652
1653 /*
1654 * Try to take the TM lock and optimize the common cases.
1655 *
1656 * With the TM lock we can safely make optimizations like immediate
1657 * scheduling and we can also be 100% sure that we're not racing the
1658 * running of the timer queues. As an additional restriction we require the
1659 * timer to have a critical section associated with it, so we can be 100%
1660 * sure there are no concurrent operations on the timer. (The latter is no
1661 * longer strictly necessary, as that isn't supported for any timers, critsect or not.)
1662 *
1663 * Note! Lock ordering doesn't apply when we only _try_ to
1664 * get the innermost locks.
1665 */
1666 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1667#if 1
1668 if ( fOwnTMLock
1669 && pTimer->pCritSect)
1670 {
1671 TMTIMERSTATE enmState = pTimer->enmState;
1672 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1673 || enmState == TMTIMERSTATE_STOPPED)
1674 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1675 {
1676 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1677 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1678 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1679 return VINF_SUCCESS;
1680 }
1681
1682 /* Optimize other states when it becomes necessary. */
1683 }
1684#endif
1685
1686 /*
1687 * Unoptimized path.
1688 */
1689 int rc;
1690 for (int cRetries = 1000; ; cRetries--)
1691 {
1692 /*
1693 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1694 */
1695 TMTIMERSTATE enmState = pTimer->enmState;
1696 switch (enmState)
1697 {
1698 case TMTIMERSTATE_STOPPED:
1699 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1700 {
1701 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1702 * Figure a safe way of activating this timer while the queue is
1703 * being run.
1704 * (99.9% sure that the assertion is caused by DevAPIC.cpp
1705 * re-starting the timer in response to an initial_count write.) */
1706 }
1707 RT_FALL_THRU();
1708 case TMTIMERSTATE_EXPIRED_DELIVER:
1709 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1710 {
1711 Assert(pTimer->idxPrev == UINT32_MAX);
1712 Assert(pTimer->idxNext == UINT32_MAX);
1713 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1714 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1715 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1716 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1717 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1718 rc = VINF_SUCCESS;
1719 break;
1720 }
1721 rc = VERR_TRY_AGAIN;
1722 break;
1723
1724 case TMTIMERSTATE_PENDING_SCHEDULE:
1725 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1726 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1727 {
1728 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1729 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1730 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1731 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1732 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1733 rc = VINF_SUCCESS;
1734 break;
1735 }
1736 rc = VERR_TRY_AGAIN;
1737 break;
1738
1739
1740 case TMTIMERSTATE_ACTIVE:
1741 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1742 {
1743 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1744 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1745 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1746 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1747 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1748 rc = VINF_SUCCESS;
1749 break;
1750 }
1751 rc = VERR_TRY_AGAIN;
1752 break;
1753
1754 case TMTIMERSTATE_PENDING_RESCHEDULE:
1755 case TMTIMERSTATE_PENDING_STOP:
1756 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1757 {
1758 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1759 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1760 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1761 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1762 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1763 rc = VINF_SUCCESS;
1764 break;
1765 }
1766 rc = VERR_TRY_AGAIN;
1767 break;
1768
1769
1770 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1771 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1772 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1773#ifdef IN_RING3
1774 if (!RTThreadYield())
1775 RTThreadSleep(1);
1776#else
1777/** @todo call host context and yield after a couple of iterations */
1778#endif
1779 rc = VERR_TRY_AGAIN;
1780 break;
1781
1782 /*
1783 * Invalid states.
1784 */
1785 case TMTIMERSTATE_DESTROY:
1786 case TMTIMERSTATE_FREE:
1787 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1788 rc = VERR_TM_INVALID_STATE;
1789 break;
1790
1791 default:
1792 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1793 rc = VERR_TM_UNKNOWN_STATE;
1794 break;
1795 }
1796
1797 /* switch + loop is tedious to break out of. */
1798 if (rc == VINF_SUCCESS)
1799 break;
1800
1801 if (rc != VERR_TRY_AGAIN)
1802 {
1803 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1804 break;
1805 }
1806 if (cRetries <= 0)
1807 {
1808 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1809 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1810 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1811 break;
1812 }
1813
1814 /*
1815 * Retry to gain locks.
1816 */
1817 if (!fOwnTMLock)
1818 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1819
1820 } /* for (;;) */
1821
1822 /*
1823 * Clean up and return.
1824 */
1825 if (fOwnTMLock)
1826 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1827
1828 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1829 return rc;
1830}
1831
1832
1833/**
1834 * Arm a timer with an expire time relative to the current time.
1835 *
1836 * @returns VBox status code.
1837 * @param pVM The cross context VM structure.
1838 * @param hTimer Timer handle as returned by one of the create functions.
1839 * @param cTicksToNext Clock ticks until the next time expiration.
1840 * @param pu64Now Where to return the current time stamp used.
1841 * Optional.
1842 */
1843VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1844{
1845 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1846 return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1847}
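
/*
 * Usage sketch ('hMyTimer' is a hypothetical handle from one of the
 * TMR3TimerCreate* functions): arm the timer 1 ms from now, in the timer's
 * own clock units, and capture the timestamp the expiry was based on.
 *
 *     uint64_t u64Now;
 *     int rc = TMTimerSetRelative(pVM, hMyTimer,
 *                                 TMTimerFromMilli(pVM, hMyTimer, 1),
 *                                 &u64Now);
 *     AssertRC(rc);
 */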
1848
1849
1850/**
1851 * Drops a hint about the frequency of the timer.
1852 *
1853 * This is used by TM and the VMM to calculate how often guest execution needs
1854 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1855 *
1856 * @returns VBox status code.
1857 * @param pVM The cross context VM structure.
1858 * @param hTimer Timer handle as returned by one of the create functions.
1859 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1860 *
1861 * @remarks We're using an integer hertz value here since anything above 1 Hz
1862 * is not going to be any trouble to satisfy scheduling-wise. The
1863 * range where the hint makes sense is >= 100 Hz.
1864 */
1865VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1866{
1867 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1868 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1869
1870 uint32_t const uHzOldHint = pTimer->uHzHint;
1871 pTimer->uHzHint = uHzHint;
1872
1873 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1874 if ( uHzHint > uMaxHzHint
1875 || uHzOldHint >= uMaxHzHint)
1876 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1877
1878 return VINF_SUCCESS;
1879}
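
/*
 * Usage sketch (hypothetical 'hMyTimer'): a device emulating a 1 kHz periodic
 * interrupt would pair its (re-)arming with a matching hint, and clear the
 * hint again when the guest disables the source (TMTimerStop also clears it).
 *
 *     TMTimerSetFrequencyHint(pVM, hMyTimer, 1000); // guest programmed 1 kHz
 *     ...
 *     TMTimerSetFrequencyHint(pVM, hMyTimer, 0);    // source disabled
 */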
1880
1881
1882/**
1883 * TMTimerStop for the virtual sync timer queue.
1884 *
1885 * This employs a greatly simplified state machine by always acquiring the
1886 * queue lock and bypassing the scheduling list.
1887 *
1888 * @returns VBox status code
1889 * @param pVM The cross context VM structure.
1890 * @param pTimer The timer handle.
1891 */
1892static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1893{
1894 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1895 VM_ASSERT_EMT(pVM);
1896 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1897 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1898 AssertRCReturn(rc, rc);
1899
1900 /* Reset the HZ hint. */
1901 uint32_t uOldHzHint = pTimer->uHzHint;
1902 if (uOldHzHint)
1903 {
1904 if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
1905 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
1906 pTimer->uHzHint = 0;
1907 }
1908
1909 /* Update the timer state. */
1910 TMTIMERSTATE const enmState = pTimer->enmState;
1911 switch (enmState)
1912 {
1913 case TMTIMERSTATE_ACTIVE:
1914 {
1915 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1916 tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
1917 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1918 rc = VINF_SUCCESS;
1919 break;
1920 }
1921
1922 case TMTIMERSTATE_EXPIRED_DELIVER:
1923 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1924 rc = VINF_SUCCESS;
1925 break;
1926
1927 case TMTIMERSTATE_STOPPED:
1928 rc = VINF_SUCCESS;
1929 break;
1930
1931 case TMTIMERSTATE_PENDING_RESCHEDULE:
1932 case TMTIMERSTATE_PENDING_STOP:
1933 case TMTIMERSTATE_PENDING_SCHEDULE:
1934 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1935 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1936 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1937 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1938 case TMTIMERSTATE_DESTROY:
1939 case TMTIMERSTATE_FREE:
1940 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1941 rc = VERR_TM_INVALID_STATE;
1942 break;
1943
1944 default:
1945 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1946 rc = VERR_TM_UNKNOWN_STATE;
1947 break;
1948 }
1949
1950 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1951 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1952 return rc;
1953}
1954
1955
1956/**
1957 * Stop the timer.
1958 * Use TMR3TimerArm() to "un-stop" the timer.
1959 *
1960 * @returns VBox status code.
1961 * @param pVM The cross context VM structure.
1962 * @param hTimer Timer handle as returned by one of the create functions.
1963 */
1964VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
1965{
1966 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1967 STAM_COUNTER_INC(&pTimer->StatStop);
1968
1969 /* Treat virtual sync timers specially. */
1970 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1971 return tmTimerVirtualSyncStop(pVM, pTimer);
1972
1973 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1974 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1975
1976 /*
1977 * Reset the HZ hint.
1978 */
1979 uint32_t const uOldHzHint = pTimer->uHzHint;
1980 if (uOldHzHint)
1981 {
1982 if (uOldHzHint >= pQueue->uMaxHzHint)
1983 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1984 pTimer->uHzHint = 0;
1985 }
1986
1987 /** @todo see if this function needs optimizing. */
1988 int cRetries = 1000;
1989 do
1990 {
1991 /*
1992 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1993 */
1994 TMTIMERSTATE enmState = pTimer->enmState;
1995 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1996 pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
1997 switch (enmState)
1998 {
1999 case TMTIMERSTATE_EXPIRED_DELIVER:
2000 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
2001 return VERR_INVALID_PARAMETER;
2002
2003 case TMTIMERSTATE_STOPPED:
2004 case TMTIMERSTATE_PENDING_STOP:
2005 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2006 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2007 return VINF_SUCCESS;
2008
2009 case TMTIMERSTATE_PENDING_SCHEDULE:
2010 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2011 {
2012 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2013 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2014 return VINF_SUCCESS;
2015 }
2016 break;
2017
2018 case TMTIMERSTATE_PENDING_RESCHEDULE:
2019 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2020 {
2021 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2022 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2023 return VINF_SUCCESS;
2024 }
2025 break;
2026
2027 case TMTIMERSTATE_ACTIVE:
2028 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2029 {
2030 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2031 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2032 return VINF_SUCCESS;
2033 }
2034 break;
2035
2036 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2037 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2038 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2039#ifdef IN_RING3
2040 if (!RTThreadYield())
2041 RTThreadSleep(1);
2042#else
2043/** @todo call host and yield cpu after a while. */
2044#endif
2045 break;
2046
2047 /*
2048 * Invalid states.
2049 */
2050 case TMTIMERSTATE_DESTROY:
2051 case TMTIMERSTATE_FREE:
2052 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2053 return VERR_TM_INVALID_STATE;
2054 default:
2055 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2056 return VERR_TM_UNKNOWN_STATE;
2057 }
2058 } while (cRetries-- > 0);
2059
2060 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2061 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2062 return VERR_TM_TIMER_UNSTABLE_STATE;
2063}
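
/*
 * Usage sketch (hypothetical 'hMyTimer'): stopping is idempotent for already
 * stopped or pending-stop timers, but note that a timer in the
 * EXPIRED_DELIVER state yields VERR_INVALID_PARAMETER per the code above.
 *
 *     int rc = TMTimerStop(pVM, hMyTimer);
 *     AssertMsg(RT_SUCCESS(rc) || rc == VERR_INVALID_PARAMETER, ("rc=%Rrc\n", rc));
 */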
2064
2065
2066/**
2067 * Get the current clock time.
2068 * Handy for calculating the new expire time.
2069 *
2070 * @returns Current clock time.
2071 * @param pVM The cross context VM structure.
2072 * @param hTimer Timer handle as returned by one of the create functions.
2073 */
2074VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2075{
2076 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2077 STAM_COUNTER_INC(&pTimer->StatGet);
2078
2079 uint64_t u64;
2080 switch (pQueue->enmClock)
2081 {
2082 case TMCLOCK_VIRTUAL:
2083 u64 = TMVirtualGet(pVM);
2084 break;
2085 case TMCLOCK_VIRTUAL_SYNC:
2086 u64 = TMVirtualSyncGet(pVM);
2087 break;
2088 case TMCLOCK_REAL:
2089 u64 = TMRealGet(pVM);
2090 break;
2091 default:
2092 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2093 return UINT64_MAX;
2094 }
2095 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2096 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2097 return u64;
2098}
2099
2100
2101/**
2102 * Get the frequency of the timer clock.
2103 *
2104 * @returns Clock frequency (as Hz of course).
2105 * @param pVM The cross context VM structure.
2106 * @param hTimer Timer handle as returned by one of the create functions.
2107 */
2108VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2109{
2110 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2111 switch (pQueue->enmClock)
2112 {
2113 case TMCLOCK_VIRTUAL:
2114 case TMCLOCK_VIRTUAL_SYNC:
2115 return TMCLOCK_FREQ_VIRTUAL;
2116
2117 case TMCLOCK_REAL:
2118 return TMCLOCK_FREQ_REAL;
2119
2120 default:
2121 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2122 return 0;
2123 }
2124}
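
/*
 * Usage sketch (hypothetical 'hMyTimer'): the frequency makes it easy to arm
 * relative timeouts in clock-agnostic code.
 *
 *     uint64_t const cTicksPerSec = TMTimerGetFreq(pVM, hMyTimer); // 1e9 or 1e3
 *     int rc = TMTimerSetRelative(pVM, hMyTimer, cTicksPerSec / 2, NULL); // ~500 ms
 *     AssertRC(rc);
 */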
2125
2126
2127/**
2128 * Get the expire time of the timer.
2129 * Only valid for active timers.
2130 *
2131 * @returns Expire time of the timer.
2132 * @param pVM The cross context VM structure.
2133 * @param hTimer Timer handle as returned by one of the create functions.
2134 */
2135VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
2136{
2137 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2138 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2139 int cRetries = 1000;
2140 do
2141 {
2142 TMTIMERSTATE enmState = pTimer->enmState;
2143 switch (enmState)
2144 {
2145 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2146 case TMTIMERSTATE_EXPIRED_DELIVER:
2147 case TMTIMERSTATE_STOPPED:
2148 case TMTIMERSTATE_PENDING_STOP:
2149 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2150 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2151 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2152 return UINT64_MAX;
2153
2154 case TMTIMERSTATE_ACTIVE:
2155 case TMTIMERSTATE_PENDING_RESCHEDULE:
2156 case TMTIMERSTATE_PENDING_SCHEDULE:
2157 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2158 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2159 return pTimer->u64Expire;
2160
2161 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2162 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2163#ifdef IN_RING3
2164 if (!RTThreadYield())
2165 RTThreadSleep(1);
2166#endif
2167 break;
2168
2169 /*
2170 * Invalid states.
2171 */
2172 case TMTIMERSTATE_DESTROY:
2173 case TMTIMERSTATE_FREE:
2174 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2175 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2176 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2177 return UINT64_MAX;
2178 default:
2179 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2180 return UINT64_MAX;
2181 }
2182 } while (cRetries-- > 0);
2183
2184 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2185 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2186 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2187 return UINT64_MAX;
2188}
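
/*
 * Usage sketch (hypothetical 'hMyTimer'): computing the remaining ticks until
 * expiry, guarding against the UINT64_MAX "not active" return.  (The expire
 * time may already be in the past for an about-to-fire timer.)
 *
 *     uint64_t const u64Expire = TMTimerGetExpire(pVM, hMyTimer);
 *     if (u64Expire != UINT64_MAX)
 *     {
 *         uint64_t const cTicksLeft = u64Expire - TMTimerGet(pVM, hMyTimer);
 *         ...
 *     }
 */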
2189
2190
2191/**
2192 * Checks if a timer is active or not.
2193 *
2194 * @returns True if active.
2195 * @returns False if not active.
2196 * @param pVM The cross context VM structure.
2197 * @param hTimer Timer handle as returned by one of the create functions.
2198 */
2199VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2200{
2201 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2202 TMTIMERSTATE enmState = pTimer->enmState;
2203 switch (enmState)
2204 {
2205 case TMTIMERSTATE_STOPPED:
2206 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2207 case TMTIMERSTATE_EXPIRED_DELIVER:
2208 case TMTIMERSTATE_PENDING_STOP:
2209 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2210 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2211 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2212 return false;
2213
2214 case TMTIMERSTATE_ACTIVE:
2215 case TMTIMERSTATE_PENDING_RESCHEDULE:
2216 case TMTIMERSTATE_PENDING_SCHEDULE:
2217 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2218 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2219 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2220 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2221 return true;
2222
2223 /*
2224 * Invalid states.
2225 */
2226 case TMTIMERSTATE_DESTROY:
2227 case TMTIMERSTATE_FREE:
2228 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2229 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2230 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2231 return false;
2232 default:
2233 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2234 return false;
2235 }
2236}
2237
2238
2239/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2240
2241
2242/**
2243 * Arm a timer with a (new) expire time relative to current time.
2244 *
2245 * @returns VBox status code.
2246 * @param pVM The cross context VM structure.
2247 * @param hTimer Timer handle as returned by one of the create functions.
2248 * @param cMilliesToNext Number of milliseconds to the next tick.
2249 */
2250VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2251{
2252 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2253 switch (pQueue->enmClock)
2254 {
2255 case TMCLOCK_VIRTUAL:
2256 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2257 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2258
2259 case TMCLOCK_VIRTUAL_SYNC:
2260 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2261 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2262
2263 case TMCLOCK_REAL:
2264 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2265 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL, pQueueCC, pQueue);
2266
2267 default:
2268 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2269 return VERR_TM_TIMER_BAD_CLOCK;
2270 }
2271}
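
/*
 * Worked example for the conversion above: TMTimerSetMillies(pVM, hMyTimer, 10)
 * on a TMCLOCK_VIRTUAL or TMCLOCK_VIRTUAL_SYNC timer arms it 10 * 1000000 =
 * 10000000 ticks ahead (1 GHz clock), while on a TMCLOCK_REAL timer it arms
 * it 10 ticks ahead (1 kHz clock).  ('hMyTimer' is hypothetical.)
 */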
2272
2273
2274/**
2275 * Arm a timer with a (new) expire time relative to current time.
2276 *
2277 * @returns VBox status code.
2278 * @param pVM The cross context VM structure.
2279 * @param hTimer Timer handle as returned by one of the create functions.
2280 * @param cMicrosToNext Number of microseconds to the next tick.
2281 */
2282VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2283{
2284 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2285 switch (pQueue->enmClock)
2286 {
2287 case TMCLOCK_VIRTUAL:
2288 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2289 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2290
2291 case TMCLOCK_VIRTUAL_SYNC:
2292 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2293 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2294
2295 case TMCLOCK_REAL:
2296 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2297 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL, pQueueCC, pQueue);
2298
2299 default:
2300 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2301 return VERR_TM_TIMER_BAD_CLOCK;
2302 }
2303}
2304
2305
2306/**
2307 * Arm a timer with a (new) expire time relative to current time.
2308 *
2309 * @returns VBox status code.
2310 * @param pVM The cross context VM structure.
2311 * @param hTimer Timer handle as returned by one of the create functions.
2312 * @param cNanosToNext Number of nanoseconds to the next tick.
2313 */
2314VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2315{
2316 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2317 switch (pQueue->enmClock)
2318 {
2319 case TMCLOCK_VIRTUAL:
2320 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2321 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2322
2323 case TMCLOCK_VIRTUAL_SYNC:
2324 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2325 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2326
2327 case TMCLOCK_REAL:
2328 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2329 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL, pQueueCC, pQueue);
2330
2331 default:
2332 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2333 return VERR_TM_TIMER_BAD_CLOCK;
2334 }
2335}
2336
2337
2338/**
2339 * Get the current clock time as nanoseconds.
2340 *
2341 * @returns The timer clock as nanoseconds.
2342 * @param pVM The cross context VM structure.
2343 * @param hTimer Timer handle as returned by one of the create functions.
2344 */
2345VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2346{
2347 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2348}
2349
2350
2351/**
2352 * Get the current clock time as microseconds.
2353 *
2354 * @returns The timer clock as microseconds.
2355 * @param pVM The cross context VM structure.
2356 * @param hTimer Timer handle as returned by one of the create functions.
2357 */
2358VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2359{
2360 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2361}
2362
2363
2364/**
2365 * Get the current clock time as milliseconds.
2366 *
2367 * @returns The timer clock as milliseconds.
2368 * @param pVM The cross context VM structure.
2369 * @param hTimer Timer handle as returned by one of the create functions.
2370 */
2371VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2372{
2373 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2374}
2375
2376
2377/**
2378 * Converts the specified timer clock time to nanoseconds.
2379 *
2380 * @returns nanoseconds.
2381 * @param pVM The cross context VM structure.
2382 * @param hTimer Timer handle as returned by one of the create functions.
2383 * @param cTicks The clock ticks.
2384 * @remark There could be rounding errors here. We just do a simple integer divide
2385 * without any adjustments.
2386 */
2387VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2388{
2389 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2390 switch (pQueue->enmClock)
2391 {
2392 case TMCLOCK_VIRTUAL:
2393 case TMCLOCK_VIRTUAL_SYNC:
2394 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2395 return cTicks;
2396
2397 case TMCLOCK_REAL:
2398 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2399 return cTicks * 1000000;
2400
2401 default:
2402 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2403 return 0;
2404 }
2405}
2406
2407
2408/**
2409 * Converts the specified timer clock time to microseconds.
2410 *
2411 * @returns microseconds.
2412 * @param pVM The cross context VM structure.
2413 * @param hTimer Timer handle as returned by one of the create functions.
2414 * @param cTicks The clock ticks.
2415 * @remark There could be rounding errors here. We just do a simple integer divide
2416 * without any adjustments.
2417 */
2418VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2419{
2420 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2421 switch (pQueue->enmClock)
2422 {
2423 case TMCLOCK_VIRTUAL:
2424 case TMCLOCK_VIRTUAL_SYNC:
2425 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2426 return cTicks / 1000;
2427
2428 case TMCLOCK_REAL:
2429 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2430 return cTicks * 1000;
2431
2432 default:
2433 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2434 return 0;
2435 }
2436}
2437
2438
2439/**
2440 * Converts the specified timer clock time to milliseconds.
2441 *
2442 * @returns milliseconds.
2443 * @param pVM The cross context VM structure.
2444 * @param hTimer Timer handle as returned by one of the create functions.
2445 * @param cTicks The clock ticks.
2446 * @remark There could be rounding errors here. We just do a simple integer divide
2447 * without any adjustments.
2448 */
2449VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2450{
2451 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2452 switch (pQueue->enmClock)
2453 {
2454 case TMCLOCK_VIRTUAL:
2455 case TMCLOCK_VIRTUAL_SYNC:
2456 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2457 return cTicks / 1000000;
2458
2459 case TMCLOCK_REAL:
2460 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2461 return cTicks;
2462
2463 default:
2464 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2465 return 0;
2466 }
2467}
2468
2469
2470/**
2471 * Converts the specified nanosecond timestamp to timer clock ticks.
2472 *
2473 * @returns timer clock ticks.
2474 * @param pVM The cross context VM structure.
2475 * @param hTimer Timer handle as returned by one of the create functions.
2476 * @param cNanoSecs The nanosecond value to convert.
2477 * @remark There could be rounding and overflow errors here.
2478 */
2479VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2480{
2481 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2482 switch (pQueue->enmClock)
2483 {
2484 case TMCLOCK_VIRTUAL:
2485 case TMCLOCK_VIRTUAL_SYNC:
2486 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2487 return cNanoSecs;
2488
2489 case TMCLOCK_REAL:
2490 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2491 return cNanoSecs / 1000000;
2492
2493 default:
2494 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2495 return 0;
2496 }
2497}
2498
2499
2500/**
2501 * Converts the specified microsecond timestamp to timer clock ticks.
2502 *
2503 * @returns timer clock ticks.
2504 * @param pVM The cross context VM structure.
2505 * @param hTimer Timer handle as returned by one of the create functions.
2506 * @param cMicroSecs The microsecond value to convert.
2507 * @remark There could be rounding and overflow errors here.
2508 */
2509VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2510{
2511 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2512 switch (pQueue->enmClock)
2513 {
2514 case TMCLOCK_VIRTUAL:
2515 case TMCLOCK_VIRTUAL_SYNC:
2516 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2517 return cMicroSecs * 1000;
2518
2519 case TMCLOCK_REAL:
2520 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2521 return cMicroSecs / 1000;
2522
2523 default:
2524 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2525 return 0;
2526 }
2527}
2528
2529
2530/**
2531 * Converts the specified millisecond timestamp to timer clock ticks.
2532 *
2533 * @returns timer clock ticks.
2534 * @param pVM The cross context VM structure.
2535 * @param hTimer Timer handle as returned by one of the create functions.
2536 * @param cMilliSecs The millisecond value to convert.
2537 * @remark There could be rounding and overflow errors here.
2538 */
2539VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2540{
2541 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2542 switch (pQueue->enmClock)
2543 {
2544 case TMCLOCK_VIRTUAL:
2545 case TMCLOCK_VIRTUAL_SYNC:
2546 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2547 return cMilliSecs * 1000000;
2548
2549 case TMCLOCK_REAL:
2550 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2551 return cMilliSecs;
2552
2553 default:
2554 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2555 return 0;
2556 }
2557}
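
/*
 * Note on the From* conversions above: they truncate.  For a hypothetical
 * TMCLOCK_REAL timer, TMTimerFromMicro(pVM, hMyTimer, 1500) yields 1 tick
 * (1 ms, dropping 500 us) and TMTimerFromNano(pVM, hMyTimer, 999999) yields
 * 0 ticks, so sub-resolution values can collapse to "now".
 */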
2558
2559
2560/**
2561 * Convert state to string.
2562 *
2563 * @returns Read-only state name.
2564 * @param enmState State.
2565 */
2566const char *tmTimerState(TMTIMERSTATE enmState)
2567{
2568 switch (enmState)
2569 {
2570#define CASE(num, state) \
2571 case TMTIMERSTATE_##state: \
2572 AssertCompile(TMTIMERSTATE_##state == (num)); \
2573 return #num "-" #state
2574 CASE( 0,INVALID);
2575 CASE( 1,STOPPED);
2576 CASE( 2,ACTIVE);
2577 CASE( 3,EXPIRED_GET_UNLINK);
2578 CASE( 4,EXPIRED_DELIVER);
2579 CASE( 5,PENDING_STOP);
2580 CASE( 6,PENDING_STOP_SCHEDULE);
2581 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2582 CASE( 8,PENDING_SCHEDULE);
2583 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2584 CASE(10,PENDING_RESCHEDULE);
2585 CASE(11,DESTROY);
2586 CASE(12,FREE);
2587 default:
2588 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2589 return "Invalid state!";
2590#undef CASE
2591 }
2592}
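
/*
 * Example: tmTimerState(TMTIMERSTATE_ACTIVE) returns "2-ACTIVE", the style of
 * state string seen in the Log2 output above.
 */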
2593
2594
2595#if defined(IN_RING0) || defined(IN_RING3)
2596/**
2597 * Copies over old timers and initializes newly allocated ones.
2598 *
2599 * Helper for TMR0TimerQueueGrow and tmR3TimerQueueGrow.
2600 *
2601 * @param paTimers The new timer allocation.
2602 * @param paOldTimers The old timers.
2603 * @param cNewTimers Number of new timers.
2604 * @param cOldTimers Number of old timers.
2605 */
2606void tmHCTimerQueueGrowInit(PTMTIMER paTimers, TMTIMER const *paOldTimers, uint32_t cNewTimers, uint32_t cOldTimers)
2607{
2608 Assert(cOldTimers < cNewTimers);
2609
2610 /*
2611 * Copy over the old info and initialize the new handles.
2612 */
2613 if (cOldTimers > 0)
2614 memcpy(paTimers, paOldTimers, sizeof(TMTIMER) * cOldTimers);
2615
2616 size_t i = cNewTimers;
2617 while (i-- > cOldTimers)
2618 {
2619 paTimers[i].u64Expire = UINT64_MAX;
2620 paTimers[i].enmType = TMTIMERTYPE_INVALID;
2621 paTimers[i].enmState = TMTIMERSTATE_FREE;
2622 paTimers[i].idxScheduleNext = UINT32_MAX;
2623 paTimers[i].idxNext = UINT32_MAX;
2624 paTimers[i].idxPrev = UINT32_MAX;
2625 paTimers[i].hSelf = NIL_TMTIMERHANDLE;
2626 }
2627
2628 /*
2629 * Mark the zero'th entry as allocated but invalid if we just allocated it.
2630 */
2631 if (cOldTimers == 0)
2632 {
2633 paTimers[0].enmState = TMTIMERSTATE_INVALID;
2634 paTimers[0].szName[0] = 'n';
2635 paTimers[0].szName[1] = 'i';
2636 paTimers[0].szName[2] = 'l';
2637 paTimers[0].szName[3] = '\0';
2638 }
2639}
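
/*
 * Caller sketch (hypothetical and simplified; the real ring-0/ring-3 growers
 * allocate with their own facilities): doubling a queue's timer array.
 *
 *     PTMTIMER paNew = (PTMTIMER)RTMemAllocZ(sizeof(TMTIMER) * cNewTimers);
 *     if (paNew)
 *         tmHCTimerQueueGrowInit(paNew, paOldTimers, cNewTimers, cOldTimers);
 */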
2640#endif /* IN_RING0 || IN_RING3 */
2641
2642
2643/**
2644 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
2645 *
2646 * @returns The highest frequency. 0 if no timers care.
2647 * @param pVM The cross context VM structure.
2648 * @param uOldMaxHzHint The old global hint.
2649 */
2650DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
2651{
2652    /* Set two bits, though not entirely sure both are needed: it should force
2653       other callers thru the slow path while we're recalculating, and help us
2654       detect changes in the meantime. */
2655 AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);
2656
2657 /*
2658 * The "right" highest frequency value isn't so important that we'll block
2659 * waiting on the timer semaphores.
2660 */
2661 uint32_t uMaxHzHint = 0;
2662 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
2663 {
2664 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
2665
2666 /* Get the max Hz hint for the queue. */
2667 uint32_t uMaxHzHintQueue;
2668 if ( !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2669 || RT_FAILURE_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock)))
2670 uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
2671 else
2672 {
2673 /* Is it still necessary to do updating? */
2674 if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2675 {
2676 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */
2677
2678 PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
2679 uMaxHzHintQueue = 0;
2680 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
2681 pCur;
2682 pCur = tmTimerGetNext(pQueueCC, pCur))
2683 {
2684 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2685 if (uHzHint > uMaxHzHintQueue)
2686 {
2687 TMTIMERSTATE enmState = pCur->enmState;
2688 switch (enmState)
2689 {
2690 case TMTIMERSTATE_ACTIVE:
2691 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2692 case TMTIMERSTATE_EXPIRED_DELIVER:
2693 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2694 case TMTIMERSTATE_PENDING_SCHEDULE:
2695 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2696 case TMTIMERSTATE_PENDING_RESCHEDULE:
2697 uMaxHzHintQueue = uHzHint;
2698 break;
2699
2700 case TMTIMERSTATE_STOPPED:
2701 case TMTIMERSTATE_PENDING_STOP:
2702 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2703 case TMTIMERSTATE_DESTROY:
2704 case TMTIMERSTATE_FREE:
2705 case TMTIMERSTATE_INVALID:
2706 break;
2707 /* no default, want gcc warnings when adding more states. */
2708 }
2709 }
2710 }
2711
2712                /* Write the new Hz hint for the queue and clear the other update flag. */
2713 ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
2714 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
2715 }
2716 else
2717 uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);
2718
2719 PDMCritSectLeave(pVM, &pQueue->TimerLock);
2720 }
2721
2722 /* Update the global max Hz hint. */
2723 if (uMaxHzHint < uMaxHzHintQueue)
2724 uMaxHzHint = uMaxHzHintQueue;
2725 }
2726
2727 /*
2728     * Update the frequency hint if there are no pending frequency changes and we didn't race anyone thru here.
2729 */
2730 uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
2731 if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2732 Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
2733 else
2734 for (uint32_t iTry = 1;; iTry++)
2735 {
2736 if (RT_LO_U32(u64Actual) != 0)
2737 Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
2738 else if (iTry >= 4)
2739 Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
2740 else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2741 Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
2742 else
2743 continue;
2744 break;
2745 }
2746 return uMaxHzHint;
2747}
2748
2749
2750/**
2751 * Gets the highest frequency hint for all the important timers.
2752 *
2753 * @returns The highest frequency. 0 if no timers care.
2754 * @param pVM The cross context VM structure.
2755 */
2756DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2757{
2758 /*
2759 * Query the value, recalculate it if necessary.
2760 */
2761 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2762    if (RT_LO_U32(u64Combined) == 0)
2763        return RT_HI_U32(u64Combined); /* hopefully somewhat likely */
2764    return tmGetFrequencyHintSlow(pVM, RT_HI_U32(u64Combined));
2765}
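
/*
 * Layout illustration for HzHint.u64Combined as used above, assuming the
 * RT_MAKE_U64(Lo, Hi) convention: the low dword holds the per-queue update
 * flags and the high dword the cached maximum Hz hint.
 *
 *     uint64_t const u64 = RT_MAKE_U64(0, 1000); // no pending updates, 1000 Hz
 *     Assert(RT_LO_U32(u64) == 0 && RT_HI_U32(u64) == 1000);
 */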
2766
2767
2768/**
2769 * Calculates a host timer frequency that would be suitable for the current
2770 * timer load.
2771 *
2772 * This will take the highest timer frequency, adjust for catch-up and warp
2773 * driver, and finally add a little fudge factor. The caller (VMM) will use
2774 * the result to adjust the per-cpu preemption timer.
2775 *
2776 * @returns The highest frequency. 0 if no important timers around.
2777 * @param pVM The cross context VM structure.
2778 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2779 */
2780VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2781{
2782 uint32_t uHz = tmGetFrequencyHint(pVM);
2783
2784 /* Catch up, we have to be more aggressive than the % indicates at the
2785 beginning of the effort. */
2786 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2787 {
2788 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2789 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2790 {
2791 if (u32Pct <= 100)
2792 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2793 else if (u32Pct <= 200)
2794 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2795 else if (u32Pct <= 400)
2796 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2797 uHz *= u32Pct + 100;
2798 uHz /= 100;
2799 }
2800 }
2801
2802 /* Warp drive. */
2803 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2804 {
2805 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2806 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2807 {
2808 uHz *= u32Pct;
2809 uHz /= 100;
2810 }
2811 }
2812
2813 /* Fudge factor. */
2814 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2815 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2816 else
2817 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2818 uHz /= 100;
2819
2820 /* Make sure it isn't too high. */
2821 if (uHz > pVM->tm.s.cHostHzMax)
2822 uHz = pVM->tm.s.cHostHzMax;
2823
2824 return uHz;
2825}
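
/*
 * Worked example with hypothetical values: a 1000 Hz hint during a 50%
 * catch-up with cPctHostHzFudgeFactorCatchUp100 = 300 gives
 * u32Pct = 50 * 300 / 100 = 150, so uHz = 1000 * (150 + 100) / 100 = 2500 Hz.
 * A 110% timer-CPU fudge factor then yields 2750 Hz, subject to the
 * cHostHzMax clamp.
 */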
2826
2827
2828/**
2829 * Whether the guest virtual clock is ticking.
2830 *
2831 * @returns true if ticking, false otherwise.
2832 * @param pVM The cross context VM structure.
2833 */
2834VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2835{
2836 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2837}
2838