VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@93762

Last change on this file since 93762 was 93717, checked in by vboxsync, 3 years ago

VMM/TM: Internal timers never have a critical section set, so tmRZTimerGetCritSect doesn't need to do anything special and can just return NULL. bugref:10093 bugref:9943

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 106.6 KB
1/* $Id: TMAll.cpp 93717 2022-02-14 10:55:27Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#ifdef DEBUG_bird
24# define DBGFTRACE_DISABLED /* annoying */
25#endif
26#include <VBox/vmm/tm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/dbgftrace.h>
29#ifdef IN_RING3
30#endif
31#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
32#include "TMInternal.h"
33#include <VBox/vmm/vmcc.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-math.h>
43#include <iprt/string.h>
44#ifdef IN_RING3
45# include <iprt/thread.h>
46#endif
47
48#include "TMInline.h"
49
50
51/*********************************************************************************************************************************
52* Defined Constants And Macros *
53*********************************************************************************************************************************/
54#ifdef VBOX_STRICT
55/** @def TMTIMER_GET_CRITSECT
56 * Helper for safely resolving the critical section for a timer belonging to a
57 * device instance.
58 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
59# ifdef IN_RING3
60# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) ((a_pTimer)->pCritSect)
61# else
62# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) tmRZTimerGetCritSect(a_pVM, a_pTimer)
63# endif
64#endif
65
66/** @def TMTIMER_ASSERT_CRITSECT
67 * Checks that the caller owns the critical section if one is associated with
68 * the timer. */
69#ifdef VBOX_STRICT
70# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
71 do { \
72 if ((a_pTimer)->pCritSect) \
73 { \
74 VMSTATE enmState; \
75 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
76 AssertMsg( pCritSect \
77 && ( PDMCritSectIsOwner((a_pVM), pCritSect) \
78 || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
79 || enmState == VMSTATE_RESETTING \
80 || enmState == VMSTATE_RESETTING_LS ),\
81 ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
82 (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
83 } \
84 } while (0)
85#else
86# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
87#endif
88
89/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 90 * Checks for lock order trouble between the virtual sync critsect and the
 91 * timer's critical section. The virtual sync critsect must always be entered before
92 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
93 * isn't any critical section associated with the timer or if the calling thread
94 * doesn't own it, ASSUMING of course that the thread using this macro is going
95 * to enter the virtual sync critical section anyway.
96 *
 97 * @remarks This is a slightly relaxed timer locking attitude compared to
98 * TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
99 * should know what it's doing if it's stopping or starting a timer
100 * without taking the device lock.
101 */
102#ifdef VBOX_STRICT
103# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
104 do { \
105 if ((pTimer)->pCritSect) \
106 { \
107 VMSTATE enmState; \
108 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
109 AssertMsg( pCritSect \
110 && ( !PDMCritSectIsOwner((pVM), pCritSect) \
111 || PDMCritSectIsOwner((pVM), &(pVM)->tm.s.VirtualSyncLock) \
112 || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
113 || enmState == VMSTATE_RESETTING \
114 || enmState == VMSTATE_RESETTING_LS ),\
115 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
116 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
117 } \
118 } while (0)
119#else
120# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
121#endif
122
123
124#if defined(VBOX_STRICT) && defined(IN_RING0)
125/**
126 * Helper for TMTIMER_GET_CRITSECT
127 * @todo This needs a redo!
128 */
129DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
130{
131 if (pTimer->enmType == TMTIMERTYPE_DEV)
132 {
133 RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
134 PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
135 ASMSetFlags(fSavedFlags);
136 struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
137 if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
138 return pDevInsR0->pCritSectRoR0;
139 uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
140 if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
141 return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
142 }
143 RT_NOREF(pVM);
144 Assert(pTimer->pCritSect == NULL);
145 return NULL;
146}
147#endif /* VBOX_STRICT && IN_RING0 */
148
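The ring-0 helper above resolves a ring-3 critsect pointer by rebasing it: it takes the byte offset of the pointer within the ring-3 instance data and applies the same offset to the ring-0 mapping of that block. The generic shape of the trick, as a sketch with illustrative names (not a VBox API):

/*
 * Rebase a pointer from another mapping of a shared block into ours.
 * Illustrative sketch only.
 */
static void *rebasePtrSketch(void *pvInOther, void *pvOtherBase, size_t cbBlock, void *pvMyBase)
{
    uintptr_t const off = (uintptr_t)pvInOther - (uintptr_t)pvOtherBase;
    if (off < cbBlock)                      /* does the pointer really live inside the block? */
        return (uint8_t *)pvMyBase + off;   /* same offset, our mapping */
    return NULL;
}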
149
150/**
151 * Notification that execution is about to start.
152 *
153 * This call must always be paired with a TMNotifyEndOfExecution call.
154 *
155 * The function may, depending on the configuration, resume the TSC and future
156 * clocks that only tick when we're executing guest code.
157 *
158 * @param pVM The cross context VM structure.
159 * @param pVCpu The cross context virtual CPU structure.
160 */
161VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
162{
163#ifndef VBOX_WITHOUT_NS_ACCOUNTING
164 pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
165 pVCpu->tm.s.fExecuting = true;
166#endif
167 if (pVM->tm.s.fTSCTiedToExecution)
168 tmCpuTickResume(pVM, pVCpu);
169}
170
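The start/end notifications are meant to bracket every stint of guest execution. A hedged sketch of the pairing (the surrounding function is hypothetical; SUPReadTsc is the same call used above):

/* Illustrative: how a run loop would bracket guest execution. */
static void runGuestSliceSketch(PVMCC pVM, PVMCPUCC pVCpu)
{
    TMNotifyStartOfExecution(pVM, pVCpu);
    /* ... world switch, run guest code, world switch back ... */
    uint64_t const uTscExit = SUPReadTsc();         /* TSC at the exit point */
    TMNotifyEndOfExecution(pVM, pVCpu, uTscExit);
}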
171
172/**
173 * Notification that execution has ended.
174 *
175 * This call must always be paired with a TMNotifyStartOfExecution call.
176 *
177 * The function may, depending on the configuration, suspend the TSC and future
178 * clocks that only tick when we're executing guest code.
179 *
180 * @param pVM The cross context VM structure.
181 * @param pVCpu The cross context virtual CPU structure.
182 * @param uTsc TSC value when exiting guest context.
183 */
184VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
185{
186 if (pVM->tm.s.fTSCTiedToExecution)
187 tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */
188
189#ifndef VBOX_WITHOUT_NS_ACCOUNTING
190 /*
191 * Calculate the elapsed tick count and convert it to nanoseconds.
192 */
193# ifdef IN_RING3
194 PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
195 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta(pGip);
196 uint64_t const uCpuHz = pGip ? SUPGetCpuHzFromGip(pGip) : pVM->tm.s.cTSCTicksPerSecondHost;
197# else
198 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
199 uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
200# endif
201 AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */
202
203 uint64_t cNsExecutingDelta;
204 if (uCpuHz < _4G)
205 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
206 else if (uCpuHz < 16*_1G64)
207 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
208 else
209 {
210 Assert(uCpuHz < 64 * _1G64);
211 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
212 }
213
214 /*
215 * Update the data.
216 *
217 * Note! We're not using strict memory ordering here to speed things up.
218 * The data is in a single cache line and this thread is the only
219 * one writing to that line, so I cannot quite imagine why we would
220 * need any strict ordering here.
221 */
222 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
223 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
224 ASMCompilerBarrier();
225 pVCpu->tm.s.fExecuting = false;
226 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
227 pVCpu->tm.s.cPeriodsExecuting++;
228 ASMCompilerBarrier();
229 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
230
231 /*
232 * Update stats.
233 */
234# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
235 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
236 if (cNsExecutingDelta < 5000)
237 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
238 else if (cNsExecutingDelta < 50000)
239 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
240 else
241 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
242# endif
243
244 /* The timer triggers occasional updating of the 'other' and total stats: */
245 if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
246 { /*likely*/ }
247 else
248 {
249 pVCpu->tm.s.fUpdateStats = false;
250
251 uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
252 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
253
254# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
255 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
256 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
257 if (cNsOtherNewDelta > 0)
258 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
259# endif
260
261 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
262 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
263 }
264
265#endif
266}
267
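The uTimesGen updates above are the writer half of a seqlock: the generation counter is made odd while the statistics fields are in flux and bumped to the next even value once they are consistent again. For illustration, a lock-free reader retries until it observes the same even generation on both sides of its reads. A minimal sketch, assuming a stand-in struct rather than the real TMCPU layout:

/*
 * Reader-side counterpart to the uTimesGen generation counter.
 * TMSNAPSHOT and its fields are illustrative stand-ins for the
 * pVCpu->tm.s members published by the writer above.
 */
typedef struct TMSNAPSHOT
{
    uint32_t volatile uGen;                 /* even = stable, odd = update in progress */
    uint64_t          cNsExecuting;
    uint64_t          cPeriodsExecuting;
} TMSNAPSHOT;

static void tmSnapshotReadSketch(TMSNAPSHOT *pShared, uint64_t *pcNs, uint64_t *pcPeriods)
{
    uint32_t uGen;
    do
    {
        /* Wait for an even (stable) generation. */
        while ((uGen = ASMAtomicReadU32(&pShared->uGen)) & 1)
        { /* busy wait while the writer is mid-update */ }
        ASMCompilerBarrier();
        *pcNs      = pShared->cNsExecuting;
        *pcPeriods = pShared->cPeriodsExecuting;
        ASMCompilerBarrier();
        /* Retry if a writer bumped the generation while we read. */
    } while (ASMAtomicReadU32(&pShared->uGen) != uGen);
}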
268
269/**
270 * Notification that the CPU is entering the halt state.
271 *
272 * This call must always be paired with a TMNotifyEndOfHalt call.
273 *
274 * The function may, depending on the configuration, resume the TSC and future
275 * clocks that only tick when we're halted.
276 *
277 * @param pVCpu The cross context virtual CPU structure.
278 */
279VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
280{
281 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
282
283#ifndef VBOX_WITHOUT_NS_ACCOUNTING
284 pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
285 pVCpu->tm.s.fHalting = true;
286#endif
287
288 if ( pVM->tm.s.fTSCTiedToExecution
289 && !pVM->tm.s.fTSCNotTiedToHalt)
290 tmCpuTickResume(pVM, pVCpu);
291}
292
293
294/**
295 * Notification that the CPU is leaving the halt state.
296 *
297 * This call must always be paired with a TMNotifyStartOfHalt call.
298 *
299 * The function may, depending on the configuration, suspend the TSC and future
300 * clocks that only tick when we're halted.
301 *
302 * @param pVCpu The cross context virtual CPU structure.
303 */
304VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
305{
306 PVM pVM = pVCpu->CTX_SUFF(pVM);
307
308 if ( pVM->tm.s.fTSCTiedToExecution
309 && !pVM->tm.s.fTSCNotTiedToHalt)
310 tmCpuTickPause(pVCpu);
311
312#ifndef VBOX_WITHOUT_NS_ACCOUNTING
313 uint64_t const u64NsTs = RTTimeNanoTS();
314 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.nsStartTotal;
315 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
316 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
317 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
318
319 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
320 ASMCompilerBarrier();
321 pVCpu->tm.s.fHalting = false;
322 pVCpu->tm.s.fUpdateStats = false;
323 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
324 pVCpu->tm.s.cPeriodsHalted++;
325 ASMCompilerBarrier();
326 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
327
328# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
329 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
330 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
331 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
332 if (cNsOtherNewDelta > 0)
333 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
334# endif
335 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
336 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
337#endif
338}
339
340
341/**
342 * Raise the timer force action flag and notify the dedicated timer EMT.
343 *
344 * @param pVM The cross context VM structure.
345 */
346DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
347{
348 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
349 AssertReturnVoid(idCpu < pVM->cCpus);
350 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
351
352 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
353 {
354 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
355 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
356#ifdef IN_RING3
357 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
358#endif
359 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
360 }
361}
362
363
364/**
365 * Schedule the queue which was changed.
366 */
367DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
368{
369 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
370 if (RT_SUCCESS_NP(rc))
371 {
372 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
373 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
374 tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
375#ifdef VBOX_STRICT
376 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
377#endif
378 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
379 PDMCritSectLeave(pVM, &pQueue->TimerLock);
380 return;
381 }
382
383 TMTIMERSTATE enmState = pTimer->enmState;
384 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
385 tmScheduleNotify(pVM);
386}
387
388
389/**
390 * Try to change the state to enmStateNew from enmStateOld.
391 * (Unlike tmTimerTryWithLink, this does not link the timer into the scheduling queue.)
392 *
393 * @returns Success indicator.
394 * @param pTimer Timer in question.
395 * @param enmStateNew The new timer state.
396 * @param enmStateOld The old timer state.
397 */
398DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
399{
400 /*
401 * Attempt state change.
402 */
403 bool fRc;
404 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
405 return fRc;
406}
407
408
409/**
410 * Links the timer onto the scheduling queue.
411 *
412 * @param pQueueCC The current context queue (same as @a pQueue for
413 * ring-3).
414 * @param pQueue The shared queue data.
415 * @param pTimer The timer.
416 *
417 * @todo FIXME: Look into potential race with the thread running the queues
418 * and stuff.
419 */
420DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
421{
422 Assert(pTimer->idxScheduleNext == UINT32_MAX);
423 const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0];
424 AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);
425
426 uint32_t idxHead;
427 do
428 {
429 idxHead = pQueue->idxSchedule;
430 Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
431 pTimer->idxScheduleNext = idxHead;
432 } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
433}
434
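tmTimerLinkSchedule above is a lock-free LIFO push (a Treiber stack keyed by array index instead of pointer), published with a compare-and-swap and retried on contention. Stripped of the TM types, the pattern looks like this (LIST, NODE and NIL_IDX are illustrative, not VBox types):

/* Index-based lock-free LIFO push -- the same CAS loop as above. */
#define NIL_IDX UINT32_MAX

typedef struct NODE { uint32_t idxNext; /* + payload */ } NODE;
typedef struct LIST { uint32_t volatile idxHead; NODE *paNodes; } LIST;

static void listPushSketch(LIST *pList, uint32_t idxNew)
{
    uint32_t idxHead;
    do
    {
        idxHead = pList->idxHead;                   /* sample the current head */
        pList->paNodes[idxNew].idxNext = idxHead;   /* link the new node in front of it */
        /* Publish; fails and retries if another producer won the race. */
    } while (!ASMAtomicCmpXchgU32(&pList->idxHead, idxNew, idxHead));
}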
435
436/**
437 * Try to change the state to enmStateNew from enmStateOld
438 * and link the timer into the scheduling queue.
439 *
440 * @returns Success indicator.
441 * @param pQueueCC The current context queue (same as @a pQueue for
442 * ring-3).
443 * @param pQueue The shared queue data.
444 * @param pTimer Timer in question.
445 * @param enmStateNew The new timer state.
446 * @param enmStateOld The old timer state.
447 */
448DECLINLINE(bool) tmTimerTryWithLink(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer,
449 TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
450{
451 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
452 {
453 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
454 return true;
455 }
456 return false;
457}
458
459
460/**
461 * Links a timer into the active list of a timer queue.
462 *
463 * @param pVM The cross context VM structure.
464 * @param pQueueCC The current context queue (same as @a pQueue for
465 * ring-3).
466 * @param pQueue The shared queue data.
467 * @param pTimer The timer.
468 * @param u64Expire The timer expiration time.
469 *
470 * @remarks Called while owning the relevant queue lock.
471 */
472DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
473 PTMTIMER pTimer, uint64_t u64Expire)
474{
475 Assert(pTimer->idxNext == UINT32_MAX);
476 Assert(pTimer->idxPrev == UINT32_MAX);
477 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
478 RT_NOREF(pVM);
479
480 PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
481 if (pCur)
482 {
483 for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
484 {
485 if (pCur->u64Expire > u64Expire)
486 {
487 const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
488 tmTimerSetNext(pQueueCC, pTimer, pCur);
489 tmTimerSetPrev(pQueueCC, pTimer, pPrev);
490 if (pPrev)
491 tmTimerSetNext(pQueueCC, pPrev, pTimer);
492 else
493 {
494 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
495 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
496 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
497 }
498 tmTimerSetPrev(pQueueCC, pCur, pTimer);
499 return;
500 }
501 if (pCur->idxNext == UINT32_MAX)
502 {
503 tmTimerSetNext(pQueueCC, pCur, pTimer);
504 tmTimerSetPrev(pQueueCC, pTimer, pCur);
505 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
506 return;
507 }
508 }
509 }
510 else
511 {
512 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
513 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
514 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
515 }
516}
517
518
519
520/**
521 * Schedules the given timer on the given queue.
522 *
523 * @param pVM The cross context VM structure.
524 * @param pQueueCC The current context queue (same as @a pQueue for
525 * ring-3).
526 * @param pQueue The shared queue data.
527 * @param pTimer The timer that needs scheduling.
528 *
529 * @remarks Called while owning the lock.
530 */
531DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
532{
533 Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
534 RT_NOREF(pVM);
535
536 /*
537 * Processing.
538 */
539 unsigned cRetries = 2;
540 do
541 {
542 TMTIMERSTATE enmState = pTimer->enmState;
543 switch (enmState)
544 {
545 /*
546 * Reschedule timer (in the active list).
547 */
548 case TMTIMERSTATE_PENDING_RESCHEDULE:
549 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
550 break; /* retry */
551 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
552 RT_FALL_THRU();
553
554 /*
555 * Schedule timer (insert into the active list).
556 */
557 case TMTIMERSTATE_PENDING_SCHEDULE:
558 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
559 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
560 break; /* retry */
561 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
562 return;
563
564 /*
565 * Stop the timer in active list.
566 */
567 case TMTIMERSTATE_PENDING_STOP:
568 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
569 break; /* retry */
570 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
571 RT_FALL_THRU();
572
573 /*
574 * Stop the timer (not on the active list).
575 */
576 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
577 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
578 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
579 break;
580 return;
581
582 /*
583 * The timer is pending destruction by TMR3TimerDestroy, our caller.
584 * Nothing to do here.
585 */
586 case TMTIMERSTATE_DESTROY:
587 break;
588
589 /*
590 * Postpone these until they get into the right state.
591 */
592 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
593 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
594 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
595 STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
596 return;
597
598 /*
599 * None of these can be in the schedule.
600 */
601 case TMTIMERSTATE_FREE:
602 case TMTIMERSTATE_STOPPED:
603 case TMTIMERSTATE_ACTIVE:
604 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
605 case TMTIMERSTATE_EXPIRED_DELIVER:
606 default:
607 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
608 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
609 return;
610 }
611 } while (cRetries-- > 0);
612}
613
614
615/**
616 * Schedules the specified timer queue.
617 *
618 * @param pVM The cross context VM structure.
619 * @param pQueueCC The current context queue (same as @a pQueue for
620 * ring-3) data of the queue to schedule.
621 * @param pQueue The shared queue data of the queue to schedule.
622 *
623 * @remarks Called while owning the lock.
624 */
625void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
626{
627 Assert(PDMCritSectIsOwner(pVM, &pQueue->TimerLock));
628
629 /*
630 * Dequeue the scheduling list and iterate it.
631 */
632 uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
633 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
634 while (idxNext != UINT32_MAX)
635 {
636 AssertBreak(idxNext < pQueueCC->cTimersAlloc);
637
638 /*
639 * Unlink the head timer and take down the index of the next one.
640 */
641 PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
642 idxNext = pTimer->idxScheduleNext;
643 pTimer->idxScheduleNext = UINT32_MAX;
644
645 /*
646 * Do the scheduling.
647 */
648 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
649 pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->szName));
650 tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
651 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
652 }
653 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
654}
655
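Note that the consumer above never pops entries one at a time: it claims the whole pending list with a single atomic exchange, so producers racing in afterwards simply start building a fresh list. Continuing the illustrative LIST/NODE sketch from tmTimerLinkSchedule:

/* Drain the lock-free LIFO in one shot, as tmTimerQueueSchedule does. */
static void listDrainSketch(LIST *pList, void (*pfnProcess)(NODE *pNode))
{
    uint32_t idxNext = ASMAtomicXchgU32(&pList->idxHead, NIL_IDX);  /* detach everything */
    while (idxNext != NIL_IDX)
    {
        NODE *pNode = &pList->paNodes[idxNext];
        idxNext = pNode->idxNext;
        pNode->idxNext = NIL_IDX;   /* unlink before processing */
        pfnProcess(pNode);
    }
}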
656
657#ifdef VBOX_STRICT
658/**
659 * Checks that the timer queues are sane.
660 *
661 * @param pVM The cross context VM structure.
662 * @param pszWhere Caller location clue.
663 */
664void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
665{
666 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
667 {
668 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
669 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
670 Assert(pQueue->enmClock == (TMCLOCK)idxQueue);
671
672 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
673 if (RT_SUCCESS(rc))
674 {
675 if ( pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
676 || PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
677 {
678 /* Check the linking of the active lists. */
679 PTMTIMER pPrev = NULL;
680 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
681 pCur;
682 pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
683 {
684 AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
685 TMTIMERSTATE enmState = pCur->enmState;
686 switch (enmState)
687 {
688 case TMTIMERSTATE_ACTIVE:
689 AssertMsg( pCur->idxScheduleNext == UINT32_MAX
690 || pCur->enmState != TMTIMERSTATE_ACTIVE,
691 ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
692 break;
693 case TMTIMERSTATE_PENDING_STOP:
694 case TMTIMERSTATE_PENDING_RESCHEDULE:
695 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
696 break;
697 default:
698 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
699 break;
700 }
701 }
702
703# ifdef IN_RING3
704 /* Go thru all the timers and check that the active ones all are in the active lists. */
705 uint32_t idxTimer = pQueue->cTimersAlloc;
706 uint32_t cFree = 0;
707 while (idxTimer-- > 0)
708 {
709 PTMTIMER const pTimer = &pQueue->paTimers[idxTimer];
710 TMTIMERSTATE const enmState = pTimer->enmState;
711 switch (enmState)
712 {
713 case TMTIMERSTATE_FREE:
714 cFree++;
715 break;
716
717 case TMTIMERSTATE_ACTIVE:
718 case TMTIMERSTATE_PENDING_STOP:
719 case TMTIMERSTATE_PENDING_RESCHEDULE:
720 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
721 {
722 PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
723 Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
724 while (pCurAct && pCurAct != pTimer)
725 pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
726 Assert(pCurAct == pTimer);
727 break;
728 }
729
730 case TMTIMERSTATE_PENDING_SCHEDULE:
731 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
732 case TMTIMERSTATE_STOPPED:
733 case TMTIMERSTATE_EXPIRED_DELIVER:
734 {
735 Assert(pTimer->idxNext == UINT32_MAX);
736 Assert(pTimer->idxPrev == UINT32_MAX);
737 for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
738 pCurAct;
739 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
740 {
741 Assert(pCurAct != pTimer);
742 Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
743 Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
744 }
745 break;
746 }
747
748 /* ignore */
749 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
750 break;
751
752 case TMTIMERSTATE_INVALID:
753 Assert(idxTimer == 0);
754 break;
755
756 /* shouldn't get here! */
757 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
758 case TMTIMERSTATE_DESTROY:
759 default:
760 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
761 break;
762 }
763
764 /* Check the handle value. */
765 if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
766 {
767 Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
768 Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
769 }
770 }
771 Assert(cFree == pQueue->cTimersFree);
772# endif /* IN_RING3 */
773
774 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
775 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
776 }
777 PDMCritSectLeave(pVM, &pQueue->TimerLock);
778 }
779 }
780}
781#endif /* VBOX_STRICT */
782
783#ifdef VBOX_HIGH_RES_TIMERS_HACK
784
785/**
786 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
787 * EMT is polling.
788 *
789 * @returns See tmTimerPollInternal.
790 * @param pVM The cross context VM structure.
791 * @param u64Now Current virtual clock timestamp.
792 * @param u64Delta The delta to the next event in ticks of the
793 * virtual clock.
794 * @param pu64Delta Where to return the delta.
795 */
796DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
797{
798 Assert(!(u64Delta & RT_BIT_64(63)));
799
800 if (!pVM->tm.s.fVirtualWarpDrive)
801 {
802 *pu64Delta = u64Delta;
803 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
804 }
805
806 /*
807 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
808 */
809 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
810 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
811
812 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
813 u64GipTime -= u64Start; /* the start is GIP time. */
814 if (u64GipTime >= u64Delta)
815 {
816 u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct); /* virtual -> GIP time */
817 u64Delta = ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
818 }
819 else
820 {
821 u64Delta -= u64GipTime;
822 u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
823 u64Delta += u64GipTime;
824 }
825 *pu64Delta = u64Delta;
826 u64GipTime += u64Start;
827 return u64GipTime;
828}
829
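To make the back-conversion concrete: with u32VirtualWarpDrivePercentage = 200 the guest's virtual clock runs at twice host speed, so a virtual-clock delta of 10 000 000 ns to the next timer corresponds to 10 000 000 * 100 / 200 = 5 000 000 ns of host (GIP) time. The worker above applies exactly that 100/u32Pct scaling, reversing the forward transformation done by tmVirtualGetRaw.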
830
831/**
832 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
833 * than the one dedicated to timer work.
834 *
835 * @returns See tmTimerPollInternal.
836 * @param pVM The cross context VM structure.
837 * @param u64Now Current virtual clock timestamp.
838 * @param pu64Delta Where to return the delta.
839 */
840DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
841{
842 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
843 *pu64Delta = s_u64OtherRet;
844 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
845}
846
847
848/**
849 * Worker for tmTimerPollInternal.
850 *
851 * @returns See tmTimerPollInternal.
852 * @param pVM The cross context VM structure.
853 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
854 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
855 * timer EMT.
856 * @param u64Now Current virtual clock timestamp.
857 * @param pu64Delta Where to return the delta.
858 * @param pCounter The statistics counter to update.
859 */
860DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
861 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
862{
863 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
864 if (pVCpuDst != pVCpu)
865 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
866 *pu64Delta = 0;
867 return 0;
868}
869
870
871/**
872 * Common worker for TMTimerPollGIP and TMTimerPoll.
873 *
874 * This function is called before FFs are checked in the inner execution EM loops.
875 *
876 * @returns The GIP timestamp of the next event.
877 * 0 if the next event has already expired.
878 *
879 * @param pVM The cross context VM structure.
880 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
881 * @param pu64Delta Where to store the delta.
882 *
883 * @thread The emulation thread.
884 *
885 * @remarks GIP uses ns ticks.
886 */
887DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
888{
889 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
890 AssertReturn(idCpu < pVM->cCpus, 0);
891 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
892
893 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
894 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
895
896 /*
897 * Return straight away if the timer FF is already set ...
898 */
899 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
900 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
901
902 /*
903 * ... or if timers are being run.
904 */
905 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
906 {
907 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
908 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
909 }
910
911 /*
912 * Check for TMCLOCK_VIRTUAL expiration.
913 */
914 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
915 const int64_t i64Delta1 = u64Expire1 - u64Now;
916 if (i64Delta1 <= 0)
917 {
918 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
919 {
920 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
921 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
922 }
923 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
924 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
925 }
926
927 /*
928 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
929 * This isn't quite as straightforward if we're in a catch-up: not only do
930 * we have to adjust 'now', but we have to adjust the delta as well.
931 */
932
933 /*
934 * Optimistic lockless approach.
935 */
936 uint64_t u64VirtualSyncNow;
937 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
938 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
939 {
940 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
941 {
942 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
943 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
944 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
945 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
946 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
947 {
948 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
949 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
950 if (i64Delta2 > 0)
951 {
952 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
953 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
954
955 if (pVCpu == pVCpuDst)
956 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
957 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
958 }
959
960 if ( !pVM->tm.s.fRunningQueues
961 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
962 {
963 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
964 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
965 }
966
967 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
968 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
969 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
970 }
971 }
972 }
973 else
974 {
975 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
976 LogFlow(("TMTimerPoll: stopped\n"));
977 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
978 }
979
980 /*
981 * Complicated lockless approach.
982 */
983 uint64_t off;
984 uint32_t u32Pct = 0;
985 bool fCatchUp;
986 int cOuterTries = 42;
987 for (;; cOuterTries--)
988 {
989 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
990 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
991 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
992 if (fCatchUp)
993 {
994 /* No changes allowed, try to get a consistent set of parameters. */
995 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
996 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
997 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
998 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
999 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
1000 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
1001 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
1002 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
1003 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
1004 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
1005 || cOuterTries <= 0)
1006 {
1007 uint64_t u64Delta = u64Now - u64Prev;
1008 if (RT_LIKELY(!(u64Delta >> 32)))
1009 {
1010 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
1011 if (off > u64Sub + offGivenUp)
1012 off -= u64Sub;
1013 else /* we've completely caught up. */
1014 off = offGivenUp;
1015 }
1016 else
1017 /* More than 4 seconds since last time (or negative), ignore it. */
1018 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
1019
1020 /* Check that we're still running and in catch up. */
1021 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
1022 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
1023 break;
1024 }
1025 }
1026 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
1027 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
1028 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
1029 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
1030 break; /* Got a consistent offset */
1031
1032 /* Repeat the initial checks before iterating. */
1033 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1034 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
1035 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
1036 {
1037 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
1038 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1039 }
1040 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
1041 {
1042 LogFlow(("TMTimerPoll: stopped\n"));
1043 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1044 }
1045 if (cOuterTries <= 0)
1046 break; /* that's enough */
1047 }
1048 if (cOuterTries <= 0)
1049 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
1050 u64VirtualSyncNow = u64Now - off;
1051
1052 /* Calc delta and see if we've got a virtual sync hit. */
1053 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
1054 if (i64Delta2 <= 0)
1055 {
1056 if ( !pVM->tm.s.fRunningQueues
1057 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1058 {
1059 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
1060 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
1061 }
1062 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
1063 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
1064 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1065 }
1066
1067 /*
1068 * Return the time left to the next event.
1069 */
1070 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
1071 if (pVCpu == pVCpuDst)
1072 {
1073 if (fCatchUp)
1074 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
1075 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
1076 }
1077 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1078}
1079
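A note on the final catch-up adjustment in the poll worker above: while catching up, the virtual sync clock advances at (100 + u32VirtualSyncCatchUpPercentage) percent of nominal speed, so the remaining virtual-sync delta is scaled by 100 / (u32Pct + 100) to get host time. For example, with u32Pct = 100 (the clock running at double speed) a 2 000 000 ns virtual-sync delta becomes a 1 000 000 ns host-side wait.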
1080
1081/**
1082 * Set FF if we've passed the next virtual event.
1083 *
1084 * This function is called before FFs are checked in the inner execution EM loops.
1085 *
1086 * @returns true if timers are pending, false if not.
1087 *
1088 * @param pVM The cross context VM structure.
1089 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1090 * @thread The emulation thread.
1091 */
1092VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1093{
1094 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1095 uint64_t off = 0;
1096 tmTimerPollInternal(pVM, pVCpu, &off);
1097 return off == 0;
1098}
1099
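A typical call-site shape for the boolean poll, as a hedged sketch (the loop is hypothetical; only TMTimerPollBool and the force-action check are real API):

/* Illustrative inner execution loop using the poll API. */
static void emInnerLoopSketch(PVMCC pVM, PVMCPUCC pVCpu)
{
    for (;;)
    {
        if (   TMTimerPollBool(pVM, pVCpu)              /* raises VMCPU_FF_TIMER on expiry */
            || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
            break;                                      /* go service the timer queues */
        /* ... execute a slice of guest code ... */
    }
}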
1100
1101/**
1102 * Set FF if we've passed the next virtual event.
1103 *
1104 * This function is called before FFs are checked in the inner execution EM loops.
1105 *
1106 * @param pVM The cross context VM structure.
1107 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1108 * @thread The emulation thread.
1109 */
1110VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1111{
1112 uint64_t off;
1113 tmTimerPollInternal(pVM, pVCpu, &off);
1114}
1115
1116
1117/**
1118 * Set FF if we've passed the next virtual event.
1119 *
1120 * This function is called before FFs are checked in the inner execution EM loops.
1121 *
1122 * @returns The GIP timestamp of the next event.
1123 * 0 if the next event has already expired.
1124 * @param pVM The cross context VM structure.
1125 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1126 * @param pu64Delta Where to store the delta.
1127 * @thread The emulation thread.
1128 */
1129VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1130{
1131 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
1132}
1133
1134#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1135
1136/**
1137 * Locks the timer clock.
1138 *
1139 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1140 * if the clock does not have a lock.
1141 * @param pVM The cross context VM structure.
1142 * @param hTimer Timer handle as returned by one of the create functions.
1143 * @param rcBusy What to return in ring-0 and raw-mode context if the
1144 * lock is busy. Pass VINF_SUCCESS to acquire the
1145 * critical section thru a ring-3 call if necessary.
1146 *
1147 * @remarks Currently only supported on timers using the virtual sync clock.
1148 */
1149VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
1150{
1151 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1152 AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1153 return PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, rcBusy);
1154}
1155
1156
1157/**
1158 * Unlocks a timer clock locked by TMTimerLock.
1159 *
1160 * @param pVM The cross context VM structure.
1161 * @param hTimer Timer handle as returned by one of the create functions.
1162 */
1163VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
1164{
1165 TMTIMER_HANDLE_TO_VARS_RETURN_VOID(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1166 AssertReturnVoid(idxQueue == TMCLOCK_VIRTUAL_SYNC);
1167 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1168}
1169
1170
1171/**
1172 * Checks if the current thread owns the timer clock lock.
1173 *
1174 * @returns @c true if it's the owner, @c false if not.
1175 * @param pVM The cross context VM structure.
1176 * @param hTimer Timer handle as returned by one of the create functions.
1177 */
1178VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
1179{
1180 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1181 AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, false);
1182 return PDMCritSectIsOwner(pVM, &pVM->tm.s.VirtualSyncLock);
1183}
1184
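Device code typically brackets updates to a virtual-sync timer with this lock/unlock pair. A hedged sketch (the helper is hypothetical; hTimer is assumed to come from one of the TMR3TimerCreate* APIs):

/* Illustrative: re-arm a virtual sync timer under its clock lock. */
static int tmArmVSyncTimerSketch(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
{
    int rc = TMTimerLock(pVM, hTimer, VERR_IGNORED /* rcBusy */);
    if (RT_SUCCESS(rc))
    {
        rc = TMTimerSet(pVM, hTimer, u64Expire);
        TMTimerUnlock(pVM, hTimer);
    }
    return rc;
}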
1185
1186/**
1187 * Optimized TMTimerSet code path for starting an inactive timer.
1188 *
1189 * @returns VBox status code.
1190 *
1191 * @param pVM The cross context VM structure.
1192 * @param pTimer The timer handle.
1193 * @param u64Expire The new expire time.
1194 * @param pQueue Pointer to the shared timer queue data.
1195 * @param idxQueue The queue index.
1196 */
1197static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, uint32_t idxQueue)
1198{
1199 Assert(pTimer->idxPrev == UINT32_MAX);
1200 Assert(pTimer->idxNext == UINT32_MAX);
1201 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1202
1203 /*
1204 * Calculate and set the expiration time.
1205 */
1206 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1207 {
1208 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1209 AssertMsgStmt(u64Expire >= u64Last,
1210 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1211 u64Expire = u64Last);
1212 }
1213 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1214 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));
1215
1216 /*
1217 * Link the timer into the active list.
1218 */
1219 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue), pQueue, pTimer, u64Expire);
1220
1221 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1222 return VINF_SUCCESS;
1223}
1224
1225
1226/**
1227 * TMTimerSet for the virtual sync timer queue.
1228 *
1229 * This employs a greatly simplified state machine by always acquiring the
1230 * queue lock and bypassing the scheduling list.
1231 *
1232 * @returns VBox status code
1233 * @param pVM The cross context VM structure.
1234 * @param pTimer The timer handle.
1235 * @param u64Expire The expiration time.
1236 */
1237static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
1238{
1239 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1240 VM_ASSERT_EMT(pVM);
1241 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1242 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1243 AssertRCReturn(rc, rc);
1244
1245 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1246 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1247 TMTIMERSTATE const enmState = pTimer->enmState;
1248 switch (enmState)
1249 {
1250 case TMTIMERSTATE_EXPIRED_DELIVER:
1251 case TMTIMERSTATE_STOPPED:
1252 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1253 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
1254 else
1255 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);
1256
1257 AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
1258 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
1259 pTimer->u64Expire = u64Expire;
1260 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1261 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1262 rc = VINF_SUCCESS;
1263 break;
1264
1265 case TMTIMERSTATE_ACTIVE:
1266 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
1267 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1268 pTimer->u64Expire = u64Expire;
1269 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1270 rc = VINF_SUCCESS;
1271 break;
1272
1273 case TMTIMERSTATE_PENDING_RESCHEDULE:
1274 case TMTIMERSTATE_PENDING_STOP:
1275 case TMTIMERSTATE_PENDING_SCHEDULE:
1276 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1277 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1278 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1279 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1280 case TMTIMERSTATE_DESTROY:
1281 case TMTIMERSTATE_FREE:
1282 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1283 rc = VERR_TM_INVALID_STATE;
1284 break;
1285
1286 default:
1287 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1288 rc = VERR_TM_UNKNOWN_STATE;
1289 break;
1290 }
1291
1292 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1293 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1294 return rc;
1295}
1296
1297
1298/**
1299 * Arm a timer with a (new) expire time.
1300 *
1301 * @returns VBox status code.
1302 * @param pVM The cross context VM structure.
1303 * @param hTimer Timer handle as returned by one of the create functions.
1304 * @param u64Expire New expire time.
1305 */
1306VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
1307{
1308 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1309 STAM_COUNTER_INC(&pTimer->StatSetAbsolute);
1310
1311 /* Treat virtual sync timers specially. */
1312 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1313 return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);
1314
1315 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1316 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1317
1318 DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);
1319
1320#ifdef VBOX_WITH_STATISTICS
1321 /*
1322 * Gather optimization info.
1323 */
1324 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
1325 TMTIMERSTATE enmOrgState = pTimer->enmState;
1326 switch (enmOrgState)
1327 {
1328 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
1329 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
1330 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
1331 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
1332 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
1333 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
1334 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
1335 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
1336 }
1337#endif
1338
1339#if 1
1340 /*
1341 * The most common case is setting the timer again during the callback.
1342 * The second most common case is starting a timer at some other time.
1343 */
1344 TMTIMERSTATE enmState1 = pTimer->enmState;
1345 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
1346 || ( enmState1 == TMTIMERSTATE_STOPPED
1347 && pTimer->pCritSect))
1348 {
1349 /* Try to take the TM lock and check the state again. */
1350 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
1351 if (RT_SUCCESS_NP(rc))
1352 {
1353 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
1354 {
1355 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, idxQueue);
1356 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1357 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1358 return VINF_SUCCESS;
1359 }
1360 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1361 }
1362 }
1363#endif
1364
1365 /*
1366 * Unoptimized code path.
1367 */
1368 int cRetries = 1000;
1369 do
1370 {
1371 /*
1372 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1373 */
1374 TMTIMERSTATE enmState = pTimer->enmState;
1375 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
1376 pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
1377 switch (enmState)
1378 {
1379 case TMTIMERSTATE_EXPIRED_DELIVER:
1380 case TMTIMERSTATE_STOPPED:
1381 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1382 {
1383 Assert(pTimer->idxPrev == UINT32_MAX);
1384 Assert(pTimer->idxNext == UINT32_MAX);
1385 pTimer->u64Expire = u64Expire;
1386 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1387 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1388 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1389 return VINF_SUCCESS;
1390 }
1391 break;
1392
1393 case TMTIMERSTATE_PENDING_SCHEDULE:
1394 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1395 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1396 {
1397 pTimer->u64Expire = u64Expire;
1398 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1399 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1400 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1401 return VINF_SUCCESS;
1402 }
1403 break;
1404
1405
1406 case TMTIMERSTATE_ACTIVE:
1407 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1408 {
1409 pTimer->u64Expire = u64Expire;
1410 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1411 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1412 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1413 return VINF_SUCCESS;
1414 }
1415 break;
1416
1417 case TMTIMERSTATE_PENDING_RESCHEDULE:
1418 case TMTIMERSTATE_PENDING_STOP:
1419 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1420 {
1421 pTimer->u64Expire = u64Expire;
1422 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1423 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1424 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1425 return VINF_SUCCESS;
1426 }
1427 break;
1428
1429
1430 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1431 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1432 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1433#ifdef IN_RING3
1434 if (!RTThreadYield())
1435 RTThreadSleep(1);
1436#else
1437/** @todo call host context and yield after a couple of iterations */
1438#endif
1439 break;
1440
1441 /*
1442 * Invalid states.
1443 */
1444 case TMTIMERSTATE_DESTROY:
1445 case TMTIMERSTATE_FREE:
1446 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1447 return VERR_TM_INVALID_STATE;
1448 default:
1449 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1450 return VERR_TM_UNKNOWN_STATE;
1451 }
1452 } while (cRetries-- > 0);
1453
1454 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1455 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1456 return VERR_TM_TIMER_UNSTABLE_STATE;
1457}
1458
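TMTimerSet takes an absolute expiration time in the timer clock's units. To arm a timer a fixed interval ahead, the usual pattern is to read the clock and add a converted interval; a sketch assuming TMTimerGet and TMTimerFromMilli from the wider TM API:

/* Illustrative: arm hTimer to fire 10 ms from now on its own clock. */
static int tmArmInTenMsSketch(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    uint64_t const u64Now = TMTimerGet(pVM, hTimer);    /* now, in the timer's clock units */
    return TMTimerSet(pVM, hTimer, u64Now + TMTimerFromMilli(pVM, hTimer, 10));
}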
1459
1460/**
1461 * Return the current time for the specified clock, setting pu64Now if not NULL.
1462 *
1463 * @returns Current time.
1464 * @param pVM The cross context VM structure.
1465 * @param enmClock The clock to query.
1466 * @param pu64Now Optional pointer where to store the return time
1467 */
1468DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1469{
1470 uint64_t u64Now;
1471 switch (enmClock)
1472 {
1473 case TMCLOCK_VIRTUAL_SYNC:
1474 u64Now = TMVirtualSyncGet(pVM);
1475 break;
1476 case TMCLOCK_VIRTUAL:
1477 u64Now = TMVirtualGet(pVM);
1478 break;
1479 case TMCLOCK_REAL:
1480 u64Now = TMRealGet(pVM);
1481 break;
1482 default:
1483 AssertFatalMsgFailed(("%d\n", enmClock));
1484 }
1485
1486 if (pu64Now)
1487 *pu64Now = u64Now;
1488 return u64Now;
1489}
1490
1491
1492/**
1493 * Optimized TMTimerSetRelative code path.
1494 *
1495 * @returns VBox status code.
1496 *
1497 * @param pVM The cross context VM structure.
1498 * @param pTimer The timer handle.
1499 * @param cTicksToNext Clock ticks until the next time expiration.
1500 * @param pu64Now Where to return the current time stamp used.
1501 * Optional.
1502 * @param pQueueCC The context specific queue data (same as @a pQueue
1503 * for ring-3).
1504 * @param pQueue The shared queue data.
1505 */
1506static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1507 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1508{
1509 Assert(pTimer->idxPrev == UINT32_MAX);
1510 Assert(pTimer->idxNext == UINT32_MAX);
1511 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1512
1513 /*
1514 * Calculate and set the expiration time.
1515 */
1516 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1517 pTimer->u64Expire = u64Expire;
1518 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));
1519
1520 /*
1521 * Link the timer into the active list.
1522 */
1523 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
1524 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1525
1526 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1527 return VINF_SUCCESS;
1528}
1529
1530
1531/**
1532 * TMTimerSetRelative for the virtual sync timer queue.
1533 *
1534 * This employs a greatly simplified state machine by always acquiring the
1535 * queue lock and bypassing the scheduling list.
1536 *
1537 * @returns VBox status code
1538 * @param pVM The cross context VM structure.
1539 * @param pTimer The timer to (re-)arm.
1540 * @param cTicksToNext Clock ticks until the next time expiration.
1541 * @param pu64Now Where to return the current time stamp used.
1542 * Optional.
1543 */
1544static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1545{
1546 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1547 VM_ASSERT_EMT(pVM);
1548 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1549 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1550 AssertRCReturn(rc, rc);
1551
1552 /* Calculate the expiration tick. */
1553 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1554 if (pu64Now)
1555 *pu64Now = u64Expire;
1556 u64Expire += cTicksToNext;
1557
1558 /* Update the timer. */
1559 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1560 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1561 TMTIMERSTATE const enmState = pTimer->enmState;
1562 switch (enmState)
1563 {
1564 case TMTIMERSTATE_EXPIRED_DELIVER:
1565 case TMTIMERSTATE_STOPPED:
1566 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1567 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1568 else
1569 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1570 pTimer->u64Expire = u64Expire;
1571 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1572 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1573 rc = VINF_SUCCESS;
1574 break;
1575
1576 case TMTIMERSTATE_ACTIVE:
1577 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1578 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1579 pTimer->u64Expire = u64Expire;
1580 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1581 rc = VINF_SUCCESS;
1582 break;
1583
1584 case TMTIMERSTATE_PENDING_RESCHEDULE:
1585 case TMTIMERSTATE_PENDING_STOP:
1586 case TMTIMERSTATE_PENDING_SCHEDULE:
1587 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1588 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1589 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1590 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1591 case TMTIMERSTATE_DESTROY:
1592 case TMTIMERSTATE_FREE:
1593 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1594 rc = VERR_TM_INVALID_STATE;
1595 break;
1596
1597 default:
1598 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1599 rc = VERR_TM_UNKNOWN_STATE;
1600 break;
1601 }
1602
1603 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1604 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1605 return rc;
1606}
1607
1608
1609/**
1610 * Arm a timer with an expire time relative to the current time.
1611 *
1612 * @returns VBox status code.
1613 * @param pVM The cross context VM structure.
1614 * @param pTimer The timer to arm.
1615 * @param cTicksToNext Clock ticks until the next time expiration.
1616 * @param pu64Now Where to return the current time stamp used.
1617 * Optional.
1618 * @param pQueueCC The context specific queue data (same as @a pQueue
1619 * for ring-3).
1620 * @param pQueue The shared queue data.
1621 */
1622static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1623 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1624{
1625 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1626
1627 /* Treat virtual sync timers specially. */
1628 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1629 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1630
1631 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1632 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1633
1634 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1635
1636#ifdef VBOX_WITH_STATISTICS
1637 /*
1638 * Gather optimization info.
1639 */
1640 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1641 TMTIMERSTATE enmOrgState = pTimer->enmState;
1642 switch (enmOrgState)
1643 {
1644 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1645 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1646 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1647 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1648 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1649 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1650 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1651 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1652 }
1653#endif
1654
1655 /*
1656 * Try to take the TM lock and optimize the common cases.
1657 *
1658 * With the TM lock we can safely make optimizations like immediate
1659 * scheduling and we can also be 100% sure that we're not racing the
1660 * running of the timer queues. As an additional restraint we require the
1661 * timer to have a critical section associated with it, so we can be 100% sure
1662 * there aren't concurrent operations on the timer. (The latter isn't necessary
1663 * any longer as concurrent operations aren't supported for any timers, critsect or not.)
1664 *
1665 * Note! Lock ordering doesn't apply when we only _try_ to
1666 * get the innermost locks.
1667 */
1668 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1669#if 1
1670 if ( fOwnTMLock
1671 && pTimer->pCritSect)
1672 {
1673 TMTIMERSTATE enmState = pTimer->enmState;
1674 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1675 || enmState == TMTIMERSTATE_STOPPED)
1676 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1677 {
1678 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1679 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1680 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1681 return VINF_SUCCESS;
1682 }
1683
1684 /* Optimize other states when it becomes necessary. */
1685 }
1686#endif
1687
1688 /*
1689 * Unoptimized path.
1690 */
1691 int rc;
1692 for (int cRetries = 1000; ; cRetries--)
1693 {
1694 /*
1695 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1696 */
1697 TMTIMERSTATE enmState = pTimer->enmState;
1698 switch (enmState)
1699 {
1700 case TMTIMERSTATE_STOPPED:
1701 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1702 {
1703 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1704 * Figure a safe way of activating this timer while the queue is
1705 * being run.
1706 * (99.9% sure that the assertion is caused by DevAPIC.cpp
1707 * re-starting the timer in response to an initial_count write.) */
1708 }
1709 RT_FALL_THRU();
1710 case TMTIMERSTATE_EXPIRED_DELIVER:
1711 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1712 {
1713 Assert(pTimer->idxPrev == UINT32_MAX);
1714 Assert(pTimer->idxNext == UINT32_MAX);
1715 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1716 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1717 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1718 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1719 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1720 rc = VINF_SUCCESS;
1721 break;
1722 }
1723 rc = VERR_TRY_AGAIN;
1724 break;
1725
1726 case TMTIMERSTATE_PENDING_SCHEDULE:
1727 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1728 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1729 {
1730 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1731 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1732 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1733 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1734 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1735 rc = VINF_SUCCESS;
1736 break;
1737 }
1738 rc = VERR_TRY_AGAIN;
1739 break;
1740
1741
1742 case TMTIMERSTATE_ACTIVE:
1743 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1744 {
1745 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1746 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1747 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1748 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1749 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1750 rc = VINF_SUCCESS;
1751 break;
1752 }
1753 rc = VERR_TRY_AGAIN;
1754 break;
1755
1756 case TMTIMERSTATE_PENDING_RESCHEDULE:
1757 case TMTIMERSTATE_PENDING_STOP:
1758 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1759 {
1760 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1761 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1762 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1763 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1764 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1765 rc = VINF_SUCCESS;
1766 break;
1767 }
1768 rc = VERR_TRY_AGAIN;
1769 break;
1770
1771
1772 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1773 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1774 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1775#ifdef IN_RING3
1776 if (!RTThreadYield())
1777 RTThreadSleep(1);
1778#else
1779/** @todo call host context and yield after a couple of iterations */
1780#endif
1781 rc = VERR_TRY_AGAIN;
1782 break;
1783
1784 /*
1785 * Invalid states.
1786 */
1787 case TMTIMERSTATE_DESTROY:
1788 case TMTIMERSTATE_FREE:
1789 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1790 rc = VERR_TM_INVALID_STATE;
1791 break;
1792
1793 default:
1794 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1795 rc = VERR_TM_UNKNOWN_STATE;
1796 break;
1797 }
1798
1799 /* switch + loop is tedious to break out of. */
1800 if (rc == VINF_SUCCESS)
1801 break;
1802
1803 if (rc != VERR_TRY_AGAIN)
1804 {
1805 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1806 break;
1807 }
1808 if (cRetries <= 0)
1809 {
1810 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1811 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1812 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1813 break;
1814 }
1815
1816 /*
1817 * Retry to gain locks.
1818 */
1819 if (!fOwnTMLock)
1820 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1821
1822 } /* for (;;) */
1823
1824 /*
1825 * Clean up and return.
1826 */
1827 if (fOwnTMLock)
1828 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1829
1830 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1831 return rc;
1832}
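
/**
 * A distilled sketch of the optimistic arming pattern used above, assuming a
 * hypothetical caller that has already resolved pVM/pQueueCC/pQueue/pTimer
 * (the real loop additionally covers the other states and a retry budget):
 * @code
 *      TMTIMERSTATE const enmState = pTimer->enmState;
 *      if (   enmState == TMTIMERSTATE_STOPPED
 *          && tmTimerTryWithLink(pQueueCC, pQueue, pTimer,
 *                                TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
 *      {
 *          pTimer->u64Expire = cTicksToNext
 *                            + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, NULL);
 *          TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
 *          tmSchedule(pVM, pQueueCC, pQueue, pTimer);  // hand over to the scheduler
 *      }
 *      // else: re-read the state and try again, as the retry loop above does.
 * @endcode
 */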
1833
1834
1835/**
1836 * Arm a timer with an expire time relative to the current time.
1837 *
1838 * @returns VBox status code.
1839 * @param pVM The cross context VM structure.
1840 * @param hTimer Timer handle as returned by one of the create functions.
1841 * @param cTicksToNext Clock ticks until the next time expiration.
1842 * @param pu64Now Where to return the current time stamp used.
1843 * Optional.
1844 */
1845VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1846{
1847 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1848 return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1849}
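
/**
 * Usage sketch for the API above: re-arming a timer 50us out from inside its
 * own critical section (hTimer is assumed to come from one of the create
 * functions; the surrounding device context is hypothetical):
 * @code
 *      uint64_t u64Now;
 *      int rc = TMTimerSetRelative(pVM, hTimer, TMTimerFromMicro(pVM, hTimer, 50), &u64Now);
 *      AssertRC(rc);  // u64Now holds the timestamp the new deadline was based on
 * @endcode
 */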
1850
1851
1852/**
1853 * Drops a hint about the frequency of the timer.
1854 *
1855 * This is used by TM and the VMM to calculate how often guest execution needs
1856 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1857 *
1858 * @returns VBox status code.
1859 * @param pVM The cross context VM structure.
1860 * @param hTimer Timer handle as returned by one of the create functions.
1861 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1862 *
1863 * @remarks We're using an integer hertz value here since anything above 1 Hz
1864 * is not going to be any trouble to satisfy scheduling-wise. The
1865 * range where it makes sense is >= 100 Hz.
1866 */
1867VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1868{
1869 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1870 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1871
1872 uint32_t const uHzOldHint = pTimer->uHzHint;
1873 pTimer->uHzHint = uHzHint;
1874
1875 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1876 if ( uHzHint > uMaxHzHint
1877 || uHzOldHint >= uMaxHzHint)
1878 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1879
1880 return VINF_SUCCESS;
1881}
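
/**
 * Usage sketch: a device emulating a (hypothetical) 1 kHz periodic tick would
 * pair the arming call with a hint so guest execution gets interrupted often
 * enough:
 * @code
 *      TMTimerSetFrequencyHint(pVM, hTimer, 1000);  // ~1000 Hz callback rate
 *      TMTimerSetMillies(pVM, hTimer, 1);           // first tick in 1 ms
 * @endcode
 * TMTimerStop() clears the hint again, so no explicit zeroing is needed there.
 */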
1882
1883
1884/**
1885 * TMTimerStop for the virtual sync timer queue.
1886 *
1887 * This employs a greatly simplified state machine by always acquiring the
1888 * queue lock and bypassing the scheduling list.
1889 *
1890 * @returns VBox status code
1891 * @param pVM The cross context VM structure.
1892 * @param pTimer The timer handle.
1893 */
1894static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1895{
1896 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1897 VM_ASSERT_EMT(pVM);
1898 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1899 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1900 AssertRCReturn(rc, rc);
1901
1902 /* Reset the HZ hint. */
1903 uint32_t uOldHzHint = pTimer->uHzHint;
1904 if (uOldHzHint)
1905 {
1906 if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
1907 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
1908 pTimer->uHzHint = 0;
1909 }
1910
1911 /* Update the timer state. */
1912 TMTIMERSTATE const enmState = pTimer->enmState;
1913 switch (enmState)
1914 {
1915 case TMTIMERSTATE_ACTIVE:
1916 {
1917 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1918 tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
1919 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1920 rc = VINF_SUCCESS;
1921 break;
1922 }
1923
1924 case TMTIMERSTATE_EXPIRED_DELIVER:
1925 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1926 rc = VINF_SUCCESS;
1927 break;
1928
1929 case TMTIMERSTATE_STOPPED:
1930 rc = VINF_SUCCESS;
1931 break;
1932
1933 case TMTIMERSTATE_PENDING_RESCHEDULE:
1934 case TMTIMERSTATE_PENDING_STOP:
1935 case TMTIMERSTATE_PENDING_SCHEDULE:
1936 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1937 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1938 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1939 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1940 case TMTIMERSTATE_DESTROY:
1941 case TMTIMERSTATE_FREE:
1942 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1943 rc = VERR_TM_INVALID_STATE;
1944 break;
1945
1946 default:
1947 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1948 rc = VERR_TM_UNKNOWN_STATE;
1949 break;
1950 }
1951
1952 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1953 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1954 return rc;
1955}
1956
1957
1958/**
1959 * Stop the timer.
1960 * Use TMTimerSetRelative() or one of the other arming functions to "un-stop" it.
1961 *
1962 * @returns VBox status code.
1963 * @param pVM The cross context VM structure.
1964 * @param hTimer Timer handle as returned by one of the create functions.
1965 */
1966VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
1967{
1968 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1969 STAM_COUNTER_INC(&pTimer->StatStop);
1970
1971 /* Treat virtual sync timers specially. */
1972 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1973 return tmTimerVirtualSyncStop(pVM, pTimer);
1974
1975 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1976 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1977
1978 /*
1979 * Reset the HZ hint.
1980 */
1981 uint32_t const uOldHzHint = pTimer->uHzHint;
1982 if (uOldHzHint)
1983 {
1984 if (uOldHzHint >= pQueue->uMaxHzHint)
1985 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1986 pTimer->uHzHint = 0;
1987 }
1988
1989 /** @todo see if this function needs optimizing. */
1990 int cRetries = 1000;
1991 do
1992 {
1993 /*
1994 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1995 */
1996 TMTIMERSTATE enmState = pTimer->enmState;
1997 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1998 pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
1999 switch (enmState)
2000 {
2001 case TMTIMERSTATE_EXPIRED_DELIVER:
2002 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
2003 return VERR_INVALID_PARAMETER;
2004
2005 case TMTIMERSTATE_STOPPED:
2006 case TMTIMERSTATE_PENDING_STOP:
2007 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2008 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2009 return VINF_SUCCESS;
2010
2011 case TMTIMERSTATE_PENDING_SCHEDULE:
2012 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2013 {
2014 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2015 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2016 return VINF_SUCCESS;
2017 }
2018 break;
2019
2020 case TMTIMERSTATE_PENDING_RESCHEDULE:
2021 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2022 {
2023 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2024 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2025 return VINF_SUCCESS;
2026 }
2027 break;
2028
2029 case TMTIMERSTATE_ACTIVE:
2030 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2031 {
2032 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2033 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2034 return VINF_SUCCESS;
2035 }
2036 break;
2037
2038 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2039 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2040 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2041#ifdef IN_RING3
2042 if (!RTThreadYield())
2043 RTThreadSleep(1);
2044#else
2045/** @todo call host and yield cpu after a while. */
2046#endif
2047 break;
2048
2049 /*
2050 * Invalid states.
2051 */
2052 case TMTIMERSTATE_DESTROY:
2053 case TMTIMERSTATE_FREE:
2054 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2055 return VERR_TM_INVALID_STATE;
2056 default:
2057 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2058 return VERR_TM_UNKNOWN_STATE;
2059 }
2060 } while (cRetries-- > 0);
2061
2062 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2063 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2064 return VERR_TM_TIMER_UNSTABLE_STATE;
2065}
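
/**
 * Usage sketch for a hypothetical teardown/reset path; note that stopping
 * also resets any frequency hint set via TMTimerSetFrequencyHint():
 * @code
 *      int rc = TMTimerStop(pVM, hTimer);
 *      AssertRC(rc);
 *      Assert(!TMTimerIsActive(pVM, hTimer));
 * @endcode
 */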
2066
2067
2068/**
2069 * Get the current clock time.
2070 * Handy for calculating the new expire time.
2071 *
2072 * @returns Current clock time.
2073 * @param pVM The cross context VM structure.
2074 * @param hTimer Timer handle as returned by one of the create functions.
2075 */
2076VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2077{
2078 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2079 STAM_COUNTER_INC(&pTimer->StatGet);
2080
2081 uint64_t u64;
2082 switch (pQueue->enmClock)
2083 {
2084 case TMCLOCK_VIRTUAL:
2085 u64 = TMVirtualGet(pVM);
2086 break;
2087 case TMCLOCK_VIRTUAL_SYNC:
2088 u64 = TMVirtualSyncGet(pVM);
2089 break;
2090 case TMCLOCK_REAL:
2091 u64 = TMRealGet(pVM);
2092 break;
2093 default:
2094 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2095 return UINT64_MAX;
2096 }
2097 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2098 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2099 return u64;
2100}
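
/**
 * Sketch of the pattern this getter exists for: computing an absolute
 * deadline on the timer's own clock and handing it to TMTimerSet(), the
 * absolute arming counterpart (local names are illustrative):
 * @code
 *      uint64_t const u64Now    = TMTimerGet(pVM, hTimer);
 *      uint64_t const u64Expire = u64Now + TMTimerFromMilli(pVM, hTimer, 10);
 *      int rc = TMTimerSet(pVM, hTimer, u64Expire);  // fires ~10 ms from now
 * @endcode
 */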
2101
2102
2103/**
2104 * Get the frequency of the timer clock.
2105 *
2106 * @returns Clock frequency (as Hz of course).
2107 * @param pVM The cross context VM structure.
2108 * @param hTimer Timer handle as returned by one of the create functions.
2109 */
2110VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2111{
2112 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2113 switch (pQueue->enmClock)
2114 {
2115 case TMCLOCK_VIRTUAL:
2116 case TMCLOCK_VIRTUAL_SYNC:
2117 return TMCLOCK_FREQ_VIRTUAL;
2118
2119 case TMCLOCK_REAL:
2120 return TMCLOCK_FREQ_REAL;
2121
2122 default:
2123 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2124 return 0;
2125 }
2126}
2127
2128
2129/**
2130 * Get the expire time of the timer.
2131 * Only valid for active timers.
2132 *
2133 * @returns Expire time of the timer.
2134 * @param pVM The cross context VM structure.
2135 * @param hTimer Timer handle as returned by one of the create functions.
2136 */
2137VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
2138{
2139 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2140 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2141 int cRetries = 1000;
2142 do
2143 {
2144 TMTIMERSTATE enmState = pTimer->enmState;
2145 switch (enmState)
2146 {
2147 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2148 case TMTIMERSTATE_EXPIRED_DELIVER:
2149 case TMTIMERSTATE_STOPPED:
2150 case TMTIMERSTATE_PENDING_STOP:
2151 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2152 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2153 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2154 return UINT64_MAX;
2155
2156 case TMTIMERSTATE_ACTIVE:
2157 case TMTIMERSTATE_PENDING_RESCHEDULE:
2158 case TMTIMERSTATE_PENDING_SCHEDULE:
2159 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2160 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2161 return pTimer->u64Expire;
2162
2163 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2164 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2165#ifdef IN_RING3
2166 if (!RTThreadYield())
2167 RTThreadSleep(1);
2168#endif
2169 break;
2170
2171 /*
2172 * Invalid states.
2173 */
2174 case TMTIMERSTATE_DESTROY:
2175 case TMTIMERSTATE_FREE:
2176 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2177 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2178 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2179 return UINT64_MAX;
2180 default:
2181 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2182 return UINT64_MAX;
2183 }
2184 } while (cRetries-- > 0);
2185
2186 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2187 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2188 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2189 return UINT64_MAX;
2190}
2191
2192
2193/**
2194 * Checks if a timer is active or not.
2195 *
2196 * @returns True if active.
2197 * @returns False if not active.
2198 * @param pVM The cross context VM structure.
2199 * @param hTimer Timer handle as returned by one of the create functions.
2200 */
2201VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2202{
2203 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2204 TMTIMERSTATE enmState = pTimer->enmState;
2205 switch (enmState)
2206 {
2207 case TMTIMERSTATE_STOPPED:
2208 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2209 case TMTIMERSTATE_EXPIRED_DELIVER:
2210 case TMTIMERSTATE_PENDING_STOP:
2211 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2212 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2213 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2214 return false;
2215
2216 case TMTIMERSTATE_ACTIVE:
2217 case TMTIMERSTATE_PENDING_RESCHEDULE:
2218 case TMTIMERSTATE_PENDING_SCHEDULE:
2219 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2220 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2221 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2222 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2223 return true;
2224
2225 /*
2226 * Invalid states.
2227 */
2228 case TMTIMERSTATE_DESTROY:
2229 case TMTIMERSTATE_FREE:
2230 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2231 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2232 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2233 return false;
2234 default:
2235 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2236 return false;
2237 }
2238}
2239
2240
2241/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2242
2243
2244/**
2245 * Arm a timer with a (new) expire time relative to current time.
2246 *
2247 * @returns VBox status code.
2248 * @param pVM The cross context VM structure.
2249 * @param hTimer Timer handle as returned by one of the create functions.
2250 * @param cMilliesToNext Number of milliseconds to the next tick.
2251 */
2252VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2253{
2254 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2255 switch (pQueue->enmClock)
2256 {
2257 case TMCLOCK_VIRTUAL:
2258 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2259 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2260
2261 case TMCLOCK_VIRTUAL_SYNC:
2262 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2263 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2264
2265 case TMCLOCK_REAL:
2266 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2267 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL, pQueueCC, pQueue);
2268
2269 default:
2270 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2271 return VERR_TM_TIMER_BAD_CLOCK;
2272 }
2273}
2274
2275
2276/**
2277 * Arm a timer with a (new) expire time relative to current time.
2278 *
2279 * @returns VBox status code.
2280 * @param pVM The cross context VM structure.
2281 * @param hTimer Timer handle as returned by one of the create functions.
2282 * @param cMicrosToNext Number of microseconds to the next tick.
2283 */
2284VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2285{
2286 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2287 switch (pQueue->enmClock)
2288 {
2289 case TMCLOCK_VIRTUAL:
2290 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2291 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2292
2293 case TMCLOCK_VIRTUAL_SYNC:
2294 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2295 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2296
2297 case TMCLOCK_REAL:
2298 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2299 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL, pQueueCC, pQueue);
2300
2301 default:
2302 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2303 return VERR_TM_TIMER_BAD_CLOCK;
2304 }
2305}
2306
2307
2308/**
2309 * Arm a timer with a (new) expire time relative to current time.
2310 *
2311 * @returns VBox status code.
2312 * @param pVM The cross context VM structure.
2313 * @param hTimer Timer handle as returned by one of the create functions.
2314 * @param cNanosToNext Number of nanoseconds to the next tick.
2315 */
2316VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2317{
2318 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2319 switch (pQueue->enmClock)
2320 {
2321 case TMCLOCK_VIRTUAL:
2322 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2323 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2324
2325 case TMCLOCK_VIRTUAL_SYNC:
2326 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2327 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2328
2329 case TMCLOCK_REAL:
2330 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2331 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL, pQueueCC, pQueue);
2332
2333 default:
2334 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2335 return VERR_TM_TIMER_BAD_CLOCK;
2336 }
2337}
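
/**
 * The three convenience setters above all funnel into tmTimerSetRelative();
 * a sketch of their equivalence for a timer on one of the 1 GHz virtual
 * clocks:
 * @code
 *      TMTimerSetMillies(pVM, hTimer, 5);        // 5 ms        -> 5000000 ticks
 *      TMTimerSetMicro(pVM, hTimer, 5000);       // 5000 us     -> same deadline
 *      TMTimerSetNano(pVM, hTimer, 5000000);     // 5000000 ns  -> same deadline
 * @endcode
 * The TMCLOCK_REAL variants divide instead and thus round away fractions
 * below the millisecond resolution of that clock.
 */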
2338
2339
2340/**
2341 * Get the current clock time as nanoseconds.
2342 *
2343 * @returns The timer clock as nanoseconds.
2344 * @param pVM The cross context VM structure.
2345 * @param hTimer Timer handle as returned by one of the create functions.
2346 */
2347VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2348{
2349 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2350}
2351
2352
2353/**
2354 * Get the current clock time as microseconds.
2355 *
2356 * @returns The timer clock as microseconds.
2357 * @param pVM The cross context VM structure.
2358 * @param hTimer Timer handle as returned by one of the create functions.
2359 */
2360VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2361{
2362 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2363}
2364
2365
2366/**
2367 * Get the current clock time as milliseconds.
2368 *
2369 * @returns The timer clock as milliseconds.
2370 * @param pVM The cross context VM structure.
2371 * @param hTimer Timer handle as returned by one of the create functions.
2372 */
2373VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2374{
2375 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2376}
2377
2378
2379/**
2380 * Converts the specified timer clock time to nanoseconds.
2381 *
2382 * @returns nanoseconds.
2383 * @param pVM The cross context VM structure.
2384 * @param hTimer Timer handle as returned by one of the create functions.
2385 * @param cTicks The clock ticks.
2386 * @remark There could be rounding errors here. We just do a simple integer divide
2387 * without any adjustments.
2388 */
2389VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2390{
2391 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2392 switch (pQueue->enmClock)
2393 {
2394 case TMCLOCK_VIRTUAL:
2395 case TMCLOCK_VIRTUAL_SYNC:
2396 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2397 return cTicks;
2398
2399 case TMCLOCK_REAL:
2400 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2401 return cTicks * 1000000;
2402
2403 default:
2404 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2405 return 0;
2406 }
2407}
2408
2409
2410/**
2411 * Converts the specified timer clock time to microseconds.
2412 *
2413 * @returns microseconds.
2414 * @param pVM The cross context VM structure.
2415 * @param hTimer Timer handle as returned by one of the create functions.
2416 * @param cTicks The clock ticks.
2417 * @remark There could be rounding errors here. We just do a simple integer divide
2418 * without any adjustments.
2419 */
2420VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2421{
2422 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2423 switch (pQueue->enmClock)
2424 {
2425 case TMCLOCK_VIRTUAL:
2426 case TMCLOCK_VIRTUAL_SYNC:
2427 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2428 return cTicks / 1000;
2429
2430 case TMCLOCK_REAL:
2431 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2432 return cTicks * 1000;
2433
2434 default:
2435 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2436 return 0;
2437 }
2438}
2439
2440
2441/**
2442 * Converts the specified timer clock time to milliseconds.
2443 *
2444 * @returns milliseconds.
2445 * @param pVM The cross context VM structure.
2446 * @param hTimer Timer handle as returned by one of the create functions.
2447 * @param cTicks The clock ticks.
2448 * @remark There could be rounding errors here. We just do a simple integer divide
2449 * without any adjustments.
2450 */
2451VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2452{
2453 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2454 switch (pQueue->enmClock)
2455 {
2456 case TMCLOCK_VIRTUAL:
2457 case TMCLOCK_VIRTUAL_SYNC:
2458 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2459 return cTicks / 1000000;
2460
2461 case TMCLOCK_REAL:
2462 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2463 return cTicks;
2464
2465 default:
2466 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2467 return 0;
2468 }
2469}
2470
2471
2472/**
2473 * Converts the specified nanosecond timestamp to timer clock ticks.
2474 *
2475 * @returns timer clock ticks.
2476 * @param pVM The cross context VM structure.
2477 * @param hTimer Timer handle as returned by one of the create functions.
2478 * @param cNanoSecs The nanosecond value to convert.
2479 * @remark There could be rounding and overflow errors here.
2480 */
2481VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2482{
2483 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2484 switch (pQueue->enmClock)
2485 {
2486 case TMCLOCK_VIRTUAL:
2487 case TMCLOCK_VIRTUAL_SYNC:
2488 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2489 return cNanoSecs;
2490
2491 case TMCLOCK_REAL:
2492 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2493 return cNanoSecs / 1000000;
2494
2495 default:
2496 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2497 return 0;
2498 }
2499}
2500
2501
2502/**
2503 * Converts the specified microsecond timestamp to timer clock ticks.
2504 *
2505 * @returns timer clock ticks.
2506 * @param pVM The cross context VM structure.
2507 * @param hTimer Timer handle as returned by one of the create functions.
2508 * @param cMicroSecs The microsecond value to convert.
2509 * @remark There could be rounding and overflow errors here.
2510 */
2511VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2512{
2513 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2514 switch (pQueue->enmClock)
2515 {
2516 case TMCLOCK_VIRTUAL:
2517 case TMCLOCK_VIRTUAL_SYNC:
2518 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2519 return cMicroSecs * 1000;
2520
2521 case TMCLOCK_REAL:
2522 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2523 return cMicroSecs / 1000;
2524
2525 default:
2526 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2527 return 0;
2528 }
2529}
2530
2531
2532/**
2533 * Converts the specified millisecond timestamp to timer clock ticks.
2534 *
2535 * @returns timer clock ticks.
2536 * @param pVM The cross context VM structure.
2537 * @param hTimer Timer handle as returned by one of the create functions.
2538 * @param cMilliSecs The millisecond value to convert.
2539 * @remark There could be rounding and overflow errors here.
2540 */
2541VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2542{
2543 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2544 switch (pQueue->enmClock)
2545 {
2546 case TMCLOCK_VIRTUAL:
2547 case TMCLOCK_VIRTUAL_SYNC:
2548 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2549 return cMilliSecs * 1000000;
2550
2551 case TMCLOCK_REAL:
2552 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2553 return cMilliSecs;
2554
2555 default:
2556 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2557 return 0;
2558 }
2559}
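
/**
 * The rounding caveat from the remarks above, illustrated for a TMCLOCK_REAL
 * timer (millisecond ticks):
 * @code
 *      uint64_t cTicks = TMTimerFromMicro(pVM, hTimer, 1500);  // 1500 / 1000 = 1 tick
 *      uint64_t cUs    = TMTimerToMicro(pVM, hTimer, cTicks);  // 1 * 1000    = 1000 us
 *      // 500 microseconds were lost to the integer division.
 * @endcode
 */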
2560
2561
2562/**
2563 * Convert state to string.
2564 *
2565 * @returns Readonly status name.
2566 * @param enmState State.
2567 */
2568const char *tmTimerState(TMTIMERSTATE enmState)
2569{
2570 switch (enmState)
2571 {
2572#define CASE(num, state) \
2573 case TMTIMERSTATE_##state: \
2574 AssertCompile(TMTIMERSTATE_##state == (num)); \
2575 return #num "-" #state
2576 CASE( 0,INVALID);
2577 CASE( 1,STOPPED);
2578 CASE( 2,ACTIVE);
2579 CASE( 3,EXPIRED_GET_UNLINK);
2580 CASE( 4,EXPIRED_DELIVER);
2581 CASE( 5,PENDING_STOP);
2582 CASE( 6,PENDING_STOP_SCHEDULE);
2583 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2584 CASE( 8,PENDING_SCHEDULE);
2585 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2586 CASE(10,PENDING_RESCHEDULE);
2587 CASE(11,DESTROY);
2588 CASE(12,FREE);
2589 default:
2590 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2591 return "Invalid state!";
2592#undef CASE
2593 }
2594}
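
/**
 * These strings feed the Log2 statements and assertions throughout this file,
 * e.g. (illustrative):
 * @code
 *      Log2(("example: %p:{.enmState=%s, .pszDesc='%s'}\n",
 *            pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
 * @endcode
 * The AssertCompile in each CASE keeps the "num-state" text in sync with the
 * actual TMTIMERSTATE values at build time.
 */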
2595
2596
2597#if defined(IN_RING0) || defined(IN_RING3)
2598/**
2599 * Copies over old timers and initializes newly allocated ones.
2600 *
2601 * Helper for TMR0TimerQueueGrow and tmR3TimerQueueGrow.
2602 *
2603 * @param paTimers The new timer allocation.
2604 * @param paOldTimers The old timers.
2605 * @param cNewTimers Number of new timers.
2606 * @param cOldTimers Number of old timers.
2607 */
2608void tmHCTimerQueueGrowInit(PTMTIMER paTimers, TMTIMER const *paOldTimers, uint32_t cNewTimers, uint32_t cOldTimers)
2609{
2610 Assert(cOldTimers < cNewTimers);
2611
2612 /*
2613 * Copy over the old info and initialize the new handles.
2614 */
2615 if (cOldTimers > 0)
2616 memcpy(paTimers, paOldTimers, sizeof(TMTIMER) * cOldTimers);
2617
2618 size_t i = cNewTimers;
2619 while (i-- > cOldTimers)
2620 {
2621 paTimers[i].u64Expire = UINT64_MAX;
2622 paTimers[i].enmType = TMTIMERTYPE_INVALID;
2623 paTimers[i].enmState = TMTIMERSTATE_FREE;
2624 paTimers[i].idxScheduleNext = UINT32_MAX;
2625 paTimers[i].idxNext = UINT32_MAX;
2626 paTimers[i].idxPrev = UINT32_MAX;
2627 paTimers[i].hSelf = NIL_TMTIMERHANDLE;
2628 }
2629
2630 /*
2631 * Mark the zero'th entry as allocated but invalid if we just allocated it.
2632 */
2633 if (cOldTimers == 0)
2634 {
2635 paTimers[0].enmState = TMTIMERSTATE_INVALID;
2636 paTimers[0].szName[0] = 'n';
2637 paTimers[0].szName[1] = 'i';
2638 paTimers[0].szName[2] = 'l';
2639 paTimers[0].szName[3] = '\0';
2640 }
2641}
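
/**
 * Usage sketch with hypothetical buffers: growing a queue from 4 to 8 timers.
 * @code
 *      tmHCTimerQueueGrowInit(paNew, paOld, 8, 4);  // copies 0..3, inits 4..7
 *      Assert(paNew[7].enmState == TMTIMERSTATE_FREE);
 *      Assert(paNew[7].hSelf    == NIL_TMTIMERHANDLE);
 * @endcode
 */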
2642#endif /* IN_RING0 || IN_RING3 */
2643
2644
2645/**
2646 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
2647 *
2648 * @returns The highest frequency. 0 if no timers care.
2649 * @param pVM The cross context VM structure.
2650 * @param uOldMaxHzHint The old global hint.
2651 */
2652DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
2653{
2654 /* Set two bits, though not entirely sure it's needed (too exhausted to think clearly),
2655 but it should force other callers thru the slow path and help us detect
2656 changes while we're recalculating. */
2657 AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);
2658
2659 /*
2660 * The "right" highest frequency value isn't so important that we'll block
2661 * waiting on the timer semaphores.
2662 */
2663 uint32_t uMaxHzHint = 0;
2664 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
2665 {
2666 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
2667
2668 /* Get the max Hz hint for the queue. */
2669 uint32_t uMaxHzHintQueue;
2670 if ( !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2671 || RT_FAILURE_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock)))
2672 uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
2673 else
2674 {
2675 /* Is it still necessary to do updating? */
2676 if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2677 {
2678 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */
2679
2680 PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
2681 uMaxHzHintQueue = 0;
2682 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
2683 pCur;
2684 pCur = tmTimerGetNext(pQueueCC, pCur))
2685 {
2686 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2687 if (uHzHint > uMaxHzHintQueue)
2688 {
2689 TMTIMERSTATE enmState = pCur->enmState;
2690 switch (enmState)
2691 {
2692 case TMTIMERSTATE_ACTIVE:
2693 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2694 case TMTIMERSTATE_EXPIRED_DELIVER:
2695 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2696 case TMTIMERSTATE_PENDING_SCHEDULE:
2697 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2698 case TMTIMERSTATE_PENDING_RESCHEDULE:
2699 uMaxHzHintQueue = uHzHint;
2700 break;
2701
2702 case TMTIMERSTATE_STOPPED:
2703 case TMTIMERSTATE_PENDING_STOP:
2704 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2705 case TMTIMERSTATE_DESTROY:
2706 case TMTIMERSTATE_FREE:
2707 case TMTIMERSTATE_INVALID:
2708 break;
2709 /* no default, want gcc warnings when adding more states. */
2710 }
2711 }
2712 }
2713
2714 /* Write the new Hz hint for the queue and clear the other update flag. */
2715 ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
2716 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
2717 }
2718 else
2719 uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);
2720
2721 PDMCritSectLeave(pVM, &pQueue->TimerLock);
2722 }
2723
2724 /* Update the global max Hz hint. */
2725 if (uMaxHzHint < uMaxHzHintQueue)
2726 uMaxHzHint = uMaxHzHintQueue;
2727 }
2728
2729 /*
2730 * Update the frequency hint if no pending frequency changes and we didn't race anyone thru here.
2731 */
2732 uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
2733 if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2734 Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
2735 else
2736 for (uint32_t iTry = 1;; iTry++)
2737 {
2738 if (RT_LO_U32(u64Actual) != 0)
2739 Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
2740 else if (iTry >= 4)
2741 Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
2742 else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2743 Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
2744 else
2745 continue;
2746 break;
2747 }
2748 return uMaxHzHint;
2749}
2750
2751
2752/**
2753 * Gets the highest frequency hint for all the important timers.
2754 *
2755 * @returns The highest frequency. 0 if no timers care.
2756 * @param pVM The cross context VM structure.
2757 */
2758DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2759{
2760 /*
2761 * Query the value, recalculate it if necessary.
2762 */
2763 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2764 if (RT_LO_U32(u64Combined) == 0)
2765 return RT_HI_U32(u64Combined); /* hopefully somewhat likely */
2766 return tmGetFrequencyHintSlow(pVM, RT_HI_U32(u64Combined));
2767}
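
/**
 * Layout sketch for HzHint.u64Combined as used by the two functions above:
 * the cached global hint lives in the high dword while the low dword holds
 * the per-queue pending-update flags (bits idxQueue and idxQueue + 16):
 * @code
 *      uint64_t u64Combined = RT_MAKE_U64(0, uMaxHzHint);  // low = flags, high = hint
 *      Assert(RT_LO_U32(u64Combined) == 0);                // no updates pending
 *      Assert(RT_HI_U32(u64Combined) == uMaxHzHint);       // the cached hint
 * @endcode
 * This packing is what lets the fast path make do with a single atomic read.
 */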
2768
2769
2770/**
2771 * Calculates a host timer frequency that would be suitable for the current
2772 * timer load.
2773 *
2774 * This will take the highest timer frequency, adjust for catch-up and warp
2775 * driver, and finally add a little fudge factor. The caller (VMM) will use
2776 * the result to adjust the per-cpu preemption timer.
2777 *
2778 * @returns The highest frequency. 0 if no important timers around.
2779 * @param pVM The cross context VM structure.
2780 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2781 */
2782VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2783{
2784 uint32_t uHz = tmGetFrequencyHint(pVM);
2785
2786 /* Catch up, we have to be more aggressive than the % indicates at the
2787 beginning of the effort. */
2788 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2789 {
2790 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2791 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2792 {
2793 if (u32Pct <= 100)
2794 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2795 else if (u32Pct <= 200)
2796 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2797 else if (u32Pct <= 400)
2798 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2799 uHz *= u32Pct + 100;
2800 uHz /= 100;
2801 }
2802 }
2803
2804 /* Warp drive. */
2805 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2806 {
2807 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2808 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2809 {
2810 uHz *= u32Pct;
2811 uHz /= 100;
2812 }
2813 }
2814
2815 /* Fudge factor. */
2816 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2817 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2818 else
2819 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2820 uHz /= 100;
2821
2822 /* Make sure it isn't too high. */
2823 if (uHz > pVM->tm.s.cHostHzMax)
2824 uHz = pVM->tm.s.cHostHzMax;
2825
2826 return uHz;
2827}
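
/**
 * Worked example with made-up numbers: a 1000 Hz hint, 50% catch-up with a
 * hypothetical cPctHostHzFudgeFactorCatchUp100 of 300, no warp drive, and a
 * hypothetical timer-CPU fudge factor of 111%:
 * @code
 *      // catch-up: u32Pct = 50 * 300 / 100 = 150;  uHz = 1000 * (150 + 100) / 100 = 2500
 *      // fudge:    uHz    = 2500 * 111 / 100 = 2775
 *      // and finally clamped to pVM->tm.s.cHostHzMax if necessary
 * @endcode
 */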
2828
2829
2830/**
2831 * Whether the guest virtual clock is ticking.
2832 *
2833 * @returns true if ticking, false otherwise.
2834 * @param pVM The cross context VM structure.
2835 */
2836VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2837{
2838 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2839}
2840