VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@87814

Last change on this file since 87814 was 87814, checked in by vboxsync, 4 years ago

VMM/TM: Eliminated the enmClock member from TMTIMER, we'll be using TMTIMERQUEUE::enmClock instead since the queue is always handy. bugref:9943

/* $Id: TMAll.cpp 87814 2021-02-19 22:03:56Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#ifdef DEBUG_bird
# define DBGFTRACE_DISABLED /* annoying */
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/dbgftrace.h>
#ifdef IN_RING3
#endif
#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
#include "TMInternal.h"
#include <VBox/vmm/vmcc.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif

#include "TMInline.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef VBOX_STRICT
/** @def TMTIMER_GET_CRITSECT
 * Helper for safely resolving the critical section for a timer belonging to a
 * device instance.
 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
# ifdef IN_RING3
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) ((a_pTimer)->pCritSect)
# else
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) tmRZTimerGetCritSect(a_pVM, a_pTimer)
# endif
#endif

/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
    do { \
        if ((a_pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
                       (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
#endif

/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the critical
 * section critsect.  The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo).  It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner(pCritSect) \
                          || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif


#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
        PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        ASMSetFlags(fSavedFlags);
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    return (PPDMCRITSECT)MMHyperR3ToCC(pVM, pTimer->pCritSect);
}
#endif /* VBOX_STRICT && IN_RING0 */


/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
    pVCpu->tm.s.fExecuting         = true;
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uTsc        TSC value when exiting guest context.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /*
     * Calculate the elapsed tick count and convert it to nanoseconds.
     */
# ifdef IN_RING3
    uint64_t       cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta();
    uint64_t const uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
# else
    uint64_t       cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
    uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
# endif
    AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */

    uint64_t cNsExecutingDelta;
    if (uCpuHz < _4G)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
    else if (uCpuHz < 16 * _1G64)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
    else
    {
        Assert(uCpuHz < 64 * _1G64);
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
    }

    /*
     * Update the data.
     *
     * Note! We're not using strict memory ordering here to speed things up.
     *       The data is in a single cache line and this thread is the only
     *       one writing to that line, so I cannot quite imagine why we would
     *       need any strict ordering here.
     */
    uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fExecuting   = false;
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

    /*
     * Update stats.
     */
# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
# endif

    /* The timer triggers occasional updating of the other and total stats: */
    if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
    { /* likely */ }
    else
    {
        pVCpu->tm.s.fUpdateStats = false;

        uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
        uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
        int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
        if (cNsOtherNewDelta > 0)
            STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif

        pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
        pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
    }

#endif
}
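
/*
 * Editor's illustrative sketch (not part of the original file): the generation
 * counter published above by TMNotifyEndOfExecution (and by TMNotifyEndOfHalt
 * below) forms a seqlock-style protocol - odd while a write is in flight, even
 * when the fields are consistent.  A hypothetical reader on another thread,
 * assuming it may access pVCpu->tm.s directly, would retry like this:
 */
#if 0 /* sketch only, not built */
DECLINLINE(uint64_t) tmExampleReadNsExecuting(PVMCPUCC pVCpu)
{
    uint64_t cNsExecuting;
    uint32_t uGen;
    do
    {
        uGen = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen);            /* odd = writer active */
        ASMCompilerBarrier();
        cNsExecuting = pVCpu->tm.s.cNsExecuting;
        ASMCompilerBarrier();
    } while (   (uGen & 1)                                          /* retry while a write is in flight... */
             || uGen != ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen));  /* ...or if one happened meanwhile. */
    return cNsExecuting;
}
#endif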


/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
    pVCpu->tm.s.fHalting       = true;
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.nsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fHalting     = false;
    pVCpu->tm.s.fUpdateStats = false;
    pVCpu->tm.s.cNsHalted    = cNsHaltedNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
    if (cNsOtherNewDelta > 0)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif
    pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
    pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
#endif
}


/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
{
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
    if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}


/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    if (VM_IS_EMT(pVM)) /** @todo drop EMT requirement here. */
    {
        int rc = PDMCritSectTryEnter(&pQueue->TimerLock);
        if (RT_SUCCESS_NP(rc))
        {
            STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
            Log3(("tmSchedule: tmTimerQueueSchedule\n"));
            tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
#ifdef VBOX_STRICT
            tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
            STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
            PDMCritSectLeave(&pQueue->TimerLock);
            return;
        }
    }

    TMTIMERSTATE enmState = pTimer->enmState;
    if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
        tmScheduleNotify(pVM);
}


/**
 * Try to change the state to enmStateNew from enmStateOld.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}


/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pTimer->idxScheduleNext == UINT32_MAX);
    const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0];
    AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);

    uint32_t idxHead;
    do
    {
        idxHead = pQueue->idxSchedule;
        Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
        pTimer->idxScheduleNext = idxHead;
    } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
}
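
/*
 * Editor's illustrative sketch (not part of the original file): the push above
 * implements a lock-free LIFO.  The matching consumer, tmTimerQueueSchedule
 * further down, detaches the whole list in a single atomic exchange and then
 * walks it privately, which is why the CAS loop needs no ABA protection:
 */
#if 0 /* sketch only, not built */
uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX); /* steal the whole list */
while (idxNext != UINT32_MAX)
{
    PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
    idxNext = pTimer->idxScheduleNext;
    pTimer->idxScheduleNext = UINT32_MAX;
    /* ... process pTimer ... */
}
#endif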


/**
 * Try to change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pQueueCC        The current context queue (same as @a pQueue for
 *                          ring-3).
 * @param   pQueue          The shared queue data.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer,
                                    TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
        return true;
    }
    return false;
}


/**
 * Links a timer into the active list of a timer queue.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
                                               PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
    RT_NOREF(pVM);

    PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
    if (pCur)
    {
        for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
                tmTimerSetNext(pQueueCC, pTimer, pCur);
                tmTimerSetPrev(pQueueCC, pTimer, pPrev);
                if (pPrev)
                    tmTimerSetNext(pQueueCC, pPrev, pTimer);
                else
                {
                    tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
                }
                tmTimerSetPrev(pQueueCC, pCur, pTimer);
                return;
            }
            if (pCur->idxNext == UINT32_MAX)
            {
                tmTimerSetNext(pQueueCC, pCur, pTimer);
                tmTimerSetPrev(pQueueCC, pTimer, pCur);
                DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
                return;
            }
        }
    }
    else
    {
        tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
    }
}


/**
 * Schedules the given timer on the given queue.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
    RT_NOREF(pVM);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
                STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}


/**
 * Schedules the specified timer queue.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3) data of the queue to schedule.
 * @param   pQueue      The shared queue data of the queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    Assert(PDMCritSectIsOwner(&pQueue->TimerLock));

    /*
     * Dequeue the scheduling list and iterate it.
     */
    uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
    while (idxNext != UINT32_MAX)
    {
        AssertBreak(idxNext < pQueueCC->cTimersAlloc);

        /*
         * Unlink the head timer and take down the index of the next one.
         */
        PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
        idxNext = pTimer->idxScheduleNext;
        pTimer->idxScheduleNext = UINT32_MAX;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->szName));
        tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    }
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}


#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 */
void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
{
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[idxQueue];
        PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
        Assert(pQueue->enmClock == (TMCLOCK)idxQueue);

        int rc = PDMCritSectTryEnter(&pQueue->TimerLock);
        if (RT_SUCCESS(rc))
        {
            if (   pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
                || PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
            {
                /* Check the linking of the active lists. */
                PTMTIMER pPrev = NULL;
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
                    TMTIMERSTATE enmState = pCur->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_ACTIVE:
                            AssertMsg(   pCur->idxScheduleNext == UINT32_MAX
                                      || pCur->enmState != TMTIMERSTATE_ACTIVE,
                                      ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
                            break;
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            break;
                        default:
                            AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                            break;
                    }
                }

# ifdef IN_RING3
                /* Go thru all the timers and check that the active ones all are in the active lists. */
                uint32_t idxTimer = pQueue->cTimersAlloc;
                uint32_t cFree    = 0;
                while (idxTimer-- > 0)
                {
                    PTMTIMER const     pTimer   = &pQueue->paTimers[idxTimer];
                    TMTIMERSTATE const enmState = pTimer->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_FREE:
                            cFree++;
                            break;

                        case TMTIMERSTATE_ACTIVE:
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                        {
                            PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                            Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
                            while (pCurAct && pCurAct != pTimer)
                                pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
                            Assert(pCurAct == pTimer);
                            break;
                        }

                        case TMTIMERSTATE_PENDING_SCHEDULE:
                        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                        case TMTIMERSTATE_STOPPED:
                        case TMTIMERSTATE_EXPIRED_DELIVER:
                        {
                            Assert(pTimer->idxNext == UINT32_MAX);
                            Assert(pTimer->idxPrev == UINT32_MAX);
                            for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                                 pCurAct;
                                 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
                            {
                                Assert(pCurAct != pTimer);
                                Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
                                Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
                            }
                            break;
                        }

                        /* ignore */
                        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            break;

                        case TMTIMERSTATE_INVALID:
                            Assert(idxTimer == 0);
                            break;

                        /* shouldn't get here! */
                        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                        case TMTIMERSTATE_DESTROY:
                        default:
                            AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                            break;
                    }

                    /* Check the handle value. */
                    if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
                    {
                        Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
                        Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
                    }
                }
                Assert(cFree == pQueue->cTimersFree);
# endif /* IN_RING3 */

                if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
                    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
            }
            PDMCritSectLeave(&pQueue->TimerLock);
        }
    }
}
#endif /* !VBOX_STRICT */

#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta   = ASMMultU64ByU32DivByU32(u64Delta,   100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}
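
/*
 * Editor's worked example (hypothetical numbers, not part of the original
 * file): with u32VirtualWarpDrivePercentage = 200 the guest's virtual clock
 * runs at twice real time, so a virtual-clock delta must be halved to get the
 * GIP (host) wait: ASMMultU64ByU32DivByU32(2000000, 100, 200) = 1000000, i.e.
 * a 2ms virtual delta becomes a 1ms host delta - the reverse of the scaling
 * tmVirtualGetRaw applies when reading the warped clock.
 */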


/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pVCpuDst    The cross context virtual CPU structure of the dedicated
 *                      timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter); NOREF(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}

/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    PVMCPU          pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
    const uint64_t  u64Now   = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t  u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t   i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if we're in a catch-up: not only do
     * we have to adjust the 'now', but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (   !pVM->tm.s.fRunningQueues
                    && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (   !pVM->tm.s.fRunningQueues
            && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}
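
/*
 * Editor's worked example (hypothetical numbers, not part of the original
 * file) for the catch-up adjustments above: with u32Pct = 25 (running the
 * virtual sync clock at 125% to catch up) and 1000000 ns elapsed since the
 * last update, the lag shrinks by ASMMultU64ByU32DivByU32(1000000, 25, 100)
 * = 250000 ns; conversely a remaining virtual sync delta of 1250000 ns is
 * scaled back to host pace by ASMMultU64ByU32DivByU32(1250000, 100, 125)
 * = 1000000 ns before being returned to the caller.
 */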


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}
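
/*
 * Editor's illustrative sketch (assumption, not from this file): an inner EM
 * loop would typically use the boolean variant like this before acting on the
 * force-action flags:
 */
#if 0 /* sketch only, not built */
if (TMTimerPollBool(pVM, pVCpu))
{
    /* VMCPU_FF_TIMER has been raised; leave the inner loop so the
       dedicated timer EMT can run the queues (see TMR3TimerQueuesDo). */
}
#endif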


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

#endif /* VBOX_HIGH_RES_TIMERS_HACK */

/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 * @param   rcBusy      What to return in ring-0 and raw-mode context if the
 *                      lock is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section thru a ring-3 call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, rcBusy);
}


/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN_VOID(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    AssertReturnVoid(idxQueue == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}


/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if it's the owner, @c false if not.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock);
}
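
/*
 * Editor's illustrative usage sketch (assumption, not from this file): code
 * manipulating a virtual sync timer might bracket its operations like this,
 * where hTimer is a hypothetical handle from one of the create functions:
 */
#if 0 /* sketch only, not built */
int rcLock = TMTimerLock(pVM, hTimer, VINF_SUCCESS /* acquire thru ring-3 if busy */);
if (RT_SUCCESS(rcLock))
{
    Assert(TMTimerIsLockOwner(pVM, hTimer));
    /* ... query/arm/stop the timer while holding the virtual sync lock ... */
    TMTimerUnlock(pVM, hTimer);
}
#endif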


/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The new expire time.
 * @param   pQueue      Pointer to the shared timer queue data.
 * @param   idxQueue    The queue index.
 */
static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, uint32_t idxQueue)
{
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
    {
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue), pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    return VINF_SUCCESS;
}


/**
 * TMTimerSet for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 * @param   u64Expire   The expiration time.
 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
    PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
    TMTIMERSTATE const    enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   hTimer      Timer handle as returned by one of the create functions.
 * @param   u64Expire   New expire time.
 */
VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    STAM_COUNTER_INC(&pTimer->StatSetAbsolute);

    /* Treat virtual sync timers specially. */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pVM, pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

#if 1
    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        int rc = PDMCritSectTryEnter(&pQueue->TimerLock);
        if (RT_SUCCESS_NP(rc))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, idxQueue);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                PDMCritSectLeave(&pQueue->TimerLock);
                return VINF_SUCCESS;
            }
            PDMCritSectLeave(&pQueue->TimerLock);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(pTimer->idxPrev == UINT32_MAX);
                    Assert(pTimer->idxNext == UINT32_MAX);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
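
/*
 * Editor's illustrative sketch (assumption, not from this file): the "most
 * common case" noted above is re-arming from inside the timer callback, which
 * takes the optimized EXPIRED_DELIVER path.  A hypothetical periodic device
 * callback (myPeriodicTimerCb and the 1ms period are made up for
 * illustration) might look like this:
 */
#if 0 /* sketch only, not built */
static DECLCALLBACK(void) myPeriodicTimerCb(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
{
    PVMCC pVM = /* obtained from the device instance in the appropriate context */ NULL;
    /* Re-arm one millisecond (in the timer's clock) past the current time: */
    TMTimerSet(pVM, hTimer, TMTimerGet(pVM, hTimer) + TMTimerFromMilli(pVM, hTimer, 1));
}
#endif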


/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM         The cross context VM structure.
 * @param   enmClock    The clock to query.
 * @param   pu64Now     Optional pointer where to store the return time.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 * @param   pQueueCC        The context specific queue data (same as @a pQueue
 *                          for ring-3).
 * @param   pQueue          The shared queue data.
 */
static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
                                            PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
    tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    return VINF_SUCCESS;
}


/**
 * TMTimerSetRelative for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer to (re-)arm.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Calculate the expiration tick. */
    uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
    if (pu64Now)
        *pu64Now = u64Expire;
    u64Expire += cTicksToNext;

    /* Update the timer. */
    PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
    PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
    TMTIMERSTATE const    enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
            tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}
1599
1600
1601/**
1602 * Arm a timer with an expire time relative to the current time.
1603 *
1604 * @returns VBox status code.
1605 * @param pVM The cross context VM structure.
1606 * @param pTimer The timer to arm.
1607 * @param cTicksToNext Clock ticks until the next expiration.
1608 * @param pu64Now Where to return the current time stamp used.
1609 * Optional.
1610 * @param pQueueCC The context specific queue data (same as @a pQueue
1611 * for ring-3).
1612 * @param pQueue The shared queue data.
1614 */
1615static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1616 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1617{
1618 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1619
1620 /* Treat virtual sync timers specially. */
1621 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1622 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1623
1624 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1625 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1626
1627 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1628
1629#ifdef VBOX_WITH_STATISTICS
1630 /*
1631 * Gather optimization info.
1632 */
1633 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1634 TMTIMERSTATE enmOrgState = pTimer->enmState;
1635 switch (enmOrgState)
1636 {
1637 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1638 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1639 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1640 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1641 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1642 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1643 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1644 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1645 }
1646#endif
1647
1648 /*
1649 * Try to take the TM lock and optimize the common cases.
1650 *
1651 * With the TM lock we can safely make optimizations like immediate
1652 * scheduling and we can also be 100% sure that we're not racing the
1653 * running of the timer queues. As an additional restraint we require the
1654 * timer to have a critical section associated with it, so we can be 100% sure
1655 * there aren't concurrent operations on the timer. (The latter is no longer
1656 * strictly necessary, as concurrent operations aren't supported for any timers.)
1657 *
1658 * Note! Lock ordering doesn't apply when we only _try_ to
1659 * get the innermost locks.
1660 */
1661 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(&pQueue->TimerLock));
1662#if 1
1663 if ( fOwnTMLock
1664 && pTimer->pCritSect)
1665 {
1666 TMTIMERSTATE enmState = pTimer->enmState;
1667 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1668 || enmState == TMTIMERSTATE_STOPPED)
1669 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1670 {
1671 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1672 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1673 PDMCritSectLeave(&pQueue->TimerLock);
1674 return VINF_SUCCESS;
1675 }
1676
1677 /* Optimize other states when it becomes necessary. */
1678 }
1679#endif
1680
1681 /*
1682 * Unoptimized path.
1683 */
1684 int rc;
1685 for (int cRetries = 1000; ; cRetries--)
1686 {
1687 /*
1688 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1689 */
1690 TMTIMERSTATE enmState = pTimer->enmState;
1691 switch (enmState)
1692 {
1693 case TMTIMERSTATE_STOPPED:
1694 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1695 {
1696 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1697 * Figure a safe way of activating this timer while the queue is
1698 * being run.
1699 * (99.9% sure that the assertion is caused by DevAPIC.cpp
1700 * re-starting the timer in response to an initial_count write.) */
1701 }
1702 RT_FALL_THRU();
1703 case TMTIMERSTATE_EXPIRED_DELIVER:
1704 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1705 {
1706 Assert(pTimer->idxPrev == UINT32_MAX);
1707 Assert(pTimer->idxNext == UINT32_MAX);
1708 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1709 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1710 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1711 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1712 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1713 rc = VINF_SUCCESS;
1714 break;
1715 }
1716 rc = VERR_TRY_AGAIN;
1717 break;
1718
1719 case TMTIMERSTATE_PENDING_SCHEDULE:
1720 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1721 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1722 {
1723 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1724 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1725 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1726 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1727 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1728 rc = VINF_SUCCESS;
1729 break;
1730 }
1731 rc = VERR_TRY_AGAIN;
1732 break;
1733
1734
1735 case TMTIMERSTATE_ACTIVE:
1736 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1737 {
1738 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1739 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1740 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1741 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1742 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1743 rc = VINF_SUCCESS;
1744 break;
1745 }
1746 rc = VERR_TRY_AGAIN;
1747 break;
1748
1749 case TMTIMERSTATE_PENDING_RESCHEDULE:
1750 case TMTIMERSTATE_PENDING_STOP:
1751 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1752 {
1753 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1754 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1755 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1756 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1757 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1758 rc = VINF_SUCCESS;
1759 break;
1760 }
1761 rc = VERR_TRY_AGAIN;
1762 break;
1763
1764
1765 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1766 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1767 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1768#ifdef IN_RING3
1769 if (!RTThreadYield())
1770 RTThreadSleep(1);
1771#else
1772/** @todo call host context and yield after a couple of iterations */
1773#endif
1774 rc = VERR_TRY_AGAIN;
1775 break;
1776
1777 /*
1778 * Invalid states.
1779 */
1780 case TMTIMERSTATE_DESTROY:
1781 case TMTIMERSTATE_FREE:
1782 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1783 rc = VERR_TM_INVALID_STATE;
1784 break;
1785
1786 default:
1787 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1788 rc = VERR_TM_UNKNOWN_STATE;
1789 break;
1790 }
1791
1792 /* switch + loop is tedious to break out of. */
1793 if (rc == VINF_SUCCESS)
1794 break;
1795
1796 if (rc != VERR_TRY_AGAIN)
1797 {
1798 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1799 break;
1800 }
1801 if (cRetries <= 0)
1802 {
1803 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1804 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1805 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1806 break;
1807 }
1808
1809 /*
1810 * Retry to gain locks.
1811 */
1812 if (!fOwnTMLock)
1813 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(&pQueue->TimerLock));
1814
1815 } /* for (;;) */
1816
1817 /*
1818 * Clean up and return.
1819 */
1820 if (fOwnTMLock)
1821 PDMCritSectLeave(&pQueue->TimerLock);
1822
1823 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1824 return rc;
1825}
1826
1827
1828/**
1829 * Arm a timer with an expire time relative to the current time.
1830 *
1831 * @returns VBox status code.
1832 * @param pVM The cross context VM structure.
1833 * @param hTimer Timer handle as returned by one of the create functions.
1834 * @param cTicksToNext Clock ticks until the next expiration.
1835 * @param pu64Now Where to return the current time stamp used.
1836 * Optional.
1837 */
1838VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1839{
1840 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1841 return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1842}
1843
1844
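/*
 * Example: a minimal usage sketch (assuming a valid pVM and an hTimer whose
 * critical section the caller owns, as TMTIMER_ASSERT_CRITSECT requires)
 * that re-arms a TMCLOCK_VIRTUAL timer 10 ms ahead; at TMCLOCK_FREQ_VIRTUAL
 * that is 10'000'000 ticks:
 *
 *      uint64_t u64Now = 0;
 *      int rc = TMTimerSetRelative(pVM, hTimer, UINT64_C(10000000), &u64Now);
 *      AssertRC(rc);
 */

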
1845/**
1846 * Drops a hint about the frequency of the timer.
1847 *
1848 * This is used by TM and the VMM to calculate how often guest execution needs
1849 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1850 *
1851 * @returns VBox status code.
1852 * @param pVM The cross context VM structure.
1853 * @param hTimer Timer handle as returned by one of the create functions.
1854 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1855 *
1856 * @remarks We're using an integer hertz value here since anything above 1 Hz
1857 *          is not going to be any trouble to satisfy scheduling-wise.  The
1858 *          range where the hint makes sense is >= 100 Hz.
1859 */
1860VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1861{
1862 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1863 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1864
1865 uint32_t const uHzOldHint = pTimer->uHzHint;
1866 pTimer->uHzHint = uHzHint;
1867
1868 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1869 if ( uHzHint > uMaxHzHint
1870 || uHzOldHint >= uMaxHzHint)
1871 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1872
1873 return VINF_SUCCESS;
1874}
1875
1876
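/*
 * Example: a minimal sketch (hypothetical 1 kHz periodic device timer on a
 * virtual clock queue; valid pVM and hTimer assumed) pairing the hint with
 * the (re-)arming of the first 1 ms period.  TMTimerStop() clears the hint
 * again automatically:
 *
 *      TMTimerSetFrequencyHint(pVM, hTimer, 1000);
 *      TMTimerSetRelative(pVM, hTimer, UINT64_C(1000000), NULL);
 */

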
1877/**
1878 * TMTimerStop for the virtual sync timer queue.
1879 *
1880 * This employs a greatly simplified state machine by always acquiring the
1881 * queue lock and bypassing the scheduling list.
1882 *
1883 * @returns VBox status code.
1884 * @param pVM The cross context VM structure.
1885 * @param pTimer The timer handle.
1886 */
1887static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1888{
1889 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1890 VM_ASSERT_EMT(pVM);
1891 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1892 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1893 AssertRCReturn(rc, rc);
1894
1895 /* Reset the HZ hint. */
1896 uint32_t uOldHzHint = pTimer->uHzHint;
1897 if (uOldHzHint)
1898 {
1899 if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
1900 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
1901 pTimer->uHzHint = 0;
1902 }
1903
1904 /* Update the timer state. */
1905 TMTIMERSTATE const enmState = pTimer->enmState;
1906 switch (enmState)
1907 {
1908 case TMTIMERSTATE_ACTIVE:
1909 {
1910 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1911 tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
1912 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1913 rc = VINF_SUCCESS;
1914 break;
1915 }
1916
1917 case TMTIMERSTATE_EXPIRED_DELIVER:
1918 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1919 rc = VINF_SUCCESS;
1920 break;
1921
1922 case TMTIMERSTATE_STOPPED:
1923 rc = VINF_SUCCESS;
1924 break;
1925
1926 case TMTIMERSTATE_PENDING_RESCHEDULE:
1927 case TMTIMERSTATE_PENDING_STOP:
1928 case TMTIMERSTATE_PENDING_SCHEDULE:
1929 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1930 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1931 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1932 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1933 case TMTIMERSTATE_DESTROY:
1934 case TMTIMERSTATE_FREE:
1935 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1936 rc = VERR_TM_INVALID_STATE;
1937 break;
1938
1939 default:
1940 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1941 rc = VERR_TM_UNKNOWN_STATE;
1942 break;
1943 }
1944
1945 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1946 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1947 return rc;
1948}
1949
1950
1951/**
1952 * Stop the timer.
1953 * Use TMTimerSet() or TMTimerSetRelative() to re-arm ("un-stop") it.
1954 *
1955 * @returns VBox status code.
1956 * @param pVM The cross context VM structure.
1957 * @param hTimer Timer handle as returned by one of the create functions.
1958 */
1959VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
1960{
1961 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1962 STAM_COUNTER_INC(&pTimer->StatStop);
1963
1964 /* Treat virtual sync timers specially. */
1965 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1966 return tmTimerVirtualSyncStop(pVM, pTimer);
1967
1968 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1969 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1970
1971 /*
1972 * Reset the HZ hint.
1973 */
1974 uint32_t const uOldHzHint = pTimer->uHzHint;
1975 if (uOldHzHint)
1976 {
1977 if (uOldHzHint >= pQueue->uMaxHzHint)
1978 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1979 pTimer->uHzHint = 0;
1980 }
1981
1982 /** @todo see if this function needs optimizing. */
1983 int cRetries = 1000;
1984 do
1985 {
1986 /*
1987 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1988 */
1989 TMTIMERSTATE enmState = pTimer->enmState;
1990 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1991 pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
1992 switch (enmState)
1993 {
1994 case TMTIMERSTATE_EXPIRED_DELIVER:
1995 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1996 return VERR_INVALID_PARAMETER;
1997
1998 case TMTIMERSTATE_STOPPED:
1999 case TMTIMERSTATE_PENDING_STOP:
2000 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2001 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2002 return VINF_SUCCESS;
2003
2004 case TMTIMERSTATE_PENDING_SCHEDULE:
2005 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2006 {
2007 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2008 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2009 return VINF_SUCCESS;
2010 }
2011 break;
2012
2013 case TMTIMERSTATE_PENDING_RESCHEDULE:
2014 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2015 {
2016 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2017 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2018 return VINF_SUCCESS;
2019 }
2020 break;
2021
2022 case TMTIMERSTATE_ACTIVE:
2023 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2024 {
2025 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2026 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2027 return VINF_SUCCESS;
2028 }
2029 break;
2030
2031 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2032 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2033 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2034#ifdef IN_RING3
2035 if (!RTThreadYield())
2036 RTThreadSleep(1);
2037#else
2038/** @todo call host and yield cpu after a while. */
2039#endif
2040 break;
2041
2042 /*
2043 * Invalid states.
2044 */
2045 case TMTIMERSTATE_DESTROY:
2046 case TMTIMERSTATE_FREE:
2047 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2048 return VERR_TM_INVALID_STATE;
2049 default:
2050 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2051 return VERR_TM_UNKNOWN_STATE;
2052 }
2053 } while (cRetries-- > 0);
2054
2055 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2056 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2057 return VERR_TM_TIMER_UNSTABLE_STATE;
2058}
2059
2060
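/*
 * Example: a minimal sketch (valid pVM and hTimer assumed) of disarming a
 * timer when a hypothetical device gets disabled; stopping an already
 * stopped timer simply returns VINF_SUCCESS, so no TMTimerIsActive() check
 * is needed first:
 *
 *      int rc = TMTimerStop(pVM, hTimer);
 *      AssertRC(rc);
 */

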
2061/**
2062 * Get the current clock time.
2063 * Handy for calculating the new expire time.
2064 *
2065 * @returns Current clock time.
2066 * @param pVM The cross context VM structure.
2067 * @param hTimer Timer handle as returned by one of the create functions.
2068 */
2069VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2070{
2071 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2072 STAM_COUNTER_INC(&pTimer->StatGet);
2073
2074 uint64_t u64;
2075 switch (pQueue->enmClock)
2076 {
2077 case TMCLOCK_VIRTUAL:
2078 u64 = TMVirtualGet(pVM);
2079 break;
2080 case TMCLOCK_VIRTUAL_SYNC:
2081 u64 = TMVirtualSyncGet(pVM);
2082 break;
2083 case TMCLOCK_REAL:
2084 u64 = TMRealGet(pVM);
2085 break;
2086 default:
2087 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2088 return UINT64_MAX;
2089 }
2090 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2091 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2092 return u64;
2093}
2094
2095
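/*
 * Example: a minimal sketch (valid pVM and hTimer assumed) combining
 * TMTimerGet() with the TMTimerFromMilli() conversion further down to
 * compute an absolute deadline for TMTimerSet(), the absolute-time sibling
 * of TMTimerSetRelative():
 *
 *      uint64_t const u64Deadline = TMTimerGet(pVM, hTimer)
 *                                 + TMTimerFromMilli(pVM, hTimer, 20);
 *      int rc = TMTimerSet(pVM, hTimer, u64Deadline);
 *      AssertRC(rc);
 */

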
2096/**
2097 * Get the frequency of the timer clock.
2098 *
2099 * @returns Clock frequency (as Hz of course).
2100 * @param pVM The cross context VM structure.
2101 * @param hTimer Timer handle as returned by one of the create functions.
2102 */
2103VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2104{
2105 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2106 switch (pQueue->enmClock)
2107 {
2108 case TMCLOCK_VIRTUAL:
2109 case TMCLOCK_VIRTUAL_SYNC:
2110 return TMCLOCK_FREQ_VIRTUAL;
2111
2112 case TMCLOCK_REAL:
2113 return TMCLOCK_FREQ_REAL;
2114
2115 default:
2116 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2117 return 0;
2118 }
2119}
2120
2121
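/*
 * Example: a minimal sketch (valid pVM and hTimer assumed) of staying clock
 * agnostic by deriving the period of a hypothetical 100 Hz tick from the
 * queue frequency instead of hardcoding it:
 *
 *      uint64_t const cTicksPerPeriod = TMTimerGetFreq(pVM, hTimer) / 100;
 *      TMTimerSetRelative(pVM, hTimer, cTicksPerPeriod, NULL);
 */

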
2122/**
2123 * Get the expire time of the timer.
2124 * Only valid for active timers.
2125 *
2126 * @returns Expire time of the timer.
2127 * @param pVM The cross context VM structure.
2128 * @param hTimer Timer handle as returned by one of the create functions.
2129 */
2130VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
2131{
2132 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2133 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2134 int cRetries = 1000;
2135 do
2136 {
2137 TMTIMERSTATE enmState = pTimer->enmState;
2138 switch (enmState)
2139 {
2140 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2141 case TMTIMERSTATE_EXPIRED_DELIVER:
2142 case TMTIMERSTATE_STOPPED:
2143 case TMTIMERSTATE_PENDING_STOP:
2144 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2145 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2146 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2147 return UINT64_MAX;
2148
2149 case TMTIMERSTATE_ACTIVE:
2150 case TMTIMERSTATE_PENDING_RESCHEDULE:
2151 case TMTIMERSTATE_PENDING_SCHEDULE:
2152 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2153 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2154 return pTimer->u64Expire;
2155
2156 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2157 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2158#ifdef IN_RING3
2159 if (!RTThreadYield())
2160 RTThreadSleep(1);
2161#endif
2162 break;
2163
2164 /*
2165 * Invalid states.
2166 */
2167 case TMTIMERSTATE_DESTROY:
2168 case TMTIMERSTATE_FREE:
2169 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2170 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2171 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2172 return UINT64_MAX;
2173 default:
2174 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2175 return UINT64_MAX;
2176 }
2177 } while (cRetries-- > 0);
2178
2179 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2180 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2181 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2182 return UINT64_MAX;
2183}
2184
2185
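/*
 * Example: a minimal sketch (valid pVM and hTimer assumed) of computing the
 * ticks remaining until an armed timer fires; UINT64_MAX signals a stopped
 * or transitional timer:
 *
 *      uint64_t const u64Expire = TMTimerGetExpire(pVM, hTimer);
 *      if (u64Expire != UINT64_MAX)
 *      {
 *          uint64_t const u64Now     = TMTimerGet(pVM, hTimer);
 *          uint64_t const cTicksLeft = u64Expire > u64Now ? u64Expire - u64Now : 0;
 *          Log(("%'RU64 ticks left\n", cTicksLeft));
 *      }
 */

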
2186/**
2187 * Checks if a timer is active or not.
2188 *
2189 * @returns True if active.
2190 * @returns False if not active.
2191 * @param pVM The cross context VM structure.
2192 * @param hTimer Timer handle as returned by one of the create functions.
2193 */
2194VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2195{
2196 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2197 TMTIMERSTATE enmState = pTimer->enmState;
2198 switch (enmState)
2199 {
2200 case TMTIMERSTATE_STOPPED:
2201 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2202 case TMTIMERSTATE_EXPIRED_DELIVER:
2203 case TMTIMERSTATE_PENDING_STOP:
2204 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2205 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2206 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2207 return false;
2208
2209 case TMTIMERSTATE_ACTIVE:
2210 case TMTIMERSTATE_PENDING_RESCHEDULE:
2211 case TMTIMERSTATE_PENDING_SCHEDULE:
2212 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2213 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2214 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2215 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2216 return true;
2217
2218 /*
2219 * Invalid states.
2220 */
2221 case TMTIMERSTATE_DESTROY:
2222 case TMTIMERSTATE_FREE:
2223 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2224 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2225 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2226 return false;
2227 default:
2228 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2229 return false;
2230 }
2231}
2232
2233
2234/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2235
2236
2237/**
2238 * Arm a timer with a (new) expire time relative to current time.
2239 *
2240 * @returns VBox status code.
2241 * @param pVM The cross context VM structure.
2242 * @param hTimer Timer handle as returned by one of the create functions.
2243 * @param cMilliesToNext Number of milliseconds to the next tick.
2244 */
2245VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2246{
2247 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2248 switch (pQueue->enmClock)
2249 {
2250 case TMCLOCK_VIRTUAL:
2251 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2252 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2253
2254 case TMCLOCK_VIRTUAL_SYNC:
2255 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2256 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2257
2258 case TMCLOCK_REAL:
2259 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2260 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL, pQueueCC, pQueue);
2261
2262 default:
2263 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2264 return VERR_TM_TIMER_BAD_CLOCK;
2265 }
2266}
2267
2268
2269/**
2270 * Arm a timer with a (new) expire time relative to current time.
2271 *
2272 * @returns VBox status code.
2273 * @param pVM The cross context VM structure.
2274 * @param hTimer Timer handle as returned by one of the create functions.
2275 * @param cMicrosToNext Number of microseconds to the next tick.
2276 */
2277VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2278{
2279 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2280 switch (pQueue->enmClock)
2281 {
2282 case TMCLOCK_VIRTUAL:
2283 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2284 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2285
2286 case TMCLOCK_VIRTUAL_SYNC:
2287 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2288 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2289
2290 case TMCLOCK_REAL:
2291 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2292 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL, pQueueCC, pQueue);
2293
2294 default:
2295 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2296 return VERR_TM_TIMER_BAD_CLOCK;
2297 }
2298}
2299
2300
2301/**
2302 * Arm a timer with a (new) expire time relative to current time.
2303 *
2304 * @returns VBox status code.
2305 * @param pVM The cross context VM structure.
2306 * @param hTimer Timer handle as returned by one of the create functions.
2307 * @param cNanosToNext Number of nanoseconds to the next tick.
2308 */
2309VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2310{
2311 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2312 switch (pQueue->enmClock)
2313 {
2314 case TMCLOCK_VIRTUAL:
2315 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2316 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2317
2318 case TMCLOCK_VIRTUAL_SYNC:
2319 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2320 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2321
2322 case TMCLOCK_REAL:
2323 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2324 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL, pQueueCC, pQueue);
2325
2326 default:
2327 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2328 return VERR_TM_TIMER_BAD_CLOCK;
2329 }
2330}
2331
2332
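/*
 * Example: the three convenience setters above only differ in how they
 * scale the input to the queue's clock, so for a virtual clock timer
 * (valid pVM and hTimer assumed) these all arm the same 5 ms deadline:
 *
 *      TMTimerSetMillies(pVM, hTimer, 5);
 *      TMTimerSetMicro(pVM, hTimer, 5000);
 *      TMTimerSetNano(pVM, hTimer, UINT64_C(5000000));
 *
 * On a TMCLOCK_REAL timer the micro/nano variants divide and therefore
 * truncate to whole milliseconds.
 */

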
2333/**
2334 * Get the current clock time as nanoseconds.
2335 *
2336 * @returns The timer clock as nanoseconds.
2337 * @param pVM The cross context VM structure.
2338 * @param hTimer Timer handle as returned by one of the create functions.
2339 */
2340VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2341{
2342 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2343}
2344
2345
2346/**
2347 * Get the current clock time as microseconds.
2348 *
2349 * @returns The timer clock as microseconds.
2350 * @param pVM The cross context VM structure.
2351 * @param hTimer Timer handle as returned by one of the create functions.
2352 */
2353VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2354{
2355 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2356}
2357
2358
2359/**
2360 * Get the current clock time as milliseconds.
2361 *
2362 * @returns The timer clock as milliseconds.
2363 * @param pVM The cross context VM structure.
2364 * @param hTimer Timer handle as returned by one of the create functions.
2365 */
2366VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2367{
2368 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2369}
2370
2371
2372/**
2373 * Converts the specified timer clock time to nanoseconds.
2374 *
2375 * @returns nanoseconds.
2376 * @param pVM The cross context VM structure.
2377 * @param hTimer Timer handle as returned by one of the create functions.
2378 * @param cTicks The clock ticks.
2379 * @remark There could be rounding errors here. We just do a simple integer divide
2380 * without any adjustments.
2381 */
2382VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2383{
2384 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2385 switch (pQueue->enmClock)
2386 {
2387 case TMCLOCK_VIRTUAL:
2388 case TMCLOCK_VIRTUAL_SYNC:
2389 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2390 return cTicks;
2391
2392 case TMCLOCK_REAL:
2393 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2394 return cTicks * 1000000;
2395
2396 default:
2397 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2398 return 0;
2399 }
2400}
2401
2402
2403/**
2404 * Converts the specified timer clock time to microseconds.
2405 *
2406 * @returns microseconds.
2407 * @param pVM The cross context VM structure.
2408 * @param hTimer Timer handle as returned by one of the create functions.
2409 * @param cTicks The clock ticks.
2410 * @remark There could be rounding errors here. We just do a simple integer divide
2411 * without any adjustments.
2412 */
2413VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2414{
2415 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2416 switch (pQueue->enmClock)
2417 {
2418 case TMCLOCK_VIRTUAL:
2419 case TMCLOCK_VIRTUAL_SYNC:
2420 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2421 return cTicks / 1000;
2422
2423 case TMCLOCK_REAL:
2424 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2425 return cTicks * 1000;
2426
2427 default:
2428 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2429 return 0;
2430 }
2431}
2432
2433
2434/**
2435 * Converts the specified timer clock time to milliseconds.
2436 *
2437 * @returns milliseconds.
2438 * @param pVM The cross context VM structure.
2439 * @param hTimer Timer handle as returned by one of the create functions.
2440 * @param cTicks The clock ticks.
2441 * @remark There could be rounding errors here. We just do a simple integer divide
2442 * without any adjustments.
2443 */
2444VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2445{
2446 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2447 switch (pQueue->enmClock)
2448 {
2449 case TMCLOCK_VIRTUAL:
2450 case TMCLOCK_VIRTUAL_SYNC:
2451 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2452 return cTicks / 1000000;
2453
2454 case TMCLOCK_REAL:
2455 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2456 return cTicks;
2457
2458 default:
2459 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2460 return 0;
2461 }
2462}
2463
2464
2465/**
2466 * Converts the specified nanosecond timestamp to timer clock ticks.
2467 *
2468 * @returns timer clock ticks.
2469 * @param pVM The cross context VM structure.
2470 * @param hTimer Timer handle as returned by one of the create functions.
2471 * @param cNanoSecs The nanosecond value to convert.
2472 * @remark There could be rounding and overflow errors here.
2473 */
2474VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2475{
2476 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2477 switch (pQueue->enmClock)
2478 {
2479 case TMCLOCK_VIRTUAL:
2480 case TMCLOCK_VIRTUAL_SYNC:
2481 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2482 return cNanoSecs;
2483
2484 case TMCLOCK_REAL:
2485 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2486 return cNanoSecs / 1000000;
2487
2488 default:
2489 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2490 return 0;
2491 }
2492}
2493
2494
2495/**
2496 * Converts the specified microsecond timestamp to timer clock ticks.
2497 *
2498 * @returns timer clock ticks.
2499 * @param pVM The cross context VM structure.
2500 * @param hTimer Timer handle as returned by one of the create functions.
2501 * @param cMicroSecs The microsecond value to convert.
2502 * @remark There could be rounding and overflow errors here.
2503 */
2504VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2505{
2506 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2507 switch (pQueue->enmClock)
2508 {
2509 case TMCLOCK_VIRTUAL:
2510 case TMCLOCK_VIRTUAL_SYNC:
2511 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2512 return cMicroSecs * 1000;
2513
2514 case TMCLOCK_REAL:
2515 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2516 return cMicroSecs / 1000;
2517
2518 default:
2519 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2520 return 0;
2521 }
2522}
2523
2524
2525/**
2526 * Converts the specified millisecond timestamp to timer clock ticks.
2527 *
2528 * @returns timer clock ticks.
2529 * @param pVM The cross context VM structure.
2530 * @param hTimer Timer handle as returned by one of the create functions.
2531 * @param cMilliSecs The millisecond value to convert.
2532 * @remark There could be rounding and overflow errors here.
2533 */
2534VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2535{
2536 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2537 switch (pQueue->enmClock)
2538 {
2539 case TMCLOCK_VIRTUAL:
2540 case TMCLOCK_VIRTUAL_SYNC:
2541 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2542 return cMilliSecs * 1000000;
2543
2544 case TMCLOCK_REAL:
2545 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2546 return cMilliSecs;
2547
2548 default:
2549 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2550 return 0;
2551 }
2552}
2553
2554
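/*
 * Example: the To/From conversions above are plain integer scalings, so
 * amounts below the clock resolution are truncated.  For a TMCLOCK_REAL
 * timer (1000 Hz; valid pVM and hTimer assumed):
 *
 *      TMTimerFromNano(pVM, hTimer, 1999999);    ->  1 tick (rounded down)
 *      TMTimerToNano(pVM, hTimer, 1);            ->  1000000 ns
 *
 * i.e. a round trip returns 1000000, not the original 1999999.
 */

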
2555/**
2556 * Convert state to string.
2557 *
2558 * @returns Read-only state name.
2559 * @param enmState State.
2560 */
2561const char *tmTimerState(TMTIMERSTATE enmState)
2562{
2563 switch (enmState)
2564 {
2565#define CASE(num, state) \
2566 case TMTIMERSTATE_##state: \
2567 AssertCompile(TMTIMERSTATE_##state == (num)); \
2568 return #num "-" #state
2569 CASE( 0,INVALID);
2570 CASE( 1,STOPPED);
2571 CASE( 2,ACTIVE);
2572 CASE( 3,EXPIRED_GET_UNLINK);
2573 CASE( 4,EXPIRED_DELIVER);
2574 CASE( 5,PENDING_STOP);
2575 CASE( 6,PENDING_STOP_SCHEDULE);
2576 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2577 CASE( 8,PENDING_SCHEDULE);
2578 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2579 CASE(10,PENDING_RESCHEDULE);
2580 CASE(11,DESTROY);
2581 CASE(12,FREE);
2582 default:
2583 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2584 return "Invalid state!";
2585#undef CASE
2586 }
2587}
2588
2589
2590/**
2591 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
2592 *
2593 * @returns The highest frequency. 0 if no timers care.
2594 * @param pVM The cross context VM structure.
2595 * @param uOldMaxHzHint The old global hint.
2596 */
2597DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
2598{
2599 /* Set two bits, though not entirely sure it's needed (too exhausted to think
2600 clearly), but it should force other callers thru the slow path and help us
2601 detect changes while we're recalculating. */
2602 AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);
2603
2604 /*
2605 * The "right" highest frequency value isn't so important that we'll block
2606 * waiting on the timer semaphores.
2607 */
2608 uint32_t uMaxHzHint = 0;
2609 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
2610 {
2611 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
2612
2613 /* Get the max Hz hint for the queue. */
2614 uint32_t uMaxHzHintQueue;
2615 if ( !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2616 || RT_FAILURE_NP(PDMCritSectTryEnter(&pQueue->TimerLock)))
2617 uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
2618 else
2619 {
2620 /* Is it still necessary to do updating? */
2621 if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2622 {
2623 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */
2624
2625 PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
2626 uMaxHzHintQueue = 0;
2627 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
2628 pCur;
2629 pCur = tmTimerGetNext(pQueueCC, pCur))
2630 {
2631 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2632 if (uHzHint > uMaxHzHintQueue)
2633 {
2634 TMTIMERSTATE enmState = pCur->enmState;
2635 switch (enmState)
2636 {
2637 case TMTIMERSTATE_ACTIVE:
2638 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2639 case TMTIMERSTATE_EXPIRED_DELIVER:
2640 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2641 case TMTIMERSTATE_PENDING_SCHEDULE:
2642 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2643 case TMTIMERSTATE_PENDING_RESCHEDULE:
2644 uMaxHzHintQueue = uHzHint;
2645 break;
2646
2647 case TMTIMERSTATE_STOPPED:
2648 case TMTIMERSTATE_PENDING_STOP:
2649 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2650 case TMTIMERSTATE_DESTROY:
2651 case TMTIMERSTATE_FREE:
2652 case TMTIMERSTATE_INVALID:
2653 break;
2654 /* no default, want gcc warnings when adding more states. */
2655 }
2656 }
2657 }
2658
2659 /* Write the new Hz hint for the queue and clear the other update flag. */
2660 ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
2661 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
2662 }
2663 else
2664 uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);
2665
2666 PDMCritSectLeave(&pQueue->TimerLock);
2667 }
2668
2669 /* Update the global max Hz hint. */
2670 if (uMaxHzHint < uMaxHzHintQueue)
2671 uMaxHzHint = uMaxHzHintQueue;
2672 }
2673
2674 /*
2675 * Update the frequency hint if no pending frequency changes and we didn't race anyone thru here.
2676 */
2677 uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
2678 if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2679 Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
2680 else
2681 for (uint32_t iTry = 1;; iTry++)
2682 {
2683 if (RT_LO_U32(u64Actual) != 0)
2684 Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
2685 else if (iTry >= 4)
2686 Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
2687 else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2688 Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
2689 else
2690 continue;
2691 break;
2692 }
2693 return uMaxHzHint;
2694}
2695
2696
2697/**
2698 * Gets the highest frequency hint for all the important timers.
2699 *
2700 * @returns The highest frequency. 0 if no timers care.
2701 * @param pVM The cross context VM structure.
2702 */
2703DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2704{
2705 /*
2706 * Query the value, recalculate it if necessary.
2707 */
2708 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2709 if (RT_HI_U32(u64Combined) == 0)
2710 return RT_LO_U32(u64Combined); /* hopefully somewhat likely */
2711 return tmGetFrequencyHintSlow(pVM, RT_LO_U32(u64Combined));
2712}
2713
2714
2715/**
2716 * Calculates a host timer frequency that would be suitable for the current
2717 * timer load.
2718 *
2719 * This will take the highest timer frequency, adjust for catch-up and warp
2720 * driver, and finally add a little fudge factor. The caller (VMM) will use
2721 * the result to adjust the per-cpu preemption timer.
2722 *
2723 * @returns The highest frequency. 0 if no important timers around.
2724 * @param pVM The cross context VM structure.
2725 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2726 */
2727VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2728{
2729 uint32_t uHz = tmGetFrequencyHint(pVM);
2730
2731 /* Catch-up: we have to be more aggressive than the percentage indicates
2732 at the beginning of the effort. */
2733 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2734 {
2735 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2736 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2737 {
2738 if (u32Pct <= 100)
2739 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2740 else if (u32Pct <= 200)
2741 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2742 else if (u32Pct <= 400)
2743 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2744 uHz *= u32Pct + 100;
2745 uHz /= 100;
2746 }
2747 }
2748
2749 /* Warp drive. */
2750 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2751 {
2752 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2753 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2754 {
2755 uHz *= u32Pct;
2756 uHz /= 100;
2757 }
2758 }
2759
2760 /* Fudge factor. */
2761 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2762 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2763 else
2764 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2765 uHz /= 100;
2766
2767 /* Make sure it isn't too high. */
2768 if (uHz > pVM->tm.s.cHostHzMax)
2769 uHz = pVM->tm.s.cHostHzMax;
2770
2771 return uHz;
2772}
2773
2774
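/*
 * Worked example (the fudge factors below are illustrative assumptions, not
 * the configured defaults): with a 100 Hz hint, a 50% catch-up and a 300%
 * catch-up-100 fudge factor, the catch-up step yields
 * u32Pct = 50 * 300 / 100 = 150 and uHz = 100 * (150 + 100) / 100 = 250 Hz;
 * a 111% timer-CPU fudge factor then gives 250 * 111 / 100 = 277 Hz, which
 * is finally clamped to cHostHzMax.
 */

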
2775/**
2776 * Whether the guest virtual clock is ticking.
2777 *
2778 * @returns true if ticking, false otherwise.
2779 * @param pVM The cross context VM structure.
2780 */
2781VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2782{
2783 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2784}
2785