VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 87921

Last change on this file since 87921 was 87822, checked in by vboxsync, 4 years ago

VMM/TM: Let non-EMTs schedule timers too. Take care when translating idTimerCpu to a pVCpu. bugref:9943

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 104.9 KB
Line 
1/* $Id: TMAll.cpp 87822 2021-02-20 10:34:06Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#ifdef DEBUG_bird
24# define DBGFTRACE_DISABLED /* annoying */
25#endif
26#include <VBox/vmm/tm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/dbgftrace.h>
29#ifdef IN_RING3
30#endif
31#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
32#include "TMInternal.h"
33#include <VBox/vmm/vmcc.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-math.h>
43#ifdef IN_RING3
44# include <iprt/thread.h>
45#endif
46
47#include "TMInline.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
#ifdef VBOX_STRICT
/** @def TMTIMER_GET_CRITSECT
 * Helper for safely resolving the critical section for a timer belonging to a
 * device instance.
 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
# ifdef IN_RING3
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) ((a_pTimer)->pCritSect)
# else
#  define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) tmRZTimerGetCritSect(a_pVM, a_pTimer)
# endif
#endif

/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer.
 * @note Ownership is not asserted while the VM is creating or resetting,
 *       since timers may legitimately be manipulated without the device
 *       lock in those states. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
    do { \
        if ((a_pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
                       (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
#endif

/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the critical
 * section critsect. The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner(pCritSect) \
                          || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif
121
122
#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT.
 *
 * Translates the ring-3 critical section pointer stored in the timer into a
 * pointer usable in the current (ring-0) context.
 *
 * @returns Ring-0 usable critical section pointer.
 * @param   pVM     The cross context VM structure.
 * @param   pTimer  The timer whose critsect to resolve.
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        /* Raise EFL.AC around the ring-3 pointer dereference — presumably to
           sidestep SMAP faults; see the todo above about fixing this. */
        RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
        PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        ASMSetFlags(fSavedFlags);
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
        /* Fast path: the timer uses the device instance's own critsect. */
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;
        /* If the critsect lives inside the shared instance data, translate it
           by applying the same offset to the ring-0 instance data mapping. */
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    /* Fall back to the generic hyper-heap ring-3 to current-context translation. */
    return (PPDMCRITSECT)MMHyperR3ToCC(pVM, pTimer->pCritSect);
}
#endif /* VBOX_STRICT && IN_RING0 */
145
146
147/**
148 * Notification that execution is about to start.
149 *
150 * This call must always be paired with a TMNotifyEndOfExecution call.
151 *
152 * The function may, depending on the configuration, resume the TSC and future
153 * clocks that only ticks when we're executing guest code.
154 *
155 * @param pVM The cross context VM structure.
156 * @param pVCpu The cross context virtual CPU structure.
157 */
158VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
159{
160#ifndef VBOX_WITHOUT_NS_ACCOUNTING
161 pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
162 pVCpu->tm.s.fExecuting = true;
163#endif
164 if (pVM->tm.s.fTSCTiedToExecution)
165 tmCpuTickResume(pVM, pVCpu);
166}
167
168
169/**
170 * Notification that execution has ended.
171 *
172 * This call must always be paired with a TMNotifyStartOfExecution call.
173 *
174 * The function may, depending on the configuration, suspend the TSC and future
175 * clocks that only ticks when we're executing guest code.
176 *
177 * @param pVM The cross context VM structure.
178 * @param pVCpu The cross context virtual CPU structure.
179 * @param uTsc TSC value when exiting guest context.
180 */
181VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
182{
183 if (pVM->tm.s.fTSCTiedToExecution)
184 tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */
185
186#ifndef VBOX_WITHOUT_NS_ACCOUNTING
187 /*
188 * Calculate the elapsed tick count and convert it to nanoseconds.
189 */
190# ifdef IN_RING3
191 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta();
192 uint64_t const uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
193# else
194 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
195 uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
196# endif
197 AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */
198
199 uint64_t cNsExecutingDelta;
200 if (uCpuHz < _4G)
201 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
202 else if (uCpuHz < 16*_1G64)
203 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
204 else
205 {
206 Assert(uCpuHz < 64 * _1G64);
207 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
208 }
209
210 /*
211 * Update the data.
212 *
213 * Note! We're not using strict memory ordering here to speed things us.
214 * The data is in a single cache line and this thread is the only
215 * one writing to that line, so I cannot quite imagine why we would
216 * need any strict ordering here.
217 */
218 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
219 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
220 ASMCompilerBarrier();
221 pVCpu->tm.s.fExecuting = false;
222 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
223 pVCpu->tm.s.cPeriodsExecuting++;
224 ASMCompilerBarrier();
225 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
226
227 /*
228 * Update stats.
229 */
230# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
231 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
232 if (cNsExecutingDelta < 5000)
233 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
234 else if (cNsExecutingDelta < 50000)
235 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
236 else
237 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
238# endif
239
240 /* The timer triggers occational updating of the others and total stats: */
241 if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
242 { /*likely*/ }
243 else
244 {
245 pVCpu->tm.s.fUpdateStats = false;
246
247 uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
248 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
249
250# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
251 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
252 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
253 if (cNsOtherNewDelta > 0)
254 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
255# endif
256
257 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
258 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
259 }
260
261#endif
262}
263
264
265/**
266 * Notification that the cpu is entering the halt state
267 *
268 * This call must always be paired with a TMNotifyEndOfExecution call.
269 *
270 * The function may, depending on the configuration, resume the TSC and future
271 * clocks that only ticks when we're halted.
272 *
273 * @param pVCpu The cross context virtual CPU structure.
274 */
275VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
276{
277 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
278
279#ifndef VBOX_WITHOUT_NS_ACCOUNTING
280 pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
281 pVCpu->tm.s.fHalting = true;
282#endif
283
284 if ( pVM->tm.s.fTSCTiedToExecution
285 && !pVM->tm.s.fTSCNotTiedToHalt)
286 tmCpuTickResume(pVM, pVCpu);
287}
288
289
290/**
291 * Notification that the cpu is leaving the halt state
292 *
293 * This call must always be paired with a TMNotifyStartOfHalt call.
294 *
295 * The function may, depending on the configuration, suspend the TSC and future
296 * clocks that only ticks when we're halted.
297 *
298 * @param pVCpu The cross context virtual CPU structure.
299 */
300VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
301{
302 PVM pVM = pVCpu->CTX_SUFF(pVM);
303
304 if ( pVM->tm.s.fTSCTiedToExecution
305 && !pVM->tm.s.fTSCNotTiedToHalt)
306 tmCpuTickPause(pVCpu);
307
308#ifndef VBOX_WITHOUT_NS_ACCOUNTING
309 uint64_t const u64NsTs = RTTimeNanoTS();
310 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.nsStartTotal;
311 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
312 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
313 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
314
315 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
316 ASMCompilerBarrier();
317 pVCpu->tm.s.fHalting = false;
318 pVCpu->tm.s.fUpdateStats = false;
319 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
320 pVCpu->tm.s.cPeriodsHalted++;
321 ASMCompilerBarrier();
322 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
323
324# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
325 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
326 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
327 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
328 if (cNsOtherNewDelta > 0)
329 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
330# endif
331 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
332 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
333#endif
334}
335
336
337/**
338 * Raise the timer force action flag and notify the dedicated timer EMT.
339 *
340 * @param pVM The cross context VM structure.
341 */
342DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
343{
344 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
345 AssertReturnVoid(idCpu < pVM->cCpus);
346 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
347
348 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
349 {
350 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
351 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
352#ifdef IN_RING3
353 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
354#endif
355 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
356 }
357}
358
359
360/**
361 * Schedule the queue which was changed.
362 */
363DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
364{
365 int rc = PDMCritSectTryEnter(&pQueue->TimerLock);
366 if (RT_SUCCESS_NP(rc))
367 {
368 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
369 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
370 tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
371#ifdef VBOX_STRICT
372 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
373#endif
374 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
375 PDMCritSectLeave(&pQueue->TimerLock);
376 return;
377 }
378
379 TMTIMERSTATE enmState = pTimer->enmState;
380 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
381 tmScheduleNotify(pVM);
382}
383
384
385/**
386 * Try change the state to enmStateNew from enmStateOld
387 * and link the timer into the scheduling queue.
388 *
389 * @returns Success indicator.
390 * @param pTimer Timer in question.
391 * @param enmStateNew The new timer state.
392 * @param enmStateOld The old timer state.
393 */
394DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
395{
396 /*
397 * Attempt state change.
398 */
399 bool fRc;
400 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
401 return fRc;
402}
403
404
405/**
406 * Links the timer onto the scheduling queue.
407 *
408 * @param pQueueCC The current context queue (same as @a pQueue for
409 * ring-3).
410 * @param pQueue The shared queue data.
411 * @param pTimer The timer.
412 *
413 * @todo FIXME: Look into potential race with the thread running the queues
414 * and stuff.
415 */
416DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
417{
418 Assert(pTimer->idxScheduleNext == UINT32_MAX);
419 const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0];
420 AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);
421
422 uint32_t idxHead;
423 do
424 {
425 idxHead = pQueue->idxSchedule;
426 Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
427 pTimer->idxScheduleNext = idxHead;
428 } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
429}
430
431
432/**
433 * Try change the state to enmStateNew from enmStateOld
434 * and link the timer into the scheduling queue.
435 *
436 * @returns Success indicator.
437 * @param pQueueCC The current context queue (same as @a pQueue for
438 * ring-3).
439 * @param pQueue The shared queue data.
440 * @param pTimer Timer in question.
441 * @param enmStateNew The new timer state.
442 * @param enmStateOld The old timer state.
443 */
444DECLINLINE(bool) tmTimerTryWithLink(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer,
445 TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
446{
447 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
448 {
449 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
450 return true;
451 }
452 return false;
453}
454
455
456/**
457 * Links a timer into the active list of a timer queue.
458 *
459 * @param pVM The cross context VM structure.
460 * @param pQueueCC The current context queue (same as @a pQueue for
461 * ring-3).
462 * @param pQueue The shared queue data.
463 * @param pTimer The timer.
464 * @param u64Expire The timer expiration time.
465 *
466 * @remarks Called while owning the relevant queue lock.
467 */
468DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
469 PTMTIMER pTimer, uint64_t u64Expire)
470{
471 Assert(pTimer->idxNext == UINT32_MAX);
472 Assert(pTimer->idxPrev == UINT32_MAX);
473 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
474 RT_NOREF(pVM);
475
476 PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
477 if (pCur)
478 {
479 for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
480 {
481 if (pCur->u64Expire > u64Expire)
482 {
483 const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
484 tmTimerSetNext(pQueueCC, pTimer, pCur);
485 tmTimerSetPrev(pQueueCC, pTimer, pPrev);
486 if (pPrev)
487 tmTimerSetNext(pQueueCC, pPrev, pTimer);
488 else
489 {
490 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
491 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
492 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
493 }
494 tmTimerSetPrev(pQueueCC, pCur, pTimer);
495 return;
496 }
497 if (pCur->idxNext == UINT32_MAX)
498 {
499 tmTimerSetNext(pQueueCC, pCur, pTimer);
500 tmTimerSetPrev(pQueueCC, pTimer, pCur);
501 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
502 return;
503 }
504 }
505 }
506 else
507 {
508 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
509 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
510 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
511 }
512}
513
514
515
516/**
517 * Schedules the given timer on the given queue.
518 *
519 * @param pVM The cross context VM structure.
520 * @param pQueueCC The current context queue (same as @a pQueue for
521 * ring-3).
522 * @param pQueue The shared queue data.
523 * @param pTimer The timer that needs scheduling.
524 *
525 * @remarks Called while owning the lock.
526 */
527DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
528{
529 Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
530 RT_NOREF(pVM);
531
532 /*
533 * Processing.
534 */
535 unsigned cRetries = 2;
536 do
537 {
538 TMTIMERSTATE enmState = pTimer->enmState;
539 switch (enmState)
540 {
541 /*
542 * Reschedule timer (in the active list).
543 */
544 case TMTIMERSTATE_PENDING_RESCHEDULE:
545 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
546 break; /* retry */
547 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
548 RT_FALL_THRU();
549
550 /*
551 * Schedule timer (insert into the active list).
552 */
553 case TMTIMERSTATE_PENDING_SCHEDULE:
554 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
555 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
556 break; /* retry */
557 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
558 return;
559
560 /*
561 * Stop the timer in active list.
562 */
563 case TMTIMERSTATE_PENDING_STOP:
564 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
565 break; /* retry */
566 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
567 RT_FALL_THRU();
568
569 /*
570 * Stop the timer (not on the active list).
571 */
572 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
573 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
574 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
575 break;
576 return;
577
578 /*
579 * The timer is pending destruction by TMR3TimerDestroy, our caller.
580 * Nothing to do here.
581 */
582 case TMTIMERSTATE_DESTROY:
583 break;
584
585 /*
586 * Postpone these until they get into the right state.
587 */
588 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
589 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
590 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
591 STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
592 return;
593
594 /*
595 * None of these can be in the schedule.
596 */
597 case TMTIMERSTATE_FREE:
598 case TMTIMERSTATE_STOPPED:
599 case TMTIMERSTATE_ACTIVE:
600 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
601 case TMTIMERSTATE_EXPIRED_DELIVER:
602 default:
603 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
604 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
605 return;
606 }
607 } while (cRetries-- > 0);
608}
609
610
611/**
612 * Schedules the specified timer queue.
613 *
614 * @param pVM The cross context VM structure.
615 * @param pQueueCC The current context queue (same as @a pQueue for
616 * ring-3) data of the queue to schedule.
617 * @param pQueue The shared queue data of the queue to schedule.
618 *
619 * @remarks Called while owning the lock.
620 */
621void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
622{
623 Assert(PDMCritSectIsOwner(&pQueue->TimerLock));
624
625 /*
626 * Dequeue the scheduling list and iterate it.
627 */
628 uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
629 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
630 while (idxNext != UINT32_MAX)
631 {
632 AssertBreak(idxNext < pQueueCC->cTimersAlloc);
633
634 /*
635 * Unlink the head timer and take down the index of the next one.
636 */
637 PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
638 idxNext = pTimer->idxScheduleNext;
639 pTimer->idxScheduleNext = UINT32_MAX;
640
641 /*
642 * Do the scheduling.
643 */
644 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
645 pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->szName));
646 tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
647 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
648 }
649 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
650}
651
652
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * Verifies the doubly linked active lists, and (in ring-3) that every timer's
 * state agrees with its list membership and that its handle encodes the right
 * queue and timer indices.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 */
void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
{
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE const   pQueue   = &pVM->tm.s.aTimerQueues[idxQueue];
        PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
        Assert(pQueue->enmClock == (TMCLOCK)idxQueue);

        /* Best effort only: skip queues whose locks we cannot take right now. */
        int rc = PDMCritSectTryEnter(&pQueue->TimerLock);
        if (RT_SUCCESS(rc))
        {
            /* The virtual sync queue additionally requires the virtual sync lock
               (which by lock order must be entered before the timer lock). */
            if (   pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
                || PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
            {
                /* Check the linking of the active lists. */
                PTMTIMER pPrev = NULL;
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
                    TMTIMERSTATE enmState = pCur->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_ACTIVE:
                            /* An active timer must not sit on the scheduling list
                               (unless it raced into another state meanwhile). */
                            AssertMsg(   pCur->idxScheduleNext == UINT32_MAX
                                      || pCur->enmState != TMTIMERSTATE_ACTIVE,
                                      ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
                            break;
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            break;
                        default:
                            AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                            break;
                    }
                }

# ifdef IN_RING3
                /* Go thru all the timers and check that the active ones all are in the active lists. */
                uint32_t idxTimer = pQueue->cTimersAlloc;
                uint32_t cFree    = 0;
                while (idxTimer-- > 0)
                {
                    PTMTIMER const     pTimer   = &pQueue->paTimers[idxTimer];
                    TMTIMERSTATE const enmState = pTimer->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_FREE:
                            cFree++;
                            break;

                        /* These states must be on the active list: */
                        case TMTIMERSTATE_ACTIVE:
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                        {
                            PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                            Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
                            while (pCurAct && pCurAct != pTimer)
                                pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
                            Assert(pCurAct == pTimer);
                            break;
                        }

                        /* These states must NOT be on the active list: */
                        case TMTIMERSTATE_PENDING_SCHEDULE:
                        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                        case TMTIMERSTATE_STOPPED:
                        case TMTIMERSTATE_EXPIRED_DELIVER:
                        {
                            Assert(pTimer->idxNext == UINT32_MAX);
                            Assert(pTimer->idxPrev == UINT32_MAX);
                            for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                                 pCurAct;
                                 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
                            {
                                Assert(pCurAct != pTimer);
                                Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
                                Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
                            }
                            break;
                        }

                        /* ignore */
                        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            break;

                        case TMTIMERSTATE_INVALID:
                            Assert(idxTimer == 0); /* only the zero'th entry may be invalid */
                            break;

                        /* shouldn't get here! */
                        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                        case TMTIMERSTATE_DESTROY:
                        default:
                            AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                            break;
                    }

                    /* Check the handle value. */
                    if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
                    {
                        Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
                        Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
                    }
                }
                Assert(cFree == pQueue->cTimersFree);
# endif /* IN_RING3 */

                if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
                    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
            }
            PDMCritSectLeave(&pQueue->TimerLock);
        }
    }
}
#endif /* VBOX_STRICT */
778
779#ifdef VBOX_HIGH_RES_TIMERS_HACK
780
781/**
782 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
783 * EMT is polling.
784 *
785 * @returns See tmTimerPollInternal.
786 * @param pVM The cross context VM structure.
787 * @param u64Now Current virtual clock timestamp.
788 * @param u64Delta The delta to the next even in ticks of the
789 * virtual clock.
790 * @param pu64Delta Where to return the delta.
791 */
792DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
793{
794 Assert(!(u64Delta & RT_BIT_64(63)));
795
796 if (!pVM->tm.s.fVirtualWarpDrive)
797 {
798 *pu64Delta = u64Delta;
799 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
800 }
801
802 /*
803 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
804 */
805 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
806 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
807
808 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
809 u64GipTime -= u64Start; /* the start is GIP time. */
810 if (u64GipTime >= u64Delta)
811 {
812 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
813 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
814 }
815 else
816 {
817 u64Delta -= u64GipTime;
818 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
819 u64Delta += u64GipTime;
820 }
821 *pu64Delta = u64Delta;
822 u64GipTime += u64Start;
823 return u64GipTime;
824}
825
826
827/**
828 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
829 * than the one dedicated to timer work.
830 *
831 * @returns See tmTimerPollInternal.
832 * @param pVM The cross context VM structure.
833 * @param u64Now Current virtual clock timestamp.
834 * @param pu64Delta Where to return the delta.
835 */
836DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
837{
838 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
839 *pu64Delta = s_u64OtherRet;
840 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
841}
842
843
844/**
845 * Worker for tmTimerPollInternal.
846 *
847 * @returns See tmTimerPollInternal.
848 * @param pVM The cross context VM structure.
849 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
850 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
851 * timer EMT.
852 * @param u64Now Current virtual clock timestamp.
853 * @param pu64Delta Where to return the delta.
854 * @param pCounter The statistics counter to update.
855 */
856DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
857 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
858{
859 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
860 if (pVCpuDst != pVCpu)
861 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
862 *pu64Delta = 0;
863 return 0;
864}
865
866
867/**
868 * Common worker for TMTimerPollGIP and TMTimerPoll.
869 *
870 * This function is called before FFs are checked in the inner execution EM loops.
871 *
872 * @returns The GIP timestamp of the next event.
873 * 0 if the next event has already expired.
874 *
875 * @param pVM The cross context VM structure.
876 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
877 * @param pu64Delta Where to store the delta.
878 *
879 * @thread The emulation thread.
880 *
881 * @remarks GIP uses ns ticks.
882 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    /* Resolve the VCPU currently designated to service VMCPU_FF_TIMER.  The ID
       is re-validated here since non-EMT callers may race an idTimerCpu change
       (see the r87822 change note about non-EMT scheduling). */
    VMCPUID idCpu = pVM->tm.s.idTimerCpu;
    AssertReturn(idCpu < pVM->cCpus, 0);
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);

    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t i64Delta1 = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        /* Expired: make sure the FF is set on the designated VCPU before reporting a hit. */
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straight forward if in a catch-up, not only do
     * we have to adjust the 'now' but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            /* Read the offset, then re-read every input to validate that we got a
               consistent snapshot (re-read validation instead of taking the lock). */
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                /* offVirtualSync is an offset from the virtual clock; convert to the
                   virtual-sync 'now'. */
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (   !pVM->tm.s.fRunningQueues
                    && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        /* Virtual sync clock is stopped: report a hit so the EMT goes and deals with it. */
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     *
     * In a catch-up we must obtain a consistent (offset, given-up offset,
     * percentage, prev-timestamp, expire) tuple without taking the lock;
     * retry up to 42 times before accepting a possibly-inconsistent set.
     */
    uint64_t off;
    uint32_t u32Pct = 0;
    bool fCatchUp;
    int cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                || cOuterTries <= 0)
            {
                /* Advance the offset by the amount of catch-up progress made since
                   the previous timestamp (u32Pct percent of elapsed virtual time). */
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (   !pVM->tm.s.fRunningQueues
            && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        /* While catching up the virtual-sync clock advances at (100+pct)% rate,
           so scale the delta back to virtual clock ticks: d * 100 / (pct + 100). */
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}
1075
1076
1077/**
1078 * Set FF if we've passed the next virtual event.
1079 *
1080 * This function is called before FFs are checked in the inner execution EM loops.
1081 *
1082 * @returns true if timers are pending, false if not.
1083 *
1084 * @param pVM The cross context VM structure.
1085 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1086 * @thread The emulation thread.
1087 */
1088VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1089{
1090 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1091 uint64_t off = 0;
1092 tmTimerPollInternal(pVM, pVCpu, &off);
1093 return off == 0;
1094}
1095
1096
1097/**
1098 * Set FF if we've passed the next virtual event.
1099 *
1100 * This function is called before FFs are checked in the inner execution EM loops.
1101 *
1102 * @param pVM The cross context VM structure.
1103 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1104 * @thread The emulation thread.
1105 */
1106VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1107{
1108 uint64_t off;
1109 tmTimerPollInternal(pVM, pVCpu, &off);
1110}
1111
1112
1113/**
1114 * Set FF if we've passed the next virtual event.
1115 *
1116 * This function is called before FFs are checked in the inner execution EM loops.
1117 *
1118 * @returns The GIP timestamp of the next event.
1119 * 0 if the next event has already expired.
1120 * @param pVM The cross context VM structure.
1121 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1122 * @param pu64Delta Where to store the delta.
1123 * @thread The emulation thread.
1124 */
1125VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1126{
1127 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
1128}
1129
1130#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1131
1132/**
1133 * Locks the timer clock.
1134 *
1135 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1136 * if the clock does not have a lock.
1137 * @param pVM The cross context VM structure.
1138 * @param hTimer Timer handle as returned by one of the create functions.
1139 * @param rcBusy What to return in ring-0 and raw-mode context if the
1140 * lock is busy. Pass VINF_SUCCESS to acquired the
1141 * critical section thru a ring-3 call if necessary.
1142 *
1143 * @remarks Currently only supported on timers using the virtual sync clock.
1144 */
VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    /* Only the virtual-sync queue has a clock lock (the VirtualSyncLock critsect). */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, rcBusy);
}
1151
1152
1153/**
1154 * Unlocks a timer clock locked by TMTimerLock.
1155 *
1156 * @param pVM The cross context VM structure.
1157 * @param hTimer Timer handle as returned by one of the create functions.
1158 */
VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN_VOID(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    /* Must mirror TMTimerLock: only the virtual-sync queue has a clock lock. */
    AssertReturnVoid(idxQueue == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
1165
1166
1167/**
1168 * Checks if the current thread owns the timer clock lock.
1169 *
1170 * @returns @c true if its the owner, @c false if not.
1171 * @param pVM The cross context VM structure.
1172 * @param hTimer Timer handle as returned by one of the create functions.
1173 */
VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    /* Non-virtual-sync timers have no clock lock, so there is no owner to report. */
    AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock);
}
1180
1181
1182/**
1183 * Optimized TMTimerSet code path for starting an inactive timer.
1184 *
1185 * @returns VBox status code.
1186 *
1187 * @param pVM The cross context VM structure.
1188 * @param pTimer The timer handle.
1189 * @param u64Expire The new expire time.
1190 * @param pQueue Pointer to the shared timer queue data.
1191 * @param idxQueue The queue index.
1192 */
static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, uint32_t idxQueue)
{
    /* Preconditions established by the caller (TMTimerSet): the timer has
       already been moved to ACTIVE under the queue's TimerLock and must not
       yet be linked into the active list. */
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
    {
        /* Never arm a virtual-sync timer in the past: clamp the expire time
           up to the current virtual-sync value rather than failing. */
        uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
        AssertMsgStmt(u64Expire >= u64Last,
                      ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
                      u64Expire = u64Last);
    }
    ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));

    /*
     * Link the timer into the active list.
     */
    tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue), pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    return VINF_SUCCESS;
}
1220
1221
1222/**
1223 * TMTimerSet for the virtual sync timer queue.
1224 *
1225 * This employs a greatly simplified state machine by always acquiring the
1226 * queue lock and bypassing the scheduling list.
1227 *
1228 * @returns VBox status code
1229 * @param pVM The cross context VM structure.
1230 * @param pTimer The timer handle.
1231 * @param u64Expire The expiration time.
1232 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    /* The virtual-sync queue is protected by the VirtualSyncLock critsect, so
       no lockless state transitions or scheduling list are needed here. */
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
    PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
    TMTIMERSTATE const enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            /* A virtual-sync timer must not be armed in the past. */
            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            /* Re-arming: unlink, update the expire time, and re-insert sorted. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            /* These transitional/terminal states are not used by the simplified
               virtual-sync state machine, so hitting one indicates a bug. */
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}
1292
1293
1294/**
1295 * Arm a timer with a (new) expire time.
1296 *
1297 * @returns VBox status code.
1298 * @param pVM The cross context VM structure.
1299 * @param hTimer Timer handle as returned by one of the create functions.
1300 * @param u64Expire New expire time.
1301 */
VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    STAM_COUNTER_INC(&pTimer->StatSetAbsolute);

    /* Treat virtual sync timers specially. */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pVM, pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

#if 1
    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        int rc = PDMCritSectTryEnter(&pQueue->TimerLock);
        if (RT_SUCCESS_NP(rc))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                /* Holding the lock and having won the state transition, we can
                   bypass the scheduling list entirely. */
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, idxQueue);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                PDMCritSectLeave(&pQueue->TimerLock);
                return VINF_SUCCESS;
            }
            PDMCritSectLeave(&pQueue->TimerLock);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(pTimer->idxPrev == UINT32_MAX);
                    Assert(pTimer->idxNext == UINT32_MAX);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pQueueCC, pQueue, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                /* Another thread is mid-transition on this timer; back off and retry. */
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
1454
1455
1456/**
1457 * Return the current time for the specified clock, setting pu64Now if not NULL.
1458 *
1459 * @returns Current time.
1460 * @param pVM The cross context VM structure.
1461 * @param enmClock The clock to query.
1462 * @param pu64Now Optional pointer where to store the return time
1463 */
1464DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1465{
1466 uint64_t u64Now;
1467 switch (enmClock)
1468 {
1469 case TMCLOCK_VIRTUAL_SYNC:
1470 u64Now = TMVirtualSyncGet(pVM);
1471 break;
1472 case TMCLOCK_VIRTUAL:
1473 u64Now = TMVirtualGet(pVM);
1474 break;
1475 case TMCLOCK_REAL:
1476 u64Now = TMRealGet(pVM);
1477 break;
1478 default:
1479 AssertFatalMsgFailed(("%d\n", enmClock));
1480 }
1481
1482 if (pu64Now)
1483 *pu64Now = u64Now;
1484 return u64Now;
1485}
1486
1487
1488/**
1489 * Optimized TMTimerSetRelative code path.
1490 *
1491 * @returns VBox status code.
1492 *
1493 * @param pVM The cross context VM structure.
1494 * @param pTimer The timer handle.
1495 * @param cTicksToNext Clock ticks until the next time expiration.
1496 * @param pu64Now Where to return the current time stamp used.
1497 * Optional.
1498 * @param pQueueCC The context specific queue data (same as @a pQueue
1499 * for ring-3).
1500 * @param pQueue The shared queue data.
1501 */
static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
                                            PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
{
    /* Preconditions established by the caller (tmTimerSetRelative): the timer
       has already been moved to ACTIVE under the queue's TimerLock and must
       not yet be linked into the active list. */
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
    tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    return VINF_SUCCESS;
}
1525
1526
1527/**
1528 * TMTimerSetRelative for the virtual sync timer queue.
1529 *
1530 * This employs a greatly simplified state machine by always acquiring the
1531 * queue lock and bypassing the scheduling list.
1532 *
1533 * @returns VBox status code
1534 * @param pVM The cross context VM structure.
1535 * @param pTimer The timer to (re-)arm.
1536 * @param cTicksToNext Clock ticks until the next time expiration.
1537 * @param pu64Now Where to return the current time stamp used.
1538 * Optional.
1539 */
1540static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1541{
1542 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1543 VM_ASSERT_EMT(pVM);
1544 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1545 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1546 AssertRCReturn(rc, rc);
1547
1548 /* Calculate the expiration tick. */
1549 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1550 if (pu64Now)
1551 *pu64Now = u64Expire;
1552 u64Expire += cTicksToNext;
1553
1554 /* Update the timer. */
1555 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1556 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1557 TMTIMERSTATE const enmState = pTimer->enmState;
1558 switch (enmState)
1559 {
1560 case TMTIMERSTATE_EXPIRED_DELIVER:
1561 case TMTIMERSTATE_STOPPED:
1562 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1563 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1564 else
1565 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1566 pTimer->u64Expire = u64Expire;
1567 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1568 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1569 rc = VINF_SUCCESS;
1570 break;
1571
1572 case TMTIMERSTATE_ACTIVE:
1573 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1574 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1575 pTimer->u64Expire = u64Expire;
1576 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1577 rc = VINF_SUCCESS;
1578 break;
1579
1580 case TMTIMERSTATE_PENDING_RESCHEDULE:
1581 case TMTIMERSTATE_PENDING_STOP:
1582 case TMTIMERSTATE_PENDING_SCHEDULE:
1583 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1584 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1585 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1586 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1587 case TMTIMERSTATE_DESTROY:
1588 case TMTIMERSTATE_FREE:
1589 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1590 rc = VERR_TM_INVALID_STATE;
1591 break;
1592
1593 default:
1594 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1595 rc = VERR_TM_UNKNOWN_STATE;
1596 break;
1597 }
1598
1599 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1600 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1601 return rc;
1602}
1603
1604
1605/**
1606 * Arm a timer with a expire time relative to the current time.
1607 *
1608 * @returns VBox status code.
1609 * @param pVM The cross context VM structure.
1610 * @param pTimer The timer to arm.
1611 * @param cTicksToNext Clock ticks until the next time expiration.
1612 * @param pu64Now Where to return the current time stamp used.
1613 * Optional.
1614 * @param pQueueCC The context specific queue data (same as @a pQueue
1615 * for ring-3).
 * @param pQueue The shared queue data (also supplies the queue's clock).
1618 */
1619static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1620 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1621{
1622 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1623
1624 /* Treat virtual sync timers specially. */
1625 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1626 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1627
1628 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1629 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1630
1631 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1632
1633#ifdef VBOX_WITH_STATISTICS
1634 /*
1635 * Gather optimization info.
1636 */
1637 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1638 TMTIMERSTATE enmOrgState = pTimer->enmState;
1639 switch (enmOrgState)
1640 {
1641 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1642 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1643 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1644 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1645 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1646 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1647 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1648 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1649 }
1650#endif
1651
1652 /*
1653 * Try to take the TM lock and optimize the common cases.
1654 *
1655 * With the TM lock we can safely make optimizations like immediate
1656 * scheduling and we can also be 100% sure that we're not racing the
1657 * running of the timer queues. As an additional restraint we require the
1658 * timer to have a critical section associated with to be 100% there aren't
1659 * concurrent operations on the timer. (This latter isn't necessary any
1660 * longer as this isn't supported for any timers, critsect or not.)
1661 *
1662 * Note! Lock ordering doesn't apply when we only _try_ to
1663 * get the innermost locks.
1664 */
1665 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(&pQueue->TimerLock));
1666#if 1
1667 if ( fOwnTMLock
1668 && pTimer->pCritSect)
1669 {
1670 TMTIMERSTATE enmState = pTimer->enmState;
1671 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1672 || enmState == TMTIMERSTATE_STOPPED)
1673 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1674 {
1675 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1676 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1677 PDMCritSectLeave(&pQueue->TimerLock);
1678 return VINF_SUCCESS;
1679 }
1680
1681 /* Optimize other states when it becomes necessary. */
1682 }
1683#endif
1684
1685 /*
1686 * Unoptimized path.
1687 */
1688 int rc;
1689 for (int cRetries = 1000; ; cRetries--)
1690 {
1691 /*
1692 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1693 */
1694 TMTIMERSTATE enmState = pTimer->enmState;
1695 switch (enmState)
1696 {
1697 case TMTIMERSTATE_STOPPED:
1698 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1699 {
1700 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1701 * Figure a safe way of activating this timer while the queue is
1702 * being run.
1703 * (99.9% sure this that the assertion is caused by DevAPIC.cpp
1704 * re-starting the timer in response to a initial_count write.) */
1705 }
1706 RT_FALL_THRU();
1707 case TMTIMERSTATE_EXPIRED_DELIVER:
1708 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1709 {
1710 Assert(pTimer->idxPrev == UINT32_MAX);
1711 Assert(pTimer->idxNext == UINT32_MAX);
1712 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1713 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1714 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1715 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1716 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1717 rc = VINF_SUCCESS;
1718 break;
1719 }
1720 rc = VERR_TRY_AGAIN;
1721 break;
1722
1723 case TMTIMERSTATE_PENDING_SCHEDULE:
1724 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1725 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1726 {
1727 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1728 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1729 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1730 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1731 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1732 rc = VINF_SUCCESS;
1733 break;
1734 }
1735 rc = VERR_TRY_AGAIN;
1736 break;
1737
1738
1739 case TMTIMERSTATE_ACTIVE:
1740 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1741 {
1742 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1743 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1744 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1745 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1746 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1747 rc = VINF_SUCCESS;
1748 break;
1749 }
1750 rc = VERR_TRY_AGAIN;
1751 break;
1752
1753 case TMTIMERSTATE_PENDING_RESCHEDULE:
1754 case TMTIMERSTATE_PENDING_STOP:
1755 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1756 {
1757 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1758 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1759 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1760 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1761 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1762 rc = VINF_SUCCESS;
1763 break;
1764 }
1765 rc = VERR_TRY_AGAIN;
1766 break;
1767
1768
1769 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1770 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1771 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1772#ifdef IN_RING3
1773 if (!RTThreadYield())
1774 RTThreadSleep(1);
1775#else
1776/** @todo call host context and yield after a couple of iterations */
1777#endif
1778 rc = VERR_TRY_AGAIN;
1779 break;
1780
1781 /*
1782 * Invalid states.
1783 */
1784 case TMTIMERSTATE_DESTROY:
1785 case TMTIMERSTATE_FREE:
1786 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1787 rc = VERR_TM_INVALID_STATE;
1788 break;
1789
1790 default:
1791 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1792 rc = VERR_TM_UNKNOWN_STATE;
1793 break;
1794 }
1795
1796 /* switch + loop is tedious to break out of. */
1797 if (rc == VINF_SUCCESS)
1798 break;
1799
1800 if (rc != VERR_TRY_AGAIN)
1801 {
1802 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1803 break;
1804 }
1805 if (cRetries <= 0)
1806 {
1807 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1808 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1809 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1810 break;
1811 }
1812
1813 /*
1814 * Retry to gain locks.
1815 */
1816 if (!fOwnTMLock)
1817 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(&pQueue->TimerLock));
1818
1819 } /* for (;;) */
1820
1821 /*
1822 * Clean up and return.
1823 */
1824 if (fOwnTMLock)
1825 PDMCritSectLeave(&pQueue->TimerLock);
1826
1827 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1828 return rc;
1829}
1830
1831
1832/**
1833 * Arm a timer with a expire time relative to the current time.
1834 *
1835 * @returns VBox status code.
1836 * @param pVM The cross context VM structure.
1837 * @param hTimer Timer handle as returned by one of the create functions.
1838 * @param cTicksToNext Clock ticks until the next time expiration.
1839 * @param pu64Now Where to return the current time stamp used.
1840 * Optional.
1841 */
1842VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1843{
1844 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1845 return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1846}
1847
1848
1849/**
1850 * Drops a hint about the frequency of the timer.
1851 *
1852 * This is used by TM and the VMM to calculate how often guest execution needs
1853 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1854 *
1855 * @returns VBox status code.
1856 * @param pVM The cross context VM structure.
1857 * @param hTimer Timer handle as returned by one of the create functions.
1858 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1859 *
1860 * @remarks We're using an integer hertz value here since anything above 1 HZ
1861 * is not going to be any trouble satisfying scheduling wise. The
1862 * range where it makes sense is >= 100 HZ.
1863 */
1864VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1865{
1866 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1867 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1868
1869 uint32_t const uHzOldHint = pTimer->uHzHint;
1870 pTimer->uHzHint = uHzHint;
1871
1872 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1873 if ( uHzHint > uMaxHzHint
1874 || uHzOldHint >= uMaxHzHint)
1875 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1876
1877 return VINF_SUCCESS;
1878}
1879
1880
1881/**
1882 * TMTimerStop for the virtual sync timer queue.
1883 *
1884 * This employs a greatly simplified state machine by always acquiring the
1885 * queue lock and bypassing the scheduling list.
1886 *
1887 * @returns VBox status code
1888 * @param pVM The cross context VM structure.
1889 * @param pTimer The timer handle.
1890 */
1891static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1892{
1893 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1894 VM_ASSERT_EMT(pVM);
1895 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1896 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1897 AssertRCReturn(rc, rc);
1898
1899 /* Reset the HZ hint. */
1900 uint32_t uOldHzHint = pTimer->uHzHint;
1901 if (uOldHzHint)
1902 {
1903 if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
1904 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
1905 pTimer->uHzHint = 0;
1906 }
1907
1908 /* Update the timer state. */
1909 TMTIMERSTATE const enmState = pTimer->enmState;
1910 switch (enmState)
1911 {
1912 case TMTIMERSTATE_ACTIVE:
1913 {
1914 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1915 tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
1916 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1917 rc = VINF_SUCCESS;
1918 break;
1919 }
1920
1921 case TMTIMERSTATE_EXPIRED_DELIVER:
1922 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1923 rc = VINF_SUCCESS;
1924 break;
1925
1926 case TMTIMERSTATE_STOPPED:
1927 rc = VINF_SUCCESS;
1928 break;
1929
1930 case TMTIMERSTATE_PENDING_RESCHEDULE:
1931 case TMTIMERSTATE_PENDING_STOP:
1932 case TMTIMERSTATE_PENDING_SCHEDULE:
1933 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1934 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1935 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1936 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1937 case TMTIMERSTATE_DESTROY:
1938 case TMTIMERSTATE_FREE:
1939 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1940 rc = VERR_TM_INVALID_STATE;
1941 break;
1942
1943 default:
1944 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1945 rc = VERR_TM_UNKNOWN_STATE;
1946 break;
1947 }
1948
1949 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1950 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1951 return rc;
1952}
1953
1954
1955/**
1956 * Stop the timer.
1957 * Use TMR3TimerArm() to "un-stop" the timer.
1958 *
1959 * @returns VBox status code.
1960 * @param pVM The cross context VM structure.
1961 * @param hTimer Timer handle as returned by one of the create functions.
1962 */
1963VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
1964{
1965 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1966 STAM_COUNTER_INC(&pTimer->StatStop);
1967
1968 /* Treat virtual sync timers specially. */
1969 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1970 return tmTimerVirtualSyncStop(pVM, pTimer);
1971
1972 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1973 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1974
1975 /*
1976 * Reset the HZ hint.
1977 */
1978 uint32_t const uOldHzHint = pTimer->uHzHint;
1979 if (uOldHzHint)
1980 {
1981 if (uOldHzHint >= pQueue->uMaxHzHint)
1982 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1983 pTimer->uHzHint = 0;
1984 }
1985
1986 /** @todo see if this function needs optimizing. */
1987 int cRetries = 1000;
1988 do
1989 {
1990 /*
1991 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1992 */
1993 TMTIMERSTATE enmState = pTimer->enmState;
1994 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1995 pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
1996 switch (enmState)
1997 {
1998 case TMTIMERSTATE_EXPIRED_DELIVER:
1999 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
2000 return VERR_INVALID_PARAMETER;
2001
2002 case TMTIMERSTATE_STOPPED:
2003 case TMTIMERSTATE_PENDING_STOP:
2004 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2005 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2006 return VINF_SUCCESS;
2007
2008 case TMTIMERSTATE_PENDING_SCHEDULE:
2009 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2010 {
2011 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2012 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2013 return VINF_SUCCESS;
2014 }
2015 break;
2016
2017 case TMTIMERSTATE_PENDING_RESCHEDULE:
2018 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2019 {
2020 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2021 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2022 return VINF_SUCCESS;
2023 }
2024 break;
2025
2026 case TMTIMERSTATE_ACTIVE:
2027 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2028 {
2029 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2030 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2031 return VINF_SUCCESS;
2032 }
2033 break;
2034
2035 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2036 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2037 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2038#ifdef IN_RING3
2039 if (!RTThreadYield())
2040 RTThreadSleep(1);
2041#else
2042/** @todo call host and yield cpu after a while. */
2043#endif
2044 break;
2045
2046 /*
2047 * Invalid states.
2048 */
2049 case TMTIMERSTATE_DESTROY:
2050 case TMTIMERSTATE_FREE:
2051 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2052 return VERR_TM_INVALID_STATE;
2053 default:
2054 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2055 return VERR_TM_UNKNOWN_STATE;
2056 }
2057 } while (cRetries-- > 0);
2058
2059 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2060 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2061 return VERR_TM_TIMER_UNSTABLE_STATE;
2062}
2063
2064
2065/**
2066 * Get the current clock time.
2067 * Handy for calculating the new expire time.
2068 *
2069 * @returns Current clock time.
2070 * @param pVM The cross context VM structure.
2071 * @param hTimer Timer handle as returned by one of the create functions.
2072 */
2073VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2074{
2075 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2076 STAM_COUNTER_INC(&pTimer->StatGet);
2077
2078 uint64_t u64;
2079 switch (pQueue->enmClock)
2080 {
2081 case TMCLOCK_VIRTUAL:
2082 u64 = TMVirtualGet(pVM);
2083 break;
2084 case TMCLOCK_VIRTUAL_SYNC:
2085 u64 = TMVirtualSyncGet(pVM);
2086 break;
2087 case TMCLOCK_REAL:
2088 u64 = TMRealGet(pVM);
2089 break;
2090 default:
2091 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2092 return UINT64_MAX;
2093 }
2094 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2095 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2096 return u64;
2097}
2098
2099
2100/**
2101 * Get the frequency of the timer clock.
2102 *
2103 * @returns Clock frequency (as Hz of course).
2104 * @param pVM The cross context VM structure.
2105 * @param hTimer Timer handle as returned by one of the create functions.
2106 */
2107VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2108{
2109 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2110 switch (pQueue->enmClock)
2111 {
2112 case TMCLOCK_VIRTUAL:
2113 case TMCLOCK_VIRTUAL_SYNC:
2114 return TMCLOCK_FREQ_VIRTUAL;
2115
2116 case TMCLOCK_REAL:
2117 return TMCLOCK_FREQ_REAL;
2118
2119 default:
2120 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2121 return 0;
2122 }
2123}
2124
2125
2126/**
2127 * Get the expire time of the timer.
2128 * Only valid for active timers.
2129 *
2130 * @returns Expire time of the timer.
2131 * @param pVM The cross context VM structure.
2132 * @param hTimer Timer handle as returned by one of the create functions.
2133 */
2134VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
2135{
2136 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2137 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2138 int cRetries = 1000;
2139 do
2140 {
2141 TMTIMERSTATE enmState = pTimer->enmState;
2142 switch (enmState)
2143 {
2144 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2145 case TMTIMERSTATE_EXPIRED_DELIVER:
2146 case TMTIMERSTATE_STOPPED:
2147 case TMTIMERSTATE_PENDING_STOP:
2148 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2149 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2150 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2151 return UINT64_MAX;
2152
2153 case TMTIMERSTATE_ACTIVE:
2154 case TMTIMERSTATE_PENDING_RESCHEDULE:
2155 case TMTIMERSTATE_PENDING_SCHEDULE:
2156 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2157 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2158 return pTimer->u64Expire;
2159
2160 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2161 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2162#ifdef IN_RING3
2163 if (!RTThreadYield())
2164 RTThreadSleep(1);
2165#endif
2166 break;
2167
2168 /*
2169 * Invalid states.
2170 */
2171 case TMTIMERSTATE_DESTROY:
2172 case TMTIMERSTATE_FREE:
2173 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2174 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2175 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2176 return UINT64_MAX;
2177 default:
2178 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2179 return UINT64_MAX;
2180 }
2181 } while (cRetries-- > 0);
2182
2183 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2184 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2185 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2186 return UINT64_MAX;
2187}
2188
2189
2190/**
2191 * Checks if a timer is active or not.
2192 *
2193 * @returns True if active.
2194 * @returns False if not active.
2195 * @param pVM The cross context VM structure.
2196 * @param hTimer Timer handle as returned by one of the create functions.
2197 */
2198VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2199{
2200 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2201 TMTIMERSTATE enmState = pTimer->enmState;
2202 switch (enmState)
2203 {
2204 case TMTIMERSTATE_STOPPED:
2205 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2206 case TMTIMERSTATE_EXPIRED_DELIVER:
2207 case TMTIMERSTATE_PENDING_STOP:
2208 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2209 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2210 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2211 return false;
2212
2213 case TMTIMERSTATE_ACTIVE:
2214 case TMTIMERSTATE_PENDING_RESCHEDULE:
2215 case TMTIMERSTATE_PENDING_SCHEDULE:
2216 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2217 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2218 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2219 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2220 return true;
2221
2222 /*
2223 * Invalid states.
2224 */
2225 case TMTIMERSTATE_DESTROY:
2226 case TMTIMERSTATE_FREE:
2227 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2228 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2229 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2230 return false;
2231 default:
2232 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2233 return false;
2234 }
2235}
2236
2237
2238/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2239
2240
2241/**
2242 * Arm a timer with a (new) expire time relative to current time.
2243 *
2244 * @returns VBox status code.
2245 * @param pVM The cross context VM structure.
2246 * @param hTimer Timer handle as returned by one of the create functions.
2247 * @param cMilliesToNext Number of milliseconds to the next tick.
2248 */
2249VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2250{
2251 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2252 switch (pQueue->enmClock)
2253 {
2254 case TMCLOCK_VIRTUAL:
2255 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2256 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2257
2258 case TMCLOCK_VIRTUAL_SYNC:
2259 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2260 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2261
2262 case TMCLOCK_REAL:
2263 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2264 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL, pQueueCC, pQueue);
2265
2266 default:
2267 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2268 return VERR_TM_TIMER_BAD_CLOCK;
2269 }
2270}
2271
2272
2273/**
2274 * Arm a timer with a (new) expire time relative to current time.
2275 *
2276 * @returns VBox status code.
2277 * @param pVM The cross context VM structure.
2278 * @param hTimer Timer handle as returned by one of the create functions.
2279 * @param cMicrosToNext Number of microseconds to the next tick.
2280 */
2281VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2282{
2283 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2284 switch (pQueue->enmClock)
2285 {
2286 case TMCLOCK_VIRTUAL:
2287 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2288 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2289
2290 case TMCLOCK_VIRTUAL_SYNC:
2291 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2292 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2293
2294 case TMCLOCK_REAL:
2295 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2296 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL, pQueueCC, pQueue);
2297
2298 default:
2299 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2300 return VERR_TM_TIMER_BAD_CLOCK;
2301 }
2302}
2303
2304
2305/**
2306 * Arm a timer with a (new) expire time relative to current time.
2307 *
2308 * @returns VBox status code.
2309 * @param pVM The cross context VM structure.
2310 * @param hTimer Timer handle as returned by one of the create functions.
2311 * @param cNanosToNext Number of nanoseconds to the next tick.
2312 */
2313VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2314{
2315 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2316 switch (pQueue->enmClock)
2317 {
2318 case TMCLOCK_VIRTUAL:
2319 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2320 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2321
2322 case TMCLOCK_VIRTUAL_SYNC:
2323 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2324 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2325
2326 case TMCLOCK_REAL:
2327 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2328 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL, pQueueCC, pQueue);
2329
2330 default:
2331 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2332 return VERR_TM_TIMER_BAD_CLOCK;
2333 }
2334}
2335
2336
2337/**
2338 * Get the current clock time as nanoseconds.
2339 *
2340 * @returns The timer clock as nanoseconds.
2341 * @param pVM The cross context VM structure.
2342 * @param hTimer Timer handle as returned by one of the create functions.
2343 */
2344VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2345{
2346 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2347}
2348
2349
2350/**
2351 * Get the current clock time as microseconds.
2352 *
2353 * @returns The timer clock as microseconds.
2354 * @param pVM The cross context VM structure.
2355 * @param hTimer Timer handle as returned by one of the create functions.
2356 */
2357VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2358{
2359 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2360}
2361
2362
2363/**
2364 * Get the current clock time as milliseconds.
2365 *
2366 * @returns The timer clock as milliseconds.
2367 * @param pVM The cross context VM structure.
2368 * @param hTimer Timer handle as returned by one of the create functions.
2369 */
2370VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2371{
2372 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2373}
2374
2375
2376/**
2377 * Converts the specified timer clock time to nanoseconds.
2378 *
2379 * @returns nanoseconds.
2380 * @param pVM The cross context VM structure.
2381 * @param hTimer Timer handle as returned by one of the create functions.
2382 * @param cTicks The clock ticks.
2383 * @remark There could be rounding errors here. We just do a simple integer divide
2384 * without any adjustments.
2385 */
2386VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2387{
2388 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2389 switch (pQueue->enmClock)
2390 {
2391 case TMCLOCK_VIRTUAL:
2392 case TMCLOCK_VIRTUAL_SYNC:
2393 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2394 return cTicks;
2395
2396 case TMCLOCK_REAL:
2397 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2398 return cTicks * 1000000;
2399
2400 default:
2401 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2402 return 0;
2403 }
2404}
2405
2406
2407/**
2408 * Converts the specified timer clock time to microseconds.
2409 *
2410 * @returns microseconds.
2411 * @param pVM The cross context VM structure.
2412 * @param hTimer Timer handle as returned by one of the create functions.
2413 * @param cTicks The clock ticks.
2414 * @remark There could be rounding errors here. We just do a simple integer divide
2415 * without any adjustments.
2416 */
2417VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2418{
2419 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2420 switch (pQueue->enmClock)
2421 {
2422 case TMCLOCK_VIRTUAL:
2423 case TMCLOCK_VIRTUAL_SYNC:
2424 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2425 return cTicks / 1000;
2426
2427 case TMCLOCK_REAL:
2428 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2429 return cTicks * 1000;
2430
2431 default:
2432 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2433 return 0;
2434 }
2435}
2436
2437
2438/**
2439 * Converts the specified timer clock time to milliseconds.
2440 *
2441 * @returns milliseconds.
2442 * @param pVM The cross context VM structure.
2443 * @param hTimer Timer handle as returned by one of the create functions.
2444 * @param cTicks The clock ticks.
2445 * @remark There could be rounding errors here. We just do a simple integer divide
2446 * without any adjustments.
2447 */
2448VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2449{
2450 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2451 switch (pQueue->enmClock)
2452 {
2453 case TMCLOCK_VIRTUAL:
2454 case TMCLOCK_VIRTUAL_SYNC:
2455 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2456 return cTicks / 1000000;
2457
2458 case TMCLOCK_REAL:
2459 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2460 return cTicks;
2461
2462 default:
2463 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2464 return 0;
2465 }
2466}
2467
2468
2469/**
2470 * Converts the specified nanosecond timestamp to timer clock ticks.
2471 *
2472 * @returns timer clock ticks.
2473 * @param pVM The cross context VM structure.
2474 * @param hTimer Timer handle as returned by one of the create functions.
2475 * @param cNanoSecs The nanosecond value ticks to convert.
2476 * @remark There could be rounding and overflow errors here.
2477 */
2478VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2479{
2480 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2481 switch (pQueue->enmClock)
2482 {
2483 case TMCLOCK_VIRTUAL:
2484 case TMCLOCK_VIRTUAL_SYNC:
2485 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2486 return cNanoSecs;
2487
2488 case TMCLOCK_REAL:
2489 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2490 return cNanoSecs / 1000000;
2491
2492 default:
2493 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2494 return 0;
2495 }
2496}
2497
2498
2499/**
2500 * Converts the specified microsecond timestamp to timer clock ticks.
2501 *
2502 * @returns timer clock ticks.
2503 * @param pVM The cross context VM structure.
2504 * @param hTimer Timer handle as returned by one of the create functions.
2505 * @param cMicroSecs The microsecond value ticks to convert.
2506 * @remark There could be rounding and overflow errors here.
2507 */
2508VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2509{
2510 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2511 switch (pQueue->enmClock)
2512 {
2513 case TMCLOCK_VIRTUAL:
2514 case TMCLOCK_VIRTUAL_SYNC:
2515 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2516 return cMicroSecs * 1000;
2517
2518 case TMCLOCK_REAL:
2519 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2520 return cMicroSecs / 1000;
2521
2522 default:
2523 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2524 return 0;
2525 }
2526}
2527
2528
2529/**
2530 * Converts the specified millisecond timestamp to timer clock ticks.
2531 *
2532 * @returns timer clock ticks.
2533 * @param pVM The cross context VM structure.
2534 * @param hTimer Timer handle as returned by one of the create functions.
2535 * @param cMilliSecs The millisecond value ticks to convert.
2536 * @remark There could be rounding and overflow errors here.
2537 */
2538VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2539{
2540 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2541 switch (pQueue->enmClock)
2542 {
2543 case TMCLOCK_VIRTUAL:
2544 case TMCLOCK_VIRTUAL_SYNC:
2545 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2546 return cMilliSecs * 1000000;
2547
2548 case TMCLOCK_REAL:
2549 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2550 return cMilliSecs;
2551
2552 default:
2553 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2554 return 0;
2555 }
2556}
2557
2558
2559/**
2560 * Convert state to string.
2561 *
2562 * @returns Readonly status name.
2563 * @param enmState State.
2564 */
2565const char *tmTimerState(TMTIMERSTATE enmState)
2566{
2567 switch (enmState)
2568 {
2569#define CASE(num, state) \
2570 case TMTIMERSTATE_##state: \
2571 AssertCompile(TMTIMERSTATE_##state == (num)); \
2572 return #num "-" #state
2573 CASE( 0,INVALID);
2574 CASE( 1,STOPPED);
2575 CASE( 2,ACTIVE);
2576 CASE( 3,EXPIRED_GET_UNLINK);
2577 CASE( 4,EXPIRED_DELIVER);
2578 CASE( 5,PENDING_STOP);
2579 CASE( 6,PENDING_STOP_SCHEDULE);
2580 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2581 CASE( 8,PENDING_SCHEDULE);
2582 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2583 CASE(10,PENDING_RESCHEDULE);
2584 CASE(11,DESTROY);
2585 CASE(12,FREE);
2586 default:
2587 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2588 return "Invalid state!";
2589#undef CASE
2590 }
2591}
2592
2593
2594/**
2595 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
2596 *
2597 * @returns The highest frequency. 0 if no timers care.
2598 * @param pVM The cross context VM structure.
2599 * @param uOldMaxHzHint The old global hint.
2600 */
2601DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
2602{
2603 /* Set two bits, though not entirely sure it's needed (too exhaused to think clearly)
2604 but it should force other callers thru the slow path while we're recalculating and
2605 help us detect changes while we're recalculating. */
2606 AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);
2607
2608 /*
2609 * The "right" highest frequency value isn't so important that we'll block
2610 * waiting on the timer semaphores.
2611 */
2612 uint32_t uMaxHzHint = 0;
2613 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
2614 {
2615 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
2616
2617 /* Get the max Hz hint for the queue. */
2618 uint32_t uMaxHzHintQueue;
2619 if ( !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2620 || RT_FAILURE_NP(PDMCritSectTryEnter(&pQueue->TimerLock)))
2621 uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
2622 else
2623 {
2624 /* Is it still necessary to do updating? */
2625 if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2626 {
2627 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */
2628
2629 PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
2630 uMaxHzHintQueue = 0;
2631 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
2632 pCur;
2633 pCur = tmTimerGetNext(pQueueCC, pCur))
2634 {
2635 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2636 if (uHzHint > uMaxHzHintQueue)
2637 {
2638 TMTIMERSTATE enmState = pCur->enmState;
2639 switch (enmState)
2640 {
2641 case TMTIMERSTATE_ACTIVE:
2642 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2643 case TMTIMERSTATE_EXPIRED_DELIVER:
2644 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2645 case TMTIMERSTATE_PENDING_SCHEDULE:
2646 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2647 case TMTIMERSTATE_PENDING_RESCHEDULE:
2648 uMaxHzHintQueue = uHzHint;
2649 break;
2650
2651 case TMTIMERSTATE_STOPPED:
2652 case TMTIMERSTATE_PENDING_STOP:
2653 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2654 case TMTIMERSTATE_DESTROY:
2655 case TMTIMERSTATE_FREE:
2656 case TMTIMERSTATE_INVALID:
2657 break;
2658 /* no default, want gcc warnings when adding more states. */
2659 }
2660 }
2661 }
2662
2663 /* Write the new Hz hint for the quest and clear the other update flag. */
2664 ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
2665 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
2666 }
2667 else
2668 uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);
2669
2670 PDMCritSectLeave(&pQueue->TimerLock);
2671 }
2672
2673 /* Update the global max Hz hint. */
2674 if (uMaxHzHint < uMaxHzHintQueue)
2675 uMaxHzHint = uMaxHzHintQueue;
2676 }
2677
2678 /*
2679 * Update the frequency hint if no pending frequency changes and we didn't race anyone thru here.
2680 */
2681 uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
2682 if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2683 Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
2684 else
2685 for (uint32_t iTry = 1;; iTry++)
2686 {
2687 if (RT_LO_U32(u64Actual) != 0)
2688 Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
2689 else if (iTry >= 4)
2690 Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
2691 else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2692 Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
2693 else
2694 continue;
2695 break;
2696 }
2697 return uMaxHzHint;
2698}
2699
2700
2701/**
2702 * Gets the highest frequency hint for all the important timers.
2703 *
2704 * @returns The highest frequency. 0 if no timers care.
2705 * @param pVM The cross context VM structure.
2706 */
2707DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2708{
2709 /*
2710 * Query the value, recalculate it if necessary.
2711 */
2712 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2713 if (RT_HI_U32(u64Combined) == 0)
2714 return RT_LO_U32(u64Combined); /* hopefully somewhat likely */
2715 return tmGetFrequencyHintSlow(pVM, RT_LO_U32(u64Combined));
2716}
2717
2718
2719/**
2720 * Calculates a host timer frequency that would be suitable for the current
2721 * timer load.
2722 *
2723 * This will take the highest timer frequency, adjust for catch-up and warp
2724 * driver, and finally add a little fudge factor. The caller (VMM) will use
2725 * the result to adjust the per-cpu preemption timer.
2726 *
2727 * @returns The highest frequency. 0 if no important timers around.
2728 * @param pVM The cross context VM structure.
2729 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2730 */
2731VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2732{
2733 uint32_t uHz = tmGetFrequencyHint(pVM);
2734
2735 /* Catch up, we have to be more aggressive than the % indicates at the
2736 beginning of the effort. */
2737 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2738 {
2739 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2740 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2741 {
2742 if (u32Pct <= 100)
2743 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2744 else if (u32Pct <= 200)
2745 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2746 else if (u32Pct <= 400)
2747 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2748 uHz *= u32Pct + 100;
2749 uHz /= 100;
2750 }
2751 }
2752
2753 /* Warp drive. */
2754 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2755 {
2756 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2757 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2758 {
2759 uHz *= u32Pct;
2760 uHz /= 100;
2761 }
2762 }
2763
2764 /* Fudge factor. */
2765 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2766 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2767 else
2768 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2769 uHz /= 100;
2770
2771 /* Make sure it isn't too high. */
2772 if (uHz > pVM->tm.s.cHostHzMax)
2773 uHz = pVM->tm.s.cHostHzMax;
2774
2775 return uHz;
2776}
2777
2778
2779/**
2780 * Whether the guest virtual clock is ticking.
2781 *
2782 * @returns true if ticking, false otherwise.
2783 * @param pVM The cross context VM structure.
2784 */
2785VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2786{
2787 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2788}
2789
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette