VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 87812

Last change on this file since 87812 was 87812, checked in by vboxsync, 4 years ago

VMM/TM: Replaced the global timer active list lock with per queue locks. bugref:9943

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 104.5 KB
Line 
1/* $Id: TMAll.cpp 87812 2021-02-19 20:54:49Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#ifdef DEBUG_bird
24# define DBGFTRACE_DISABLED /* annoying */
25#endif
26#include <VBox/vmm/tm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/dbgftrace.h>
29#ifdef IN_RING3
30#endif
31#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
32#include "TMInternal.h"
33#include <VBox/vmm/vmcc.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-math.h>
43#ifdef IN_RING3
44# include <iprt/thread.h>
45#endif
46
47#include "TMInline.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53#ifdef VBOX_STRICT
54/** @def TMTIMER_GET_CRITSECT
55 * Helper for safely resolving the critical section for a timer belonging to a
56 * device instance.
57 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
58# ifdef IN_RING3
59# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) ((a_pTimer)->pCritSect)
60# else
61# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) tmRZTimerGetCritSect(a_pVM, a_pTimer)
62# endif
63#endif
64
65/** @def TMTIMER_ASSERT_CRITSECT
66 * Checks that the caller owns the critical section if one is associated with
67 * the timer. */
68#ifdef VBOX_STRICT
69# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
70 do { \
71 if ((a_pTimer)->pCritSect) \
72 { \
73 VMSTATE enmState; \
74 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
75 AssertMsg( pCritSect \
76 && ( PDMCritSectIsOwner(pCritSect) \
77 || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
78 || enmState == VMSTATE_RESETTING \
79 || enmState == VMSTATE_RESETTING_LS ),\
80 ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
81 (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
82 } \
83 } while (0)
84#else
85# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
86#endif
87
88/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
89 * Checks for lock order trouble between the timer critsect and the critical
90 * section critsect. The virtual sync critsect must always be entered before
91 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
92 * isn't any critical section associated with the timer or if the calling thread
93 * doesn't own it, ASSUMING of course that the thread using this macro is going
94 * to enter the virtual sync critical section anyway.
95 *
96 * @remarks This is a slightly relaxed timer locking attitude compared to
97 * TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
98 * should know what it's doing if it's stopping or starting a timer
99 * without taking the device lock.
100 */
101#ifdef VBOX_STRICT
102# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
103 do { \
104 if ((pTimer)->pCritSect) \
105 { \
106 VMSTATE enmState; \
107 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
108 AssertMsg( pCritSect \
109 && ( !PDMCritSectIsOwner(pCritSect) \
110 || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
111 || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
112 || enmState == VMSTATE_RESETTING \
113 || enmState == VMSTATE_RESETTING_LS ),\
114 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
115 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
116 } \
117 } while (0)
118#else
119# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
120#endif
121
122
#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Helper for TMTIMER_GET_CRITSECT: resolves the ring-0 address of the
 * critical section associated with a timer owned by a device instance.
 *
 * For device timers the ring-3 critsect pointer is translated via the owning
 * device instance (either the device's default critsect or a critsect placed
 * inside the shared instance data); everything else goes through
 * MMHyperR3ToCC.
 *
 * @returns Current-context (ring-0) pointer to the timer's critical section.
 * @param   pVM     The cross context VM structure.
 * @param   pTimer  The timer whose critsect to resolve.
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        /* Raise EFL.AC around the ring-3 pointer dereference so SMAP (if
           enabled) doesn't fault the read of a user/ring-3 mapped page. */
        RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
        PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        ASMSetFlags(fSavedFlags);
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
        /* Fast path: the timer uses the device's default critsect. */
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;
        /* If the critsect lives inside the shared instance data, the same
           offset is valid for the ring-0 mapping of that data. */
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    /* Fallback: generic ring-3 to current-context address translation. */
    return (PPDMCRITSECT)MMHyperR3ToCC(pVM, pTimer->pCritSect);
}
#endif /* VBOX_STRICT && IN_RING0 */
145
146
/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Record the raw TSC at guest entry; TMNotifyEndOfExecution uses this
       to compute the time spent executing guest code.  The flag is written
       after the timestamp so lock-free readers see a consistent pair. */
    pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
    pVCpu->tm.s.fExecuting = true;
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}
167
168
/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uTsc    TSC value when exiting guest context.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /*
     * Calculate the elapsed tick count and convert it to nanoseconds.
     * The inter-CPU TSC delta is subtracted so values from different host
     * CPUs are comparable.
     */
# ifdef IN_RING3
    uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta();
    uint64_t const uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
# else
    uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
    uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
# endif
    /* Clamp to 4 seconds worth of ticks to guard against bogus TSC readings. */
    AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */

    /* Pick a shift so the divisor fits in the 32-bit parameter of
       ASMMultU64ByU32DivByU32. */
    uint64_t cNsExecutingDelta;
    if (uCpuHz < _4G)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
    else if (uCpuHz < 16*_1G64)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
    else
    {
        Assert(uCpuHz < 64 * _1G64);
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
    }

    /*
     * Update the data.
     *
     * Note! We're not using strict memory ordering here to speed things up.
     *       The data is in a single cache line and this thread is the only
     *       one writing to that line, so I cannot quite imagine why we would
     *       need any strict ordering here.
     */
    uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    /* Seqlock-style generation counter: odd while the fields below are being
       updated, bumped to even again afterwards so lock-free readers can
       detect a torn read and retry. */
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fExecuting = false;
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

    /*
     * Update stats.
     */
# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
# endif

    /* The timer triggers occasional updating of the others and total stats: */
    if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
    { /*likely*/ }
    else
    {
        pVCpu->tm.s.fUpdateStats = false;

        uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
        /* "Other" time = everything that is neither executing nor halted. */
        uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
        int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
        if (cNsOtherNewDelta > 0)
            STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif

        pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
        pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
    }

#endif
}
263
264
/**
 * Notification that the cpu is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 * (NOTE(review): the original comment said TMNotifyEndOfExecution, but the
 * fields written here are consumed by TMNotifyEndOfHalt.)
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Record when the halt started; TMNotifyEndOfHalt derives the halted
       period from this timestamp. */
    pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
    pVCpu->tm.s.fHalting = true;
#endif

    /* Keep the TSC ticking through the halt unless configured otherwise. */
    if (   pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}
288
289
/**
 * Notification that the cpu is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (   pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Compute the halted period and the derived totals from one timestamp. */
    uint64_t const u64NsTs           = RTTimeNanoTS();
    uint64_t const cNsTotalNew       = u64NsTs - pVCpu->tm.s.nsStartTotal;
    uint64_t const cNsHaltedDelta    = u64NsTs - pVCpu->tm.s.nsStartHalting;
    uint64_t const cNsHaltedNew      = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew       = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

    /* Seqlock-style generation counter: odd while updating, even when the
       fields below are stable again (see TMNotifyEndOfExecution). */
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    ASMCompilerBarrier();
    pVCpu->tm.s.fHalting = false;
    pVCpu->tm.s.fUpdateStats = false;
    pVCpu->tm.s.cNsHalted = cNsHaltedNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMCompilerBarrier();
    ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
    if (cNsOtherNewDelta > 0)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif
    pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
    pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
#endif
}
335
336
337/**
338 * Raise the timer force action flag and notify the dedicated timer EMT.
339 *
340 * @param pVM The cross context VM structure.
341 */
342DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
343{
344 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
345 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
346 {
347 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
348 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
349#ifdef IN_RING3
350 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
351#endif
352 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
353 }
354}
355
356
/**
 * Schedule the queue which was changed.
 *
 * If called on the EMT and the queue's TimerLock can be acquired without
 * blocking, the pending schedule list is processed right here; otherwise the
 * dedicated timer EMT is poked via the TIMER force action flag when the timer
 * still has pending scheduling work.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pTimer  The timer whose queue needs scheduling.
 */
DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMER pTimer)
{
    TMCLOCK const enmClock = pTimer->enmClock;
    AssertReturnVoid((unsigned)enmClock < TMCLOCK_MAX);
    PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock];
    PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue);

    if (VM_IS_EMT(pVM)) /** @todo drop EMT requirement here. */
    {
        /* Try do the scheduling ourselves; fall through to notification if
           the lock is contended. */
        int rc = PDMCritSectTryEnter(&pQueue->TimerLock);
        if (RT_SUCCESS_NP(rc))
        {
            STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
            Log3(("tmSchedule: tmTimerQueueSchedule\n"));
            tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
#ifdef VBOX_STRICT
            tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
            STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
            PDMCritSectLeave(&pQueue->TimerLock);
            return;
        }
    }

    /* Couldn't process it here: wake the dedicated timer EMT if the timer is
       pending scheduling work. */
    TMTIMERSTATE enmState = pTimer->enmState;
    if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
        tmScheduleNotify(pVM);
}
388
389
390/**
391 * Try change the state to enmStateNew from enmStateOld
392 * and link the timer into the scheduling queue.
393 *
394 * @returns Success indicator.
395 * @param pTimer Timer in question.
396 * @param enmStateNew The new timer state.
397 * @param enmStateOld The old timer state.
398 */
399DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
400{
401 /*
402 * Attempt state change.
403 */
404 bool fRc;
405 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
406 return fRc;
407}
408
409
410/**
411 * Links the timer onto the scheduling queue.
412 *
413 * @param pQueueCC The current context queue (same as @a pQueue for
414 * ring-3).
415 * @param pQueue The shared queue data.
416 * @param pTimer The timer.
417 *
418 * @todo FIXME: Look into potential race with the thread running the queues
419 * and stuff.
420 */
421DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
422{
423 Assert(pTimer->idxScheduleNext == UINT32_MAX);
424 const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0];
425 AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);
426
427 uint32_t idxHead;
428 do
429 {
430 idxHead = pQueue->idxSchedule;
431 Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
432 pTimer->idxScheduleNext = idxHead;
433 } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
434}
435
436
437/**
438 * Try change the state to enmStateNew from enmStateOld
439 * and link the timer into the scheduling queue.
440 *
441 * @returns Success indicator.
442 * @param pVM The cross context VM structure.
443 * @param pTimer Timer in question.
444 * @param enmStateNew The new timer state.
445 * @param enmStateOld The old timer state.
446 */
447DECLINLINE(bool) tmTimerTryWithLink(PVMCC pVM, PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
448{
449 TMCLOCK const enmClock = pTimer->enmClock;
450 AssertReturn((unsigned)enmClock < TMCLOCK_MAX, false);
451 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
452 {
453 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock];
454 tmTimerLinkSchedule(TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue), pQueue, pTimer);
455 return true;
456 }
457 return false;
458}
459
460
/**
 * Links a timer into the active list of a timer queue.
 *
 * The active list is kept sorted by expiration time (ascending); whenever the
 * new timer becomes the list head, the queue's u64Expire is republished.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
                                               PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(pTimer->idxNext == UINT32_MAX);
    Assert(pTimer->idxPrev == UINT32_MAX);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
    RT_NOREF(pVM);

    PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
    if (pCur)
    {
        /* Walk forward until we find a timer expiring later than us and
           insert in front of it. */
        for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
                tmTimerSetNext(pQueueCC, pTimer, pCur);
                tmTimerSetPrev(pQueueCC, pTimer, pPrev);
                if (pPrev)
                    tmTimerSetNext(pQueueCC, pPrev, pTimer);
                else
                {
                    /* New head: publish the earlier expiration time. */
                    tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
                }
                tmTimerSetPrev(pQueueCC, pCur, pTimer);
                return;
            }
            if (pCur->idxNext == UINT32_MAX)
            {
                /* Nothing expires later: append at the tail (u64Expire
                   needs no update). */
                tmTimerSetNext(pQueueCC, pCur, pTimer);
                tmTimerSetPrev(pQueueCC, pTimer, pCur);
                DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
                return;
            }
        }
    }
    else
    {
        /* Empty list: the new timer becomes the head. */
        tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
    }
}
518
519
520
/**
 * Schedules the given timer on the given queue.
 *
 * Carries out the state transition the timer is pending (reschedule,
 * schedule, stop) and links/unlinks it on the active list accordingly.  The
 * transitions race with other threads via tmTimerTry, so on contention we
 * re-read the state and retry a bounded number of times.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pQueueCC    The current context queue (same as @a pQueue for
 *                      ring-3).
 * @param   pQueue      The shared queue data.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
    RT_NOREF(pVM);

    /*
     * Processing.
     */
    unsigned cRetries = 2; /* bounded retries so concurrent state flapping can't livelock us */
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
                STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
614
615
616/**
617 * Schedules the specified timer queue.
618 *
619 * @param pVM The cross context VM structure.
620 * @param pQueueCC The current context queue (same as @a pQueue for
621 * ring-3) data of the queue to schedule.
622 * @param pQueue The shared queue data of the queue to schedule.
623 *
624 * @remarks Called while owning the lock.
625 */
626void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
627{
628 Assert(PDMCritSectIsOwner(&pQueue->TimerLock));
629
630 /*
631 * Dequeue the scheduling list and iterate it.
632 */
633 uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
634 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
635 while (idxNext != UINT32_MAX)
636 {
637 AssertBreak(idxNext < pQueueCC->cTimersAlloc);
638
639 /*
640 * Unlink the head timer and take down the index of the next one.
641 */
642 PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
643 idxNext = pTimer->idxScheduleNext;
644 pTimer->idxScheduleNext = UINT32_MAX;
645
646 /*
647 * Do the scheduling.
648 */
649 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
650 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->szName));
651 tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
652 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
653 }
654 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
655}
656
657
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * For each clock queue: tries the queue's TimerLock (plus the virtual sync
 * lock for the TMCLOCK_VIRTUAL_SYNC queue), verifies the doubly linked active
 * list is consistent, and in ring-3 additionally cross-checks every timer's
 * state against its presence on the active list and validates its handle.
 * Queues whose lock cannot be acquired are skipped (best effort).
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 */
void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
{
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
        PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
        Assert(pQueue->enmClock == (TMCLOCK)idxQueue);

        int rc = PDMCritSectTryEnter(&pQueue->TimerLock);
        if (RT_SUCCESS(rc))
        {
            /* The virtual sync queue is additionally protected by the
               virtual sync lock. */
            if (   pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
                || PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
            {
                /* Check the linking of the active lists. */
                PTMTIMER pPrev = NULL;
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    AssertMsg(pCur->enmClock == (TMCLOCK)idxQueue, ("%s: %d != %u\n", pszWhere, pCur->enmClock, idxQueue));
                    AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
                    TMTIMERSTATE enmState = pCur->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_ACTIVE:
                            AssertMsg(   pCur->idxScheduleNext == UINT32_MAX
                                      || pCur->enmState != TMTIMERSTATE_ACTIVE,
                                      ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
                            break;
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            break;
                        default:
                            AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                            break;
                    }
                }

# ifdef IN_RING3
                /* Go thru all the timers and check that the active ones all are in the active lists. */
                uint32_t idxTimer = pQueue->cTimersAlloc;
                uint32_t cFree = 0;
                while (idxTimer-- > 0)
                {
                    PTMTIMER const pTimer = &pQueue->paTimers[idxTimer];
                    TMTIMERSTATE const enmState = pTimer->enmState;
                    switch (enmState)
                    {
                        case TMTIMERSTATE_FREE:
                            cFree++;
                            break;

                        /* States that must be found on the active list: */
                        case TMTIMERSTATE_ACTIVE:
                        case TMTIMERSTATE_PENDING_STOP:
                        case TMTIMERSTATE_PENDING_RESCHEDULE:
                        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                        {
                            PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                            Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
                            while (pCurAct && pCurAct != pTimer)
                                pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
                            Assert(pCurAct == pTimer);
                            break;
                        }

                        /* States that must NOT be on the active list: */
                        case TMTIMERSTATE_PENDING_SCHEDULE:
                        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                        case TMTIMERSTATE_STOPPED:
                        case TMTIMERSTATE_EXPIRED_DELIVER:
                        {
                            Assert(pTimer->idxNext == UINT32_MAX);
                            Assert(pTimer->idxPrev == UINT32_MAX);
                            for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
                                 pCurAct;
                                 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
                            {
                                Assert(pCurAct != pTimer);
                                Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
                                Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
                            }
                            break;
                        }

                        /* ignore */
                        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            break;

                        case TMTIMERSTATE_INVALID:
                            Assert(idxTimer == 0); /* only entry 0 is expected to be invalid */
                            break;

                        /* shouldn't get here! */
                        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                        case TMTIMERSTATE_DESTROY:
                        default:
                            AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                            break;
                    }

                    /* Check the handle value. */
                    if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
                    {
                        Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
                        Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
                    }
                }
                Assert(cFree == pQueue->cTimersFree);
# endif /* IN_RING3 */

                if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
                    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
            }
            PDMCritSectLeave(&pQueue->TimerLock);
        }
    }
}
#endif /* VBOX_STRICT */
784
785#ifdef VBOX_HIGH_RES_TIMERS_HACK
786
787/**
788 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
789 * EMT is polling.
790 *
791 * @returns See tmTimerPollInternal.
792 * @param pVM The cross context VM structure.
793 * @param u64Now Current virtual clock timestamp.
794 * @param u64Delta The delta to the next even in ticks of the
795 * virtual clock.
796 * @param pu64Delta Where to return the delta.
797 */
798DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
799{
800 Assert(!(u64Delta & RT_BIT_64(63)));
801
802 if (!pVM->tm.s.fVirtualWarpDrive)
803 {
804 *pu64Delta = u64Delta;
805 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
806 }
807
808 /*
809 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
810 */
811 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
812 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
813
814 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
815 u64GipTime -= u64Start; /* the start is GIP time. */
816 if (u64GipTime >= u64Delta)
817 {
818 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
819 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
820 }
821 else
822 {
823 u64Delta -= u64GipTime;
824 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
825 u64Delta += u64GipTime;
826 }
827 *pu64Delta = u64Delta;
828 u64GipTime += u64Start;
829 return u64GipTime;
830}
831
832
833/**
834 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
835 * than the one dedicated to timer work.
836 *
837 * @returns See tmTimerPollInternal.
838 * @param pVM The cross context VM structure.
839 * @param u64Now Current virtual clock timestamp.
840 * @param pu64Delta Where to return the delta.
841 */
842DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
843{
844 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
845 *pu64Delta = s_u64OtherRet;
846 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
847}
848
849
850/**
851 * Worker for tmTimerPollInternal.
852 *
853 * @returns See tmTimerPollInternal.
854 * @param pVM The cross context VM structure.
855 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
856 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
857 * timer EMT.
858 * @param u64Now Current virtual clock timestamp.
859 * @param pu64Delta Where to return the delta.
860 * @param pCounter The statistics counter to update.
861 */
862DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
863 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
864{
865 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
866 if (pVCpuDst != pVCpu)
867 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
868 *pu64Delta = 0;
869 return 0;
870}
871
872/**
873 * Common worker for TMTimerPollGIP and TMTimerPoll.
874 *
875 * This function is called before FFs are checked in the inner execution EM loops.
876 *
877 * @returns The GIP timestamp of the next event.
878 * 0 if the next event has already expired.
879 *
880 * @param pVM The cross context VM structure.
881 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
882 * @param pu64Delta Where to store the delta.
883 *
884 * @thread The emulation thread.
885 *
886 * @remarks GIP uses ns ticks.
887 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
{
    PVMCPU pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run on some EMT right now (the runner will
     * re-evaluate expirations itself).
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t i64Delta1 = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        /* Expired: make sure the FF is set on the dedicated timer EMT. */
        if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straight forward if in a catch-up: not only do
     * we have to adjust the 'now', we also have to adjust the delta.
     */

    /*
     * Optimistic lockless approach: a pair of reads of each input with the
     * second read confirming the first; retried via the complicated path below
     * if anything moved in between.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                /* Consistent snapshot: offVirtualSync is the lag, so this yields the current virtual sync time. */
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    /* Nothing expired on either clock; report the nearer of the two deltas. */
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                /* Virtual sync timer expired: set the FF unless the queues are already being run. */
                if (    !pVM->tm.s.fRunningQueues
                    &&  !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        /* Virtual sync clock halted (e.g. during state load): treat as a hit. */
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     *
     * Loop until we get a self-consistent snapshot of the catch-up state
     * (offset, give-up offset, percentage), applying the catch-up adjustment
     * to the offset before computing the delta.
     */
    uint64_t off;
    uint32_t u32Pct = 0;
    bool fCatchUp;
    int cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                || cOuterTries <= 0)
            {
                /* Shrink the lag by the portion caught up since the last timestamp. */
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (    off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 &&  u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 &&  !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     * During catch-up the virtual sync delta is stretched back to host pace
     * (100 / (100 + pct)) before comparing with the virtual clock delta.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}
1077
1078
1079/**
1080 * Set FF if we've passed the next virtual event.
1081 *
1082 * This function is called before FFs are checked in the inner execution EM loops.
1083 *
1084 * @returns true if timers are pending, false if not.
1085 *
1086 * @param pVM The cross context VM structure.
1087 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1088 * @thread The emulation thread.
1089 */
1090VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1091{
1092 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1093 uint64_t off = 0;
1094 tmTimerPollInternal(pVM, pVCpu, &off);
1095 return off == 0;
1096}
1097
1098
1099/**
1100 * Set FF if we've passed the next virtual event.
1101 *
1102 * This function is called before FFs are checked in the inner execution EM loops.
1103 *
1104 * @param pVM The cross context VM structure.
1105 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1106 * @thread The emulation thread.
1107 */
1108VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1109{
1110 uint64_t off;
1111 tmTimerPollInternal(pVM, pVCpu, &off);
1112}
1113
1114
1115/**
1116 * Set FF if we've passed the next virtual event.
1117 *
1118 * This function is called before FFs are checked in the inner execution EM loops.
1119 *
1120 * @returns The GIP timestamp of the next event.
1121 * 0 if the next event has already expired.
1122 * @param pVM The cross context VM structure.
1123 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1124 * @param pu64Delta Where to store the delta.
1125 * @thread The emulation thread.
1126 */
1127VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1128{
1129 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
1130}
1131
1132#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1133
1134/**
1135 * Locks the timer clock.
1136 *
1137 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1138 * if the clock does not have a lock.
1139 * @param pVM The cross context VM structure.
1140 * @param hTimer Timer handle as returned by one of the create functions.
1141 * @param rcBusy What to return in ring-0 and raw-mode context if the
 * lock is busy.  Pass VINF_SUCCESS to acquire the
1143 * critical section thru a ring-3 call if necessary.
1144 *
1145 * @remarks Currently only supported on timers using the virtual sync clock.
1146 */
1147VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
1148{
1149 PTMTIMER pTimer;
1150 TMTIMER_HANDLE_TO_PTR_RETURN(pVM, hTimer, pTimer);
1151 AssertPtr(pTimer);
1152 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1153 return PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, rcBusy);
1154}
1155
1156
1157/**
1158 * Unlocks a timer clock locked by TMTimerLock.
1159 *
1160 * @param pVM The cross context VM structure.
1161 * @param hTimer Timer handle as returned by one of the create functions.
1162 */
1163VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
1164{
1165 PTMTIMER pTimer;
1166 TMTIMER_HANDLE_TO_PTR_RETURN_VOID(pVM, hTimer, pTimer);
1167 AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
1168 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1169}
1170
1171
1172/**
1173 * Checks if the current thread owns the timer clock lock.
1174 *
1175 * @returns @c true if its the owner, @c false if not.
1176 * @param pVM The cross context VM structure.
1177 * @param hTimer Timer handle as returned by one of the create functions.
1178 */
1179VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
1180{
1181 PTMTIMER pTimer;
1182 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, false, pTimer);
1183 AssertPtr(pTimer);
1184 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
1185 return PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock);
1186}
1187
1188
1189/**
1190 * Optimized TMTimerSet code path for starting an inactive timer.
1191 *
1192 * @returns VBox status code.
1193 *
1194 * @param pVM The cross context VM structure.
1195 * @param pTimer The timer handle.
1196 * @param u64Expire The new expire time.
1197 * @param pQueue Pointer to the shared timer queue data.
1198 * @param enmClock The sanitized clock.
1199 */
1200static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, TMCLOCK enmClock)
1201{
1202 Assert(pTimer->idxPrev == UINT32_MAX);
1203 Assert(pTimer->idxNext == UINT32_MAX);
1204 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1205
1206 /*
1207 * Calculate and set the expiration time.
1208 */
1209 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1210 {
1211 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1212 AssertMsgStmt(u64Expire >= u64Last,
1213 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1214 u64Expire = u64Last);
1215 }
1216 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1217 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));
1218
1219 /*
1220 * Link the timer into the active list.
1221 */
1222 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue), pQueue, pTimer, u64Expire);
1223
1224 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1225 return VINF_SUCCESS;
1226}
1227
1228
1229/**
1230 * TMTimerSet for the virtual sync timer queue.
1231 *
1232 * This employs a greatly simplified state machine by always acquiring the
1233 * queue lock and bypassing the scheduling list.
1234 *
1235 * @returns VBox status code
1236 * @param pVM The cross context VM structure.
1237 * @param pTimer The timer handle.
1238 * @param u64Expire The expiration time.
1239 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    /* Hold the virtual sync lock for the whole operation; this is what lets
       us skip the lockless scheduling state machine used by other clocks. */
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
    PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
    TMTIMERSTATE const enmState = pTimer->enmState;
    switch (enmState)
    {
        /* Starting an inactive timer: set expire, mark active, link. */
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            /* The virtual sync clock never goes backwards, so arming in the
               past is a caller bug (assertion only; value is used as-is). */
            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        /* Re-arming an active timer: unlink, update, relink at the new spot. */
        case TMTIMERSTATE_ACTIVE:
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        /* The PENDING_* scheduling states cannot legally occur for virtual
           sync timers since they bypass the scheduling list entirely. */
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}
1299
1300
1301/**
1302 * Arm a timer with a (new) expire time.
1303 *
1304 * @returns VBox status code.
1305 * @param pVM The cross context VM structure.
1306 * @param hTimer Timer handle as returned by one of the create functions.
1307 * @param u64Expire New expire time.
1308 */
VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
{
    PTMTIMER pTimer;
    TMTIMER_HANDLE_TO_PTR_RETURN(pVM, hTimer, pTimer);
    STAM_COUNTER_INC(&pTimer->StatSetAbsolute);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pVM, pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        || (   enmState1 == TMTIMERSTATE_STOPPED
            && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        TMCLOCK const enmClock = pTimer->enmClock;
        AssertReturn((unsigned)enmClock < TMCLOCK_MAX, VERR_TM_IPE_2);
        PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock];

        int rc = PDMCritSectTryEnter(&pQueue->TimerLock);
        if (RT_SUCCESS_NP(rc))
        {
            /* Atomically claim ACTIVE; on success we own the timer and can
               link it directly, skipping the scheduling list entirely. */
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, enmClock);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                PDMCritSectLeave(&pQueue->TimerLock);
                return VINF_SUCCESS;
            }
            PDMCritSectLeave(&pQueue->TimerLock);
        }
    }
#endif

    /*
     * Unoptimized code path.
     *
     * Lockless state machine: move into a *_SET_EXPIRE state to gain
     * exclusive write access to u64Expire, then hand off to tmSchedule via
     * the PENDING_* states.  Retried when the timer is mid-transition.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pVM, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(pTimer->idxPrev == UINT32_MAX);
                    Assert(pTimer->idxNext == UINT32_MAX);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pVM, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pVM, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pVM, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            /* Transient states held by another thread: back off and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
1466
1467
1468/**
1469 * Return the current time for the specified clock, setting pu64Now if not NULL.
1470 *
1471 * @returns Current time.
1472 * @param pVM The cross context VM structure.
1473 * @param enmClock The clock to query.
1474 * @param pu64Now Optional pointer where to store the return time
1475 */
1476DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1477{
1478 uint64_t u64Now;
1479 switch (enmClock)
1480 {
1481 case TMCLOCK_VIRTUAL_SYNC:
1482 u64Now = TMVirtualSyncGet(pVM);
1483 break;
1484 case TMCLOCK_VIRTUAL:
1485 u64Now = TMVirtualGet(pVM);
1486 break;
1487 case TMCLOCK_REAL:
1488 u64Now = TMRealGet(pVM);
1489 break;
1490 default:
1491 AssertFatalMsgFailed(("%d\n", enmClock));
1492 }
1493
1494 if (pu64Now)
1495 *pu64Now = u64Now;
1496 return u64Now;
1497}
1498
1499
1500/**
1501 * Optimized TMTimerSetRelative code path.
1502 *
1503 * @returns VBox status code.
1504 *
1505 * @param pVM The cross context VM structure.
1506 * @param pTimer The timer handle.
1507 * @param cTicksToNext Clock ticks until the next time expiration.
1508 * @param pu64Now Where to return the current time stamp used.
1509 * Optional.
1510 * @param pQueue Pointer to the shared timer queue data.
1511 * @param enmClock The sanitized clock.
1512 */
1513static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1514 PTMTIMERQUEUE pQueue, TMCLOCK enmClock)
1515{
1516 Assert(pTimer->idxPrev == UINT32_MAX);
1517 Assert(pTimer->idxNext == UINT32_MAX);
1518 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1519
1520 /*
1521 * Calculate and set the expiration time.
1522 */
1523 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1524 pTimer->u64Expire = u64Expire;
1525 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));
1526
1527 /*
1528 * Link the timer into the active list.
1529 */
1530 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
1531 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue), pQueue, pTimer, u64Expire);
1532
1533 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1534 return VINF_SUCCESS;
1535}
1536
1537
1538/**
1539 * TMTimerSetRelative for the virtual sync timer queue.
1540 *
1541 * This employs a greatly simplified state machine by always acquiring the
1542 * queue lock and bypassing the scheduling list.
1543 *
1544 * @returns VBox status code
1545 * @param pVM The cross context VM structure.
1546 * @param pTimer The timer to (re-)arm.
1547 * @param cTicksToNext Clock ticks until the next time expiration.
1548 * @param pu64Now Where to return the current time stamp used.
1549 * Optional.
1550 */
1551static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1552{
1553 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1554 VM_ASSERT_EMT(pVM);
1555 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1556 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1557 AssertRCReturn(rc, rc);
1558
1559 /* Calculate the expiration tick. */
1560 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1561 if (pu64Now)
1562 *pu64Now = u64Expire;
1563 u64Expire += cTicksToNext;
1564
1565 /* Update the timer. */
1566 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1567 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1568 TMTIMERSTATE const enmState = pTimer->enmState;
1569 switch (enmState)
1570 {
1571 case TMTIMERSTATE_EXPIRED_DELIVER:
1572 case TMTIMERSTATE_STOPPED:
1573 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1574 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1575 else
1576 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1577 pTimer->u64Expire = u64Expire;
1578 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1579 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1580 rc = VINF_SUCCESS;
1581 break;
1582
1583 case TMTIMERSTATE_ACTIVE:
1584 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1585 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1586 pTimer->u64Expire = u64Expire;
1587 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1588 rc = VINF_SUCCESS;
1589 break;
1590
1591 case TMTIMERSTATE_PENDING_RESCHEDULE:
1592 case TMTIMERSTATE_PENDING_STOP:
1593 case TMTIMERSTATE_PENDING_SCHEDULE:
1594 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1595 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1596 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1597 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1598 case TMTIMERSTATE_DESTROY:
1599 case TMTIMERSTATE_FREE:
1600 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1601 rc = VERR_TM_INVALID_STATE;
1602 break;
1603
1604 default:
1605 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1606 rc = VERR_TM_UNKNOWN_STATE;
1607 break;
1608 }
1609
1610 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1611 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1612 return rc;
1613}
1614
1615
1616/**
 * Arm a timer with an expire time relative to the current time.
1618 *
1619 * @returns VBox status code.
1620 * @param pVM The cross context VM structure.
1621 * @param pTimer The timer to arm.
1622 * @param cTicksToNext Clock ticks until the next time expiration.
1623 * @param pu64Now Where to return the current time stamp used.
1624 * Optional.
1625 */
1626static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1627{
1628 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1629
1630 /* Treat virtual sync timers specially. */
1631 TMCLOCK enmClock = pTimer->enmClock;
1632 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1633 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1634 AssertReturn((unsigned)enmClock < (unsigned)TMCLOCK_MAX, VERR_TM_IPE_2);
1635
1636 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1637 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1638
1639 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1640
1641#ifdef VBOX_WITH_STATISTICS
1642 /*
1643 * Gather optimization info.
1644 */
1645 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1646 TMTIMERSTATE enmOrgState = pTimer->enmState;
1647 switch (enmOrgState)
1648 {
1649 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1650 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1651 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1652 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1653 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1654 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1655 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1656 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1657 }
1658#endif
1659
1660 /*
1661 * Try to take the TM lock and optimize the common cases.
1662 *
1663 * With the TM lock we can safely make optimizations like immediate
1664 * scheduling and we can also be 100% sure that we're not racing the
1665 * running of the timer queues. As an additional restraint we require the
1666 * timer to have a critical section associated with to be 100% there aren't
1667 * concurrent operations on the timer. (This latter isn't necessary any
1668 * longer as this isn't supported for any timers, critsect or not.)
1669 *
1670 * Note! Lock ordering doesn't apply when we only tries to
1671 * get the innermost locks.
1672 */
1673 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[enmClock];
1674 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(&pQueue->TimerLock));
1675#if 1
1676 if ( fOwnTMLock
1677 && pTimer->pCritSect)
1678 {
1679 TMTIMERSTATE enmState = pTimer->enmState;
1680 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1681 || enmState == TMTIMERSTATE_STOPPED)
1682 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1683 {
1684 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueue, enmClock);
1685 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1686 PDMCritSectLeave(&pQueue->TimerLock);
1687 return VINF_SUCCESS;
1688 }
1689
1690 /* Optimize other states when it becomes necessary. */
1691 }
1692#endif
1693
1694 /*
1695 * Unoptimized path.
1696 */
1697 int rc;
1698 for (int cRetries = 1000; ; cRetries--)
1699 {
1700 /*
1701 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1702 */
1703 TMTIMERSTATE enmState = pTimer->enmState;
1704 switch (enmState)
1705 {
1706 case TMTIMERSTATE_STOPPED:
1707 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1708 {
1709 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1710 * Figure a safe way of activating this timer while the queue is
1711 * being run.
1712 * (99.9% sure this that the assertion is caused by DevAPIC.cpp
1713 * re-starting the timer in response to a initial_count write.) */
1714 }
1715 RT_FALL_THRU();
1716 case TMTIMERSTATE_EXPIRED_DELIVER:
1717 if (tmTimerTryWithLink(pVM, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1718 {
1719 Assert(pTimer->idxPrev == UINT32_MAX);
1720 Assert(pTimer->idxNext == UINT32_MAX);
1721 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1722 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1723 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1724 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1725 tmSchedule(pVM, pTimer);
1726 rc = VINF_SUCCESS;
1727 break;
1728 }
1729 rc = VERR_TRY_AGAIN;
1730 break;
1731
1732 case TMTIMERSTATE_PENDING_SCHEDULE:
1733 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1734 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1735 {
1736 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1737 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1738 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1739 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1740 tmSchedule(pVM, pTimer);
1741 rc = VINF_SUCCESS;
1742 break;
1743 }
1744 rc = VERR_TRY_AGAIN;
1745 break;
1746
1747
1748 case TMTIMERSTATE_ACTIVE:
1749 if (tmTimerTryWithLink(pVM, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1750 {
1751 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1752 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1753 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1754 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1755 tmSchedule(pVM, pTimer);
1756 rc = VINF_SUCCESS;
1757 break;
1758 }
1759 rc = VERR_TRY_AGAIN;
1760 break;
1761
1762 case TMTIMERSTATE_PENDING_RESCHEDULE:
1763 case TMTIMERSTATE_PENDING_STOP:
1764 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1765 {
1766 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1767 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1768 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1769 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1770 tmSchedule(pVM, pTimer);
1771 rc = VINF_SUCCESS;
1772 break;
1773 }
1774 rc = VERR_TRY_AGAIN;
1775 break;
1776
1777
1778 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1779 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1780 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1781#ifdef IN_RING3
1782 if (!RTThreadYield())
1783 RTThreadSleep(1);
1784#else
1785/** @todo call host context and yield after a couple of iterations */
1786#endif
1787 rc = VERR_TRY_AGAIN;
1788 break;
1789
1790 /*
1791 * Invalid states.
1792 */
1793 case TMTIMERSTATE_DESTROY:
1794 case TMTIMERSTATE_FREE:
1795 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1796 rc = VERR_TM_INVALID_STATE;
1797 break;
1798
1799 default:
1800 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1801 rc = VERR_TM_UNKNOWN_STATE;
1802 break;
1803 }
1804
1805 /* switch + loop is tedious to break out of. */
1806 if (rc == VINF_SUCCESS)
1807 break;
1808
1809 if (rc != VERR_TRY_AGAIN)
1810 {
1811 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1812 break;
1813 }
1814 if (cRetries <= 0)
1815 {
1816 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1817 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1818 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1819 break;
1820 }
1821
1822 /*
1823 * Retry to gain locks.
1824 */
1825 if (!fOwnTMLock)
1826 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(&pQueue->TimerLock));
1827
1828 } /* for (;;) */
1829
1830 /*
1831 * Clean up and return.
1832 */
1833 if (fOwnTMLock)
1834 PDMCritSectLeave(&pQueue->TimerLock);
1835
1836 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1837 return rc;
1838}
1839
1840
/**
 * Arms a timer with an expire time relative to the current time.
 *
 * Thin public wrapper: validates/resolves the timer handle and defers all the
 * real work to the internal tmTimerSetRelative worker.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   hTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    PTMTIMER pTimer;
    TMTIMER_HANDLE_TO_PTR_RETURN(pVM, hTimer, pTimer); /* Validates hTimer and sets pTimer; returns on failure. */
    return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
}
1857
1858
1859/**
1860 * Drops a hint about the frequency of the timer.
1861 *
1862 * This is used by TM and the VMM to calculate how often guest execution needs
1863 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1864 *
1865 * @returns VBox status code.
1866 * @param pVM The cross context VM structure.
1867 * @param hTimer Timer handle as returned by one of the create functions.
1868 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1869 *
1870 * @remarks We're using an integer hertz value here since anything above 1 HZ
1871 * is not going to be any trouble satisfying scheduling wise. The
1872 * range where it makes sense is >= 100 HZ.
1873 */
1874VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1875{
1876 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1877 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1878
1879 uint32_t const uHzOldHint = pTimer->uHzHint;
1880 pTimer->uHzHint = uHzHint;
1881
1882 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1883 if ( uHzHint > uMaxHzHint
1884 || uHzOldHint >= uMaxHzHint)
1885 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1886
1887 return VINF_SUCCESS;
1888}
1889
1890
/**
 * TMTimerStop for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM     The cross context VM structure.
 * @param   pTimer  The timer handle.
 */
static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    /* All state changes on this queue happen under the virtual sync lock. */
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Reset the HZ hint.  Only flag the combined hint for recalculation when
       this timer's hint could be the one defining the queue's cached maximum. */
    uint32_t uOldHzHint = pTimer->uHzHint;
    if (uOldHzHint)
    {
        if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
            ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
        pTimer->uHzHint = 0;
    }

    /* Update the timer state. */
    TMTIMERSTATE const enmState = pTimer->enmState;
    switch (enmState)
    {
        /* On the active list: unlink it, then mark it stopped. */
        case TMTIMERSTATE_ACTIVE:
        {
            PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
            tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;
        }

        /* Already off the active list awaiting callback delivery: just mark it stopped. */
        case TMTIMERSTATE_EXPIRED_DELIVER:
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        /* Stopping an already stopped timer is a no-op. */
        case TMTIMERSTATE_STOPPED:
            rc = VINF_SUCCESS;
            break;

        /*
         * Invalid states.  The PENDING_* / *_SET_EXPIRE states belong to the
         * lockless scheduling machinery which this simplified path bypasses.
         */
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}
1963
1964
/**
 * Stop the timer.
 * Use TMR3TimerArm() to "un-stop" the timer.
 *
 * Uses the lockless optimistic state machine: attempt the state transition
 * with tmTimerTry*() and retry (bounded) if another thread changed the state
 * underneath us.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   hTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
    STAM_COUNTER_INC(&pTimer->StatStop);

    /* Treat virtual sync timers specially (always taken under the queue lock). */
    if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncStop(pVM, pTimer);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pVM, pTimer);

    /*
     * Reset the HZ hint.
     * Flag the queue dirty only when this timer's hint might define the cached maximum.
     */
    uint32_t const uOldHzHint = pTimer->uHzHint;
    if (uOldHzHint)
    {
        if (uOldHzHint >= pQueue->uMaxHzHint)
            ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
        pTimer->uHzHint = 0;
    }

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
        switch (enmState)
        {
            /* Stopping a timer whose callback is being delivered is a caller bug. */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            /* Already stopped or on its way to being stopped: nothing to do. */
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            /* Queued for arming: flip to stop-then-schedule and kick the scheduler. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pVM, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Queued for re-arming: downgrade to a plain pending stop. */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pVM, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* On the active list: the WithLink variant also guards the list linkage. */
            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pVM, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pVM, pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Transient states owned by another thread: yield and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
2073
2074
2075/**
2076 * Get the current clock time.
2077 * Handy for calculating the new expire time.
2078 *
2079 * @returns Current clock time.
2080 * @param pVM The cross context VM structure.
2081 * @param hTimer Timer handle as returned by one of the create functions.
2082 */
2083VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2084{
2085 PTMTIMER pTimer;
2086 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, UINT64_MAX, pTimer);
2087 STAM_COUNTER_INC(&pTimer->StatGet);
2088
2089 uint64_t u64;
2090 switch (pTimer->enmClock)
2091 {
2092 case TMCLOCK_VIRTUAL:
2093 u64 = TMVirtualGet(pVM);
2094 break;
2095 case TMCLOCK_VIRTUAL_SYNC:
2096 u64 = TMVirtualSyncGet(pVM);
2097 break;
2098 case TMCLOCK_REAL:
2099 u64 = TMRealGet(pVM);
2100 break;
2101 default:
2102 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2103 return UINT64_MAX;
2104 }
2105 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2106 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2107 return u64;
2108}
2109
2110
2111/**
2112 * Get the frequency of the timer clock.
2113 *
2114 * @returns Clock frequency (as Hz of course).
2115 * @param pVM The cross context VM structure.
2116 * @param hTimer Timer handle as returned by one of the create functions.
2117 */
2118VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2119{
2120 PTMTIMER pTimer;
2121 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, 0, pTimer);
2122 switch (pTimer->enmClock)
2123 {
2124 case TMCLOCK_VIRTUAL:
2125 case TMCLOCK_VIRTUAL_SYNC:
2126 return TMCLOCK_FREQ_VIRTUAL;
2127
2128 case TMCLOCK_REAL:
2129 return TMCLOCK_FREQ_REAL;
2130
2131 default:
2132 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2133 return 0;
2134 }
2135}
2136
2137
/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer; UINT64_MAX when the timer isn't active
 *          (or when a stable state couldn't be observed).
 * @param   pVM     The cross context VM structure.
 * @param   hTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
{
    PTMTIMER pTimer;
    TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, UINT64_MAX, pTimer);
    TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
    /* The state can be changed concurrently by the lockless scheduling code,
       so spin a bounded number of times until a stable state is seen. */
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Not on (or leaving) the active list: no meaningful expire time. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
                return UINT64_MAX;

            /* Active, or heading for the active list with u64Expire already set. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
                return pTimer->u64Expire;

            /* Another thread is in the middle of updating u64Expire: yield and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
                return UINT64_MAX;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
                return UINT64_MAX;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
    return UINT64_MAX;
}
2201
2202
2203/**
2204 * Checks if a timer is active or not.
2205 *
2206 * @returns True if active.
2207 * @returns False if not active.
2208 * @param pVM The cross context VM structure.
2209 * @param hTimer Timer handle as returned by one of the create functions.
2210 */
2211VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2212{
2213 PTMTIMER pTimer;
2214 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, false, pTimer);
2215 TMTIMERSTATE enmState = pTimer->enmState;
2216 switch (enmState)
2217 {
2218 case TMTIMERSTATE_STOPPED:
2219 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2220 case TMTIMERSTATE_EXPIRED_DELIVER:
2221 case TMTIMERSTATE_PENDING_STOP:
2222 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2223 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2224 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2225 return false;
2226
2227 case TMTIMERSTATE_ACTIVE:
2228 case TMTIMERSTATE_PENDING_RESCHEDULE:
2229 case TMTIMERSTATE_PENDING_SCHEDULE:
2230 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2231 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2232 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2233 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2234 return true;
2235
2236 /*
2237 * Invalid states.
2238 */
2239 case TMTIMERSTATE_DESTROY:
2240 case TMTIMERSTATE_FREE:
2241 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2242 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2243 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2244 return false;
2245 default:
2246 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2247 return false;
2248 }
2249}
2250
2251
2252/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2253
2254
2255/**
2256 * Arm a timer with a (new) expire time relative to current time.
2257 *
2258 * @returns VBox status code.
2259 * @param pVM The cross context VM structure.
2260 * @param hTimer Timer handle as returned by one of the create functions.
2261 * @param cMilliesToNext Number of milliseconds to the next tick.
2262 */
2263VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2264{
2265 PTMTIMER pTimer;
2266 TMTIMER_HANDLE_TO_PTR_RETURN(pVM, hTimer, pTimer);
2267 switch (pTimer->enmClock)
2268 {
2269 case TMCLOCK_VIRTUAL:
2270 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2271 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2272
2273 case TMCLOCK_VIRTUAL_SYNC:
2274 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2275 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2276
2277 case TMCLOCK_REAL:
2278 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2279 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL);
2280
2281 default:
2282 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2283 return VERR_TM_TIMER_BAD_CLOCK;
2284 }
2285}
2286
2287
2288/**
2289 * Arm a timer with a (new) expire time relative to current time.
2290 *
2291 * @returns VBox status code.
2292 * @param pVM The cross context VM structure.
2293 * @param hTimer Timer handle as returned by one of the create functions.
2294 * @param cMicrosToNext Number of microseconds to the next tick.
2295 */
2296VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2297{
2298 PTMTIMER pTimer;
2299 TMTIMER_HANDLE_TO_PTR_RETURN(pVM, hTimer, pTimer);
2300 switch (pTimer->enmClock)
2301 {
2302 case TMCLOCK_VIRTUAL:
2303 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2304 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL);
2305
2306 case TMCLOCK_VIRTUAL_SYNC:
2307 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2308 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL);
2309
2310 case TMCLOCK_REAL:
2311 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2312 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL);
2313
2314 default:
2315 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2316 return VERR_TM_TIMER_BAD_CLOCK;
2317 }
2318}
2319
2320
2321/**
2322 * Arm a timer with a (new) expire time relative to current time.
2323 *
2324 * @returns VBox status code.
2325 * @param pVM The cross context VM structure.
2326 * @param hTimer Timer handle as returned by one of the create functions.
2327 * @param cNanosToNext Number of nanoseconds to the next tick.
2328 */
2329VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2330{
2331 PTMTIMER pTimer;
2332 TMTIMER_HANDLE_TO_PTR_RETURN(pVM, hTimer, pTimer);
2333 switch (pTimer->enmClock)
2334 {
2335 case TMCLOCK_VIRTUAL:
2336 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2337 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL);
2338
2339 case TMCLOCK_VIRTUAL_SYNC:
2340 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2341 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL);
2342
2343 case TMCLOCK_REAL:
2344 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2345 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL);
2346
2347 default:
2348 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2349 return VERR_TM_TIMER_BAD_CLOCK;
2350 }
2351}
2352
2353
2354/**
2355 * Get the current clock time as nanoseconds.
2356 *
2357 * @returns The timer clock as nanoseconds.
2358 * @param pVM The cross context VM structure.
2359 * @param hTimer Timer handle as returned by one of the create functions.
2360 */
2361VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2362{
2363 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2364}
2365
2366
2367/**
2368 * Get the current clock time as microseconds.
2369 *
2370 * @returns The timer clock as microseconds.
2371 * @param pVM The cross context VM structure.
2372 * @param hTimer Timer handle as returned by one of the create functions.
2373 */
2374VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2375{
2376 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2377}
2378
2379
2380/**
2381 * Get the current clock time as milliseconds.
2382 *
2383 * @returns The timer clock as milliseconds.
2384 * @param pVM The cross context VM structure.
2385 * @param hTimer Timer handle as returned by one of the create functions.
2386 */
2387VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2388{
2389 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2390}
2391
2392
2393/**
2394 * Converts the specified timer clock time to nanoseconds.
2395 *
2396 * @returns nanoseconds.
2397 * @param pVM The cross context VM structure.
2398 * @param hTimer Timer handle as returned by one of the create functions.
2399 * @param cTicks The clock ticks.
2400 * @remark There could be rounding errors here. We just do a simple integer divide
2401 * without any adjustments.
2402 */
2403VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2404{
2405 PTMTIMER pTimer;
2406 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, 0, pTimer);
2407 switch (pTimer->enmClock)
2408 {
2409 case TMCLOCK_VIRTUAL:
2410 case TMCLOCK_VIRTUAL_SYNC:
2411 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2412 return cTicks;
2413
2414 case TMCLOCK_REAL:
2415 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2416 return cTicks * 1000000;
2417
2418 default:
2419 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2420 return 0;
2421 }
2422}
2423
2424
2425/**
2426 * Converts the specified timer clock time to microseconds.
2427 *
2428 * @returns microseconds.
2429 * @param pVM The cross context VM structure.
2430 * @param hTimer Timer handle as returned by one of the create functions.
2431 * @param cTicks The clock ticks.
2432 * @remark There could be rounding errors here. We just do a simple integer divide
2433 * without any adjustments.
2434 */
2435VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2436{
2437 PTMTIMER pTimer;
2438 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, 0, pTimer);
2439 switch (pTimer->enmClock)
2440 {
2441 case TMCLOCK_VIRTUAL:
2442 case TMCLOCK_VIRTUAL_SYNC:
2443 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2444 return cTicks / 1000;
2445
2446 case TMCLOCK_REAL:
2447 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2448 return cTicks * 1000;
2449
2450 default:
2451 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2452 return 0;
2453 }
2454}
2455
2456
2457/**
2458 * Converts the specified timer clock time to milliseconds.
2459 *
2460 * @returns milliseconds.
2461 * @param pVM The cross context VM structure.
2462 * @param hTimer Timer handle as returned by one of the create functions.
2463 * @param cTicks The clock ticks.
2464 * @remark There could be rounding errors here. We just do a simple integer divide
2465 * without any adjustments.
2466 */
2467VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2468{
2469 PTMTIMER pTimer;
2470 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, 0, pTimer);
2471 switch (pTimer->enmClock)
2472 {
2473 case TMCLOCK_VIRTUAL:
2474 case TMCLOCK_VIRTUAL_SYNC:
2475 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2476 return cTicks / 1000000;
2477
2478 case TMCLOCK_REAL:
2479 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2480 return cTicks;
2481
2482 default:
2483 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2484 return 0;
2485 }
2486}
2487
2488
2489/**
2490 * Converts the specified nanosecond timestamp to timer clock ticks.
2491 *
2492 * @returns timer clock ticks.
2493 * @param pVM The cross context VM structure.
2494 * @param hTimer Timer handle as returned by one of the create functions.
2495 * @param cNanoSecs The nanosecond value ticks to convert.
2496 * @remark There could be rounding and overflow errors here.
2497 */
2498VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2499{
2500 PTMTIMER pTimer;
2501 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, 0, pTimer);
2502 switch (pTimer->enmClock)
2503 {
2504 case TMCLOCK_VIRTUAL:
2505 case TMCLOCK_VIRTUAL_SYNC:
2506 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2507 return cNanoSecs;
2508
2509 case TMCLOCK_REAL:
2510 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2511 return cNanoSecs / 1000000;
2512
2513 default:
2514 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2515 return 0;
2516 }
2517}
2518
2519
2520/**
2521 * Converts the specified microsecond timestamp to timer clock ticks.
2522 *
2523 * @returns timer clock ticks.
2524 * @param pVM The cross context VM structure.
2525 * @param hTimer Timer handle as returned by one of the create functions.
2526 * @param cMicroSecs The microsecond value ticks to convert.
2527 * @remark There could be rounding and overflow errors here.
2528 */
2529VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2530{
2531 PTMTIMER pTimer;
2532 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, 0, pTimer);
2533 switch (pTimer->enmClock)
2534 {
2535 case TMCLOCK_VIRTUAL:
2536 case TMCLOCK_VIRTUAL_SYNC:
2537 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2538 return cMicroSecs * 1000;
2539
2540 case TMCLOCK_REAL:
2541 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2542 return cMicroSecs / 1000;
2543
2544 default:
2545 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2546 return 0;
2547 }
2548}
2549
2550
2551/**
2552 * Converts the specified millisecond timestamp to timer clock ticks.
2553 *
2554 * @returns timer clock ticks.
2555 * @param pVM The cross context VM structure.
2556 * @param hTimer Timer handle as returned by one of the create functions.
2557 * @param cMilliSecs The millisecond value ticks to convert.
2558 * @remark There could be rounding and overflow errors here.
2559 */
2560VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2561{
2562 PTMTIMER pTimer;
2563 TMTIMER_HANDLE_TO_PTR_RETURN_EX(pVM, hTimer, 0, pTimer);
2564 switch (pTimer->enmClock)
2565 {
2566 case TMCLOCK_VIRTUAL:
2567 case TMCLOCK_VIRTUAL_SYNC:
2568 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2569 return cMilliSecs * 1000000;
2570
2571 case TMCLOCK_REAL:
2572 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2573 return cMilliSecs;
2574
2575 default:
2576 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2577 return 0;
2578 }
2579}
2580
2581
/**
 * Convert state to string.
 *
 * @returns Readonly status name (e.g. "2-ACTIVE").
 * @param   enmState    State.
 */
const char *tmTimerState(TMTIMERSTATE enmState)
{
    switch (enmState)
    {
        /* CASE(n, X) expands to a case returning the literal "n-X" and
           compile-time asserts that TMTIMERSTATE_X really has the value n,
           keeping the strings in sync with the enum definition. */
#define CASE(num, state) \
        case TMTIMERSTATE_##state: \
            AssertCompile(TMTIMERSTATE_##state == (num)); \
            return #num "-" #state
        CASE( 0,INVALID);
        CASE( 1,STOPPED);
        CASE( 2,ACTIVE);
        CASE( 3,EXPIRED_GET_UNLINK);
        CASE( 4,EXPIRED_DELIVER);
        CASE( 5,PENDING_STOP);
        CASE( 6,PENDING_STOP_SCHEDULE);
        CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
        CASE( 8,PENDING_SCHEDULE);
        CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
        CASE(10,PENDING_RESCHEDULE);
        CASE(11,DESTROY);
        CASE(12,FREE);
        default:
            AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
            return "Invalid state!";
#undef CASE
    }
}
2615
2616
/**
 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
 *
 * @returns The highest frequency.  0 if no timers care.
 * @param   pVM             The cross context VM structure.
 * @param   uOldMaxHzHint   The old global hint.
 */
DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
{
    /* Layout of HzHint.u64Combined as used below: the low dword carries two
       "needs updating" bits per queue (bit idxQueue and bit idxQueue + 16,
       hence the <= 16 queue limit) and the high dword caches the combined max
       Hz hint (see the RT_MAKE_U64 commit at the bottom).  Having two flag
       bits per queue forces other callers thru the slow path while we're
       recalculating and helps us detect changes made meanwhile. */
    AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);

    /*
     * The "right" highest frequency value isn't so important that we'll block
     * waiting on the timer semaphores.
     */
    uint32_t uMaxHzHint = 0;
    for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];

        /* Get the max Hz hint for the queue.  Use the cached value when the
           queue isn't flagged dirty or when we can't get its lock right away. */
        uint32_t uMaxHzHintQueue;
        if (   !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
            || RT_FAILURE_NP(PDMCritSectTryEnter(&pQueue->TimerLock)))
            uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
        else
        {
            /* Is it still necessary to do updating? */
            if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
            {
                ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */

                PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
                uMaxHzHintQueue = 0;
                /* Walk the active list, keeping the highest hint of any timer
                   that is armed or on its way to being armed. */
                for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
                     pCur;
                     pCur = tmTimerGetNext(pQueueCC, pCur))
                {
                    uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
                    if (uHzHint > uMaxHzHintQueue)
                    {
                        TMTIMERSTATE enmState = pCur->enmState;
                        switch (enmState)
                        {
                            case TMTIMERSTATE_ACTIVE:
                            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                            case TMTIMERSTATE_EXPIRED_DELIVER:
                            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_SCHEDULE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE:
                                uMaxHzHintQueue = uHzHint;
                                break;

                            case TMTIMERSTATE_STOPPED:
                            case TMTIMERSTATE_PENDING_STOP:
                            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                            case TMTIMERSTATE_DESTROY:
                            case TMTIMERSTATE_FREE:
                            case TMTIMERSTATE_INVALID:
                                break;
                            /* no default, want gcc warnings when adding more states. */
                        }
                    }
                }

                /* Write the new Hz hint for the queue and clear the other update flag. */
                ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
                ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
            }
            else
                uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);

            PDMCritSectLeave(&pQueue->TimerLock);
        }

        /* Update the global max Hz hint. */
        if (uMaxHzHint < uMaxHzHintQueue)
            uMaxHzHint = uMaxHzHintQueue;
    }

    /*
     * Update the frequency hint if no pending frequency changes and we didn't race anyone thru here.
     * On a CAS failure ASMAtomicCmpXchgExU64 stores the actual current value in
     * u64Actual, which the retry loop uses as the next comparand.
     */
    uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
    if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
        Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
    else
        for (uint32_t iTry = 1;; iTry++)
        {
            /* Give up when new dirty bits appeared (low dword non-zero) or
               after a few attempts; otherwise retry with the observed value. */
            if (RT_LO_U32(u64Actual) != 0)
                Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
            else if (iTry >= 4)
                Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
            else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
                Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
            else
                continue;
            break;
        }
    return uMaxHzHint;
}
2722
2723
2724/**
2725 * Gets the highest frequency hint for all the important timers.
2726 *
2727 * @returns The highest frequency. 0 if no timers care.
2728 * @param pVM The cross context VM structure.
2729 */
2730DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2731{
2732 /*
2733 * Query the value, recalculate it if necessary.
2734 */
2735 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2736 if (RT_HI_U32(u64Combined) == 0)
2737 return RT_LO_U32(u64Combined); /* hopefully somewhat likely */
2738 return tmGetFrequencyHintSlow(pVM, RT_LO_U32(u64Combined));
2739}
2740
2741
/**
 * Calculates a host timer frequency that would be suitable for the current
 * timer load.
 *
 * This will take the highest timer frequency, adjust for catch-up and warp
 * driver, and finally add a little fudge factor.  The caller (VMM) will use
 * the result to adjust the per-cpu preemption timer.
 *
 * @returns The highest frequency. 0 if no important timers around.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
{
    /* Start from the highest hint any important timer has registered. */
    uint32_t uHz = tmGetFrequencyHint(pVM);

    /* Catch up, we have to be more aggressive than the % indicates at the
       beginning of the effort. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
        /* Re-check the flag with an ordered read after fetching the percentage;
           presumably guards against catch-up being stopped in between — the
           exact memory-ordering contract isn't visible here, so preserve the
           read order as-is (NOTE(review): confirm against TMVirtualSync code). */
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            /* Scale the catch-up percentage by a configured fudge factor that
               depends on how far behind we are; percentages above 400 get no
               extra fudge and are used as-is. */
            if (u32Pct <= 100)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
            else if (u32Pct <= 200)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
            else if (u32Pct <= 400)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
            /* uHz := uHz * (100 + u32Pct) / 100, i.e. boost by the catch-up %. */
            uHz *= u32Pct + 100;
            uHz /= 100;
        }
    }

    /* Warp drive. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
        /* Same double-read pattern as above: re-check the flag after reading
           the percentage before applying it. */
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
        {
            /* Scale the frequency directly by the warp percentage. */
            uHz *= u32Pct;
            uHz /= 100;
        }
    }

    /* Fudge factor: the CPU designated as the timer CPU gets its own
       (presumably higher) percentage; all others get a different one. */
    if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
    else
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
    uHz /= 100;

    /* Make sure it isn't too high. */
    if (uHz > pVM->tm.s.cHostHzMax)
        uHz = pVM->tm.s.cHostHzMax;

    return uHz;
}
2800
2801
2802/**
2803 * Whether the guest virtual clock is ticking.
2804 *
2805 * @returns true if ticking, false otherwise.
2806 * @param pVM The cross context VM structure.
2807 */
2808VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2809{
2810 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2811}
2812
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette