VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@105698

Last change on this file since 105698 was 105698, checked in by vboxsync, 4 months ago

VMM/IEM,TM: Adaptive timer polling and running of the timer queues from the IEM recompiler execution loop. bugref:10656

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 109.3 KB
1/* $Id: TMAll.cpp 105698 2024-08-15 23:33:49Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_TM
33#ifdef DEBUG_bird
34# define DBGFTRACE_DISABLED /* annoying */
35#endif
36#include <VBox/vmm/tm.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/dbgftrace.h>
39#ifdef IN_RING3
40#endif
41#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
42#include "TMInternal.h"
43#include <VBox/vmm/vmcc.h>
44
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/log.h>
48#include <VBox/sup.h>
49#include <iprt/time.h>
50#include <iprt/assert.h>
51#include <iprt/asm.h>
52#include <iprt/asm-math.h>
53#include <iprt/string.h>
54#ifdef IN_RING3
55# include <iprt/thread.h>
56#endif
57
58#include "TMInline.h"
59
60
61/*********************************************************************************************************************************
62* Defined Constants And Macros *
63*********************************************************************************************************************************/
64#ifdef VBOX_STRICT
65/** @def TMTIMER_GET_CRITSECT
66 * Helper for safely resolving the critical section for a timer belonging to a
67 * device instance.
68 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
69# ifdef IN_RING3
70# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) ((a_pTimer)->pCritSect)
71# else
72# define TMTIMER_GET_CRITSECT(a_pVM, a_pTimer) tmRZTimerGetCritSect(a_pVM, a_pTimer)
73# endif
74#endif
75
76/** @def TMTIMER_ASSERT_CRITSECT
77 * Checks that the caller owns the critical section if one is associated with
78 * the timer. */
79#ifdef VBOX_STRICT
80# define TMTIMER_ASSERT_CRITSECT(a_pVM, a_pTimer) \
81 do { \
82 if ((a_pTimer)->pCritSect) \
83 { \
84 VMSTATE enmState; \
85 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(a_pVM, a_pTimer); \
86 AssertMsg( pCritSect \
87 && ( PDMCritSectIsOwner((a_pVM), pCritSect) \
88 || (enmState = (a_pVM)->enmVMState) == VMSTATE_CREATING \
89 || enmState == VMSTATE_RESETTING \
90 || enmState == VMSTATE_RESETTING_LS ),\
91 ("pTimer=%p (%s) pCritSect=%p (%s)\n", a_pTimer, (a_pTimer)->szName, \
92 (a_pTimer)->pCritSect, R3STRING(PDMR3CritSectName((a_pTimer)->pCritSect)) )); \
93 } \
94 } while (0)
95#else
96# define TMTIMER_ASSERT_CRITSECT(pVM, pTimer) do { } while (0)
97#endif
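/*
 * Illustrative sketch, not part of the original file: the contract asserted
 * above is that device code owns the critical section it associated with a
 * timer before starting or stopping it, along these lines (pThis, CritSect
 * and hMyTimer are hypothetical device-instance members):
 *
 *     int rc = PDMDevHlpCritSectEnter(pDevIns, &pThis->CritSect, VERR_IGNORED);
 *     AssertRCReturn(rc, rc);
 *     rc = PDMDevHlpTimerSetMillies(pDevIns, pThis->hMyTimer, 10);
 *     PDMDevHlpCritSectLeave(pDevIns, &pThis->CritSect);
 */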
98
99/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
100 * Checks for lock order trouble between the timer critsect and the critical
101 * section critsect. The virtual sync critsect must always be entered before
102 * the one associated with the timer (see TMR3TimerQueuesDo). It is OK if there
103 * isn't any critical section associated with the timer or if the calling thread
104 * doesn't own it, ASSUMING of course that the thread using this macro is going
105 * to enter the virtual sync critical section anyway.
106 *
107 * @remarks This is a slightly relaxed timer locking attitude compared to
108 * TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
109 * should know what it's doing if it's stopping or starting a timer
110 * without taking the device lock.
111 */
112#ifdef VBOX_STRICT
113# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
114 do { \
115 if ((pTimer)->pCritSect) \
116 { \
117 VMSTATE enmState; \
118 PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pVM, pTimer); \
119 AssertMsg( pCritSect \
120 && ( !PDMCritSectIsOwner((pVM), pCritSect) \
121 || PDMCritSectIsOwner((pVM), &(pVM)->tm.s.VirtualSyncLock) \
122 || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
123 || enmState == VMSTATE_RESETTING \
124 || enmState == VMSTATE_RESETTING_LS ),\
125 ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, pTimer->szName, \
126 (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
127 } \
128 } while (0)
129#else
130# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
131#endif
132
133
134#if defined(VBOX_STRICT) && defined(IN_RING0)
135/**
136 * Helper for TMTIMER_GET_CRITSECT
137 * @todo This needs a redo!
138 */
139DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PVMCC pVM, PTMTIMER pTimer)
140{
141 if (pTimer->enmType == TMTIMERTYPE_DEV)
142 {
143 RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
144 PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
145 ASMSetFlags(fSavedFlags);
146 struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;
147 if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
148 return pDevInsR0->pCritSectRoR0;
149 uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
150 if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
151 return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
152 }
153 RT_NOREF(pVM);
154 Assert(pTimer->pCritSect == NULL);
155 return NULL;
156}
157#endif /* VBOX_STRICT && IN_RING0 */
158
159
160/**
161 * Notification that execution is about to start.
162 *
163 * This call must always be paired with a TMNotifyEndOfExecution call.
164 *
165 * The function may, depending on the configuration, resume the TSC and future
166 * clocks that only tick when we're executing guest code.
167 *
168 * @param pVM The cross context VM structure.
169 * @param pVCpu The cross context virtual CPU structure.
170 */
171VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
172{
173#ifndef VBOX_WITHOUT_NS_ACCOUNTING
174 pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
175 pVCpu->tm.s.fExecuting = true;
176#endif
177 if (pVM->tm.s.fTSCTiedToExecution)
178 tmCpuTickResume(pVM, pVCpu);
179}
180
181
182/**
183 * Notification that execution has ended.
184 *
185 * This call must always be paired with a TMNotifyStartOfExecution call.
186 *
187 * The function may, depending on the configuration, suspend the TSC and future
188 * clocks that only tick when we're executing guest code.
189 *
190 * @param pVM The cross context VM structure.
191 * @param pVCpu The cross context virtual CPU structure.
192 * @param uTsc TSC value when exiting guest context.
193 */
194VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTsc)
195{
196 if (pVM->tm.s.fTSCTiedToExecution)
197 tmCpuTickPause(pVCpu); /** @todo use uTsc here if we can. */
198
199#ifndef VBOX_WITHOUT_NS_ACCOUNTING
200 /*
201 * Calculate the elapsed tick count and convert it to nanoseconds.
202 */
203# ifdef IN_RING3
204 PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
205 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDelta(pGip);
206 uint64_t const uCpuHz = pGip ? SUPGetCpuHzFromGip(pGip) : pVM->tm.s.cTSCTicksPerSecondHost;
207# else
208 uint64_t cTicks = uTsc - pVCpu->tm.s.uTscStartExecuting - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
209 uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
210# endif
211 AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */
212
213 uint64_t cNsExecutingDelta;
214 if (uCpuHz < _4G)
215 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
216 else if (uCpuHz < 16*_1G64)
217 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
218 else
219 {
220 Assert(uCpuHz < 64 * _1G64);
221 cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
222 }
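    /* Illustrative arithmetic, added note: ASMMultU64ByU32DivByU32 takes a
       32-bit divisor, so for hosts with uCpuHz >= 4 GHz both operands are
       shifted right until the frequency fits. For a hypothetical 20 GHz
       clock the last branch computes
           cNs = (cTicks >> 4) * RT_NS_1SEC / (uCpuHz >> 4)
       where 20e9 >> 4 = 1.25e9 fits in 32 bits; the shift cancels out in the
       ratio and only costs the low 4 bits of cTicks in precision. */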
223
224 /*
225 * Update the data.
226 *
227 * Note! We're not using strict memory ordering here to speed things up.
228 * The data is in a single cache line and this thread is the only
229 * one writing to that line, so I cannot quite imagine why we would
230 * need any strict ordering here.
231 */
232 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
233 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
234 ASMCompilerBarrier();
235 pVCpu->tm.s.fExecuting = false;
236 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
237 pVCpu->tm.s.cPeriodsExecuting++;
238 ASMCompilerBarrier();
239 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
240
241 /*
242 * Update stats.
243 */
244# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
245 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
246 if (cNsExecutingDelta < 5000)
247 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
248 else if (cNsExecutingDelta < 50000)
249 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
250 else
251 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
252# endif
253
254 /* The timer triggers occasional updating of the other and total stats: */
255 if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
256 { /*likely*/ }
257 else
258 {
259 pVCpu->tm.s.fUpdateStats = false;
260
261 uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
262 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
263
264# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
265 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
266 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
267 if (cNsOtherNewDelta > 0)
268 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
269# endif
270
271 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
272 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
273 }
274
275#endif
276}
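/*
 * Illustrative sketch, not from the original file: a reader of the uTimesGen
 * sequence counter updated above would retry until it sees an even, unchanged
 * generation on both sides of its reads, e.g.:
 *
 *     uint32_t uGen;
 *     uint64_t cNsExecuting;
 *     do
 *     {
 *         uGen = ASMAtomicUoReadU32(&pVCpu->tm.s.uTimesGen);
 *         ASMCompilerBarrier();
 *         cNsExecuting = pVCpu->tm.s.cNsExecuting;
 *         ASMCompilerBarrier();
 *     } while ((uGen & 1) || uGen != ASMAtomicUoReadU32(&pVCpu->tm.s.uTimesGen));
 *
 * An odd generation marks a write in progress; the writer's (uGen | 1) + 1
 * above lands on the next even value, completing the update.
 */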
277
278
279/**
280 * Notification that the CPU is entering the halt state.
281 *
282 * This call must always be paired with a TMNotifyEndOfHalt call.
283 *
284 * The function may, depending on the configuration, resume the TSC and future
285 * clocks that only tick when we're halted.
286 *
287 * @param pVCpu The cross context virtual CPU structure.
288 */
289VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
290{
291 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
292
293#ifndef VBOX_WITHOUT_NS_ACCOUNTING
294 pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
295 pVCpu->tm.s.fHalting = true;
296#endif
297
298 if ( pVM->tm.s.fTSCTiedToExecution
299 && !pVM->tm.s.fTSCNotTiedToHalt)
300 tmCpuTickResume(pVM, pVCpu);
301}
302
303
304/**
305 * Notification that the CPU is leaving the halt state.
306 *
307 * This call must always be paired with a TMNotifyStartOfHalt call.
308 *
309 * The function may, depending on the configuration, suspend the TSC and future
310 * clocks that only tick when we're halted.
311 *
312 * @param pVCpu The cross context virtual CPU structure.
313 */
314VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
315{
316 PVM pVM = pVCpu->CTX_SUFF(pVM);
317
318 if ( pVM->tm.s.fTSCTiedToExecution
319 && !pVM->tm.s.fTSCNotTiedToHalt)
320 tmCpuTickPause(pVCpu);
321
322#ifndef VBOX_WITHOUT_NS_ACCOUNTING
323 uint64_t const u64NsTs = RTTimeNanoTS();
324 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.nsStartTotal;
325 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
326 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
327 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
328
329 uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
330 ASMCompilerBarrier();
331 pVCpu->tm.s.fHalting = false;
332 pVCpu->tm.s.fUpdateStats = false;
333 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
334 pVCpu->tm.s.cPeriodsHalted++;
335 ASMCompilerBarrier();
336 ASMAtomicUoWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
337
338# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
339 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
340 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
341 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
342 if (cNsOtherNewDelta > 0)
343 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
344# endif
345 pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
346 pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
347#endif
348}
349
350
351/**
352 * Raise the timer force action flag and notify the dedicated timer EMT.
353 *
354 * @param pVM The cross context VM structure.
355 */
356DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
357{
358 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
359 AssertReturnVoid(idCpu < pVM->cCpus);
360 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
361
362 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
363 {
364 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
365 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
366#ifdef IN_RING3
367 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
368#endif
369 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
370 }
371}
372
373
374/**
375 * Schedules the queue which was changed.
376 */
377DECLINLINE(void) tmSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
378{
379 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
380 if (RT_SUCCESS_NP(rc))
381 {
382 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
383 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
384 tmTimerQueueSchedule(pVM, pQueueCC, pQueue);
385#ifdef VBOX_STRICT
386 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
387#endif
388 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
389 PDMCritSectLeave(pVM, &pQueue->TimerLock);
390 return;
391 }
392
393 TMTIMERSTATE enmState = pTimer->enmState;
394 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
395 tmScheduleNotify(pVM);
396}
397
398
399/**
400 * Tries to change the timer state to enmStateNew from enmStateOld. Unlike
401 * tmTimerTryWithLink, this does not touch the scheduling queue.
402 *
403 * @returns Success indicator.
404 * @param pTimer Timer in question.
405 * @param enmStateNew The new timer state.
406 * @param enmStateOld The old timer state.
407 */
408DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
409{
410 /*
411 * Attempt state change.
412 */
413 bool fRc;
414 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
415 return fRc;
416}
417
418
419/**
420 * Links the timer onto the scheduling queue.
421 *
422 * @param pQueueCC The current context queue (same as @a pQueue for
423 * ring-3).
424 * @param pQueue The shared queue data.
425 * @param pTimer The timer.
426 *
427 * @todo FIXME: Look into potential race with the thread running the queues
428 * and stuff.
429 */
430DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
431{
432 Assert(pTimer->idxScheduleNext == UINT32_MAX);
433 const uint32_t idxHeadNew = pTimer - &pQueueCC->paTimers[0];
434 AssertReturnVoid(idxHeadNew < pQueueCC->cTimersAlloc);
435
436 uint32_t idxHead;
437 do
438 {
439 idxHead = pQueue->idxSchedule;
440 Assert(idxHead == UINT32_MAX || idxHead < pQueueCC->cTimersAlloc);
441 pTimer->idxScheduleNext = idxHead;
442 } while (!ASMAtomicCmpXchgU32(&pQueue->idxSchedule, idxHeadNew, idxHead));
443}
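/*
 * Added note: the loop above is a lock-free LIFO push (Treiber stack). The
 * consumer, tmTimerQueueSchedule, detaches the whole list in a single
 * exchange rather than popping entries one by one, which is what keeps this
 * simple CmpXchg push safe to use here:
 *
 *     uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
 *     while (idxNext != UINT32_MAX)
 *         ... unlink the head entry, process it, advance ...
 */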
444
445
446/**
447 * Tries to change the state to enmStateNew from enmStateOld and, on
448 * success, links the timer onto the scheduling queue.
449 *
450 * @returns Success indicator.
451 * @param pQueueCC The current context queue (same as @a pQueue for
452 * ring-3).
453 * @param pQueue The shared queue data.
454 * @param pTimer Timer in question.
455 * @param enmStateNew The new timer state.
456 * @param enmStateOld The old timer state.
457 */
458DECLINLINE(bool) tmTimerTryWithLink(PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer,
459 TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
460{
461 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
462 {
463 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
464 return true;
465 }
466 return false;
467}
468
469
470/**
471 * Links a timer into the active list of a timer queue.
472 *
473 * @param pVM The cross context VM structure.
474 * @param pQueueCC The current context queue (same as @a pQueue for
475 * ring-3).
476 * @param pQueue The shared queue data.
477 * @param pTimer The timer.
478 * @param u64Expire The timer expiration time.
479 *
480 * @remarks Called while owning the relevant queue lock.
481 */
482DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue,
483 PTMTIMER pTimer, uint64_t u64Expire)
484{
485 Assert(pTimer->idxNext == UINT32_MAX);
486 Assert(pTimer->idxPrev == UINT32_MAX);
487 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */
488 RT_NOREF(pVM);
489
490 PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
491 if (pCur)
492 {
493 for (;; pCur = tmTimerGetNext(pQueueCC, pCur))
494 {
495 if (pCur->u64Expire > u64Expire)
496 {
497 const PTMTIMER pPrev = tmTimerGetPrev(pQueueCC, pCur);
498 tmTimerSetNext(pQueueCC, pTimer, pCur);
499 tmTimerSetPrev(pQueueCC, pTimer, pPrev);
500 if (pPrev)
501 tmTimerSetNext(pQueueCC, pPrev, pTimer);
502 else
503 {
504 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
505 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
506 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive head", pTimer->szName);
507 }
508 tmTimerSetPrev(pQueueCC, pCur, pTimer);
509 return;
510 }
511 if (pCur->idxNext == UINT32_MAX)
512 {
513 tmTimerSetNext(pQueueCC, pCur, pTimer);
514 tmTimerSetPrev(pQueueCC, pTimer, pCur);
515 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive tail", pTimer->szName);
516 return;
517 }
518 }
519 }
520 else
521 {
522 tmTimerQueueSetHead(pQueueCC, pQueue, pTimer);
523 ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
524 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerQueueLinkActive empty", pTimer->szName);
525 }
526}
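/*
 * Illustrative invariant, not from the original file: the insertion above
 * keeps the active list sorted ascending by expiration time, which a strict
 * build could in principle verify like this:
 *
 *     for (PTMTIMER pIt = tmTimerQueueGetHead(pQueueCC, pQueue); pIt != NULL; )
 *     {
 *         PTMTIMER const pNext = tmTimerGetNext(pQueueCC, pIt);
 *         Assert(!pNext || pIt->u64Expire <= pNext->u64Expire);
 *         pIt = pNext;
 *     }
 */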
527
528
529
530/**
531 * Schedules the given timer on the given queue.
532 *
533 * @param pVM The cross context VM structure.
534 * @param pQueueCC The current context queue (same as @a pQueue for
535 * ring-3).
536 * @param pQueue The shared queue data.
537 * @param pTimer The timer that needs scheduling.
538 *
539 * @remarks Called while owning the lock.
540 */
541DECLINLINE(void) tmTimerQueueScheduleOne(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
542{
543 Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);
544 RT_NOREF(pVM);
545
546 /*
547 * Processing.
548 */
549 unsigned cRetries = 2;
550 do
551 {
552 TMTIMERSTATE enmState = pTimer->enmState;
553 switch (enmState)
554 {
555 /*
556 * Reschedule timer (in the active list).
557 */
558 case TMTIMERSTATE_PENDING_RESCHEDULE:
559 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
560 break; /* retry */
561 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
562 RT_FALL_THRU();
563
564 /*
565 * Schedule timer (insert into the active list).
566 */
567 case TMTIMERSTATE_PENDING_SCHEDULE:
568 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
569 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
570 break; /* retry */
571 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, pTimer->u64Expire);
572 return;
573
574 /*
575 * Stop the timer in active list.
576 */
577 case TMTIMERSTATE_PENDING_STOP:
578 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
579 break; /* retry */
580 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
581 RT_FALL_THRU();
582
583 /*
584 * Stop the timer (not on the active list).
585 */
586 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
587 Assert(pTimer->idxNext == UINT32_MAX); Assert(pTimer->idxPrev == UINT32_MAX);
588 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
589 break;
590 return;
591
592 /*
593 * The timer is pending destruction by TMR3TimerDestroy, our caller.
594 * Nothing to do here.
595 */
596 case TMTIMERSTATE_DESTROY:
597 break;
598
599 /*
600 * Postpone these until they get into the right state.
601 */
602 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
603 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
604 tmTimerLinkSchedule(pQueueCC, pQueue, pTimer);
605 STAM_COUNTER_INC(&pVM->tm.s.CTX_SUFF_Z(StatPostponed));
606 return;
607
608 /*
609 * None of these can be in the schedule.
610 */
611 case TMTIMERSTATE_FREE:
612 case TMTIMERSTATE_STOPPED:
613 case TMTIMERSTATE_ACTIVE:
614 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
615 case TMTIMERSTATE_EXPIRED_DELIVER:
616 default:
617 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
618 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
619 return;
620 }
621 } while (cRetries-- > 0);
622}
623
624
625/**
626 * Schedules the specified timer queue.
627 *
628 * @param pVM The cross context VM structure.
629 * @param pQueueCC The current context queue (same as @a pQueue for
630 * ring-3) data of the queue to schedule.
631 * @param pQueue The shared queue data of the queue to schedule.
632 *
633 * @remarks Called while owning the lock.
634 */
635void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
636{
637 Assert(PDMCritSectIsOwner(pVM, &pQueue->TimerLock));
638
639 /*
640 * Dequeue the scheduling list and iterate it.
641 */
642 uint32_t idxNext = ASMAtomicXchgU32(&pQueue->idxSchedule, UINT32_MAX);
643 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, idxNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, idxNext, pQueue->u64Expire));
644 while (idxNext != UINT32_MAX)
645 {
646 AssertBreak(idxNext < pQueueCC->cTimersAlloc);
647
648 /*
649 * Unlink the head timer and take down the index of the next one.
650 */
651 PTMTIMER pTimer = &pQueueCC->paTimers[idxNext];
652 idxNext = pTimer->idxScheduleNext;
653 pTimer->idxScheduleNext = UINT32_MAX;
654
655 /*
656 * Do the scheduling.
657 */
658 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .szName=%s}\n",
659 pTimer, tmTimerState(pTimer->enmState), pQueue->enmClock, pTimer->enmType, pTimer->szName));
660 tmTimerQueueScheduleOne(pVM, pQueueCC, pQueue, pTimer);
661 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
662 }
663 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
664}
665
666
667#ifdef VBOX_STRICT
668/**
669 * Checks that the timer queues are sane.
670 *
671 * @param pVM The cross context VM structure.
672 * @param pszWhere Caller location clue.
673 */
674void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere)
675{
676 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
677 {
678 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
679 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
680 Assert(pQueue->enmClock == (TMCLOCK)idxQueue);
681
682 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
683 if (RT_SUCCESS(rc))
684 {
685 if ( pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC
686 || PDMCritSectTryEnter(pVM, &pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
687 {
688 /* Check the linking of the active lists. */
689 PTMTIMER pPrev = NULL;
690 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
691 pCur;
692 pPrev = pCur, pCur = tmTimerGetNext(pQueueCC, pCur))
693 {
694 AssertMsg(tmTimerGetPrev(pQueueCC, pCur) == pPrev, ("%s: %p != %p\n", pszWhere, tmTimerGetPrev(pQueueCC, pCur), pPrev));
695 TMTIMERSTATE enmState = pCur->enmState;
696 switch (enmState)
697 {
698 case TMTIMERSTATE_ACTIVE:
699 AssertMsg( pCur->idxScheduleNext == UINT32_MAX
700 || pCur->enmState != TMTIMERSTATE_ACTIVE,
701 ("%s: %RI32\n", pszWhere, pCur->idxScheduleNext));
702 break;
703 case TMTIMERSTATE_PENDING_STOP:
704 case TMTIMERSTATE_PENDING_RESCHEDULE:
705 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
706 break;
707 default:
708 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
709 break;
710 }
711 }
712
713# ifdef IN_RING3
714 /* Go thru all the timers and check that all the active ones are in the active lists. */
715 int const rcAllocLock = PDMCritSectRwTryEnterShared(pVM, &pQueue->AllocLock);
716 uint32_t idxTimer = pQueue->cTimersAlloc;
717 uint32_t cFree = 0;
718 while (idxTimer-- > 0)
719 {
720 PTMTIMER const pTimer = &pQueue->paTimers[idxTimer];
721 TMTIMERSTATE const enmState = pTimer->enmState;
722 switch (enmState)
723 {
724 case TMTIMERSTATE_FREE:
725 cFree++;
726 break;
727
728 case TMTIMERSTATE_ACTIVE:
729 case TMTIMERSTATE_PENDING_STOP:
730 case TMTIMERSTATE_PENDING_RESCHEDULE:
731 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
732 {
733 PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
734 Assert(pTimer->idxPrev != UINT32_MAX || pTimer == pCurAct);
735 while (pCurAct && pCurAct != pTimer)
736 pCurAct = tmTimerGetNext(pQueueCC, pCurAct);
737 Assert(pCurAct == pTimer);
738 break;
739 }
740
741 case TMTIMERSTATE_PENDING_SCHEDULE:
742 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
743 case TMTIMERSTATE_STOPPED:
744 case TMTIMERSTATE_EXPIRED_DELIVER:
745 {
746 Assert(pTimer->idxNext == UINT32_MAX);
747 Assert(pTimer->idxPrev == UINT32_MAX);
748 for (PTMTIMERR3 pCurAct = tmTimerQueueGetHead(pQueueCC, pQueue);
749 pCurAct;
750 pCurAct = tmTimerGetNext(pQueueCC, pCurAct))
751 {
752 Assert(pCurAct != pTimer);
753 Assert(tmTimerGetNext(pQueueCC, pCurAct) != pTimer);
754 Assert(tmTimerGetPrev(pQueueCC, pCurAct) != pTimer);
755 }
756 break;
757 }
758
759 /* ignore */
760 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
761 break;
762
763 case TMTIMERSTATE_INVALID:
764 Assert(idxTimer == 0);
765 break;
766
767 /* shouldn't get here! */
768 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
769 case TMTIMERSTATE_DESTROY:
770 default:
771 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
772 break;
773 }
774
775 /* Check the handle value. */
776 if (enmState > TMTIMERSTATE_INVALID && enmState < TMTIMERSTATE_DESTROY)
777 {
778 Assert((pTimer->hSelf & TMTIMERHANDLE_TIMER_IDX_MASK) == idxTimer);
779 Assert(((pTimer->hSelf >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) & TMTIMERHANDLE_QUEUE_IDX_SMASK) == idxQueue);
780 }
781 }
782 if (RT_SUCCESS(rcAllocLock))
783 {
784 Assert(cFree == pQueue->cTimersFree);
785 PDMCritSectRwLeaveShared(pVM, &pQueue->AllocLock);
786 }
787 else
788 Assert(cFree >= pQueue->cTimersFree); /* Can be lower as tmr3TimerCreate may run concurrently. */
789
790# endif /* IN_RING3 */
791
792 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
793 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
794 }
795 PDMCritSectLeave(pVM, &pQueue->TimerLock);
796 }
797 }
798}
799#endif /* VBOX_STRICT */
800
801#ifdef VBOX_HIGH_RES_TIMERS_HACK
802
803/**
804 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
805 * EMT is polling.
806 *
807 * @returns See tmTimerPollInternal.
808 * @param pVM The cross context VM structure.
809 * @param u64Now Current virtual clock timestamp.
810 * @param u64Delta The delta to the next event in ticks of the
811 * virtual clock.
812 * @param pu64Delta Where to return the delta.
813 */
814DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
815{
816 Assert(!(u64Delta & RT_BIT_64(63)));
817
818 if (!pVM->tm.s.fVirtualWarpDrive)
819 {
820 *pu64Delta = u64Delta;
821 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
822 }
823
824 /*
825 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
826 */
827 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
828 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
829
830 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
831 u64GipTime -= u64Start; /* the start is GIP time. */
832 if (u64GipTime >= u64Delta)
833 {
834 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
835 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
836 }
837 else
838 {
839 u64Delta -= u64GipTime;
840 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
841 u64Delta += u64GipTime;
842 }
843 *pu64Delta = u64Delta;
844 u64GipTime += u64Start;
845 return u64GipTime;
846}
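/*
 * Worked example, added for illustration: with u32Pct = 200 (guest time
 * running at twice the pace of host time) and the next event 100 ms of
 * virtual time away, the scaling above yields 100 ms * 100 / 200 = 50 ms of
 * GIP (host) time to wait, i.e. the reverse of the multiplication that
 * tmVirtualGetRaw applies.
 */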
847
848
849/**
850 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
851 * than the one dedicated to timer work.
852 *
853 * @returns See tmTimerPollInternal.
854 * @param pVM The cross context VM structure.
855 * @param u64Now Current virtual clock timestamp.
856 * @param pu64Delta Where to return the delta.
857 */
858DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
859{
860 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
861 *pu64Delta = s_u64OtherRet;
862 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
863}
864
865
866/**
867 * Worker for tmTimerPollInternal.
868 *
869 * @returns See tmTimerPollInternal.
870 * @param pVM The cross context VM structure.
871 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
872 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
873 * timer EMT.
874 * @param u64Now Current virtual clock timestamp.
875 * @param pu64Delta Where to return the delta.
876 * @param pCounter The statistics counter to update.
877 */
878DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
879 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
880{
881 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
882 if (pVCpuDst != pVCpu)
883 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
884 *pu64Delta = 0;
885 return 0;
886}
887
888
889/**
890 * Common worker for TMTimerPollGIP and the other TMTimerPoll* functions.
891 *
892 * This function is called before FFs are checked in the inner execution EM loops.
893 *
894 * @returns The GIP timestamp of the next event.
895 * 0 if the next event has already expired.
896 *
897 * @param pVM The cross context VM structure.
898 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
899 * @param pu64Delta Where to store the delta.
900 * @param pu64Now Where to store the current time. Optional.
901 *
902 * @thread The emulation thread.
903 *
904 * @remarks GIP uses ns ticks.
905 */
906DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta, uint64_t *pu64Now)
907{
908 VMCPUID idCpu = pVM->tm.s.idTimerCpu;
909 AssertReturn(idCpu < pVM->cCpus, 0);
910 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, idCpu);
911
912 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
913 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
914 if (pu64Now)
915 *pu64Now = u64Now;
916
917 /*
918 * Return straight away if the timer FF is already set ...
919 */
920 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
921 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
922
923 /*
924 * ... or if timers are being run.
925 */
926 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
927 {
928 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
929 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
930 }
931
932 /*
933 * Check for TMCLOCK_VIRTUAL expiration.
934 */
935 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].u64Expire);
936 const int64_t i64Delta1 = u64Expire1 - u64Now;
937 if (i64Delta1 <= 0)
938 {
939 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
940 {
941 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
942 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
943 }
944 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
945 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
946 }
947
948 /*
949 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
950 * This isn't quite as straightforward when in a catch-up: not only do
951 * we have to adjust the 'now', but we have to adjust the delta as well.
952 */
953
954 /*
955 * Optimistic lockless approach.
956 */
957 uint64_t u64VirtualSyncNow;
958 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
959 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
960 {
961 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
962 {
963 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
964 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
965 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
966 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
967 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
968 {
969 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
970 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
971 if (i64Delta2 > 0)
972 {
973 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
974 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
975
976 if (pVCpu == pVCpuDst)
977 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
978 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
979 }
980
981 if ( !pVM->tm.s.fRunningQueues
982 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
983 {
984 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
985 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
986 }
987
988 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
989 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
990 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
991 }
992 }
993 }
994 else
995 {
996 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
997 LogFlow(("TMTimerPoll: stopped\n"));
998 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
999 }
1000
1001 /*
1002 * Complicated lockless approach.
1003 */
1004 uint64_t off;
1005 uint32_t u32Pct = 0;
1006 bool fCatchUp;
1007 int cOuterTries = 42;
1008 for (;; cOuterTries--)
1009 {
1010 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
1011 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
1012 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire);
1013 if (fCatchUp)
1014 {
1015 /* No changes allowed, try to get a consistent set of parameters. */
1016 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
1017 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
1018 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
1019 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
1020 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
1021 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
1022 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
1023 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
1024 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
1025 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
1026 || cOuterTries <= 0)
1027 {
1028 uint64_t u64Delta = u64Now - u64Prev;
1029 if (RT_LIKELY(!(u64Delta >> 32)))
1030 {
1031 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
1032 if (off > u64Sub + offGivenUp)
1033 off -= u64Sub;
1034 else /* we've completely caught up. */
1035 off = offGivenUp;
1036 }
1037 else
1038 /* More than 4 seconds since last time (or negative), ignore it. */
1039 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
1040
1041 /* Check that we're still running and in catch up. */
1042 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
1043 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
1044 break;
1045 }
1046 }
1047 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
1048 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].u64Expire)
1049 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
1050 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
1051 break; /* Got a consistent offset */
1052
1053 /* Repeat the initial checks before iterating. */
1054 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1055 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
1056 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
1057 {
1058 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
1059 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1060 }
1061 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
1062 {
1063 LogFlow(("TMTimerPoll: stopped\n"));
1064 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1065 }
1066 if (cOuterTries <= 0)
1067 break; /* that's enough */
1068 }
1069 if (cOuterTries <= 0)
1070 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
1071 u64VirtualSyncNow = u64Now - off;
1072
1073 /* Calc delta and see if we've got a virtual sync hit. */
1074 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
1075 if (i64Delta2 <= 0)
1076 {
1077 if ( !pVM->tm.s.fRunningQueues
1078 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1079 {
1080 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
1081 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
1082 }
1083 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
1084 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
1085 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1086 }
1087
1088 /*
1089 * Return the time left to the next event.
1090 */
1091 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
1092 if (pVCpu == pVCpuDst)
1093 {
1094 if (fCatchUp)
1095 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
1096 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
1097 }
1098 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1099}
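/*
 * Worked example for the catch-up scaling above, added for illustration:
 * with u32Pct = 100 the virtual sync clock advances at 100% + 100% = twice
 * the virtual clock rate, so a 10 ms virtual sync delta is reached after
 * only 10 ms * 100 / (100 + 100) = 5 ms of virtual time.
 */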
1100
1101
1102/**
1103 * Set FF if we've passed the next virtual event.
1104 *
1105 * This function is called before FFs are checked in the inner execution EM loops.
1106 *
1107 * @returns true if timers are pending, false if not.
1108 *
1109 * @param pVM The cross context VM structure.
1110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1111 * @thread The emulation thread.
1112 */
1113VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1114{
1115 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1116 uint64_t off = 0;
1117 tmTimerPollInternal(pVM, pVCpu, &off, NULL);
1118 return off == 0;
1119}
1120
1121
1122/**
1123 * Set FF if we've passed the next virtual event and return virtual time as MS.
1124 *
1125 * This function is called before FFs are checked in the inner execution EM loops.
1126 *
1127 * This is used by the IEM recompiler for polling timers while also providing a
1128 * free time source for recent use tracking and such.
1129 *
1130 * @returns true if timers are pending, false if not.
1131 *
1132 * @param pVM The cross context VM structure.
1133 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1134 * @param pmsNow Where to return the current virtual time in
1135 * milliseconds.
1136 * @thread The emulation thread.
1137 */
1138VMM_INT_DECL(bool) TMTimerPollBoolWith32BitMilliTS(PVMCC pVM, PVMCPUCC pVCpu, uint32_t *pmsNow)
1139{
1140 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1141 uint64_t off = 0;
1142 uint64_t u64Now = 0;
1143 tmTimerPollInternal(pVM, pVCpu, &off, &u64Now);
1144 *pmsNow = (uint32_t)(u64Now / RT_NS_1MS);
1145 return off == 0;
1146}
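/*
 * Added note: the 32-bit millisecond value wraps after 2^32 ms, roughly 49.7
 * days of virtual time, which is assumed harmless for the recent-use
 * tracking use case mentioned above.
 */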
1147
1148
1149/**
1150 * Set FF if we've passed the next virtual event and return the time to it in nanoseconds.
1151 *
1152 * This function is called before FFs are checked in the inner execution EM loops.
1153 *
1154 * This is used by the IEM recompiler for polling timers while also providing a
1155 * free time source for recent use tracking and such.
1156 *
1157 * @returns Nanoseconds till the next event, 0 if event already pending.
1158 *
1159 * @param pVM The cross context VM structure.
1160 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1161 * @param pnsNow Where to return the current virtual time in nanoseconds.
1162 * @thread The emulation thread.
1163 */
1164VMM_INT_DECL(uint64_t) TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow)
1165{
1166 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1167 uint64_t offDelta = 0;
1168 tmTimerPollInternal(pVM, pVCpu, &offDelta, pnsNow);
1169 return offDelta;
1170}
1171
1172
1173/**
1174 * Set FF if we've passed the next virtual event.
1175 *
1176 * This function is called before FFs are checked in the inner execution EM loops.
1177 *
1178 * @param pVM The cross context VM structure.
1179 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1180 * @thread The emulation thread.
1181 */
1182VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1183{
1184 uint64_t off;
1185 tmTimerPollInternal(pVM, pVCpu, &off, NULL);
1186}
1187
1188
1189/**
1190 * Set FF if we've passed the next virtual event.
1191 *
1192 * This function is called before FFs are checked in the inner execution EM loops.
1193 *
1194 * @returns The GIP timestamp of the next event.
1195 * 0 if the next event has already expired.
1196 * @param pVM The cross context VM structure.
1197 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1198 * @param pu64Delta Where to store the delta.
1199 * @thread The emulation thread.
1200 */
1201VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1202{
1203 return tmTimerPollInternal(pVM, pVCpu, pu64Delta, NULL);
1204}
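/*
 * Illustrative sketch, not from the original file, of how an inner execution
 * loop might consume TMTimerPollGIP:
 *
 *     uint64_t cNsDelta;
 *     uint64_t const u64GipTime = TMTimerPollGIP(pVM, pVCpu, &cNsDelta);
 *     if (!cNsDelta)
 *     {
 *         // An event has expired and VMCPU_FF_TIMER is set on the dedicated
 *         // timer EMT: break out and service the timer queues.
 *     }
 *     else
 *     {
 *         // Safe to execute or halt for up to cNsDelta ns, i.e. until the
 *         // GIP clock reaches u64GipTime.
 *     }
 */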
1205
1206#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1207
1208/**
1209 * Locks the timer clock.
1210 *
1211 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1212 * if the clock does not have a lock.
1213 * @param pVM The cross context VM structure.
1214 * @param hTimer Timer handle as returned by one of the create functions.
1215 * @param rcBusy What to return in ring-0 and raw-mode context if the
1216 * lock is busy. Pass VINF_SUCCESS to acquire the
1217 * critical section thru a ring-3 call if necessary.
1218 *
1219 * @remarks Currently only supported on timers using the virtual sync clock.
1220 */
1221VMMDECL(int) TMTimerLock(PVMCC pVM, TMTIMERHANDLE hTimer, int rcBusy)
1222{
1223 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1224 AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1225 return PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, rcBusy);
1226}
1227
1228
1229/**
1230 * Unlocks a timer clock locked by TMTimerLock.
1231 *
1232 * @param pVM The cross context VM structure.
1233 * @param hTimer Timer handle as returned by one of the create functions.
1234 */
1235VMMDECL(void) TMTimerUnlock(PVMCC pVM, TMTIMERHANDLE hTimer)
1236{
1237 TMTIMER_HANDLE_TO_VARS_RETURN_VOID(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1238 AssertReturnVoid(idxQueue == TMCLOCK_VIRTUAL_SYNC);
1239 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1240}
1241
1242
1243/**
1244 * Checks if the current thread owns the timer clock lock.
1245 *
1246 * @returns @c true if it's the owner, @c false if not.
1247 * @param pVM The cross context VM structure.
1248 * @param hTimer Timer handle as returned by one of the create functions.
1249 */
1250VMMDECL(bool) TMTimerIsLockOwner(PVMCC pVM, TMTIMERHANDLE hTimer)
1251{
1252 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1253 AssertReturn(idxQueue == TMCLOCK_VIRTUAL_SYNC, false);
1254 return PDMCritSectIsOwner(pVM, &pVM->tm.s.VirtualSyncLock);
1255}
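/*
 * Illustrative usage sketch, not from the original file, for the three
 * helpers above, guarding a check-and-arm of a virtual sync timer
 * (TMTimerIsActive/TMTimerSet are the TM APIs assumed here):
 *
 *     int rc = TMTimerLock(pVM, hTimer, VERR_SEM_BUSY);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         if (!TMTimerIsActive(pVM, hTimer))
 *             TMTimerSet(pVM, hTimer, u64Expire);
 *         TMTimerUnlock(pVM, hTimer);
 *     }
 *     // In ring-0 rc may instead be the rcBusy value we passed in, in which
 *     // case the operation is typically deferred to ring-3.
 */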
1256
1257
1258/**
1259 * Optimized TMTimerSet code path for starting an inactive timer.
1260 *
1261 * @returns VBox status code.
1262 *
1263 * @param pVM The cross context VM structure.
1264 * @param pTimer The timer handle.
1265 * @param u64Expire The new expire time.
1266 * @param pQueue Pointer to the shared timer queue data.
1267 * @param idxQueue The queue index.
1268 */
1269static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, uint32_t idxQueue)
1270{
1271 Assert(pTimer->idxPrev == UINT32_MAX);
1272 Assert(pTimer->idxNext == UINT32_MAX);
1273 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1274
1275 /*
1276 * Calculate and set the expiration time.
1277 */
1278 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1279 {
1280 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1281 AssertMsgStmt(u64Expire >= u64Last,
1282 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1283 u64Expire = u64Last);
1284 }
1285 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1286 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, pTimer->szName, u64Expire));
1287
1288 /*
1289 * Link the timer into the active list.
1290 */
1291 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue), pQueue, pTimer, u64Expire);
1292
1293 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1294 return VINF_SUCCESS;
1295}
1296
1297
1298/**
1299 * TMTimerSet for the virtual sync timer queue.
1300 *
1301 * This employs a greatly simplified state machine by always acquiring the
1302 * queue lock and bypassing the scheduling list.
1303 *
1304 * @returns VBox status code
1305 * @param pVM The cross context VM structure.
1306 * @param pTimer The timer handle.
1307 * @param u64Expire The expiration time.
1308 */
1309static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
1310{
1311 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1312 VM_ASSERT_EMT(pVM);
1313 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1314 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1315 AssertRCReturn(rc, rc);
1316
1317 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1318 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1319 TMTIMERSTATE const enmState = pTimer->enmState;
1320 switch (enmState)
1321 {
1322 case TMTIMERSTATE_EXPIRED_DELIVER:
1323 case TMTIMERSTATE_STOPPED:
1324 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1325 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
1326 else
1327 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);
1328
1329 AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
1330 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, pTimer->szName));
1331 pTimer->u64Expire = u64Expire;
1332 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1333 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1334 rc = VINF_SUCCESS;
1335 break;
1336
1337 case TMTIMERSTATE_ACTIVE:
1338 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
1339 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1340 pTimer->u64Expire = u64Expire;
1341 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1342 rc = VINF_SUCCESS;
1343 break;
1344
1345 case TMTIMERSTATE_PENDING_RESCHEDULE:
1346 case TMTIMERSTATE_PENDING_STOP:
1347 case TMTIMERSTATE_PENDING_SCHEDULE:
1348 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1349 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1350 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1351 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1352 case TMTIMERSTATE_DESTROY:
1353 case TMTIMERSTATE_FREE:
1354 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1355 rc = VERR_TM_INVALID_STATE;
1356 break;
1357
1358 default:
1359 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1360 rc = VERR_TM_UNKNOWN_STATE;
1361 break;
1362 }
1363
1364 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1365 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1366 return rc;
1367}
1368
1369
1370/**
1371 * Arm a timer with a (new) expire time.
1372 *
1373 * @returns VBox status code.
1374 * @param pVM The cross context VM structure.
1375 * @param hTimer Timer handle as returned by one of the create functions.
1376 * @param u64Expire New expire time.
1377 */
1378VMMDECL(int) TMTimerSet(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t u64Expire)
1379{
1380 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1381 STAM_COUNTER_INC(&pTimer->StatSetAbsolute);
1382
1383 /* Treat virtual sync timers specially. */
1384 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
1385 return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);
1386
1387 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1388 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1389
1390 DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", pTimer->szName);
1391
1392#ifdef VBOX_WITH_STATISTICS
1393 /*
1394 * Gather optimization info.
1395 */
1396 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
1397 TMTIMERSTATE enmOrgState = pTimer->enmState;
1398 switch (enmOrgState)
1399 {
1400 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
1401 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
1402 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
1403 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
1404 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
1405 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
1406 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
1407 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
1408 }
1409#endif
1410
1411#if 1
1412 /*
1413 * The most common case is setting the timer again during the callback.
1414 * The second most common case is starting a timer at some other time.
1415 */
1416 TMTIMERSTATE enmState1 = pTimer->enmState;
1417 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
1418 || ( enmState1 == TMTIMERSTATE_STOPPED
1419 && pTimer->pCritSect))
1420 {
1421 /* Try to take the TM lock and check the state again. */
1422 int rc = PDMCritSectTryEnter(pVM, &pQueue->TimerLock);
1423 if (RT_SUCCESS_NP(rc))
1424 {
1425 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
1426 {
1427 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, idxQueue);
1428 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1429 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1430 return VINF_SUCCESS;
1431 }
1432 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1433 }
1434 }
1435#endif
1436
1437 /*
1438 * Unoptimized code path.
1439 */
1440 int cRetries = 1000;
1441 do
1442 {
1443 /*
1444 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1445 */
1446 TMTIMERSTATE enmState = pTimer->enmState;
1447 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
1448 pTimer, tmTimerState(enmState), pTimer->szName, cRetries, u64Expire));
1449 switch (enmState)
1450 {
1451 case TMTIMERSTATE_EXPIRED_DELIVER:
1452 case TMTIMERSTATE_STOPPED:
1453 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1454 {
1455 Assert(pTimer->idxPrev == UINT32_MAX);
1456 Assert(pTimer->idxNext == UINT32_MAX);
1457 pTimer->u64Expire = u64Expire;
1458 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1459 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1460 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1461 return VINF_SUCCESS;
1462 }
1463 break;
1464
1465 case TMTIMERSTATE_PENDING_SCHEDULE:
1466 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1467 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1468 {
1469 pTimer->u64Expire = u64Expire;
1470 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1471 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1472 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1473 return VINF_SUCCESS;
1474 }
1475 break;
1476
1477
1478 case TMTIMERSTATE_ACTIVE:
1479 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1480 {
1481 pTimer->u64Expire = u64Expire;
1482 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1483 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1484 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1485 return VINF_SUCCESS;
1486 }
1487 break;
1488
1489 case TMTIMERSTATE_PENDING_RESCHEDULE:
1490 case TMTIMERSTATE_PENDING_STOP:
1491 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1492 {
1493 pTimer->u64Expire = u64Expire;
1494 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1495 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1496 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1497 return VINF_SUCCESS;
1498 }
1499 break;
1500
1501
1502 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1503 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1504 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1505#ifdef IN_RING3
1506 if (!RTThreadYield())
1507 RTThreadSleep(1);
1508#else
1509/** @todo call host context and yield after a couple of iterations */
1510#endif
1511 break;
1512
1513 /*
1514 * Invalid states.
1515 */
1516 case TMTIMERSTATE_DESTROY:
1517 case TMTIMERSTATE_FREE:
1518 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1519 return VERR_TM_INVALID_STATE;
1520 default:
1521 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1522 return VERR_TM_UNKNOWN_STATE;
1523 }
1524 } while (cRetries-- > 0);
1525
1526 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1527 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1528 return VERR_TM_TIMER_UNSTABLE_STATE;
1529}
1530
1531
1532/**
1533 * Return the current time for the specified clock, setting pu64Now if not NULL.
1534 *
1535 * @returns Current time.
1536 * @param pVM The cross context VM structure.
1537 * @param enmClock The clock to query.
1538 * @param pu64Now Optional pointer where to store the return time
1539 */
1540DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1541{
1542 uint64_t u64Now;
1543 switch (enmClock)
1544 {
1545 case TMCLOCK_VIRTUAL_SYNC:
1546 u64Now = TMVirtualSyncGet(pVM);
1547 break;
1548 case TMCLOCK_VIRTUAL:
1549 u64Now = TMVirtualGet(pVM);
1550 break;
1551 case TMCLOCK_REAL:
1552 u64Now = TMRealGet(pVM);
1553 break;
1554 default:
1555 AssertFatalMsgFailed(("%d\n", enmClock));
1556 }
1557
1558 if (pu64Now)
1559 *pu64Now = u64Now;
1560 return u64Now;
1561}
1562
1563
1564/**
1565 * Optimized TMTimerSetRelative code path.
1566 *
1567 * @returns VBox status code.
1568 *
1569 * @param pVM The cross context VM structure.
1570 * @param pTimer The timer handle.
1571 * @param cTicksToNext Clock ticks until the next time expiration.
1572 * @param pu64Now Where to return the current time stamp used.
1573 * Optional.
1574 * @param pQueueCC The context specific queue data (same as @a pQueue
1575 * for ring-3).
1576 * @param pQueue The shared queue data.
1577 */
1578static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1579 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1580{
1581 Assert(pTimer->idxPrev == UINT32_MAX);
1582 Assert(pTimer->idxNext == UINT32_MAX);
1583 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1584
1585 /*
1586 * Calculate and set the expiration time.
1587 */
1588 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1589 pTimer->u64Expire = u64Expire;
1590 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, pTimer->szName, u64Expire, cTicksToNext));
1591
1592 /*
1593 * Link the timer into the active list.
1594 */
1595 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName);
1596 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1597
1598 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1599 return VINF_SUCCESS;
1600}
1601
1602
1603/**
1604 * TMTimerSetRelative for the virtual sync timer queue.
1605 *
1606 * This employs a greatly simplified state machine by always acquiring the
1607 * queue lock and bypassing the scheduling list.
1608 *
1609 * @returns VBox status code
1610 * @param pVM The cross context VM structure.
1611 * @param pTimer The timer to (re-)arm.
1612 * @param cTicksToNext Clock ticks until the next time expiration.
1613 * @param pu64Now Where to return the current time stamp used.
1614 * Optional.
1615 */
1616static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1617{
1618 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1619 VM_ASSERT_EMT(pVM);
1620 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1621 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1622 AssertRCReturn(rc, rc);
1623
1624 /* Calculate the expiration tick. */
1625 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1626 if (pu64Now)
1627 *pu64Now = u64Expire;
1628 u64Expire += cTicksToNext;
1629
1630 /* Update the timer. */
1631 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1632 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue);
1633 TMTIMERSTATE const enmState = pTimer->enmState;
1634 switch (enmState)
1635 {
1636 case TMTIMERSTATE_EXPIRED_DELIVER:
1637 case TMTIMERSTATE_STOPPED:
1638 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1639 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1640 else
1641 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1642 pTimer->u64Expire = u64Expire;
1643 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1644 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1645 rc = VINF_SUCCESS;
1646 break;
1647
1648 case TMTIMERSTATE_ACTIVE:
1649 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1650 tmTimerQueueUnlinkActive(pVM, pQueueCC, pQueue, pTimer);
1651 pTimer->u64Expire = u64Expire;
1652 tmTimerQueueLinkActive(pVM, pQueueCC, pQueue, pTimer, u64Expire);
1653 rc = VINF_SUCCESS;
1654 break;
1655
1656 case TMTIMERSTATE_PENDING_RESCHEDULE:
1657 case TMTIMERSTATE_PENDING_STOP:
1658 case TMTIMERSTATE_PENDING_SCHEDULE:
1659 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1660 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1661 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1662 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1663 case TMTIMERSTATE_DESTROY:
1664 case TMTIMERSTATE_FREE:
1665 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
1666 rc = VERR_TM_INVALID_STATE;
1667 break;
1668
1669 default:
1670 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
1671 rc = VERR_TM_UNKNOWN_STATE;
1672 break;
1673 }
1674
1675 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1676 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
1677 return rc;
1678}
1679
1680
1681/**
1682 * Arm a timer with an expire time relative to the current time.
1683 *
1684 * @returns VBox status code.
1685 * @param pVM The cross context VM structure.
1686 * @param pTimer The timer to arm.
1687 * @param cTicksToNext Clock ticks until the next time expiration.
1688 * @param pu64Now Where to return the current time stamp used.
1689 * Optional.
1690 * @param pQueueCC The context specific queue data (same as @a pQueue
1691 * for ring-3).
1692 * @param pQueue The shared queue data.
1693 */
1694static int tmTimerSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now,
1695 PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue)
1696{
1697 STAM_COUNTER_INC(&pTimer->StatSetRelative);
1698
1699 /* Treat virtual sync timers specially. */
1700 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1701 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);
1702
1703 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1704 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1705
1706 DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", pTimer->szName);
1707
1708#ifdef VBOX_WITH_STATISTICS
1709 /*
1710 * Gather optimization info.
1711 */
1712 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1713 TMTIMERSTATE enmOrgState = pTimer->enmState;
1714 switch (enmOrgState)
1715 {
1716 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1717 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1718 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1719 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1720 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1721 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1722 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1723 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1724 }
1725#endif
1726
1727 /*
1728 * Try to take the TM lock and optimize the common cases.
1729 *
1730 * With the TM lock we can safely make optimizations like immediate
1731 * scheduling and we can also be 100% sure that we're not racing the
1732 * running of the timer queues. As an additional restriction we require the
1733 * timer to have a critical section associated with it, so we can be 100%
1734 * certain there are no concurrent operations on the timer. (The latter isn't
1735 * necessary any longer as this isn't supported for any timers, critsect or not.)
1736 *
1737 * Note! Lock ordering doesn't apply when we only _try_ to
1738 * get the innermost locks.
1739 */
1740 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1741#if 1
1742 if ( fOwnTMLock
1743 && pTimer->pCritSect)
1744 {
1745 TMTIMERSTATE enmState = pTimer->enmState;
1746 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1747 || enmState == TMTIMERSTATE_STOPPED)
1748 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1749 {
1750 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1751 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1752 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1753 return VINF_SUCCESS;
1754 }
1755
1756 /* Optimize other states when it becomes necessary. */
1757 }
1758#endif
1759
1760 /*
1761 * Unoptimized path.
1762 */
1763 int rc;
1764 for (int cRetries = 1000; ; cRetries--)
1765 {
1766 /*
1767 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1768 */
1769 TMTIMERSTATE enmState = pTimer->enmState;
1770 switch (enmState)
1771 {
1772 case TMTIMERSTATE_STOPPED:
1773 if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
1774 {
1775 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1776 * Figure a safe way of activating this timer while the queue is
1777 * being run.
1778 * (99.9% sure that the assertion is caused by DevAPIC.cpp
1779 * re-starting the timer in response to an initial_count write.) */
1780 }
1781 RT_FALL_THRU();
1782 case TMTIMERSTATE_EXPIRED_DELIVER:
1783 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1784 {
1785 Assert(pTimer->idxPrev == UINT32_MAX);
1786 Assert(pTimer->idxNext == UINT32_MAX);
1787 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1788 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1789 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1790 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1791 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1792 rc = VINF_SUCCESS;
1793 break;
1794 }
1795 rc = VERR_TRY_AGAIN;
1796 break;
1797
1798 case TMTIMERSTATE_PENDING_SCHEDULE:
1799 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1800 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1801 {
1802 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1803 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1804 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1805 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1806 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1807 rc = VINF_SUCCESS;
1808 break;
1809 }
1810 rc = VERR_TRY_AGAIN;
1811 break;
1812
1813
1814 case TMTIMERSTATE_ACTIVE:
1815 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1816 {
1817 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1818 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1819 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1820 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1821 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1822 rc = VINF_SUCCESS;
1823 break;
1824 }
1825 rc = VERR_TRY_AGAIN;
1826 break;
1827
1828 case TMTIMERSTATE_PENDING_RESCHEDULE:
1829 case TMTIMERSTATE_PENDING_STOP:
1830 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1831 {
1832 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1833 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1834 pTimer, tmTimerState(enmState), pTimer->szName, pTimer->u64Expire, cRetries));
1835 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1836 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
1837 rc = VINF_SUCCESS;
1838 break;
1839 }
1840 rc = VERR_TRY_AGAIN;
1841 break;
1842
1843
1844 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1845 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1846 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1847#ifdef IN_RING3
1848 if (!RTThreadYield())
1849 RTThreadSleep(1);
1850#else
1851/** @todo call host context and yield after a couple of iterations */
1852#endif
1853 rc = VERR_TRY_AGAIN;
1854 break;
1855
1856 /*
1857 * Invalid states.
1858 */
1859 case TMTIMERSTATE_DESTROY:
1860 case TMTIMERSTATE_FREE:
1861 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
1862 rc = VERR_TM_INVALID_STATE;
1863 break;
1864
1865 default:
1866 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
1867 rc = VERR_TM_UNKNOWN_STATE;
1868 break;
1869 }
1870
1871 /* switch + loop is tedious to break out of. */
1872 if (rc == VINF_SUCCESS)
1873 break;
1874
1875 if (rc != VERR_TRY_AGAIN)
1876 {
1877 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1878 break;
1879 }
1880 if (cRetries <= 0)
1881 {
1882 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
1883 rc = VERR_TM_TIMER_UNSTABLE_STATE;
1884 tmTimerSetRelativeNowWorker(pVM, pQueue->enmClock, pu64Now);
1885 break;
1886 }
1887
1888 /*
1889 * Retry to gain locks.
1890 */
1891 if (!fOwnTMLock)
1892 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock));
1893
1894 } /* for (;;) */
1895
1896 /*
1897 * Clean up and return.
1898 */
1899 if (fOwnTMLock)
1900 PDMCritSectLeave(pVM, &pQueue->TimerLock);
1901
1902 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1903 return rc;
1904}
1905
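/*
 * Aside (not part of the original source): the unoptimized path above is an
 * instance of the lock-free state machine pattern TM uses throughout this
 * file: claim a transient *_SET_EXPIRE state with a compare-and-swap, perform
 * the update while owning that state, publish the final state, and retry a
 * bounded number of times when racing other threads.  Below is a minimal
 * standalone C11 sketch of that pattern; all names are made up and the state
 * set is reduced to three states for brevity.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { SKETCH_STOPPED, SKETCH_SETTING, SKETCH_ACTIVE };

typedef struct SKETCHTIMER
{
    _Atomic int enmState;   /* current state, modified only via CAS */
    uint64_t    u64Expire;  /* protected by ownership of SKETCH_SETTING */
} SKETCHTIMER;

/* One atomic attempt at the transition enmOld -> enmNew (cf. tmTimerTry). */
static bool sketchTry(SKETCHTIMER *pTimer, int enmNew, int enmOld)
{
    return atomic_compare_exchange_strong(&pTimer->enmState, &enmOld, enmNew);
}

/* Shape of the retry loop in tmTimerSetRelative's unoptimized path. */
static int sketchSet(SKETCHTIMER *pTimer, uint64_t u64Expire)
{
    for (int cRetries = 1000; cRetries > 0; cRetries--)
    {
        int const enmState = atomic_load(&pTimer->enmState);
        switch (enmState)
        {
            case SKETCH_STOPPED:
            case SKETCH_ACTIVE:
                if (sketchTry(pTimer, SKETCH_SETTING, enmState))
                {
                    pTimer->u64Expire = u64Expire;  /* safe: we own the transient state */
                    atomic_store(&pTimer->enmState, SKETCH_ACTIVE);
                    return 0;
                }
                break;  /* lost the race, reload the state and retry */

            case SKETCH_SETTING:
                break;  /* another thread is mid-update, spin and retry */
        }
    }
    return -1; /* cf. VERR_TM_TIMER_UNSTABLE_STATE */
}

int main(void)
{
    SKETCHTIMER Timer = { SKETCH_STOPPED, 0 };
    int rc = sketchSet(&Timer, 42);
    printf("rc=%d state=%d expire=%llu\n", rc, (int)atomic_load(&Timer.enmState),
           (unsigned long long)Timer.u64Expire);
    return rc;
}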
1906
1907/**
1908 * Arm a timer with an expire time relative to the current time.
1909 *
1910 * @returns VBox status code.
1911 * @param pVM The cross context VM structure.
1912 * @param hTimer Timer handle as returned by one of the create functions.
1913 * @param cTicksToNext Clock ticks until the next time expiration.
1914 * @param pu64Now Where to return the current time stamp used.
1915 * Optional.
1916 */
1917VMMDECL(int) TMTimerSetRelative(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1918{
1919 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1920 return tmTimerSetRelative(pVM, pTimer, cTicksToNext, pu64Now, pQueueCC, pQueue);
1921}
1922
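/*
 * Aside (illustration only, not part of the original source): a typical
 * device-emulation call into the API above, arming a previously created
 * TMCLOCK_VIRTUAL timer to fire 100 microseconds of virtual time from now.
 * The device function and timer handle are hypothetical; only
 * TMTimerSetRelative and its signature come from this file.
 */
static int devFooArmInterruptTimer(PVMCC pVM, TMTIMERHANDLE hFooTimer)
{
    /* For a TMCLOCK_VIRTUAL timer the tick unit is nanoseconds (1 GHz clock),
       so 100 microseconds is 100 000 ticks. */
    uint64_t u64Now = 0;
    int rc = TMTimerSetRelative(pVM, hFooTimer, 100 * UINT64_C(1000), &u64Now);
    /* u64Now now holds the virtual timestamp the expiry was computed from. */
    return rc;
}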
1923
1924/**
1925 * Drops a hint about the frequency of the timer.
1926 *
1927 * This is used by TM and the VMM to calculate how often guest execution needs
1928 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1929 *
1930 * @returns VBox status code.
1931 * @param pVM The cross context VM structure.
1932 * @param hTimer Timer handle as returned by one of the create functions.
1933 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1934 *
1935 * @remarks We're using an integer hertz value here since anything above 1 Hz
1936 * is not going to be any trouble to satisfy scheduling-wise. The
1937 * range where the hint makes sense is >= 100 Hz.
1938 */
1939VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint)
1940{
1941 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
1942 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
1943
1944 uint32_t const uHzOldHint = pTimer->uHzHint;
1945 pTimer->uHzHint = uHzHint;
1946
1947 uint32_t const uMaxHzHint = pQueue->uMaxHzHint;
1948 if ( uHzHint > uMaxHzHint
1949 || uHzOldHint >= uMaxHzHint)
1950 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
1951
1952 return VINF_SUCCESS;
1953}
1954
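/*
 * Aside (illustration only): how a device with a periodic 1 kHz tick might
 * feed the hint above so TM/VMM can size the preemption timer, clearing it
 * again when the guest disables the source.  The function and handle are
 * hypothetical; TMTimerStop also clears the hint automatically.
 */
static void devFooUpdateHzHint(PVMCC pVM, TMTIMERHANDLE hFooTimer, bool fEnabled)
{
    /* Per the remarks above, hints only really matter from about 100 Hz up. */
    TMTimerSetFrequencyHint(pVM, hFooTimer, fEnabled ? 1000 : 0);
}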
1955
1956/**
1957 * TMTimerStop for the virtual sync timer queue.
1958 *
1959 * This employs a greatly simplified state machine by always acquiring the
1960 * queue lock and bypassing the scheduling list.
1961 *
1962 * @returns VBox status code
1963 * @param pVM The cross context VM structure.
1964 * @param pTimer The timer handle.
1965 */
1966static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
1967{
1968 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1969 VM_ASSERT_EMT(pVM);
1970 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1971 int rc = PDMCritSectEnter(pVM, &pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1972 AssertRCReturn(rc, rc);
1973
1974 /* Reset the HZ hint. */
1975 uint32_t uOldHzHint = pTimer->uHzHint;
1976 if (uOldHzHint)
1977 {
1978 if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint)
1979 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16));
1980 pTimer->uHzHint = 0;
1981 }
1982
1983 /* Update the timer state. */
1984 TMTIMERSTATE const enmState = pTimer->enmState;
1985 switch (enmState)
1986 {
1987 case TMTIMERSTATE_ACTIVE:
1988 {
1989 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC];
1990 tmTimerQueueUnlinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, TMCLOCK_VIRTUAL_SYNC, pQueue), pQueue, pTimer);
1991 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1992 rc = VINF_SUCCESS;
1993 break;
1994 }
1995
1996 case TMTIMERSTATE_EXPIRED_DELIVER:
1997 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1998 rc = VINF_SUCCESS;
1999 break;
2000
2001 case TMTIMERSTATE_STOPPED:
2002 rc = VINF_SUCCESS;
2003 break;
2004
2005 case TMTIMERSTATE_PENDING_RESCHEDULE:
2006 case TMTIMERSTATE_PENDING_STOP:
2007 case TMTIMERSTATE_PENDING_SCHEDULE:
2008 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2009 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2010 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2011 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2012 case TMTIMERSTATE_DESTROY:
2013 case TMTIMERSTATE_FREE:
2014 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), pTimer->szName));
2015 rc = VERR_TM_INVALID_STATE;
2016 break;
2017
2018 default:
2019 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, pTimer->szName));
2020 rc = VERR_TM_UNKNOWN_STATE;
2021 break;
2022 }
2023
2024 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
2025 PDMCritSectLeave(pVM, &pVM->tm.s.VirtualSyncLock);
2026 return rc;
2027}
2028
2029
2030/**
2031 * Stop the timer.
2032 * Use TMR3TimerArm() to "un-stop" the timer.
2033 *
2034 * @returns VBox status code.
2035 * @param pVM The cross context VM structure.
2036 * @param hTimer Timer handle as returned by one of the create functions.
2037 */
2038VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer)
2039{
2040 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2041 STAM_COUNTER_INC(&pTimer->StatStop);
2042
2043 /* Treat virtual sync timers specially. */
2044 if (idxQueue == TMCLOCK_VIRTUAL_SYNC)
2045 return tmTimerVirtualSyncStop(pVM, pTimer);
2046
2047 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2048 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2049
2050 /*
2051 * Reset the HZ hint.
2052 */
2053 uint32_t const uOldHzHint = pTimer->uHzHint;
2054 if (uOldHzHint)
2055 {
2056 if (uOldHzHint >= pQueue->uMaxHzHint)
2057 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16));
2058 pTimer->uHzHint = 0;
2059 }
2060
2061 /** @todo see if this function needs optimizing. */
2062 int cRetries = 1000;
2063 do
2064 {
2065 /*
2066 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
2067 */
2068 TMTIMERSTATE enmState = pTimer->enmState;
2069 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
2070 pTimer, tmTimerState(enmState), pTimer->szName, cRetries));
2071 switch (enmState)
2072 {
2073 case TMTIMERSTATE_EXPIRED_DELIVER:
2074 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
2075 return VERR_INVALID_PARAMETER;
2076
2077 case TMTIMERSTATE_STOPPED:
2078 case TMTIMERSTATE_PENDING_STOP:
2079 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2080 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2081 return VINF_SUCCESS;
2082
2083 case TMTIMERSTATE_PENDING_SCHEDULE:
2084 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2085 {
2086 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2087 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2088 return VINF_SUCCESS;
2089 }
2090 break;
2091
2092 case TMTIMERSTATE_PENDING_RESCHEDULE:
2093 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2094 {
2095 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2096 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2097 return VINF_SUCCESS;
2098 }
2099 break;
2100
2101 case TMTIMERSTATE_ACTIVE:
2102 if (tmTimerTryWithLink(pQueueCC, pQueue, pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2103 {
2104 tmSchedule(pVM, pQueueCC, pQueue, pTimer);
2105 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2106 return VINF_SUCCESS;
2107 }
2108 break;
2109
2110 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2111 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2112 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2113#ifdef IN_RING3
2114 if (!RTThreadYield())
2115 RTThreadSleep(1);
2116#else
2117/** @todo call host and yield cpu after a while. */
2118#endif
2119 break;
2120
2121 /*
2122 * Invalid states.
2123 */
2124 case TMTIMERSTATE_DESTROY:
2125 case TMTIMERSTATE_FREE:
2126 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2127 return VERR_TM_INVALID_STATE;
2128 default:
2129 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2130 return VERR_TM_UNKNOWN_STATE;
2131 }
2132 } while (cRetries-- > 0);
2133
2134 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2135 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2136 return VERR_TM_TIMER_UNSTABLE_STATE;
2137}
2138
2139
2140/**
2141 * Get the current clock time.
2142 * Handy for calculating the new expire time.
2143 *
2144 * @returns Current clock time.
2145 * @param pVM The cross context VM structure.
2146 * @param hTimer Timer handle as returned by one of the create functions.
2147 */
2148VMMDECL(uint64_t) TMTimerGet(PVMCC pVM, TMTIMERHANDLE hTimer)
2149{
2150 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2151 STAM_COUNTER_INC(&pTimer->StatGet);
2152
2153 uint64_t u64;
2154 switch (pQueue->enmClock)
2155 {
2156 case TMCLOCK_VIRTUAL:
2157 u64 = TMVirtualGet(pVM);
2158 break;
2159 case TMCLOCK_VIRTUAL_SYNC:
2160 u64 = TMVirtualSyncGet(pVM);
2161 break;
2162 case TMCLOCK_REAL:
2163 u64 = TMRealGet(pVM);
2164 break;
2165 default:
2166 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2167 return UINT64_MAX;
2168 }
2169 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2170 // u64, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2171 return u64;
2172}
2173
2174
2175/**
2176 * Get the frequency of the timer clock.
2177 *
2178 * @returns Clock frequency (as Hz of course).
2179 * @param pVM The cross context VM structure.
2180 * @param hTimer Timer handle as returned by one of the create functions.
2181 */
2182VMMDECL(uint64_t) TMTimerGetFreq(PVMCC pVM, TMTIMERHANDLE hTimer)
2183{
2184 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2185 switch (pQueue->enmClock)
2186 {
2187 case TMCLOCK_VIRTUAL:
2188 case TMCLOCK_VIRTUAL_SYNC:
2189 return TMCLOCK_FREQ_VIRTUAL;
2190
2191 case TMCLOCK_REAL:
2192 return TMCLOCK_FREQ_REAL;
2193
2194 default:
2195 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2196 return 0;
2197 }
2198}
2199
2200
2201/**
2202 * Get the expire time of the timer.
2203 * Only valid for active timers.
2204 *
2205 * @returns Expire time of the timer.
2206 * @param pVM The cross context VM structure.
2207 * @param hTimer Timer handle as returned by one of the create functions.
2208 */
2209VMMDECL(uint64_t) TMTimerGetExpire(PVMCC pVM, TMTIMERHANDLE hTimer)
2210{
2211 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, UINT64_MAX); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2212 TMTIMER_ASSERT_CRITSECT(pVM, pTimer);
2213 int cRetries = 1000;
2214 do
2215 {
2216 TMTIMERSTATE enmState = pTimer->enmState;
2217 switch (enmState)
2218 {
2219 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2220 case TMTIMERSTATE_EXPIRED_DELIVER:
2221 case TMTIMERSTATE_STOPPED:
2222 case TMTIMERSTATE_PENDING_STOP:
2223 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2224 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2225 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2226 return UINT64_MAX;
2227
2228 case TMTIMERSTATE_ACTIVE:
2229 case TMTIMERSTATE_PENDING_RESCHEDULE:
2230 case TMTIMERSTATE_PENDING_SCHEDULE:
2231 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2232 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2233 return pTimer->u64Expire;
2234
2235 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2236 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2237#ifdef IN_RING3
2238 if (!RTThreadYield())
2239 RTThreadSleep(1);
2240#endif
2241 break;
2242
2243 /*
2244 * Invalid states.
2245 */
2246 case TMTIMERSTATE_DESTROY:
2247 case TMTIMERSTATE_FREE:
2248 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, pTimer->szName));
2249 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2250 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2251 return UINT64_MAX;
2252 default:
2253 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2254 return UINT64_MAX;
2255 }
2256 } while (cRetries-- > 0);
2257
2258 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, pTimer->szName));
2259 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2260 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2261 return UINT64_MAX;
2262}
2263
2264
2265/**
2266 * Checks if a timer is active or not.
2267 *
2268 * @returns True if active.
2269 * @returns False if not active.
2270 * @param pVM The cross context VM structure.
2271 * @param hTimer Timer handle as returned by one of the create functions.
2272 */
2273VMMDECL(bool) TMTimerIsActive(PVMCC pVM, TMTIMERHANDLE hTimer)
2274{
2275 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, false); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2276 TMTIMERSTATE enmState = pTimer->enmState;
2277 switch (enmState)
2278 {
2279 case TMTIMERSTATE_STOPPED:
2280 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2281 case TMTIMERSTATE_EXPIRED_DELIVER:
2282 case TMTIMERSTATE_PENDING_STOP:
2283 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2284 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2285 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2286 return false;
2287
2288 case TMTIMERSTATE_ACTIVE:
2289 case TMTIMERSTATE_PENDING_RESCHEDULE:
2290 case TMTIMERSTATE_PENDING_SCHEDULE:
2291 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2292 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2293 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2294 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2295 return true;
2296
2297 /*
2298 * Invalid states.
2299 */
2300 case TMTIMERSTATE_DESTROY:
2301 case TMTIMERSTATE_FREE:
2302 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), pTimer->szName));
2303 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2304 pTimer, tmTimerState(pTimer->enmState), pTimer->szName));
2305 return false;
2306 default:
2307 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName));
2308 return false;
2309 }
2310}
2311
2312
2313/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2314
2315
2316/**
2317 * Arm a timer with a (new) expire time relative to current time.
2318 *
2319 * @returns VBox status code.
2320 * @param pVM The cross context VM structure.
2321 * @param hTimer Timer handle as returned by one of the create functions.
2322 * @param cMilliesToNext Number of milliseconds to the next tick.
2323 */
2324VMMDECL(int) TMTimerSetMillies(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t cMilliesToNext)
2325{
2326 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2327 switch (pQueue->enmClock)
2328 {
2329 case TMCLOCK_VIRTUAL:
2330 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2331 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2332
2333 case TMCLOCK_VIRTUAL_SYNC:
2334 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2335 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext * UINT64_C(1000000), NULL, pQueueCC, pQueue);
2336
2337 case TMCLOCK_REAL:
2338 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2339 return tmTimerSetRelative(pVM, pTimer, cMilliesToNext, NULL, pQueueCC, pQueue);
2340
2341 default:
2342 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2343 return VERR_TM_TIMER_BAD_CLOCK;
2344 }
2345}
2346
2347
2348/**
2349 * Arm a timer with a (new) expire time relative to current time.
2350 *
2351 * @returns VBox status code.
2352 * @param pVM The cross context VM structure.
2353 * @param hTimer Timer handle as returned by one of the create functions.
2354 * @param cMicrosToNext Number of microseconds to the next tick.
2355 */
2356VMMDECL(int) TMTimerSetMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicrosToNext)
2357{
2358 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2359 switch (pQueue->enmClock)
2360 {
2361 case TMCLOCK_VIRTUAL:
2362 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2363 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2364
2365 case TMCLOCK_VIRTUAL_SYNC:
2366 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2367 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext * 1000, NULL, pQueueCC, pQueue);
2368
2369 case TMCLOCK_REAL:
2370 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2371 return tmTimerSetRelative(pVM, pTimer, cMicrosToNext / 1000, NULL, pQueueCC, pQueue);
2372
2373 default:
2374 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2375 return VERR_TM_TIMER_BAD_CLOCK;
2376 }
2377}
2378
2379
2380/**
2381 * Arm a timer with a (new) expire time relative to current time.
2382 *
2383 * @returns VBox status code.
2384 * @param pVM The cross context VM structure.
2385 * @param hTimer Timer handle as returned by one of the create functions.
2386 * @param cNanosToNext Number of nanoseconds to the next tick.
2387 */
2388VMMDECL(int) TMTimerSetNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanosToNext)
2389{
2390 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2391 switch (pQueue->enmClock)
2392 {
2393 case TMCLOCK_VIRTUAL:
2394 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2395 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2396
2397 case TMCLOCK_VIRTUAL_SYNC:
2398 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2399 return tmTimerSetRelative(pVM, pTimer, cNanosToNext, NULL, pQueueCC, pQueue);
2400
2401 case TMCLOCK_REAL:
2402 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2403 return tmTimerSetRelative(pVM, pTimer, cNanosToNext / 1000000, NULL, pQueueCC, pQueue);
2404
2405 default:
2406 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2407 return VERR_TM_TIMER_BAD_CLOCK;
2408 }
2409}
2410
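/*
 * Aside (illustration only): the three convenience setters above scale their
 * input into the timer's clock units with plain integer arithmetic, so on a
 * TMCLOCK_REAL timer (1 kHz) sub-millisecond amounts are silently truncated.
 * The handle below is hypothetical:
 */
static void exampleRealClockTruncation(PVMCC pVM, TMTIMERHANDLE hRealTimer)
{
    TMTimerSetNano(pVM, hRealTimer, 1500000); /* 1.5 ms -> 1500000/1000000 = 1 tick (1 ms) */
    TMTimerSetMicro(pVM, hRealTimer, 999);    /* 0.999 ms -> 999/1000 = 0 ticks, i.e. expires at once */
}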
2411
2412/**
2413 * Get the current clock time as nanoseconds.
2414 *
2415 * @returns The timer clock as nanoseconds.
2416 * @param pVM The cross context VM structure.
2417 * @param hTimer Timer handle as returned by one of the create functions.
2418 */
2419VMMDECL(uint64_t) TMTimerGetNano(PVMCC pVM, TMTIMERHANDLE hTimer)
2420{
2421 return TMTimerToNano(pVM, hTimer, TMTimerGet(pVM, hTimer));
2422}
2423
2424
2425/**
2426 * Get the current clock time as microseconds.
2427 *
2428 * @returns The timer clock as microseconds.
2429 * @param pVM The cross context VM structure.
2430 * @param hTimer Timer handle as returned by one of the create functions.
2431 */
2432VMMDECL(uint64_t) TMTimerGetMicro(PVMCC pVM, TMTIMERHANDLE hTimer)
2433{
2434 return TMTimerToMicro(pVM, hTimer, TMTimerGet(pVM, hTimer));
2435}
2436
2437
2438/**
2439 * Get the current clock time as milliseconds.
2440 *
2441 * @returns The timer clock as milliseconds.
2442 * @param pVM The cross context VM structure.
2443 * @param hTimer Timer handle as returned by one of the create functions.
2444 */
2445VMMDECL(uint64_t) TMTimerGetMilli(PVMCC pVM, TMTIMERHANDLE hTimer)
2446{
2447 return TMTimerToMilli(pVM, hTimer, TMTimerGet(pVM, hTimer));
2448}
2449
2450
2451/**
2452 * Converts the specified timer clock time to nanoseconds.
2453 *
2454 * @returns nanoseconds.
2455 * @param pVM The cross context VM structure.
2456 * @param hTimer Timer handle as returned by one of the create functions.
2457 * @param cTicks The clock ticks.
2458 * @remark There could be rounding errors here. We just do a simple integer divide
2459 * without any adjustments.
2460 */
2461VMMDECL(uint64_t) TMTimerToNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2462{
2463 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2464 switch (pQueue->enmClock)
2465 {
2466 case TMCLOCK_VIRTUAL:
2467 case TMCLOCK_VIRTUAL_SYNC:
2468 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2469 return cTicks;
2470
2471 case TMCLOCK_REAL:
2472 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2473 return cTicks * 1000000;
2474
2475 default:
2476 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2477 return 0;
2478 }
2479}
2480
2481
2482/**
2483 * Converts the specified timer clock time to microseconds.
2484 *
2485 * @returns microseconds.
2486 * @param pVM The cross context VM structure.
2487 * @param hTimer Timer handle as returned by one of the create functions.
2488 * @param cTicks The clock ticks.
2489 * @remark There could be rounding errors here. We just do a simple integer divide
2490 * without any adjustments.
2491 */
2492VMMDECL(uint64_t) TMTimerToMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2493{
2494 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2495 switch (pQueue->enmClock)
2496 {
2497 case TMCLOCK_VIRTUAL:
2498 case TMCLOCK_VIRTUAL_SYNC:
2499 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2500 return cTicks / 1000;
2501
2502 case TMCLOCK_REAL:
2503 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2504 return cTicks * 1000;
2505
2506 default:
2507 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2508 return 0;
2509 }
2510}
2511
2512
2513/**
2514 * Converts the specified timer clock time to milliseconds.
2515 *
2516 * @returns milliseconds.
2517 * @param pVM The cross context VM structure.
2518 * @param hTimer Timer handle as returned by one of the create functions.
2519 * @param cTicks The clock ticks.
2520 * @remark There could be rounding errors here. We just do a simple integer divide
2521 * without any adjustments.
2522 */
2523VMMDECL(uint64_t) TMTimerToMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cTicks)
2524{
2525 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2526 switch (pQueue->enmClock)
2527 {
2528 case TMCLOCK_VIRTUAL:
2529 case TMCLOCK_VIRTUAL_SYNC:
2530 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2531 return cTicks / 1000000;
2532
2533 case TMCLOCK_REAL:
2534 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2535 return cTicks;
2536
2537 default:
2538 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2539 return 0;
2540 }
2541}
2542
2543
2544/**
2545 * Converts the specified nanosecond timestamp to timer clock ticks.
2546 *
2547 * @returns timer clock ticks.
2548 * @param pVM The cross context VM structure.
2549 * @param hTimer Timer handle as returned by one of the create functions.
2550 * @param cNanoSecs The nanosecond value to convert.
2551 * @remark There could be rounding and overflow errors here.
2552 */
2553VMMDECL(uint64_t) TMTimerFromNano(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cNanoSecs)
2554{
2555 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2556 switch (pQueue->enmClock)
2557 {
2558 case TMCLOCK_VIRTUAL:
2559 case TMCLOCK_VIRTUAL_SYNC:
2560 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2561 return cNanoSecs;
2562
2563 case TMCLOCK_REAL:
2564 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2565 return cNanoSecs / 1000000;
2566
2567 default:
2568 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2569 return 0;
2570 }
2571}
2572
2573
2574/**
2575 * Converts the specified microsecond timestamp to timer clock ticks.
2576 *
2577 * @returns timer clock ticks.
2578 * @param pVM The cross context VM structure.
2579 * @param hTimer Timer handle as returned by one of the create functions.
2580 * @param cMicroSecs The microsecond value to convert.
2581 * @remark There could be rounding and overflow errors here.
2582 */
2583VMMDECL(uint64_t) TMTimerFromMicro(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMicroSecs)
2584{
2585 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2586 switch (pQueue->enmClock)
2587 {
2588 case TMCLOCK_VIRTUAL:
2589 case TMCLOCK_VIRTUAL_SYNC:
2590 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2591 return cMicroSecs * 1000;
2592
2593 case TMCLOCK_REAL:
2594 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2595 return cMicroSecs / 1000;
2596
2597 default:
2598 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2599 return 0;
2600 }
2601}
2602
2603
2604/**
2605 * Converts the specified millisecond timestamp to timer clock ticks.
2606 *
2607 * @returns timer clock ticks.
2608 * @param pVM The cross context VM structure.
2609 * @param hTimer Timer handle as returned by one of the create functions.
2610 * @param cMilliSecs The millisecond value to convert.
2611 * @remark There could be rounding and overflow errors here.
2612 */
2613VMMDECL(uint64_t) TMTimerFromMilli(PVMCC pVM, TMTIMERHANDLE hTimer, uint64_t cMilliSecs)
2614{
2615 TMTIMER_HANDLE_TO_VARS_RETURN_EX(pVM, hTimer, 0); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */
2616 switch (pQueue->enmClock)
2617 {
2618 case TMCLOCK_VIRTUAL:
2619 case TMCLOCK_VIRTUAL_SYNC:
2620 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2621 return cMilliSecs * 1000000;
2622
2623 case TMCLOCK_REAL:
2624 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2625 return cMilliSecs;
2626
2627 default:
2628 AssertMsgFailed(("Invalid enmClock=%d\n", pQueue->enmClock));
2629 return 0;
2630 }
2631}
2632
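/*
 * Aside (illustration only): since the To/From conversions above are plain
 * integer divides and multiplies, a From/To round trip is only exact when the
 * value is a whole multiple of the coarser unit.  For a TMCLOCK_REAL
 * (millisecond) timer, for instance:
 *
 *     TMTimerFromNano(pVM, hRealTimer, 1999999) -> 1999999 / 1000000 = 1 tick
 *     TMTimerToNano(pVM, hRealTimer, 1)         -> 1 * 1000000 = 1000000 ns
 *
 * so 999999 ns are lost, whereas on the 1 GHz virtual clocks both directions
 * are identity mappings.
 */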
2633
2634/**
2635 * Convert state to string.
2636 *
2637 * @returns Read-only state name.
2638 * @param enmState State.
2639 */
2640const char *tmTimerState(TMTIMERSTATE enmState)
2641{
2642 switch (enmState)
2643 {
2644#define CASE(num, state) \
2645 case TMTIMERSTATE_##state: \
2646 AssertCompile(TMTIMERSTATE_##state == (num)); \
2647 return #num "-" #state
2648 CASE( 0,INVALID);
2649 CASE( 1,STOPPED);
2650 CASE( 2,ACTIVE);
2651 CASE( 3,EXPIRED_GET_UNLINK);
2652 CASE( 4,EXPIRED_DELIVER);
2653 CASE( 5,PENDING_STOP);
2654 CASE( 6,PENDING_STOP_SCHEDULE);
2655 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2656 CASE( 8,PENDING_SCHEDULE);
2657 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2658 CASE(10,PENDING_RESCHEDULE);
2659 CASE(11,DESTROY);
2660 CASE(12,FREE);
2661 default:
2662 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2663 return "Invalid state!";
2664#undef CASE
2665 }
2666}
2667
2668
2669#if defined(IN_RING0) || defined(IN_RING3)
2670/**
2671 * Copies over the old timers and initializes the newly allocated ones.
2672 *
2673 * Helper for TMR0TimerQueueGrow and tmR3TimerQueueGrow.
2674 *
2675 * @param paTimers The new timer allocation.
2676 * @param paOldTimers The old timers.
2677 * @param cNewTimers Number of new timers.
2678 * @param cOldTimers Number of old timers.
2679 */
2680void tmHCTimerQueueGrowInit(PTMTIMER paTimers, TMTIMER const *paOldTimers, uint32_t cNewTimers, uint32_t cOldTimers)
2681{
2682 Assert(cOldTimers < cNewTimers);
2683
2684 /*
2685 * Copy over the old info and initialize the new handles.
2686 */
2687 if (cOldTimers > 0)
2688 memcpy(paTimers, paOldTimers, sizeof(TMTIMER) * cOldTimers);
2689
2690 size_t i = cNewTimers;
2691 while (i-- > cOldTimers)
2692 {
2693 paTimers[i].u64Expire = UINT64_MAX;
2694 paTimers[i].enmType = TMTIMERTYPE_INVALID;
2695 paTimers[i].enmState = TMTIMERSTATE_FREE;
2696 paTimers[i].idxScheduleNext = UINT32_MAX;
2697 paTimers[i].idxNext = UINT32_MAX;
2698 paTimers[i].idxPrev = UINT32_MAX;
2699 paTimers[i].hSelf = NIL_TMTIMERHANDLE;
2700 }
2701
2702 /*
2703 * Mark the zero'th entry as allocated but invalid if we just allocated it.
2704 */
2705 if (cOldTimers == 0)
2706 {
2707 paTimers[0].enmState = TMTIMERSTATE_INVALID;
2708 paTimers[0].szName[0] = 'n';
2709 paTimers[0].szName[1] = 'i';
2710 paTimers[0].szName[2] = 'l';
2711 paTimers[0].szName[3] = '\0';
2712 }
2713}
2714#endif /* IN_RING0 || IN_RING3 */
2715
2716
2717/**
2718 * The slow path of tmGetFrequencyHint() where we try to recalculate the value.
2719 *
2720 * @returns The highest frequency. 0 if no timers care.
2721 * @param pVM The cross context VM structure.
2722 * @param uOldMaxHzHint The old global hint.
2723 */
2724DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint)
2725{
2726 /* Set two bits, though not entirely sure both are needed (too exhausted to
2727 think clearly); it should force other callers thru the slow path while
2728 we're recalculating and help us detect changes made in the meantime. */
2729 AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16);
2730
2731 /*
2732 * The "right" highest frequency value isn't so important that we'll block
2733 * waiting on the timer semaphores.
2734 */
2735 uint32_t uMaxHzHint = 0;
2736 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++)
2737 {
2738 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue];
2739
2740 /* Get the max Hz hint for the queue. */
2741 uint32_t uMaxHzHintQueue;
2742 if ( !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2743 || RT_FAILURE_NP(PDMCritSectTryEnter(pVM, &pQueue->TimerLock)))
2744 uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint);
2745 else
2746 {
2747 /* Is it still necessary to do updating? */
2748 if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)))
2749 {
2750 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */
2751
2752 PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue);
2753 uMaxHzHintQueue = 0;
2754 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue);
2755 pCur;
2756 pCur = tmTimerGetNext(pQueueCC, pCur))
2757 {
2758 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2759 if (uHzHint > uMaxHzHintQueue)
2760 {
2761 TMTIMERSTATE enmState = pCur->enmState;
2762 switch (enmState)
2763 {
2764 case TMTIMERSTATE_ACTIVE:
2765 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2766 case TMTIMERSTATE_EXPIRED_DELIVER:
2767 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2768 case TMTIMERSTATE_PENDING_SCHEDULE:
2769 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2770 case TMTIMERSTATE_PENDING_RESCHEDULE:
2771 uMaxHzHintQueue = uHzHint;
2772 break;
2773
2774 case TMTIMERSTATE_STOPPED:
2775 case TMTIMERSTATE_PENDING_STOP:
2776 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2777 case TMTIMERSTATE_DESTROY:
2778 case TMTIMERSTATE_FREE:
2779 case TMTIMERSTATE_INVALID:
2780 break;
2781 /* no default, want gcc warnings when adding more states. */
2782 }
2783 }
2784 }
2785
2786 /* Write the new Hz hint for the queue and clear the other update flag. */
2787 ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue);
2788 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue));
2789 }
2790 else
2791 uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint);
2792
2793 PDMCritSectLeave(pVM, &pQueue->TimerLock);
2794 }
2795
2796 /* Update the global max Hz hint. */
2797 if (uMaxHzHint < uMaxHzHintQueue)
2798 uMaxHzHint = uMaxHzHintQueue;
2799 }
2800
2801 /*
2802 * Update the frequency hint if no pending frequency changes and we didn't race anyone thru here.
2803 */
2804 uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint);
2805 if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2806 Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint));
2807 else
2808 for (uint32_t iTry = 1;; iTry++)
2809 {
2810 if (RT_LO_U32(u64Actual) != 0)
2811 Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry));
2812 else if (iTry >= 4)
2813 Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry));
2814 else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual))
2815 Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry));
2816 else
2817 continue;
2818 break;
2819 }
2820 return uMaxHzHint;
2821}
2822
2823
2824/**
2825 * Gets the highest frequency hint for all the important timers.
2826 *
2827 * @returns The highest frequency. 0 if no timers care.
2828 * @param pVM The cross context VM structure.
2829 */
2830DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM)
2831{
2832 /*
2833 * Query the value, recalculate it if necessary.
2834 */
2835 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined);
2836 if (RT_LO_U32(u64Combined) == 0)
2837 return RT_HI_U32(u64Combined); /* hopefully somewhat likely */
2838 return tmGetFrequencyHintSlow(pVM, RT_HI_U32(u64Combined));
2839}
2840
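/*
 * Aside (not part of the original source): the HzHint.u64Combined scheme the
 * two functions above implement packs a cached value and its invalidation
 * flags into one atomic 64-bit word: per-queue "dirty" bits in the low half
 * (two per queue in the real code), the cached maximum in the high half.
 * Readers take a single atomic load on the fast path and only recalculate
 * when a dirty bit is set.  A minimal standalone C11 sketch of the idea, with
 * made-up names and a single flag bit per queue for brevity:
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t g_u64Combined; /* low 32: dirty flags, high 32: cached max */

/* Called whenever a contributing value changes (cf. TMTimerSetFrequencyHint). */
static void hintInvalidate(unsigned idxQueue)
{
    atomic_fetch_or(&g_u64Combined, (uint64_t)1 << idxQueue);
}

/* Slow path: recalculate and try to publish with the flags cleared. */
static uint32_t hintRecalc(void)
{
    uint32_t const uNewMax = 1000; /* stand-in for scanning the timer queues */
    uint64_t uExpected = atomic_load(&g_u64Combined);
    /* Publish only if nobody dirtied the word again while we recalculated;
       on failure the flags stay set and a later reader recalculates. */
    atomic_compare_exchange_strong(&g_u64Combined, &uExpected, (uint64_t)uNewMax << 32);
    return uNewMax;
}

/* Fast path (cf. tmGetFrequencyHint): one load, no locks, when nothing is dirty. */
static uint32_t hintGet(void)
{
    uint64_t const u64 = atomic_load(&g_u64Combined);
    if ((uint32_t)u64 == 0)
        return (uint32_t)(u64 >> 32);
    return hintRecalc();
}

int main(void)
{
    hintInvalidate(2);
    printf("hint=%u\n", hintGet());
    return 0;
}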
2841
2842/**
2843 * Calculates a host timer frequency that would be suitable for the current
2844 * timer load.
2845 *
2846 * This will take the highest timer frequency, adjust for catch-up and warp
2847 * driver, and finally add a little fudge factor. The caller (VMM) will use
2848 * the result to adjust the per-cpu preemption timer.
2849 *
2850 * @returns The highest frequency. 0 if no important timers around.
2851 * @param pVM The cross context VM structure.
2852 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2853 */
2854VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
2855{
2856 uint32_t uHz = tmGetFrequencyHint(pVM);
2857
2858 /* Catch-up: at the beginning of the effort we have to be more aggressive
2859 than the percentage indicates. */
2860 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2861 {
2862 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2863 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2864 {
2865 if (u32Pct <= 100)
2866 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2867 else if (u32Pct <= 200)
2868 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2869 else if (u32Pct <= 400)
2870 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2871 uHz *= u32Pct + 100;
2872 uHz /= 100;
2873 }
2874 }
2875
2876 /* Warp drive. */
2877 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2878 {
2879 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2880 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2881 {
2882 uHz *= u32Pct;
2883 uHz /= 100;
2884 }
2885 }
2886
2887 /* Fudge factor. */
2888 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2889 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2890 else
2891 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2892 uHz /= 100;
2893
2894 /* Make sure it isn't too high. */
2895 if (uHz > pVM->tm.s.cHostHzMax)
2896 uHz = pVM->tm.s.cHostHzMax;
2897
2898 return uHz;
2899}
2900
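/*
 * Aside (illustration only): a worked example of the arithmetic above using
 * hypothetical config values.  Say the highest hint is 1000 Hz, the VM is in
 * catch-up at 50% with cPctHostHzFudgeFactorCatchUp100 = 300 (assumed), no
 * warp drive, and this EMT is the designated timer CPU with an assumed 110%
 * fudge factor:
 *
 *     u32Pct = 50 * 300 / 100           = 150
 *     uHz    = 1000 * (150 + 100) / 100 = 2500
 *     uHz    = 2500 * 110 / 100         = 2750
 *
 * i.e. the VMM would aim the per-cpu preemption timer at roughly 2750 Hz,
 * subject to the final cHostHzMax clamp.
 */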
2901
2902/**
2903 * Whether the guest virtual clock is ticking.
2904 *
2905 * @returns true if ticking, false otherwise.
2906 * @param pVM The cross context VM structure.
2907 */
2908VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2909{
2910 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2911}
2912