VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 87749

Last change on this file since 87749 was 87749, checked in by vboxsync, 4 years ago

VMM/TM: Relax ordering when signaling update-in-progress in TMNotifyEndOfExecution. bugref:9941

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 94.6 KB
Line 
1/* $Id: TMAll.cpp 87749 2021-02-13 03:18:27Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#ifdef DEBUG_bird
24# define DBGFTRACE_DISABLED /* annoying */
25#endif
26#include <VBox/vmm/tm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/dbgftrace.h>
29#ifdef IN_RING3
30#endif
31#include <VBox/vmm/pdmdev.h> /* (for TMTIMER_GET_CRITSECT implementation) */
32#include "TMInternal.h"
33#include <VBox/vmm/vmcc.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-math.h>
43#ifdef IN_RING3
44# include <iprt/thread.h>
45#endif
46
47#include "TMInline.h"
48
49
/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#ifdef VBOX_STRICT
/** @def TMTIMER_GET_CRITSECT
 * Helper for safely resolving the critical section for a timer belonging to a
 * device instance.
 *
 * In ring-3 the pointer stored in the timer can be used directly; in other
 * contexts it must be translated first (see tmRZTimerGetCritSect).
 * @todo needs reworking later as it uses PDMDEVINSR0::pDevInsR0RemoveMe. */
# ifdef IN_RING3
#  define TMTIMER_GET_CRITSECT(pTimer)  ((pTimer)->pCritSect)
# else
#  define TMTIMER_GET_CRITSECT(pTimer)  tmRZTimerGetCritSect(pTimer)
# endif
#endif
64
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer.
 *
 * Ownership is not insisted upon while the VM is being created or reset
 * (VMSTATE_CREATING / VMSTATE_RESETTING / VMSTATE_RESETTING_LS).  Compiles
 * to a no-op in non-strict builds. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif
87
/** @def TMTIMER_ASSERT_SYNC_CRITSECT_ORDER
 * Checks for lock order trouble between the timer critsect and the critical
 * section critsect.  The virtual sync critsect must always be entered before
 * the one associated with the timer (see TMR3TimerQueuesDo).  It is OK if there
 * isn't any critical section associated with the timer or if the calling thread
 * doesn't own it, ASSUMING of course that the thread using this macro is going
 * to enter the virtual sync critical section anyway.
 *
 * As with TMTIMER_ASSERT_CRITSECT, the check is waived while the VM is being
 * created or reset, and the macro is a no-op in non-strict builds.
 *
 * @remarks This is a slightly relaxed timer locking attitude compared to
 *          TMTIMER_ASSERT_CRITSECT, however, the calling device/whatever code
 *          should know what it's doing if it's stopping or starting a timer
 *          without taking the device lock.
 */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = TMTIMER_GET_CRITSECT(pTimer); \
            AssertMsg(   pCritSect \
                      && (   !PDMCritSectIsOwner(pCritSect) \
                          || PDMCritSectIsOwner(&pVM->tm.s.VirtualSyncLock) \
                          || (enmState = (pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer) do { } while (0)
#endif
121
122
#if defined(VBOX_STRICT) && defined(IN_RING0)
/**
 * Ring-0 helper for TMTIMER_GET_CRITSECT.
 *
 * Translates the ring-3 critical section pointer stored in the timer into a
 * pointer usable in the current context.
 *
 * @returns Current-context critical section pointer.
 * @param   pTimer      The timer to resolve the critical section for.
 * @todo This needs a redo!
 */
DECLINLINE(PPDMCRITSECT) tmRZTimerGetCritSect(PTMTIMER pTimer)
{
    if (pTimer->enmType == TMTIMERTYPE_DEV)
    {
        /* Temporarily clear SMAP protection so the ring-3 pointer can be read. */
        RTCCUINTREG fSavedFlags = ASMAddFlags(X86_EFL_AC); /** @todo fix ring-3 pointer use */
        PPDMDEVINSR0 pDevInsR0 = ((struct PDMDEVINSR3 *)pTimer->u.Dev.pDevIns)->pDevInsR0RemoveMe; /* !ring-3 read! */
        ASMSetFlags(fSavedFlags);
        struct PDMDEVINSR3 *pDevInsR3 = pDevInsR0->pDevInsForR3R0;

        /* The device's own critical section? */
        if (pTimer->pCritSect == pDevInsR3->pCritSectRoR3)
            return pDevInsR0->pCritSectRoR0;

        /* Inside the shared instance data?  If so, apply the same offset to
           the ring-0 mapping of the instance data. */
        uintptr_t offCritSect = (uintptr_t)pTimer->pCritSect - (uintptr_t)pDevInsR3->pvInstanceDataR3;
        if (offCritSect < pDevInsR0->pReg->cbInstanceShared)
            return (PPDMCRITSECT)((uintptr_t)pDevInsR0->pvInstanceDataR0 + offCritSect);
    }
    /* Fall back to a hyper-heap address translation. */
    return (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), pTimer->pCritSect);
}
#endif /* VBOX_STRICT && IN_RING0 */
145
146
/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Record the TSC at the start of execution; TMNotifyEndOfExecution uses
       it to compute the length of the executed period. */
    pVCpu->tm.s.uTscStartExecuting = SUPReadTsc();
    pVCpu->tm.s.fExecuting         = true;
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}
167
168
/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /*
     * Calculate the elapsed tick count and convert it to nanoseconds.
     */
    /** @todo get TSC from caller (HMR0A.asm) */
    uint64_t cTicks = SUPReadTsc() - pVCpu->tm.s.uTscStartExecuting;
# ifdef IN_RING3
    uint64_t const uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
# else
    uint64_t const uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
# endif
    AssertStmt(cTicks <= uCpuHz << 2, cTicks = uCpuHz << 2); /* max 4 sec */

    /* Pre-shift the operands as needed so the 32-bit divisor/multiplicand of
       ASMMultU64ByU32DivByU32 doesn't overflow for high CPU frequencies. */
    uint64_t cNsExecutingDelta;
    if (uCpuHz < _4G)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks, RT_NS_1SEC, uCpuHz);
    else if (uCpuHz < 16*_1G64)
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 2, RT_NS_1SEC, uCpuHz >> 2);
    else
    {
        Assert(uCpuHz < 64 * _1G64);
        cNsExecutingDelta = ASMMultU64ByU32DivByU32(cTicks >> 4, RT_NS_1SEC, uCpuHz >> 4);
    }

    /*
     * Update the data.
     *
     * The generation counter (uTimesGen) is odd while this update is in
     * progress and even again afterwards, so lock-free readers can detect a
     * concurrent update and retry.
     *
     * Note! Using ASMAtomicUoIncU32 instead of ASMAtomicIncU32 here to
     *       save a tiny bit of time.  Currently, the only consumer is
     *       tmR3CpuLoadTimer(), so nothing terribly important.
     */
    uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint32_t uGen = ASMAtomicUoIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.fExecuting   = false;
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

    /*
     * Update stats.
     */
# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
# endif

    /* The timer triggers occasional updating of the other and total stats: */
    if (RT_LIKELY(!pVCpu->tm.s.fUpdateStats))
    { /* likely */ }
    else
    {
        pVCpu->tm.s.fUpdateStats = false;

        uint64_t const cNsTotalNew = RTTimeNanoTS() - pVCpu->tm.s.nsStartTotal;
        uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
        int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
        if (cNsOtherNewDelta > 0)
            STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif

        pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
        pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
    }

#endif
}
259
260
/**
 * Notification that the cpu is entering the halt state
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPUCC pVCpu)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Record when halting started; TMNotifyEndOfHalt uses this to compute
       the length of the halted period. */
    pVCpu->tm.s.nsStartHalting = RTTimeNanoTS();
    pVCpu->tm.s.fHalting       = true;
#endif

    if (   pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}
284
285
/**
 * Notification that the cpu is leaving the halt state
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPUCC pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (   pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.nsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.nsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

    /* The generation counter (uTimesGen) is odd while this update is in
       progress and even again afterwards, so lock-free readers can detect a
       concurrent update and retry. */
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.fHalting     = false;
    pVCpu->tm.s.fUpdateStats = false;       /* stats are brought up to date right below */
    pVCpu->tm.s.cNsHalted    = cNsHaltedNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotalStat);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOtherStat;
    if (cNsOtherNewDelta > 0)
        STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsOther, (uint64_t)cNsOtherNewDelta);
# endif
    pVCpu->tm.s.cNsTotalStat = cNsTotalNew;
    pVCpu->tm.s.cNsOtherStat = cNsOtherNew;
#endif
}
329
330
/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * Only sets the flag (and notifies) when it is not already set, so repeated
 * calls are cheap.
 *
 * @param   pVM     The cross context VM structure.
 */
DECLINLINE(void) tmScheduleNotify(PVMCC pVM)
{
    PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
    if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        /* In ring-3 also poke the target EMT in case it is sleeping. */
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}
349
350
/**
 * Schedule the queue which was changed.
 *
 * Fast path: when called on the EMT and the timer lock can be taken without
 * contention, the queue is processed immediately.  Otherwise the dedicated
 * timer EMT is notified, provided the timer actually has pending scheduling
 * work.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
    if (   VM_IS_EMT(pVM)
        && RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        TM_UNLOCK_TIMERS(pVM);
    }
    else
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}
376
377
/**
 * Try change the state to enmStateNew from enmStateOld.
 *
 * Note that, unlike tmTimerTryWithLink, this does NOT link the timer onto the
 * scheduling queue - it only attempts the state transition itself (via
 * TM_TRY_SET_STATE).
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}
396
397
/**
 * Links the timer onto the scheduling queue.
 *
 * The schedule list is a lock-free LIFO stack linked via self-relative
 * offsets (offScheduleNext); the push is done by compare-and-swapping the
 * head offset (pQueue->offSchedule), retrying when another thread pushed
 * concurrently.
 *
 * @param   pQueue  The timer queue the timer belongs to.
 * @param   pTimer  The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            /* Point our next-offset at the current head (offsets are relative
               to the structure holding them, hence the rebasing). */
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}
421
422
/**
 * Try change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        /* State change succeeded - queue the timer for scheduling work on
           the queue matching its clock. */
        tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}
441
442
/**
 * Links a timer into the active list of a timer queue.
 *
 * The active list is kept sorted by ascending expiration time: the timer is
 * inserted in front of the first entry expiring later than it, or appended at
 * the tail.  Whenever the timer becomes the new head, the queue's cached
 * earliest expiration time (u64Expire) is published as well.
 *
 * @param   pQueue      The queue.
 * @param   pTimer      The timer.
 * @param   u64Expire   The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offNext);
    Assert(!pTimer->offPrev);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */

    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                /* Insert in front of pCur. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    /* New head: publish the new earliest expiration time. */
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                /* Reached the tail: append. */
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
                return;
            }
        }
    }
    else
    {
        /* Empty list: the timer becomes the sole entry and the head. */
        TMTIMER_SET_HEAD(pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
    }
}
495
496
497
/**
 * Schedules the given timer on the given queue.
 *
 * Acts on the timer's current pending state; when a concurrent state change
 * causes the state transition to fail, the switch is re-run up to a couple
 * of times (cRetries) before giving up.
 *
 * @param   pQueue  The timer queue.
 * @param   pTimer  The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                RT_FALL_THRU();

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break; /* retry */
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
587
588
/**
 * Schedules the specified timer queue.
 *
 * Atomically detaches the entire pending-schedule list from the queue and
 * then runs tmTimerQueueScheduleOne on each timer in it.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pQueue  The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);
    NOREF(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         * (Offsets are self-relative, so each link is rebased off its owner.)
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}
629
630
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * Verifies the doubly-linked active lists of every queue and, in ring-3,
 * walks the big created-timers list cross-checking each timer's state against
 * its presence (or absence) in the corresponding active list.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pszWhere    Caller location clue.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);

    /*
     * Check the linking of the active lists.
     */
    bool fHaveVirtualSyncLock = false;
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
        {
            /* The virtual sync queue has its own lock; skip its checks if
               we cannot acquire it without blocking. */
            if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
                continue;
            fHaveVirtualSyncLock = true;
        }
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                /* These states imply the timer is linked into its clock's
                   active list - walk the list to verify it really is there. */
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                    Assert(pCur->offPrev || pCur == pCurAct);
                    while (pCurAct && pCurAct != pCur)
                        pCurAct = TMTIMER_GET_NEXT(pCurAct);
                    Assert(pCurAct == pCur);
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
                /* These states imply the timer must NOT be on the active list. */
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    Assert(!pCur->offNext);
                    Assert(!pCur->offPrev);
                    for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                         pCurAct;
                         pCurAct = TMTIMER_GET_NEXT(pCurAct))
                    {
                        Assert(pCurAct != pCur);
                        Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                        Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                    }
                }
                break;

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */

    if (fHaveVirtualSyncLock)
        PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
#endif /* VBOX_STRICT */
747
748#ifdef VBOX_HIGH_RES_TIMERS_HACK
749
/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         The cross context VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        /* NOTE(review): the scaled results of these two calls are discarded
           (return values not assigned) - looks suspicious; verify against
           tmVirtualGetRaw before changing anything. */
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        /* Only the portion after the warp drive start is rescaled. */
        u64Delta -= u64GipTime;
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}
794
795
796/**
797 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
798 * than the one dedicated to timer work.
799 *
800 * @returns See tmTimerPollInternal.
801 * @param pVM The cross context VM structure.
802 * @param u64Now Current virtual clock timestamp.
803 * @param pu64Delta Where to return the delta.
804 */
805DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
806{
807 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
808 *pu64Delta = s_u64OtherRet;
809 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
810}
811
812
813/**
814 * Worker for tmTimerPollInternal.
815 *
816 * @returns See tmTimerPollInternal.
817 * @param pVM The cross context VM structure.
818 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
819 * @param pVCpuDst The cross context virtual CPU structure of the dedicated
820 * timer EMT.
821 * @param u64Now Current virtual clock timestamp.
822 * @param pu64Delta Where to return the delta.
823 * @param pCounter The statistics counter to update.
824 */
825DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
826 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
827{
828 STAM_COUNTER_INC(pCounter); NOREF(pCounter);
829 if (pVCpuDst != pVCpu)
830 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
831 *pu64Delta = 0;
832 return 0;
833}
834
835/**
836 * Common worker for TMTimerPollGIP and TMTimerPoll.
837 *
838 * This function is called before FFs are checked in the inner execution EM loops.
839 *
840 * @returns The GIP timestamp of the next event.
841 * 0 if the next event has already expired.
842 *
843 * @param pVM The cross context VM structure.
844 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
845 * @param pu64Delta Where to store the delta.
846 *
847 * @thread The emulation thread.
848 *
849 * @remarks GIP uses ns ticks.
850 */
851DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
852{
853 PVMCPU pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
854 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
855 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
856
857 /*
858 * Return straight away if the timer FF is already set ...
859 */
860 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
861 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
862
863 /*
864 * ... or if timers are being run.
865 */
866 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
867 {
868 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
869 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
870 }
871
872 /*
873 * Check for TMCLOCK_VIRTUAL expiration.
874 */
875 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
876 const int64_t i64Delta1 = u64Expire1 - u64Now;
877 if (i64Delta1 <= 0)
878 {
879 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
880 {
881 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
882 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
883 }
884 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
885 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
886 }
887
888 /*
889 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
890 * This isn't quite as straight forward if in a catch-up, not only do
891 * we have to adjust the 'now' but when have to adjust the delta as well.
892 */
893
894 /*
895 * Optimistic lockless approach.
896 */
897 uint64_t u64VirtualSyncNow;
898 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
899 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
900 {
901 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
902 {
903 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
904 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
905 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
906 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
907 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
908 {
909 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
910 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
911 if (i64Delta2 > 0)
912 {
913 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
914 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
915
916 if (pVCpu == pVCpuDst)
917 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
918 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
919 }
920
921 if ( !pVM->tm.s.fRunningQueues
922 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
923 {
924 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
925 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
926 }
927
928 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
929 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
930 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
931 }
932 }
933 }
934 else
935 {
936 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
937 LogFlow(("TMTimerPoll: stopped\n"));
938 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
939 }
940
941 /*
942 * Complicated lockless approach.
943 */
944 uint64_t off;
945 uint32_t u32Pct = 0;
946 bool fCatchUp;
947 int cOuterTries = 42;
948 for (;; cOuterTries--)
949 {
950 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
951 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
952 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
953 if (fCatchUp)
954 {
955 /* No changes allowed, try get a consistent set of parameters. */
956 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
957 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
958 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
959 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
960 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
961 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
962 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
963 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
964 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
965 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
966 || cOuterTries <= 0)
967 {
968 uint64_t u64Delta = u64Now - u64Prev;
969 if (RT_LIKELY(!(u64Delta >> 32)))
970 {
971 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
972 if (off > u64Sub + offGivenUp)
973 off -= u64Sub;
974 else /* we've completely caught up. */
975 off = offGivenUp;
976 }
977 else
978 /* More than 4 seconds since last time (or negative), ignore it. */
979 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
980
981 /* Check that we're still running and in catch up. */
982 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
983 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
984 break;
985 }
986 }
987 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
988 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
989 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
990 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
991 break; /* Got an consistent offset */
992
993 /* Repeat the initial checks before iterating. */
994 if (VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
995 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
996 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
997 {
998 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
999 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1000 }
1001 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
1002 {
1003 LogFlow(("TMTimerPoll: stopped\n"));
1004 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1005 }
1006 if (cOuterTries <= 0)
1007 break; /* that's enough */
1008 }
1009 if (cOuterTries <= 0)
1010 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
1011 u64VirtualSyncNow = u64Now - off;
1012
1013 /* Calc delta and see if we've got a virtual sync hit. */
1014 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
1015 if (i64Delta2 <= 0)
1016 {
1017 if ( !pVM->tm.s.fRunningQueues
1018 && !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
1019 {
1020 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
1021 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
1022 }
1023 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
1024 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
1025 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1026 }
1027
1028 /*
1029 * Return the time left to the next event.
1030 */
1031 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
1032 if (pVCpu == pVCpuDst)
1033 {
1034 if (fCatchUp)
1035 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
1036 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
1037 }
1038 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1039}
1040
1041
1042/**
1043 * Set FF if we've passed the next virtual event.
1044 *
1045 * This function is called before FFs are checked in the inner execution EM loops.
1046 *
1047 * @returns true if timers are pending, false if not.
1048 *
1049 * @param pVM The cross context VM structure.
1050 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1051 * @thread The emulation thread.
1052 */
1053VMMDECL(bool) TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu)
1054{
1055 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1056 uint64_t off = 0;
1057 tmTimerPollInternal(pVM, pVCpu, &off);
1058 return off == 0;
1059}
1060
1061
1062/**
1063 * Set FF if we've passed the next virtual event.
1064 *
1065 * This function is called before FFs are checked in the inner execution EM loops.
1066 *
1067 * @param pVM The cross context VM structure.
1068 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1069 * @thread The emulation thread.
1070 */
1071VMM_INT_DECL(void) TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu)
1072{
1073 uint64_t off;
1074 tmTimerPollInternal(pVM, pVCpu, &off);
1075}
1076
1077
1078/**
1079 * Set FF if we've passed the next virtual event.
1080 *
1081 * This function is called before FFs are checked in the inner execution EM loops.
1082 *
1083 * @returns The GIP timestamp of the next event.
1084 * 0 if the next event has already expired.
1085 * @param pVM The cross context VM structure.
1086 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1087 * @param pu64Delta Where to store the delta.
1088 * @thread The emulation thread.
1089 */
1090VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta)
1091{
1092 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
1093}
1094
1095#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1096
/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * Translates the current-context timer pointer via MMHyperCCToR3 using the
 * owning VM taken from the timer itself.
 *
 * @returns HC R3 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}
1107
1108
/**
 * Gets the host context ring-0 pointer of the timer.
 *
 * Translates the current-context timer pointer via MMHyperCCToR0 using the
 * owning VM taken from the timer itself.
 *
 * @returns HC R0 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}
1119
1120
/**
 * Gets the RC pointer of the timer.
 *
 * Translates the current-context timer pointer via MMHyperCCToRC using the
 * owning VM taken from the timer itself.
 *
 * @returns RC pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}
1131
1132
/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pTimer              The timer which clock lock we wish to take.
 * @param   rcBusy              What to return in ring-0 and raw-mode context
 *                              if the lock is busy.  Pass VINF_SUCCESS to
 *                              acquire the critical section thru a ring-3
 *                              call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
{
    AssertPtr(pTimer);
    /* Only the virtual sync clock has a (shared) lock. */
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
}
1152
1153
/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pTimer              The timer which clock to unlock.
 */
VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    /* Only the virtual sync clock has a (shared) lock. */
    AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}
1165
1166
/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if it's the owner, @c false if not.
 * @param   pTimer              The timer handle.
 */
VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    /* Only the virtual sync clock has a (shared) lock. */
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}
1179
1180
1181/**
1182 * Optimized TMTimerSet code path for starting an inactive timer.
1183 *
1184 * @returns VBox status code.
1185 *
1186 * @param pVM The cross context VM structure.
1187 * @param pTimer The timer handle.
1188 * @param u64Expire The new expire time.
1189 */
1190static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
1191{
1192 Assert(!pTimer->offPrev);
1193 Assert(!pTimer->offNext);
1194 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1195
1196 TMCLOCK const enmClock = pTimer->enmClock;
1197
1198 /*
1199 * Calculate and set the expiration time.
1200 */
1201 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1202 {
1203 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1204 AssertMsgStmt(u64Expire >= u64Last,
1205 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1206 u64Expire = u64Last);
1207 }
1208 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1209 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
1210
1211 /*
1212 * Link the timer into the active list.
1213 */
1214 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1215
1216 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1217 TM_UNLOCK_TIMERS(pVM);
1218 return VINF_SUCCESS;
1219}
1220
1221
/**
 * TMTimerSet for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.
 *
 * @returns VBox status code
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   u64Expire       The expiration time.
 */
static int tmTimerVirtualSyncSet(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* With VirtualSyncLock held a plain state read and switch suffices. */
    PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_EXPIRED_DELIVER:
        case TMTIMERSTATE_STOPPED:
            if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);

            /* Inactive timer: set the expire time and link it onto the queue. */
            AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
                      ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
            pTimer->u64Expire = u64Expire;
            TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_ACTIVE:
            /* Already armed: unlink and re-link so the queue stays sorted. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            pTimer->u64Expire = u64Expire;
            tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            /* Virtual sync timers bypass the scheduling list, so the pending
               and transitional states are invalid here. */
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}
1291
1292
/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Expire       New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
    STAM_COUNTER_INC(&pTimer->StatSetAbsolute);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        || (   enmState1 == TMTIMERSTATE_STOPPED
            && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                /* tmTimerSetOptimizedStart drops the TM lock for us. */
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            TM_UNLOCK_TIMERS(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                /* State changed under us; retry. */
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                /* State changed under us; retry. */
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                /* State changed under us; retry. */
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                /* State changed under us; retry. */
                break;


            /* Transitional states owned by another thread: yield and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
1450
1451
1452/**
1453 * Return the current time for the specified clock, setting pu64Now if not NULL.
1454 *
1455 * @returns Current time.
1456 * @param pVM The cross context VM structure.
1457 * @param enmClock The clock to query.
1458 * @param pu64Now Optional pointer where to store the return time
1459 */
1460DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVMCC pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1461{
1462 uint64_t u64Now;
1463 switch (enmClock)
1464 {
1465 case TMCLOCK_VIRTUAL_SYNC:
1466 u64Now = TMVirtualSyncGet(pVM);
1467 break;
1468 case TMCLOCK_VIRTUAL:
1469 u64Now = TMVirtualGet(pVM);
1470 break;
1471 case TMCLOCK_REAL:
1472 u64Now = TMRealGet(pVM);
1473 break;
1474 default:
1475 AssertFatalMsgFailed(("%d\n", enmClock));
1476 }
1477
1478 if (pu64Now)
1479 *pu64Now = u64Now;
1480 return u64Now;
1481}
1482
1483
/**
 * Optimized TMTimerSetRelative code path.
 *
 * The caller has already taken the TM timer lock and switched the timer into
 * the ACTIVE state; this worker computes the absolute expire time, links the
 * timer onto its queue's active list and drops the lock.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    /* NOTE(review): plain store here vs. ASMAtomicWriteU64 in
       tmTimerSetOptimizedStart -- confirm that no lockless reader can see
       the timer before it is linked below. */
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
    tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    TM_UNLOCK_TIMERS(pVM);
    return VINF_SUCCESS;
}
1519
1520
1521/**
1522 * TMTimerSetRelative for the virtual sync timer queue.
1523 *
1524 * This employs a greatly simplified state machine by always acquiring the
1525 * queue lock and bypassing the scheduling list.
1526 *
1527 * @returns VBox status code
1528 * @param pVM The cross context VM structure.
1529 * @param pTimer The timer to (re-)arm.
1530 * @param cTicksToNext Clock ticks until the next time expiration.
1531 * @param pu64Now Where to return the current time stamp used.
1532 * Optional.
1533 */
1534static int tmTimerVirtualSyncSetRelative(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1535{
1536 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1537 VM_ASSERT_EMT(pVM);
1538 TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
1539 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1540 AssertRCReturn(rc, rc);
1541
1542 /* Calculate the expiration tick. */
1543 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1544 if (pu64Now)
1545 *pu64Now = u64Expire;
1546 u64Expire += cTicksToNext;
1547
1548 /* Update the timer. */
1549 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1550 TMTIMERSTATE enmState = pTimer->enmState;
1551 switch (enmState)
1552 {
1553 case TMTIMERSTATE_EXPIRED_DELIVER:
1554 case TMTIMERSTATE_STOPPED:
1555 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1556 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1557 else
1558 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1559 pTimer->u64Expire = u64Expire;
1560 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1561 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1562 rc = VINF_SUCCESS;
1563 break;
1564
1565 case TMTIMERSTATE_ACTIVE:
1566 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1567 tmTimerQueueUnlinkActive(pQueue, pTimer);
1568 pTimer->u64Expire = u64Expire;
1569 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1570 rc = VINF_SUCCESS;
1571 break;
1572
1573 case TMTIMERSTATE_PENDING_RESCHEDULE:
1574 case TMTIMERSTATE_PENDING_STOP:
1575 case TMTIMERSTATE_PENDING_SCHEDULE:
1576 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1577 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1578 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1579 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1580 case TMTIMERSTATE_DESTROY:
1581 case TMTIMERSTATE_FREE:
1582 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1583 rc = VERR_TM_INVALID_STATE;
1584 break;
1585
1586 default:
1587 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1588 rc = VERR_TM_UNKNOWN_STATE;
1589 break;
1590 }
1591
1592 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1593 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1594 return rc;
1595}
1596
1597
/**
 * Arm a timer with a expire time relative to the current time.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
    STAM_COUNTER_INC(&pTimer->StatSetRelative);

    /* Treat virtual sync timers specially. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues. As an additional restraint we require the
     * timer to have a critical section associated with to be 100% there aren't
     * concurrent operations on the timer. (This latter isn't necessary any
     * longer as this isn't supported for any timers, critsect or not.)
     *
     * Note! Lock ordering doesn't apply when we only tries to
     * get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            /* tmTimerSetRelativeOptimizedStart drops the TM lock for us. */
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    int rc;
    TMCLOCK const enmClock = pTimer->enmClock;
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *  Figure a safe way of activating this timer while the queue is
                     *  being run.
                     *  (99.9% sure this that the assertion is caused by DevAPIC.cpp
                     *  re-starting the timer in response to a initial_count write.) */
                }
                RT_FALL_THRU();
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            /* Transitional states owned by another thread: yield and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            /* Hard failure: still report 'now' to the caller before bailing. */
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_TIMER_UNSTABLE_STATE;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
            fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM));

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnTMLock)
        TM_UNLOCK_TIMERS(pVM);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}
1818
1819
1820/**
1821 * Drops a hint about the frequency of the timer.
1822 *
1823 * This is used by TM and the VMM to calculate how often guest execution needs
1824 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1825 *
1826 * @returns VBox status code.
1827 * @param pTimer Timer handle as returned by one of the create
1828 * functions.
1829 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1830 *
1831 * @remarks We're using an integer hertz value here since anything above 1 HZ
1832 * is not going to be any trouble satisfying scheduling wise. The
1833 * range where it makes sense is >= 100 HZ.
1834 */
1835VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1836{
1837 TMTIMER_ASSERT_CRITSECT(pTimer);
1838
1839 uint32_t const uHzOldHint = pTimer->uHzHint;
1840 pTimer->uHzHint = uHzHint;
1841
1842 PVM pVM = pTimer->CTX_SUFF(pVM);
1843 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1844 if ( uHzHint > uMaxHzHint
1845 || uHzOldHint >= uMaxHzHint)
1846 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1847
1848 return VINF_SUCCESS;
1849}
1850
1851
/**
 * TMTimerStop for the virtual sync timer queue.
 *
 * This employs a greatly simplified state machine by always acquiring the
 * queue lock and bypassing the scheduling list.  Unlike the generic
 * TMTimerStop path below, no lock-free state transitions (and therefore no
 * retry loop) are needed here because the lock serializes all access.
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   pTimer      The timer handle.
 */
static int tmTimerVirtualSyncStop(PVMCC pVM, PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    VM_ASSERT_EMT(pVM);
    TMTIMER_ASSERT_SYNC_CRITSECT_ORDER(pVM, pTimer);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
    AssertRCReturn(rc, rc);

    /* Reset the HZ hint; if this timer may have been defining the cached
       maximum, flag the cache for recalculation. */
    if (pTimer->uHzHint)
    {
        if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
        pTimer->uHzHint = 0;
    }

    /* Update the timer state.  With the lock held, only the simple states
       below are expected; the PENDING_* scheduling states belong to the
       lock-free queues and are invalid here. */
    PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_ACTIVE:
            /* Linked into the active list: unlink before marking stopped. */
            tmTimerQueueUnlinkActive(pQueue, pTimer);
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_EXPIRED_DELIVER:
            /* Expired but not yet delivered; not on the active list. */
            TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_STOPPED:
            /* Already stopped - nothing to do. */
            rc = VINF_SUCCESS;
            break;

        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
        case TMTIMERSTATE_EXPIRED_GET_UNLINK:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_INVALID_STATE;
            break;

        default:
            AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_TM_UNKNOWN_STATE;
            break;
    }

    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
    return rc;
}
1921
1922
/**
 * Stop the timer.
 * Use TMR3TimerArm() to "un-stop" the timer.
 *
 * Non-virtual-sync timers are stopped via lock-free state transitions
 * (tmTimerTry / tmTimerTryWithLink) with a bounded retry loop; transient
 * states seen mid-transition by another context cause a yield-and-retry.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
{
    PVMCC pVM = pTimer->CTX_SUFF(pVM);
    STAM_COUNTER_INC(&pTimer->StatStop);

    /* Treat virtual sync timers specially: they use the lock-based path. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncStop(pVM, pTimer);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    /*
     * Reset the HZ hint.  If this timer may have defined the cached maximum,
     * flag the cache for recalculation (same pattern as tmTimerVirtualSyncStop).
     */
    if (pTimer->uHzHint)
    {
        if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
        pTimer->uHzHint = 0;
    }

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            /* Already stopped or a stop is pending - nothing more to do. */
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            /* Scheduled but not yet active: flip to stop-schedule and let the
               scheduler sort it out.  On CAS failure fall out and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Active timer: the transition must also guard the list linkage,
               hence the WithLink variant. */
            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Transient states owned by another context: yield and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    /* The state never stabilized within the retry budget. */
    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_TM_TIMER_UNSTABLE_STATE;
}
2029
2030
2031/**
2032 * Get the current clock time.
2033 * Handy for calculating the new expire time.
2034 *
2035 * @returns Current clock time.
2036 * @param pTimer Timer handle as returned by one of the create functions.
2037 */
2038VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
2039{
2040 PVMCC pVM = pTimer->CTX_SUFF(pVM);
2041 STAM_COUNTER_INC(&pTimer->StatGet);
2042
2043 uint64_t u64;
2044 switch (pTimer->enmClock)
2045 {
2046 case TMCLOCK_VIRTUAL:
2047 u64 = TMVirtualGet(pVM);
2048 break;
2049 case TMCLOCK_VIRTUAL_SYNC:
2050 u64 = TMVirtualSyncGet(pVM);
2051 break;
2052 case TMCLOCK_REAL:
2053 u64 = TMRealGet(pVM);
2054 break;
2055 default:
2056 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2057 return UINT64_MAX;
2058 }
2059 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2060 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2061 return u64;
2062}
2063
2064
2065/**
2066 * Get the frequency of the timer clock.
2067 *
2068 * @returns Clock frequency (as Hz of course).
2069 * @param pTimer Timer handle as returned by one of the create functions.
2070 */
2071VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
2072{
2073 switch (pTimer->enmClock)
2074 {
2075 case TMCLOCK_VIRTUAL:
2076 case TMCLOCK_VIRTUAL_SYNC:
2077 return TMCLOCK_FREQ_VIRTUAL;
2078
2079 case TMCLOCK_REAL:
2080 return TMCLOCK_FREQ_REAL;
2081
2082 default:
2083 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2084 return 0;
2085 }
2086}
2087
2088
/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * Reads the state without locking; when the timer is caught in one of the
 * transient SET_EXPIRE states it yields and retries a bounded number of
 * times before giving up.
 *
 * @returns Expire time of the timer.  ~0 (UINT64_MAX) when the timer is not
 *          active or the state never stabilizes.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Not armed (or being stopped): there is no expire time. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            /* Armed: u64Expire is valid in these states. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            /* Another context is in the middle of setting the expire time;
               yield and retry until it settles. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    /* Retry budget exhausted without observing a stable state. */
    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}
2149
2150
2151/**
2152 * Checks if a timer is active or not.
2153 *
2154 * @returns True if active.
2155 * @returns False if not active.
2156 * @param pTimer Timer handle as returned by one of the create functions.
2157 */
2158VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
2159{
2160 TMTIMERSTATE enmState = pTimer->enmState;
2161 switch (enmState)
2162 {
2163 case TMTIMERSTATE_STOPPED:
2164 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2165 case TMTIMERSTATE_EXPIRED_DELIVER:
2166 case TMTIMERSTATE_PENDING_STOP:
2167 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2168 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2169 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2170 return false;
2171
2172 case TMTIMERSTATE_ACTIVE:
2173 case TMTIMERSTATE_PENDING_RESCHEDULE:
2174 case TMTIMERSTATE_PENDING_SCHEDULE:
2175 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2176 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2177 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2178 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2179 return true;
2180
2181 /*
2182 * Invalid states.
2183 */
2184 case TMTIMERSTATE_DESTROY:
2185 case TMTIMERSTATE_FREE:
2186 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
2187 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2188 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2189 return false;
2190 default:
2191 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2192 return false;
2193 }
2194}
2195
2196
2197/* -=-=-=-=-=-=- Convenience APIs -=-=-=-=-=-=- */
2198
2199
2200/**
2201 * Arm a timer with a (new) expire time relative to current time.
2202 *
2203 * @returns VBox status code.
2204 * @param pTimer Timer handle as returned by one of the create functions.
2205 * @param cMilliesToNext Number of milliseconds to the next tick.
2206 */
2207VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
2208{
2209 switch (pTimer->enmClock)
2210 {
2211 case TMCLOCK_VIRTUAL:
2212 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2213 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2214
2215 case TMCLOCK_VIRTUAL_SYNC:
2216 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2217 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
2218
2219 case TMCLOCK_REAL:
2220 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2221 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
2222
2223 default:
2224 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2225 return VERR_TM_TIMER_BAD_CLOCK;
2226 }
2227}
2228
2229
2230/**
2231 * Arm a timer with a (new) expire time relative to current time.
2232 *
2233 * @returns VBox status code.
2234 * @param pTimer Timer handle as returned by one of the create functions.
2235 * @param cMicrosToNext Number of microseconds to the next tick.
2236 */
2237VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
2238{
2239 switch (pTimer->enmClock)
2240 {
2241 case TMCLOCK_VIRTUAL:
2242 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2243 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2244
2245 case TMCLOCK_VIRTUAL_SYNC:
2246 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2247 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
2248
2249 case TMCLOCK_REAL:
2250 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2251 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
2252
2253 default:
2254 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2255 return VERR_TM_TIMER_BAD_CLOCK;
2256 }
2257}
2258
2259
2260/**
2261 * Arm a timer with a (new) expire time relative to current time.
2262 *
2263 * @returns VBox status code.
2264 * @param pTimer Timer handle as returned by one of the create functions.
2265 * @param cNanosToNext Number of nanoseconds to the next tick.
2266 */
2267VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
2268{
2269 switch (pTimer->enmClock)
2270 {
2271 case TMCLOCK_VIRTUAL:
2272 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2273 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2274
2275 case TMCLOCK_VIRTUAL_SYNC:
2276 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2277 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
2278
2279 case TMCLOCK_REAL:
2280 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2281 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
2282
2283 default:
2284 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2285 return VERR_TM_TIMER_BAD_CLOCK;
2286 }
2287}
2288
2289
2290/**
2291 * Get the current clock time as nanoseconds.
2292 *
2293 * @returns The timer clock as nanoseconds.
2294 * @param pTimer Timer handle as returned by one of the create functions.
2295 */
2296VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
2297{
2298 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
2299}
2300
2301
2302/**
2303 * Get the current clock time as microseconds.
2304 *
2305 * @returns The timer clock as microseconds.
2306 * @param pTimer Timer handle as returned by one of the create functions.
2307 */
2308VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
2309{
2310 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
2311}
2312
2313
2314/**
2315 * Get the current clock time as milliseconds.
2316 *
2317 * @returns The timer clock as milliseconds.
2318 * @param pTimer Timer handle as returned by one of the create functions.
2319 */
2320VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
2321{
2322 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
2323}
2324
2325
2326/**
2327 * Converts the specified timer clock time to nanoseconds.
2328 *
2329 * @returns nanoseconds.
2330 * @param pTimer Timer handle as returned by one of the create functions.
2331 * @param u64Ticks The clock ticks.
2332 * @remark There could be rounding errors here. We just do a simple integer divide
2333 * without any adjustments.
2334 */
2335VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
2336{
2337 switch (pTimer->enmClock)
2338 {
2339 case TMCLOCK_VIRTUAL:
2340 case TMCLOCK_VIRTUAL_SYNC:
2341 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2342 return u64Ticks;
2343
2344 case TMCLOCK_REAL:
2345 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2346 return u64Ticks * 1000000;
2347
2348 default:
2349 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2350 return 0;
2351 }
2352}
2353
2354
2355/**
2356 * Converts the specified timer clock time to microseconds.
2357 *
2358 * @returns microseconds.
2359 * @param pTimer Timer handle as returned by one of the create functions.
2360 * @param u64Ticks The clock ticks.
2361 * @remark There could be rounding errors here. We just do a simple integer divide
2362 * without any adjustments.
2363 */
2364VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
2365{
2366 switch (pTimer->enmClock)
2367 {
2368 case TMCLOCK_VIRTUAL:
2369 case TMCLOCK_VIRTUAL_SYNC:
2370 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2371 return u64Ticks / 1000;
2372
2373 case TMCLOCK_REAL:
2374 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2375 return u64Ticks * 1000;
2376
2377 default:
2378 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2379 return 0;
2380 }
2381}
2382
2383
2384/**
2385 * Converts the specified timer clock time to milliseconds.
2386 *
2387 * @returns milliseconds.
2388 * @param pTimer Timer handle as returned by one of the create functions.
2389 * @param u64Ticks The clock ticks.
2390 * @remark There could be rounding errors here. We just do a simple integer divide
2391 * without any adjustments.
2392 */
2393VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
2394{
2395 switch (pTimer->enmClock)
2396 {
2397 case TMCLOCK_VIRTUAL:
2398 case TMCLOCK_VIRTUAL_SYNC:
2399 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2400 return u64Ticks / 1000000;
2401
2402 case TMCLOCK_REAL:
2403 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2404 return u64Ticks;
2405
2406 default:
2407 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2408 return 0;
2409 }
2410}
2411
2412
2413/**
2414 * Converts the specified nanosecond timestamp to timer clock ticks.
2415 *
2416 * @returns timer clock ticks.
2417 * @param pTimer Timer handle as returned by one of the create functions.
2418 * @param cNanoSecs The nanosecond value ticks to convert.
2419 * @remark There could be rounding and overflow errors here.
2420 */
2421VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
2422{
2423 switch (pTimer->enmClock)
2424 {
2425 case TMCLOCK_VIRTUAL:
2426 case TMCLOCK_VIRTUAL_SYNC:
2427 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2428 return cNanoSecs;
2429
2430 case TMCLOCK_REAL:
2431 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2432 return cNanoSecs / 1000000;
2433
2434 default:
2435 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2436 return 0;
2437 }
2438}
2439
2440
2441/**
2442 * Converts the specified microsecond timestamp to timer clock ticks.
2443 *
2444 * @returns timer clock ticks.
2445 * @param pTimer Timer handle as returned by one of the create functions.
2446 * @param cMicroSecs The microsecond value ticks to convert.
2447 * @remark There could be rounding and overflow errors here.
2448 */
2449VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
2450{
2451 switch (pTimer->enmClock)
2452 {
2453 case TMCLOCK_VIRTUAL:
2454 case TMCLOCK_VIRTUAL_SYNC:
2455 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2456 return cMicroSecs * 1000;
2457
2458 case TMCLOCK_REAL:
2459 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2460 return cMicroSecs / 1000;
2461
2462 default:
2463 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2464 return 0;
2465 }
2466}
2467
2468
2469/**
2470 * Converts the specified millisecond timestamp to timer clock ticks.
2471 *
2472 * @returns timer clock ticks.
2473 * @param pTimer Timer handle as returned by one of the create functions.
2474 * @param cMilliSecs The millisecond value ticks to convert.
2475 * @remark There could be rounding and overflow errors here.
2476 */
2477VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
2478{
2479 switch (pTimer->enmClock)
2480 {
2481 case TMCLOCK_VIRTUAL:
2482 case TMCLOCK_VIRTUAL_SYNC:
2483 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2484 return cMilliSecs * 1000000;
2485
2486 case TMCLOCK_REAL:
2487 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2488 return cMilliSecs;
2489
2490 default:
2491 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2492 return 0;
2493 }
2494}
2495
2496
2497/**
2498 * Convert state to string.
2499 *
2500 * @returns Readonly status name.
2501 * @param enmState State.
2502 */
2503const char *tmTimerState(TMTIMERSTATE enmState)
2504{
2505 switch (enmState)
2506 {
2507#define CASE(num, state) \
2508 case TMTIMERSTATE_##state: \
2509 AssertCompile(TMTIMERSTATE_##state == (num)); \
2510 return #num "-" #state
2511 CASE( 1,STOPPED);
2512 CASE( 2,ACTIVE);
2513 CASE( 3,EXPIRED_GET_UNLINK);
2514 CASE( 4,EXPIRED_DELIVER);
2515 CASE( 5,PENDING_STOP);
2516 CASE( 6,PENDING_STOP_SCHEDULE);
2517 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2518 CASE( 8,PENDING_SCHEDULE);
2519 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2520 CASE(10,PENDING_RESCHEDULE);
2521 CASE(11,DESTROY);
2522 CASE(12,FREE);
2523 default:
2524 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2525 return "Invalid state!";
2526#undef CASE
2527 }
2528}
2529
2530
/**
 * Gets the highest frequency hint for all the important timers.
 *
 * @returns The highest frequency. 0 if no timers care.
 * @param   pVM         The cross context VM structure.
 */
static uint32_t tmGetFrequencyHint(PVM pVM)
{
    /*
     * Query the value, recalculate it if necessary.
     *
     * The "right" highest frequency value isn't so important that we'll block
     * waiting on the timer semaphore.
     */
    uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
    if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
    {
        /* Only recalculate if we can get the lock without blocking; otherwise
           the possibly stale cached value is returned. */
        if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM)))
        {
            /* Clear the flag first so a concurrent hint change after this
               point re-flags the cache rather than getting lost. */
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);

            /*
             * Loop over the timers associated with each clock.
             */
            uMaxHzHint = 0;
            for (int i = 0; i < TMCLOCK_MAX; i++)
            {
                PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
                for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
                {
                    uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
                    if (uHzHint > uMaxHzHint)
                    {
                        /* Only timers that are (or are becoming) armed count
                           towards the maximum. */
                        switch (pCur->enmState)
                        {
                            case TMTIMERSTATE_ACTIVE:
                            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                            case TMTIMERSTATE_EXPIRED_DELIVER:
                            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_SCHEDULE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE:
                                uMaxHzHint = uHzHint;
                                break;

                            case TMTIMERSTATE_STOPPED:
                            case TMTIMERSTATE_PENDING_STOP:
                            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                            case TMTIMERSTATE_DESTROY:
                            case TMTIMERSTATE_FREE:
                                break;
                            /* no default, want gcc warnings when adding more states. */
                        }
                    }
                }
            }
            /* Publish the recalculated maximum for other readers. */
            ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
            Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
            TM_UNLOCK_TIMERS(pVM);
        }
    }
    return uMaxHzHint;
}
2594
2595
/**
 * Calculates a host timer frequency that would be suitable for the current
 * timer load.
 *
 * This will take the highest timer frequency, adjust for catch-up and warp
 * driver, and finally add a little fudge factor.  The caller (VMM) will use
 * the result to adjust the per-cpu preemption timer.
 *
 * @returns The highest frequency. 0 if no important timers around.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint32_t uHz = tmGetFrequencyHint(pVM);

    /* Catch up, we have to be more aggressive than the % indicates at the
       beginning of the effort. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
    {
        /* Unordered peek above, percentage read, then an ordered re-check of
           the flag before the percentage is actually applied. */
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            /* Fudge factor scales with how far behind we are (<=100%, <=200%,
               <=400%); beyond 400% the raw percentage is used as-is. */
            if (u32Pct <= 100)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
            else if (u32Pct <= 200)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
            else if (u32Pct <= 400)
                u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
            uHz *= u32Pct + 100;
            uHz /= 100;
        }
    }

    /* Warp drive.  Same peek / read / re-check pattern as above. */
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
    {
        uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
        {
            uHz *= u32Pct;
            uHz /= 100;
        }
    }

    /* Fudge factor.  The CPU that runs the timer callbacks gets its own
       (typically larger) percentage. */
    if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
    else
        uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
    uHz /= 100;

    /* Make sure it isn't too high. */
    if (uHz > pVM->tm.s.cHostHzMax)
        uHz = pVM->tm.s.cHostHzMax;

    return uHz;
}
2654
2655
2656/**
2657 * Whether the guest virtual clock is ticking.
2658 *
2659 * @returns true if ticking, false otherwise.
2660 * @param pVM The cross context VM structure.
2661 */
2662VMM_INT_DECL(bool) TMVirtualIsTicking(PVM pVM)
2663{
2664 return RT_BOOL(pVM->tm.s.cVirtualTicking);
2665}
2666
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette