VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@32796

Last change on this file since 32796 was 32796, checked in by vboxsync, 14 years ago

GVMMR0,TM,STAM: Periodic preemption timer fixes, adjustments and statistics. (still disabled)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 82.6 KB
1/* $Id: TMAll.cpp 32796 2010-09-28 14:54:41Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/tm.h>
24#include <VBox/mm.h>
25#ifdef IN_RING3
26# include <VBox/rem.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vm.h>
30
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <VBox/log.h>
34#include <VBox/sup.h>
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** @def TMTIMER_ASSERT_CRITSECT
48 * Checks that the caller owns the critical section if one is associated with
49 * the timer. */
50#ifdef VBOX_STRICT
51# define TMTIMER_ASSERT_CRITSECT(pTimer) \
52 do { \
53 if ((pTimer)->pCritSect) \
54 { \
55 PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
56 AssertMsg(pCritSect && PDMCritSectIsOwner(pCritSect), \
57 ("pTimer=%p (%s) pCritSect=%p\n", pTimer, R3STRING(pTimer->pszDesc), (pTimer)->pCritSect)); \
58 } \
59 } while (0)
60#else
61# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
62#endif
63
64
65#ifndef tmTimerLock
66
67/**
68 * Try to take the timer lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
69 *
70 * @retval VINF_SUCCESS on success (always in ring-3).
71 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
72 *
73 * @param pVM The VM handle.
74 *
75 * @thread EMTs for the time being.
76 */
77int tmTimerLock(PVM pVM)
78{
79 VM_ASSERT_EMT(pVM);
80 int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
81 return rc;
82}
83
84
85/**
86 * Try to take the timer lock, without waiting.
87 *
88 * @retval VINF_SUCCESS on success.
89 * @retval VERR_SEM_BUSY if busy.
90 *
91 * @param pVM The VM handle.
92 */
93int tmTimerTryLock(PVM pVM)
94{
95 int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
96 return rc;
97}
98
99
100/**
101 * Release the EMT/TM lock.
102 *
103 * @param pVM The VM handle.
104 */
105void tmTimerUnlock(PVM pVM)
106{
107 PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
108}
109
110
111/**
112 * Try to take the VirtualSync lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
113 *
114 * @retval VINF_SUCCESS on success (always in ring-3).
115 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
116 *
117 * @param pVM The VM handle.
118 */
119int tmVirtualSyncLock(PVM pVM)
120{
121 VM_ASSERT_EMT(pVM);
122 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
123 return rc;
124}
125
126
127/**
128 * Try to take the VirtualSync lock, without waiting.
129 *
130 * @retval VINF_SUCCESS on success.
131 * @retval VERR_SEM_BUSY if busy.
132 *
133 * @param pVM The VM handle.
134 */
135int tmVirtualSyncTryLock(PVM pVM)
136{
137 VM_ASSERT_EMT(pVM);
138 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
139 return rc;
140}
141
142
143/**
144 * Release the VirtualSync lock.
145 *
146 * @param pVM The VM handle.
147 */
148void tmVirtualSyncUnlock(PVM pVM)
149{
150 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
151}
152
153#endif /* ! macros */
154
155/**
156 * Notification that execution is about to start.
157 *
158 * This call must always be paired with a TMNotifyEndOfExecution call.
159 *
160 * The function may, depending on the configuration, resume the TSC and future
161 * clocks that only tick when we're executing guest code.
162 *
163 * @param pVCpu The VMCPU to operate on.
164 */
165VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
166{
167 PVM pVM = pVCpu->CTX_SUFF(pVM);
168
169#ifndef VBOX_WITHOUT_NS_ACCOUNTING
170 pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
171#endif
172 if (pVM->tm.s.fTSCTiedToExecution)
173 tmCpuTickResume(pVM, pVCpu);
174}
175
176
177/**
178 * Notification that execution has ended.
179 *
180 * This call must always be paired with a TMNotifyStartOfExecution call.
181 *
182 * The function may, depending on the configuration, suspend the TSC and future
183 * clocks that only tick when we're executing guest code.
184 *
185 * @param pVCpu The VMCPU to operate on.
186 */
187VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
188{
189 PVM pVM = pVCpu->CTX_SUFF(pVM);
190
191 if (pVM->tm.s.fTSCTiedToExecution)
192 tmCpuTickPause(pVM, pVCpu);
193
194#ifndef VBOX_WITHOUT_NS_ACCOUNTING
195 uint64_t const u64NsTs = RTTimeNanoTS();
196 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
197 uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
198 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
199 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
200
201# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
202 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
203 if (cNsExecutingDelta < 5000)
204 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
205 else if (cNsExecutingDelta < 50000)
206 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
207 else
208 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
209 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
210 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
211 if (cNsOtherNewDelta > 0)
212 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
213# endif
214
215 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
216 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
217 pVCpu->tm.s.cNsTotal = cNsTotalNew;
218 pVCpu->tm.s.cNsOther = cNsOtherNew;
219 pVCpu->tm.s.cPeriodsExecuting++;
220 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
221#endif
222}
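/*
 * Illustrative sketch, not part of the original file: the uTimesGen updates
 * above follow a seqlock pattern (the generation is odd while a write is in
 * progress), so a lock-free reader can take a consistent snapshot of the
 * accounting fields by re-reading the generation until it is even and
 * unchanged.  The helper name below is hypothetical.
 */
#if 0 /* example only */
static uint64_t tmSampleNsExecuting(PVMCPU pVCpu)
{
    uint64_t cNsExecuting;
    uint32_t uGen;
    do
    {
        uGen         = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen); /* odd => writer active */
        cNsExecuting = pVCpu->tm.s.cNsExecuting;
    } while (   (uGen & 1)
             || uGen != ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen));
    return cNsExecuting;
}
#endif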
223
224
225/**
226 * Notification that the CPU is entering the halt state.
227 *
228 * This call must always be paired with a TMNotifyEndOfHalt call.
229 *
230 * The function may, depending on the configuration, resume the TSC and future
231 * clocks that only tick when we're halted.
232 *
233 * @param pVCpu The VMCPU to operate on.
234 */
235VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
236{
237 PVM pVM = pVCpu->CTX_SUFF(pVM);
238
239#ifndef VBOX_WITHOUT_NS_ACCOUNTING
240 pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
241#endif
242
243 if ( pVM->tm.s.fTSCTiedToExecution
244 && !pVM->tm.s.fTSCNotTiedToHalt)
245 tmCpuTickResume(pVM, pVCpu);
246}
247
248
249/**
250 * Notification that the CPU is leaving the halt state.
251 *
252 * This call must always be paired with a TMNotifyStartOfHalt call.
253 *
254 * The function may, depending on the configuration, suspend the TSC and future
255 * clocks that only tick when we're halted.
256 *
257 * @param pVCpu The VMCPU to operate on.
258 */
259VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
260{
261 PVM pVM = pVCpu->CTX_SUFF(pVM);
262
263 if ( pVM->tm.s.fTSCTiedToExecution
264 && !pVM->tm.s.fTSCNotTiedToHalt)
265 tmCpuTickPause(pVM, pVCpu);
266
267#ifndef VBOX_WITHOUT_NS_ACCOUNTING
268 uint64_t const u64NsTs = RTTimeNanoTS();
269 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
270 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
271 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
272 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
273
274# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
275 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
276 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
277 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
278 if (cNsOtherNewDelta > 0)
279 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
280# endif
281
282 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
283 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
284 pVCpu->tm.s.cNsTotal = cNsTotalNew;
285 pVCpu->tm.s.cNsOther = cNsOtherNew;
286 pVCpu->tm.s.cPeriodsHalted++;
287 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
288#endif
289}
290
291
292/**
293 * Raise the timer force action flag and notify the dedicated timer EMT.
294 *
295 * @param pVM The VM handle.
296 */
297DECLINLINE(void) tmScheduleNotify(PVM pVM)
298{
299 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
300 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
301 {
302 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
303 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
304#ifdef IN_RING3
305 REMR3NotifyTimerPending(pVM, pVCpuDst);
306 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
307#endif
308 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
309 }
310}
311
312
313/**
314 * Schedule the queue which was changed.
315 */
316DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
317{
318 PVM pVM = pTimer->CTX_SUFF(pVM);
319 if ( VM_IS_EMT(pVM)
320 && RT_SUCCESS(tmTimerTryLock(pVM)))
321 {
322 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
323 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
324 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
325#ifdef VBOX_STRICT
326 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
327#endif
328 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
329 tmTimerUnlock(pVM);
330 }
331 else
332 {
333 TMTIMERSTATE enmState = pTimer->enmState;
334 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
335 tmScheduleNotify(pVM);
336 }
337}
338
339
340/**
341 * Try to change the state to enmStateNew from enmStateOld.
342 * (No scheduling-queue linking is done here; see tmTimerTryWithLink.)
343 *
344 * @returns Success indicator.
345 * @param pTimer Timer in question.
346 * @param enmStateNew The new timer state.
347 * @param enmStateOld The old timer state.
348 */
349DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
350{
351 /*
352 * Attempt state change.
353 */
354 bool fRc;
355 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
356 return fRc;
357}
358
359
360/**
361 * Links the timer onto the scheduling queue.
362 *
363 * @param pQueue The timer queue the timer belongs to.
364 * @param pTimer The timer.
365 *
366 * @todo FIXME: Look into potential race with the thread running the queues
367 * and stuff.
368 */
369DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
370{
371 Assert(!pTimer->offScheduleNext);
372 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
373 int32_t offHead;
374 do
375 {
376 offHead = pQueue->offSchedule;
377 if (offHead)
378 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
379 else
380 pTimer->offScheduleNext = 0;
381 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
382}
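/*
 * Illustrative sketch, example only: the compare-and-swap push above pairs
 * with a consumer that detaches the whole LIFO in a single atomic exchange,
 * which is roughly what tmTimerQueueSchedule does before processing the
 * entries.  Note that offSchedule is queue-relative while offScheduleNext
 * is timer-relative.
 */
#if 0 /* example only */
int32_t  off  = ASMAtomicXchgS32(&pQueue->offSchedule, 0);       /* detach the whole list */
PTMTIMER pCur = off ? (PTMTIMER)((intptr_t)pQueue + off) : NULL;
while (pCur)
{
    PTMTIMER pNext = pCur->offScheduleNext
                   ? (PTMTIMER)((intptr_t)pCur + pCur->offScheduleNext)
                   : NULL;
    /* ... move pCur onto the active list or drop it ... */
    pCur = pNext;
}
#endif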
383
384
385/**
386 * Try to change the state to enmStateNew from enmStateOld
387 * and link the timer into the scheduling queue.
388 *
389 * @returns Success indicator.
390 * @param pTimer Timer in question.
391 * @param enmStateNew The new timer state.
392 * @param enmStateOld The old timer state.
393 */
394DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
395{
396 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
397 {
398 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
399 return true;
400 }
401 return false;
402}
403
404
405#ifdef VBOX_HIGH_RES_TIMERS_HACK
406
407/**
408 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
409 * EMT is polling.
410 *
411 * @returns See tmTimerPollInternal.
412 * @param pVM Pointer to the shared VM structure.
413 * @param u64Now Current virtual clock timestamp.
414 * @param u64Delta The delta to the next event in ticks of the
415 * virtual clock.
416 * @param pu64Delta Where to return the delta.
417 * (Statistics are updated by the caller.)
418 */
419DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
420{
421 Assert(!(u64Delta & RT_BIT_64(63)));
422
423 if (!pVM->tm.s.fVirtualWarpDrive)
424 {
425 *pu64Delta = u64Delta;
426 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
427 }
428
429 /*
430 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
431 */
432 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
433 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
434
435 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
436 u64GipTime -= u64Start; /* the start is GIP time. */
437 if (u64GipTime >= u64Delta)
438 {
439 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
440 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
441 }
442 else
443 {
444 u64Delta -= u64GipTime;
445 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
446 u64Delta += u64GipTime;
447 }
448 *pu64Delta = u64Delta;
449 u64GipTime += u64Start;
450 return u64GipTime;
451}
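/*
 * Worked example, illustrative: with a 200% warp drive (u32Pct = 200) the
 * virtual clock runs twice as fast as the GIP, so the reverse mapping above
 * scales by 100/u32Pct.  E.g. a 1000000 tick virtual-time wait falling
 * entirely after the warp start becomes 500000 ns of GIP time before
 * u64Start is added back on.
 */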
452
453
454/**
455 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
456 * than the one dedicated to timer work.
457 *
458 * @returns See tmTimerPollInternal.
459 * @param pVM Pointer to the shared VM structure.
460 * @param u64Now Current virtual clock timestamp.
461 * @param pu64Delta Where to return the delta.
462 */
463DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
464{
465 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
466 *pu64Delta = s_u64OtherRet;
467 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
468}
469
470
471/**
472 * Worker for tmTimerPollInternal.
473 *
474 * @returns See tmTimerPollInternal.
475 * @param pVM Pointer to the shared VM structure.
476 * @param pVCpu Pointer to the shared VMCPU structure of the
477 * caller.
478 * @param pVCpuDst Pointer to the shared VMCPU structure of the
479 * dedicated timer EMT.
480 * @param u64Now Current virtual clock timestamp.
481 * @param pu64Delta Where to return the delta.
482 * @param pCounter The statistics counter to update.
483 */
484DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
485 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
486{
487 STAM_COUNTER_INC(pCounter);
488 if (pVCpuDst != pVCpu)
489 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
490 *pu64Delta = 0;
491 return 0;
492}
493
494/**
495 * Common worker for TMTimerPollGIP and TMTimerPoll.
496 *
497 * This function is called before FFs are checked in the inner execution EM loops.
498 *
499 * @returns The GIP timestamp of the next event.
500 * 0 if the next event has already expired.
501 *
502 * @param pVM Pointer to the shared VM structure.
503 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
504 * @param pu64Delta Where to store the delta.
505 *
506 * @thread The emulation thread.
507 *
508 * @remarks GIP uses ns ticks.
509 */
510DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
511{
512 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
513 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
514 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
515
516 /*
517 * Return straight away if the timer FF is already set ...
518 */
519 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
520 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
521
522 /*
523 * ... or if timers are being run.
524 */
525 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
526 {
527 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
528 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
529 }
530
531 /*
532 * Check for TMCLOCK_VIRTUAL expiration.
533 */
534 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
535 const int64_t i64Delta1 = u64Expire1 - u64Now;
536 if (i64Delta1 <= 0)
537 {
538 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
539 {
540 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
541 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
542#ifdef IN_RING3
543 REMR3NotifyTimerPending(pVM, pVCpuDst);
544#endif
545 }
546 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
547 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
548 }
549
550 /*
551 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
552 * This isn't quite as straightforward when in a catch-up: not only do
553 * we have to adjust 'now', but we have to adjust the delta as well.
554 */
555
556 /*
557 * Optimistic lockless approach.
558 */
559 uint64_t u64VirtualSyncNow;
560 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
561 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
562 {
563 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
564 {
565 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
566 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
567 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
568 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
569 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
570 {
571 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
572 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
573 if (i64Delta2 > 0)
574 {
575 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
576 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
577
578 if (pVCpu == pVCpuDst)
579 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
580 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
581 }
582
583 if ( !pVM->tm.s.fRunningQueues
584 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
585 {
586 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
587 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
588#ifdef IN_RING3
589 REMR3NotifyTimerPending(pVM, pVCpuDst);
590#endif
591 }
592
593 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
594 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
595 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
596 }
597 }
598 }
599 else
600 {
601 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
602 LogFlow(("TMTimerPoll: stopped\n"));
603 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
604 }
605
606 /*
607 * Complicated lockless approach.
608 */
609 uint64_t off;
610 uint32_t u32Pct = 0;
611 bool fCatchUp;
612 int cOuterTries = 42;
613 for (;; cOuterTries--)
614 {
615 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
616 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
617 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
618 if (fCatchUp)
619 {
620 /* No changes allowed, try get a consistent set of parameters. */
621 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
622 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
623 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
624 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
625 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
626 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
627 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
628 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
629 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
630 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
631 || cOuterTries <= 0)
632 {
633 uint64_t u64Delta = u64Now - u64Prev;
634 if (RT_LIKELY(!(u64Delta >> 32)))
635 {
636 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
637 if (off > u64Sub + offGivenUp)
638 off -= u64Sub;
639 else /* we've completely caught up. */
640 off = offGivenUp;
641 }
642 else
643 /* More than 4 seconds since last time (or negative), ignore it. */
644 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
645
646 /* Check that we're still running and in catch up. */
647 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
648 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
649 break;
650 }
651 }
652 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
653 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
654 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
655 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
656 break; /* Got a consistent offset */
657
658 /* Repeat the initial checks before iterating. */
659 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
660 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
661 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
662 {
663 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
664 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
665 }
666 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
667 {
668 LogFlow(("TMTimerPoll: stopped\n"));
669 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
670 }
671 if (cOuterTries <= 0)
672 break; /* that's enough */
673 }
674 if (cOuterTries <= 0)
675 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
676 u64VirtualSyncNow = u64Now - off;
677
678 /* Calc delta and see if we've got a virtual sync hit. */
679 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
680 if (i64Delta2 <= 0)
681 {
682 if ( !pVM->tm.s.fRunningQueues
683 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
684 {
685 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
686 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
687#ifdef IN_RING3
688 REMR3NotifyTimerPending(pVM, pVCpuDst);
689#endif
690 }
691 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
692 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
693 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
694 }
695
696 /*
697 * Return the time left to the next event.
698 */
699 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
700 if (pVCpu == pVCpuDst)
701 {
702 if (fCatchUp)
703 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
704 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
705 }
706 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
707}
708
709
710/**
711 * Set FF if we've passed the next virtual event.
712 *
713 * This function is called before FFs are checked in the inner execution EM loops.
714 *
715 * @returns true if timers are pending, false if not.
716 *
717 * @param pVM Pointer to the shared VM structure.
718 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
719 * @thread The emulation thread.
720 */
721VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
722{
723 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
724 uint64_t off = 0;
725 tmTimerPollInternal(pVM, pVCpu, &off);
726 return off == 0;
727}
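/*
 * Usage sketch, illustrative and simplified: an emulation loop would poll
 * before checking forced actions and can block for the returned delta when
 * nothing is pending.  The two helpers in the body are placeholders, not
 * real VMM functions.
 */
#if 0 /* example only */
uint64_t cNsDelta;
uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &cNsDelta);
if (!cNsDelta)
    emRunPendingTimers(pVM, pVCpu);     /* placeholder: service VMCPU_FF_TIMER */
else
    emHaltUntilGip(pVCpu, u64GipTime);  /* placeholder: block until the next event */
#endif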
728
729
730/**
731 * Set FF if we've passed the next virtual event.
732 *
733 * This function is called before FFs are checked in the inner execution EM loops.
734 *
735 * @param pVM Pointer to the shared VM structure.
736 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
737 * @thread The emulation thread.
738 */
739VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
740{
741 uint64_t off;
742 tmTimerPollInternal(pVM, pVCpu, &off);
743}
744
745
746/**
747 * Set FF if we've passed the next virtual event.
748 *
749 * This function is called before FFs are checked in the inner execution EM loops.
750 *
751 * @returns The GIP timestamp of the next event.
752 * 0 if the next event has already expired.
753 * @param pVM Pointer to the shared VM structure.
754 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
755 * @param pu64Delta Where to store the delta.
756 * @thread The emulation thread.
757 */
758VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
759{
760 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
761}
762
763#endif /* VBOX_HIGH_RES_TIMERS_HACK */
764
765/**
766 * Gets the host context ring-3 pointer of the timer.
767 *
768 * @returns HC R3 pointer.
769 * @param pTimer Timer handle as returned by one of the create functions.
770 */
771VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
772{
773 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
774}
775
776
777/**
778 * Gets the host context ring-0 pointer of the timer.
779 *
780 * @returns HC R0 pointer.
781 * @param pTimer Timer handle as returned by one of the create functions.
782 */
783VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
784{
785 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
786}
787
788
789/**
790 * Gets the RC pointer of the timer.
791 *
792 * @returns RC pointer.
793 * @param pTimer Timer handle as returned by one of the create functions.
794 */
795VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
796{
797 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
798}
799
800
801/**
802 * Links a timer into the active list of a timer queue.
803 *
804 * The caller must have taken the TM semaphore before calling this function.
805 *
806 * @param pQueue The queue.
807 * @param pTimer The timer.
808 * @param u64Expire The timer expiration time.
809 */
810DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
811{
812 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
813 if (pCur)
814 {
815 for (;; pCur = TMTIMER_GET_NEXT(pCur))
816 {
817 if (pCur->u64Expire > u64Expire)
818 {
819 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
820 TMTIMER_SET_NEXT(pTimer, pCur);
821 TMTIMER_SET_PREV(pTimer, pPrev);
822 if (pPrev)
823 TMTIMER_SET_NEXT(pPrev, pTimer);
824 else
825 {
826 TMTIMER_SET_HEAD(pQueue, pTimer);
827 pQueue->u64Expire = u64Expire;
828 }
829 TMTIMER_SET_PREV(pCur, pTimer);
830 return;
831 }
832 if (!pCur->offNext)
833 {
834 TMTIMER_SET_NEXT(pCur, pTimer);
835 TMTIMER_SET_PREV(pTimer, pCur);
836 return;
837 }
838 }
839 }
840 else
841 {
842 TMTIMER_SET_HEAD(pQueue, pTimer);
843 pQueue->u64Expire = u64Expire;
844 }
845}
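/*
 * Note, illustrative: the active list is kept sorted by u64Expire and the
 * head's expire time is cached in pQueue->u64Expire, so insertion above is
 * a linear scan while expiry checks only need an O(1) read of the cached
 * head value.
 */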
846
847
848/**
849 * Optimized TMTimerSet code path for starting an inactive timer.
850 *
851 * @returns VBox status code.
852 *
853 * @param pVM The VM handle.
854 * @param pTimer The timer handle.
855 * @param u64Expire The new expire time.
856 */
857static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
858{
859 Assert(!pTimer->offPrev);
860 Assert(!pTimer->offNext);
861 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
862
863 /*
864 * Calculate and set the expiration time.
865 */
866 pTimer->u64Expire = u64Expire;
867 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
868
869 /*
870 * Link the timer into the active list.
871 */
872 TMCLOCK const enmClock = pTimer->enmClock;
873 tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
874
875 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
876 tmTimerUnlock(pVM);
877 return VINF_SUCCESS;
878}
879
880
881
882
883
884/**
885 * Arm a timer with a (new) expire time.
886 *
887 * @returns VBox status.
888 * @param pTimer Timer handle as returned by one of the create functions.
889 * @param u64Expire New expire time.
890 */
891VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
892{
893 PVM pVM = pTimer->CTX_SUFF(pVM);
894 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
895 TMTIMER_ASSERT_CRITSECT(pTimer);
896
897#ifdef VBOX_WITH_STATISTICS
898 /* Gather optimization info. */
899 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
900 TMTIMERSTATE enmOrgState = pTimer->enmState;
901 switch (enmOrgState)
902 {
903 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
904 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
905 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
906 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
907 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
908 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
909 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
910 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
911 }
912#endif
913
914 /*
915 * The most common case is setting the timer again during the callback.
916 * The second most common case is starting a timer at some other time.
917 */
918#if 1
919 TMTIMERSTATE enmState1 = pTimer->enmState;
920 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
921 || ( enmState1 == TMTIMERSTATE_STOPPED
922 && pTimer->pCritSect))
923 {
924 /* Try to take the TM lock and check the state again. */
925 if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
926 {
927 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
928 {
929 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
930 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
931 return VINF_SUCCESS;
932 }
933 tmTimerUnlock(pVM);
934 }
935 }
936#endif
937
938 /*
939 * Unoptimized code path.
940 */
941 int cRetries = 1000;
942 do
943 {
944 /*
945 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
946 */
947 TMTIMERSTATE enmState = pTimer->enmState;
948 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
949 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
950 switch (enmState)
951 {
952 case TMTIMERSTATE_EXPIRED_DELIVER:
953 case TMTIMERSTATE_STOPPED:
954 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
955 {
956 Assert(!pTimer->offPrev);
957 Assert(!pTimer->offNext);
958 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
959 || pVM->tm.s.fVirtualSyncTicking
960 || u64Expire >= pVM->tm.s.u64VirtualSync,
961 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
962 pTimer->u64Expire = u64Expire;
963 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
964 tmSchedule(pTimer);
965 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
966 return VINF_SUCCESS;
967 }
968 break;
969
970 case TMTIMERSTATE_PENDING_SCHEDULE:
971 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
972 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
973 {
974 pTimer->u64Expire = u64Expire;
975 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
976 tmSchedule(pTimer);
977 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
978 return VINF_SUCCESS;
979 }
980 break;
981
982
983 case TMTIMERSTATE_ACTIVE:
984 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
985 {
986 pTimer->u64Expire = u64Expire;
987 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
988 tmSchedule(pTimer);
989 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
990 return VINF_SUCCESS;
991 }
992 break;
993
994 case TMTIMERSTATE_PENDING_RESCHEDULE:
995 case TMTIMERSTATE_PENDING_STOP:
996 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
997 {
998 pTimer->u64Expire = u64Expire;
999 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1000 tmSchedule(pTimer);
1001 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1002 return VINF_SUCCESS;
1003 }
1004 break;
1005
1006
1007 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1008 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1009 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1010#ifdef IN_RING3
1011 if (!RTThreadYield())
1012 RTThreadSleep(1);
1013#else
1014/** @todo call host context and yield after a couple of iterations */
1015#endif
1016 break;
1017
1018 /*
1019 * Invalid states.
1020 */
1021 case TMTIMERSTATE_DESTROY:
1022 case TMTIMERSTATE_FREE:
1023 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1024 return VERR_TM_INVALID_STATE;
1025 default:
1026 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1027 return VERR_TM_UNKNOWN_STATE;
1028 }
1029 } while (cRetries-- > 0);
1030
1031 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1032 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1033 return VERR_INTERNAL_ERROR;
1034}
1035
1036
1037/**
1038 * Return the current time for the specified clock, setting pu64Now if not NULL.
1039 *
1040 * @returns Current time.
1041 * @param pVM The VM handle.
1042 * @param enmClock The clock to query.
1043 * @param pu64Now Optional pointer where to store the return time
1044 */
1045DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1046{
1047 uint64_t u64Now;
1048 switch (enmClock)
1049 {
1050 case TMCLOCK_VIRTUAL_SYNC:
1051 u64Now = TMVirtualSyncGet(pVM);
1052 break;
1053 case TMCLOCK_VIRTUAL:
1054 u64Now = TMVirtualGet(pVM);
1055 break;
1056 case TMCLOCK_REAL:
1057 u64Now = TMRealGet(pVM);
1058 break;
1059 default:
1060 AssertFatalMsgFailed(("%d\n", enmClock));
1061 }
1062
1063 if (pu64Now)
1064 *pu64Now = u64Now;
1065 return u64Now;
1066}
1067
1068
1069/**
1070 * Optimized TMTimerSetRelative code path.
1071 *
1072 * @returns VBox status code.
1073 *
1074 * @param pVM The VM handle.
1075 * @param pTimer The timer handle.
1076 * @param cTicksToNext Clock ticks until the next time expiration.
1077 * @param pu64Now Where to return the current time stamp used.
1078 * Optional.
1079 */
1080static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1081{
1082 Assert(!pTimer->offPrev);
1083 Assert(!pTimer->offNext);
1084 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1085
1086 /*
1087 * Calculate and set the expiration time.
1088 */
1089 TMCLOCK const enmClock = pTimer->enmClock;
1090 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1091 pTimer->u64Expire = u64Expire;
1092 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));
1093
1094 /*
1095 * Link the timer into the active list.
1096 */
1097 tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1098
1099 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1100 tmTimerUnlock(pVM);
1101 return VINF_SUCCESS;
1102}
1103
1104
1105/**
1106 * Arm a timer with an expire time relative to the current time.
1107 *
1108 * @returns VBox status.
1109 * @param pTimer Timer handle as returned by one of the create functions.
1110 * @param cTicksToNext Clock ticks until the next time expiration.
1111 * @param pu64Now Where to return the current time stamp used.
1112 * Optional.
1113 */
1114VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1115{
1116 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1117 TMTIMER_ASSERT_CRITSECT(pTimer);
1118 PVM pVM = pTimer->CTX_SUFF(pVM);
1119 int rc;
1120
1121#ifdef VBOX_WITH_STATISTICS
1122 /* Gather optimization info. */
1123 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1124 TMTIMERSTATE enmOrgState = pTimer->enmState;
1125 switch (enmOrgState)
1126 {
1127 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1128 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1129 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1130 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1131 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1132 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1133 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1134 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1135 }
1136#endif
1137
1138 /*
1139 * Try to take the TM lock and optimize the common cases.
1140 *
1141 * With the TM lock we can safely make optimizations like immediate
1142 * scheduling and we can also be 100% sure that we're not racing the
1143 * running of the timer queues. As an additional restraint we require the
1144 * timer to have a critical section associated with it so we can be 100%
1145 * sure there are no concurrent operations on the timer. (The latter isn't
1146 * necessary any longer as this isn't supported for any timers, critsect or not.)
1147 *
1148 * Note! Lock ordering doesn't apply when we only try to
1149 * get the innermost locks.
1150 */
1151 bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
1152#if 1
1153 if ( fOwnTMLock
1154 && pTimer->pCritSect)
1155 {
1156 TMTIMERSTATE enmState = pTimer->enmState;
1157 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1158 || enmState == TMTIMERSTATE_STOPPED)
1159 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1160 {
1161 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
1162 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1163 return VINF_SUCCESS;
1164 }
1165
1166 /* Optimize other states when it becomes necessary. */
1167 }
1168#endif
1169
1170 /*
1171 * Unoptimized path.
1172 */
1173 TMCLOCK const enmClock = pTimer->enmClock;
1174 bool fOwnVirtSyncLock;
1175 fOwnVirtSyncLock = !fOwnTMLock
1176 && enmClock == TMCLOCK_VIRTUAL_SYNC
1177 && RT_SUCCESS(tmVirtualSyncTryLock(pVM));
1178 for (int cRetries = 1000; ; cRetries--)
1179 {
1180 /*
1181 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1182 */
1183 TMTIMERSTATE enmState = pTimer->enmState;
1184 switch (enmState)
1185 {
1186 case TMTIMERSTATE_STOPPED:
1187 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1188 {
1189 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1190 * Figure a safe way of activating this timer while the queue is
1191 * being run.
1192 * (99.9% sure that the assertion is caused by DevAPIC.cpp
1193 * re-starting the timer in response to an initial_count write.) */
1194 }
1195 /* fall thru */
1196 case TMTIMERSTATE_EXPIRED_DELIVER:
1197 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1198 {
1199 Assert(!pTimer->offPrev);
1200 Assert(!pTimer->offNext);
1201 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1202 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1203 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1204 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1205 tmSchedule(pTimer);
1206 rc = VINF_SUCCESS;
1207 break;
1208 }
1209 rc = VERR_TRY_AGAIN;
1210 break;
1211
1212 case TMTIMERSTATE_PENDING_SCHEDULE:
1213 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1214 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1215 {
1216 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1217 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1218 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1219 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1220 tmSchedule(pTimer);
1221 rc = VINF_SUCCESS;
1222 break;
1223 }
1224 rc = VERR_TRY_AGAIN;
1225 break;
1226
1227
1228 case TMTIMERSTATE_ACTIVE:
1229 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1230 {
1231 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1232 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1233 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1234 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1235 tmSchedule(pTimer);
1236 rc = VINF_SUCCESS;
1237 break;
1238 }
1239 rc = VERR_TRY_AGAIN;
1240 break;
1241
1242 case TMTIMERSTATE_PENDING_RESCHEDULE:
1243 case TMTIMERSTATE_PENDING_STOP:
1244 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1245 {
1246 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1247 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1248 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1249 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1250 tmSchedule(pTimer);
1251 rc = VINF_SUCCESS;
1252 break;
1253 }
1254 rc = VERR_TRY_AGAIN;
1255 break;
1256
1257
1258 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1259 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1260 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1261#ifdef IN_RING3
1262 if (!RTThreadYield())
1263 RTThreadSleep(1);
1264#else
1265/** @todo call host context and yield after a couple of iterations */
1266#endif
1267 rc = VERR_TRY_AGAIN;
1268 break;
1269
1270 /*
1271 * Invalid states.
1272 */
1273 case TMTIMERSTATE_DESTROY:
1274 case TMTIMERSTATE_FREE:
1275 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1276 rc = VERR_TM_INVALID_STATE;
1277 break;
1278
1279 default:
1280 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1281 rc = VERR_TM_UNKNOWN_STATE;
1282 break;
1283 }
1284
1285 /* switch + loop is tedious to break out of. */
1286 if (rc == VINF_SUCCESS)
1287 break;
1288
1289 if (rc != VERR_TRY_AGAIN)
1290 {
1291 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1292 break;
1293 }
1294 if (cRetries <= 0)
1295 {
1296 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1297 rc = VERR_INTERNAL_ERROR;
1298 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1299 break;
1300 }
1301
1302 /*
1303 * Retry to gain locks.
1304 */
1305 if (!fOwnTMLock)
1306 {
1307 fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
1308 if ( !fOwnTMLock
1309 && enmClock == TMCLOCK_VIRTUAL_SYNC
1310 && !fOwnVirtSyncLock)
1311 fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM));
1312 }
1313
1314 } /* for (;;) */
1315
1316 /*
1317 * Clean up and return.
1318 */
1319 if (fOwnVirtSyncLock)
1320 tmVirtualSyncUnlock(pVM);
1321 if (fOwnTMLock)
1322 tmTimerUnlock(pVM);
1323
1324 if ( !fOwnTMLock
1325 && !fOwnVirtSyncLock
1326 && enmClock == TMCLOCK_VIRTUAL_SYNC)
1327 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync);
1328
1329 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1330 return rc;
1331}
1332
1333
1334/**
1335 * Arm a timer with a (new) expire time relative to the current time.
1336 *
1337 * @returns VBox status.
1338 * @param pTimer Timer handle as returned by one of the create functions.
1339 * @param cMilliesToNext Number of milliseconds to the next tick.
1340 */
1341VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
1342{
1343 PVM pVM = pTimer->CTX_SUFF(pVM);
1344 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1345
1346 switch (pTimer->enmClock)
1347 {
1348 case TMCLOCK_VIRTUAL:
1349 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1350 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1351
1352 case TMCLOCK_VIRTUAL_SYNC:
1353 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1354 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1355
1356 case TMCLOCK_REAL:
1357 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1358 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
1359
1360 default:
1361 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1362 return VERR_INTERNAL_ERROR;
1363 }
1364}
1365
1366
1367/**
1368 * Arm a timer with a (new) expire time relative to the current time.
1369 *
1370 * @returns VBox status.
1371 * @param pTimer Timer handle as returned by one of the create functions.
1372 * @param cMicrosToNext Number of microseconds to the next tick.
1373 */
1374VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
1375{
1376 PVM pVM = pTimer->CTX_SUFF(pVM);
1377 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1378
1379 switch (pTimer->enmClock)
1380 {
1381 case TMCLOCK_VIRTUAL:
1382 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1383 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1384
1385 case TMCLOCK_VIRTUAL_SYNC:
1386 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1387 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1388
1389 case TMCLOCK_REAL:
1390 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1391 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
1392
1393 default:
1394 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1395 return VERR_INTERNAL_ERROR;
1396 }
1397}
1398
1399
1400/**
1401 * Arm a timer with a (new) expire time relative to the current time.
1402 *
1403 * @returns VBox status.
1404 * @param pTimer Timer handle as returned by one of the create functions.
1405 * @param cNanosToNext Number of nanoseconds to the next tick.
1406 */
1407VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
1408{
1409 PVM pVM = pTimer->CTX_SUFF(pVM);
1410 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1411
1412 switch (pTimer->enmClock)
1413 {
1414 case TMCLOCK_VIRTUAL:
1415 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1416 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1417
1418 case TMCLOCK_VIRTUAL_SYNC:
1419 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1420 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1421
1422 case TMCLOCK_REAL:
1423 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1424 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
1425
1426 default:
1427 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1428 return VERR_INTERNAL_ERROR;
1429 }
1430}
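/*
 * Usage sketch, illustrative: as the comment in TMTimerSet notes, the most
 * common pattern is re-arming a timer from inside its own callback, e.g. a
 * device emulating a 100 Hz periodic tick.  The callback name below is
 * hypothetical and its signature only approximated.
 */
#if 0 /* example only */
static DECLCALLBACK(void) devPeriodicTimerCallback(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    /* ... raise the device interrupt ... */
    TMTimerSetMillies(pTimer, 10);      /* re-arm 10 ms (100 Hz) from now */
}
#endif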
1431
1432
1433/**
1434 * Drops a hint about the frequency of the timer.
1435 *
1436 * This is used by TM and the VMM to calculate how often guest execution needs
1437 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1438 *
1439 * @returns VBox status code.
1440 * @param pTimer Timer handle as returned by one of the create
1441 * functions.
1442 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1443 *
1444 * @remarks We're using an integer hertz value here since anything above 1 Hz
1445 * is not going to be any trouble to satisfy scheduling-wise. The
1446 * range where it makes sense is >= 100 Hz.
1447 */
1448VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1449{
1450 TMTIMER_ASSERT_CRITSECT(pTimer);
1451
1452 uint32_t const uHzOldHint = pTimer->uHzHint;
1453 pTimer->uHzHint = uHzHint;
1454
1455 PVM pVM = pTimer->CTX_SUFF(pVM);
1456 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1457 if ( uHzHint > uMaxHzHint
1458 || uHzOldHint >= uMaxHzHint)
1459 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1460
1461 return VINF_SUCCESS;
1462}
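/*
 * Usage sketch, illustrative: a device arming a high-frequency periodic
 * timer would drop a matching hint so TM/VMM can size the preemption
 * timer; the hint is cleared again automatically by TMTimerStop.
 */
#if 0 /* example only */
TMTimerSetFrequencyHint(pTimer, 1000);  /* expecting ~1000 Hz reprogramming */
TMTimerSetMillies(pTimer, 1);           /* first tick in 1 ms */
#endif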
1463
1464
1465/**
1466 * Stop the timer.
1467 * Use TMR3TimerArm() to "un-stop" the timer.
1468 *
1469 * @returns VBox status.
1470 * @param pTimer Timer handle as returned by one of the create functions.
1471 */
1472VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1473{
1474 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1475 TMTIMER_ASSERT_CRITSECT(pTimer);
1476
1477 /* Reset the HZ hint. */
1478 if (pTimer->uHzHint)
1479 {
1480 PVM pVM = pTimer->CTX_SUFF(pVM);
1481 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1482 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1483 pTimer->uHzHint = 0;
1484 }
1485
1486 /** @todo see if this function needs optimizing. */
1487 int cRetries = 1000;
1488 do
1489 {
1490 /*
1491 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1492 */
1493 TMTIMERSTATE enmState = pTimer->enmState;
1494 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1495 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1496 switch (enmState)
1497 {
1498 case TMTIMERSTATE_EXPIRED_DELIVER:
1499 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1500 return VERR_INVALID_PARAMETER;
1501
1502 case TMTIMERSTATE_STOPPED:
1503 case TMTIMERSTATE_PENDING_STOP:
1504 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1505 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1506 return VINF_SUCCESS;
1507
1508 case TMTIMERSTATE_PENDING_SCHEDULE:
1509 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1510 {
1511 tmSchedule(pTimer);
1512 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1513 return VINF_SUCCESS;
1514 }
1515
1516 case TMTIMERSTATE_PENDING_RESCHEDULE:
1517 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1518 {
1519 tmSchedule(pTimer);
1520 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1521 return VINF_SUCCESS;
1522 }
1523 break;
1524
1525 case TMTIMERSTATE_ACTIVE:
1526 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1527 {
1528 tmSchedule(pTimer);
1529 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1530 return VINF_SUCCESS;
1531 }
1532 break;
1533
1534 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1535 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1536 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1537#ifdef IN_RING3
1538 if (!RTThreadYield())
1539 RTThreadSleep(1);
1540#else
1541/** @todo call host and yield cpu after a while. */
1542#endif
1543 break;
1544
1545 /*
1546 * Invalid states.
1547 */
1548 case TMTIMERSTATE_DESTROY:
1549 case TMTIMERSTATE_FREE:
1550 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1551 return VERR_TM_INVALID_STATE;
1552 default:
1553 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1554 return VERR_TM_UNKNOWN_STATE;
1555 }
1556 } while (cRetries-- > 0);
1557
1558 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1559 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1560 return VERR_INTERNAL_ERROR;
1561}
1562
1563
1564/**
1565 * Get the current clock time.
1566 * Handy for calculating the new expire time.
1567 *
1568 * @returns Current clock time.
1569 * @param pTimer Timer handle as returned by one of the create functions.
1570 */
1571VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1572{
1573 uint64_t u64;
1574 PVM pVM = pTimer->CTX_SUFF(pVM);
1575
1576 switch (pTimer->enmClock)
1577 {
1578 case TMCLOCK_VIRTUAL:
1579 u64 = TMVirtualGet(pVM);
1580 break;
1581 case TMCLOCK_VIRTUAL_SYNC:
1582 u64 = TMVirtualSyncGet(pVM);
1583 break;
1584 case TMCLOCK_REAL:
1585 u64 = TMRealGet(pVM);
1586 break;
1587 default:
1588 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1589 return ~(uint64_t)0;
1590 }
1591 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1592 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1593 return u64;
1594}
1595
1596
1597/**
1598 * Get the frequency of the timer clock.
1599 *
1600 * @returns Clock frequency (as Hz of course).
1601 * @param pTimer Timer handle as returned by one of the create functions.
1602 */
1603VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1604{
1605 switch (pTimer->enmClock)
1606 {
1607 case TMCLOCK_VIRTUAL:
1608 case TMCLOCK_VIRTUAL_SYNC:
1609 return TMCLOCK_FREQ_VIRTUAL;
1610
1611 case TMCLOCK_REAL:
1612 return TMCLOCK_FREQ_REAL;
1613
1614 default:
1615 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1616 return 0;
1617 }
1618}
1619
1620
1621/**
1622 * Get the current clock time as nanoseconds.
1623 *
1624 * @returns The timer clock as nanoseconds.
1625 * @param pTimer Timer handle as returned by one of the create functions.
1626 */
1627VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1628{
1629 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1630}
1631
1632
1633/**
1634 * Get the current clock time as microseconds.
1635 *
1636 * @returns The timer clock as microseconds.
1637 * @param pTimer Timer handle as returned by one of the create functions.
1638 */
1639VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1640{
1641 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1642}
1643
1644
1645/**
1646 * Get the current clock time as milliseconds.
1647 *
1648 * @returns The timer clock as milliseconds.
1649 * @param pTimer Timer handle as returned by one of the create functions.
1650 */
1651VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1652{
1653 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1654}
1655
1656
1657/**
1658 * Converts the specified timer clock time to nanoseconds.
1659 *
1660 * @returns nanoseconds.
1661 * @param pTimer Timer handle as returned by one of the create functions.
1662 * @param u64Ticks The clock ticks.
1663 * @remark There could be rounding errors here. We just do a simple integer divide
1664 * without any adjustments.
1665 */
1666VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1667{
1668 switch (pTimer->enmClock)
1669 {
1670 case TMCLOCK_VIRTUAL:
1671 case TMCLOCK_VIRTUAL_SYNC:
1672 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1673 return u64Ticks;
1674
1675 case TMCLOCK_REAL:
1676 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1677 return u64Ticks * 1000000;
1678
1679 default:
1680 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1681 return 0;
1682 }
1683}
1684
1685
1686/**
1687 * Converts the specified timer clock time to microseconds.
1688 *
1689 * @returns microseconds.
1690 * @param pTimer Timer handle as returned by one of the create functions.
1691 * @param u64Ticks The clock ticks.
1692 * @remark There could be rounding errors here. We just do a simple integer divide
1693 * without any adjustments.
1694 */
1695VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1696{
1697 switch (pTimer->enmClock)
1698 {
1699 case TMCLOCK_VIRTUAL:
1700 case TMCLOCK_VIRTUAL_SYNC:
1701 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1702 return u64Ticks / 1000;
1703
1704 case TMCLOCK_REAL:
1705 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1706 return u64Ticks * 1000;
1707
1708 default:
1709 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1710 return 0;
1711 }
1712}
1713
1714
1715/**
1716 * Converts the specified timer clock time to milliseconds.
1717 *
1718 * @returns milliseconds.
1719 * @param pTimer Timer handle as returned by one of the create functions.
1720 * @param u64Ticks The clock ticks.
1721 * @remark There could be rounding errors here. We just do a simple integer divide
1722 * without any adjustments.
1723 */
1724VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1725{
1726 switch (pTimer->enmClock)
1727 {
1728 case TMCLOCK_VIRTUAL:
1729 case TMCLOCK_VIRTUAL_SYNC:
1730 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1731 return u64Ticks / 1000000;
1732
1733 case TMCLOCK_REAL:
1734 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1735 return u64Ticks;
1736
1737 default:
1738 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1739 return 0;
1740 }
1741}
1742
1743
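/*
 * Worked example of the rounding noted in the remarks above, assuming a
 * TMCLOCK_VIRTUAL timer (1 tick = 1 ns):
 *      TMTimerToMicro(pTimer, 1999)    -> 1   (1.999 us, remainder dropped)
 *      TMTimerToMilli(pTimer, 1999999) -> 1   (1.999999 ms, remainder dropped)
 * The divisions truncate toward zero; nothing is rounded to nearest.
 */

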
1744/**
1745 * Converts the specified nanosecond timestamp to timer clock ticks.
1746 *
1747 * @returns timer clock ticks.
1748 * @param pTimer Timer handle as returned by one of the create functions.
1749 * @param u64NanoTS The nanosecond timestamp to convert.
1750 * @remark There could be rounding and overflow errors here.
1751 */
1752VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1753{
1754 switch (pTimer->enmClock)
1755 {
1756 case TMCLOCK_VIRTUAL:
1757 case TMCLOCK_VIRTUAL_SYNC:
1758 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1759 return u64NanoTS;
1760
1761 case TMCLOCK_REAL:
1762 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1763 return u64NanoTS / 1000000;
1764
1765 default:
1766 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1767 return 0;
1768 }
1769}
1770
1771
1772/**
1773 * Converts the specified microsecond timestamp to timer clock ticks.
1774 *
1775 * @returns timer clock ticks.
1776 * @param pTimer Timer handle as returned by one of the create functions.
1777 * @param u64MicroTS The microsecond timestamp to convert.
1778 * @remark There could be rounding and overflow errors here.
1779 */
1780VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1781{
1782 switch (pTimer->enmClock)
1783 {
1784 case TMCLOCK_VIRTUAL:
1785 case TMCLOCK_VIRTUAL_SYNC:
1786 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1787 return u64MicroTS * 1000;
1788
1789 case TMCLOCK_REAL:
1790 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1791 return u64MicroTS / 1000;
1792
1793 default:
1794 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1795 return 0;
1796 }
1797}
1798
1799
1800/**
1801 * Converts the specified millisecond timestamp to timer clock ticks.
1802 *
1803 * @returns timer clock ticks.
1804 * @param pTimer Timer handle as returned by one of the create functions.
1805 * @param u64MilliTS The millisecond timestamp to convert.
1806 * @remark There could be rounding and overflow errors here.
1807 */
1808VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1809{
1810 switch (pTimer->enmClock)
1811 {
1812 case TMCLOCK_VIRTUAL:
1813 case TMCLOCK_VIRTUAL_SYNC:
1814 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1815 return u64MilliTS * 1000000;
1816
1817 case TMCLOCK_REAL:
1818 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1819 return u64MilliTS;
1820
1821 default:
1822 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1823 return 0;
1824 }
1825}
1826
1827
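/*
 * Illustrative sketch (hypothetical helper, not a TM API): arming a timer
 * a number of milliseconds from now using the From* conversions above.
 * Note the overflow caveat from the remarks: for a TMCLOCK_VIRTUAL timer
 * the millisecond count is multiplied by a million before use.
 */
static int tmExampleArmInMillies(PTMTIMER pTimer, uint64_t cMillies)
{
    uint64_t const u64Expire = TMTimerGet(pTimer) + TMTimerFromMilli(pTimer, cMillies);
    return TMTimerSet(pTimer, u64Expire);
}

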
1828/**
1829 * Get the expire time of the timer.
1830 * Only valid for active timers.
1831 *
1832 * @returns Expire time of the timer.
1833 * @param pTimer Timer handle as returned by one of the create functions.
1834 */
1835VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1836{
1837 TMTIMER_ASSERT_CRITSECT(pTimer);
1838 int cRetries = 1000;
1839 do
1840 {
1841 TMTIMERSTATE enmState = pTimer->enmState;
1842 switch (enmState)
1843 {
1844 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1845 case TMTIMERSTATE_EXPIRED_DELIVER:
1846 case TMTIMERSTATE_STOPPED:
1847 case TMTIMERSTATE_PENDING_STOP:
1848 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1849 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1850 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1851 return ~(uint64_t)0;
1852
1853 case TMTIMERSTATE_ACTIVE:
1854 case TMTIMERSTATE_PENDING_RESCHEDULE:
1855 case TMTIMERSTATE_PENDING_SCHEDULE:
1856 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1857 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1858 return pTimer->u64Expire;
1859
1860 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1861 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1862#ifdef IN_RING3
1863 if (!RTThreadYield())
1864 RTThreadSleep(1);
1865#endif
1866 break;
1867
1868 /*
1869 * Invalid states.
1870 */
1871 case TMTIMERSTATE_DESTROY:
1872 case TMTIMERSTATE_FREE:
1873 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1874 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1875 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1876 return ~(uint64_t)0;
1877 default:
1878 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1879 return ~(uint64_t)0;
1880 }
1881 } while (cRetries-- > 0);
1882
1883 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1884 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1885 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1886 return ~(uint64_t)0;
1887}
1888
1889
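/*
 * Illustrative sketch (hypothetical helper, not a TM API): nanoseconds
 * left until an active timer fires. TMTimerGetExpire returns ~0 for
 * timers that are stopped or being destroyed, so that is passed along.
 */
static uint64_t tmExampleNanoToExpire(PTMTIMER pTimer)
{
    uint64_t const u64Expire = TMTimerGetExpire(pTimer);
    if (u64Expire == ~(uint64_t)0)
        return ~(uint64_t)0;
    uint64_t const u64Now = TMTimerGet(pTimer);
    return u64Now < u64Expire ? TMTimerToNano(pTimer, u64Expire - u64Now) : 0;
}

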
1890/**
1891 * Checks if a timer is active or not.
1892 *
1893 * @returns True if active.
1894 * @returns False if not active.
1895 * @param pTimer Timer handle as returned by one of the create functions.
1896 */
1897VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1898{
1899 TMTIMERSTATE enmState = pTimer->enmState;
1900 switch (enmState)
1901 {
1902 case TMTIMERSTATE_STOPPED:
1903 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1904 case TMTIMERSTATE_EXPIRED_DELIVER:
1905 case TMTIMERSTATE_PENDING_STOP:
1906 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1907 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1908 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1909 return false;
1910
1911 case TMTIMERSTATE_ACTIVE:
1912 case TMTIMERSTATE_PENDING_RESCHEDULE:
1913 case TMTIMERSTATE_PENDING_SCHEDULE:
1914 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1915 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1916 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1917 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1918 return true;
1919
1920 /*
1921 * Invalid states.
1922 */
1923 case TMTIMERSTATE_DESTROY:
1924 case TMTIMERSTATE_FREE:
1925 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1926 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1927 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1928 return false;
1929 default:
1930 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1931 return false;
1932 }
1933}
1934
1935
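/*
 * Illustrative sketch (hypothetical helper, not a TM API): the common
 * "disarm if armed" pattern built on TMTimerIsActive. The state may
 * still change between the check and the stop, so the outcome of the
 * stop should not be taken for granted.
 */
static void tmExampleDisarm(PTMTIMER pTimer)
{
    if (TMTimerIsActive(pTimer))
    {
        int rc = TMTimerStop(pTimer);
        NOREF(rc); /* the stop may race a concurrent expiry */
    }
}

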
1936/**
1937 * Convert state to string.
1938 *
1939 * @returns Read-only state name.
1940 * @param enmState State.
1941 */
1942const char *tmTimerState(TMTIMERSTATE enmState)
1943{
1944 switch (enmState)
1945 {
1946#define CASE(num, state) \
1947 case TMTIMERSTATE_##state: \
1948 AssertCompile(TMTIMERSTATE_##state == (num)); \
1949 return #num "-" #state
1950 CASE( 1,STOPPED);
1951 CASE( 2,ACTIVE);
1952 CASE( 3,EXPIRED_GET_UNLINK);
1953 CASE( 4,EXPIRED_DELIVER);
1954 CASE( 5,PENDING_STOP);
1955 CASE( 6,PENDING_STOP_SCHEDULE);
1956 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
1957 CASE( 8,PENDING_SCHEDULE);
1958 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
1959 CASE(10,PENDING_RESCHEDULE);
1960 CASE(11,DESTROY);
1961 CASE(12,FREE);
1962 default:
1963 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
1964 return "Invalid state!";
1965#undef CASE
1966 }
1967}
1968
1969
1970/**
1971 * Schedules the given timer on the given queue.
1972 *
1973 * @param pQueue The timer queue.
1974 * @param pTimer The timer that needs scheduling.
1975 *
1976 * @remarks Called while owning the lock.
1977 */
1978DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
1979{
1980 /*
1981 * Processing.
1982 */
1983 unsigned cRetries = 2;
1984 do
1985 {
1986 TMTIMERSTATE enmState = pTimer->enmState;
1987 switch (enmState)
1988 {
1989 /*
1990 * Reschedule timer (in the active list).
1991 */
1992 case TMTIMERSTATE_PENDING_RESCHEDULE:
1993 {
1994 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
1995 break; /* retry */
1996
1997 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1998 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1999 if (pPrev)
2000 TMTIMER_SET_NEXT(pPrev, pNext);
2001 else
2002 {
2003 TMTIMER_SET_HEAD(pQueue, pNext);
2004 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
2005 }
2006 if (pNext)
2007 TMTIMER_SET_PREV(pNext, pPrev);
2008 pTimer->offNext = 0;
2009 pTimer->offPrev = 0;
2010 /* fall thru */
2011 }
2012
2013 /*
2014 * Schedule timer (insert into the active list).
2015 */
2016 case TMTIMERSTATE_PENDING_SCHEDULE:
2017 {
2018 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
2019 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
2020 break; /* retry */
2021
2022 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
2023 if (pCur)
2024 {
2025 const uint64_t u64Expire = pTimer->u64Expire;
2026 for (;; pCur = TMTIMER_GET_NEXT(pCur))
2027 {
2028 if (pCur->u64Expire > u64Expire)
2029 {
2030 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
2031 TMTIMER_SET_NEXT(pTimer, pCur);
2032 TMTIMER_SET_PREV(pTimer, pPrev);
2033 if (pPrev)
2034 TMTIMER_SET_NEXT(pPrev, pTimer);
2035 else
2036 {
2037 TMTIMER_SET_HEAD(pQueue, pTimer);
2038 pQueue->u64Expire = u64Expire;
2039 }
2040 TMTIMER_SET_PREV(pCur, pTimer);
2041 return;
2042 }
2043 if (!pCur->offNext)
2044 {
2045 TMTIMER_SET_NEXT(pCur, pTimer);
2046 TMTIMER_SET_PREV(pTimer, pCur);
2047 return;
2048 }
2049 }
2050 }
2051 else
2052 {
2053 TMTIMER_SET_HEAD(pQueue, pTimer);
2054 pQueue->u64Expire = pTimer->u64Expire;
2055 }
2056 return;
2057 }
2058
2059 /*
2060 * Stop the timer in active list.
2061 */
2062 case TMTIMERSTATE_PENDING_STOP:
2063 {
2064 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
2065 break; /* retry */
2066
2067 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
2068 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
2069 if (pPrev)
2070 TMTIMER_SET_NEXT(pPrev, pNext);
2071 else
2072 {
2073 TMTIMER_SET_HEAD(pQueue, pNext);
2074 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
2075 }
2076 if (pNext)
2077 TMTIMER_SET_PREV(pNext, pPrev);
2078 pTimer->offNext = 0;
2079 pTimer->offPrev = 0;
2080 /* fall thru */
2081 }
2082
2083 /*
2084 * Stop the timer (not on the active list).
2085 */
2086 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2087 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
2088 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
2089 break;
2090 return;
2091
2092 /*
2093 * The timer is pending destruction by TMR3TimerDestroy, our caller.
2094 * Nothing to do here.
2095 */
2096 case TMTIMERSTATE_DESTROY:
2097 break;
2098
2099 /*
2100 * Postpone these until they get into the right state.
2101 */
2102 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2103 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2104 tmTimerLink(pQueue, pTimer);
2105 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
2106 return;
2107
2108 /*
2109 * None of these can be in the schedule.
2110 */
2111 case TMTIMERSTATE_FREE:
2112 case TMTIMERSTATE_STOPPED:
2113 case TMTIMERSTATE_ACTIVE:
2114 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2115 case TMTIMERSTATE_EXPIRED_DELIVER:
2116 default:
2117 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
2118 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
2119 return;
2120 }
2121 } while (cRetries-- > 0);
2122}
2123
2124
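/*
 * For reference, a minimal sketch of what tmTimerTry (defined elsewhere
 * in this file) is assumed to do: a single atomic compare-and-exchange
 * of the timer state. A losing race returns false, which is what sends
 * the switch above back around its retry loop. Illustrative assumption,
 * not the actual definition.
 */
DECLINLINE(bool) tmExampleTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    return ASMAtomicCmpXchgU32((uint32_t volatile *)&pTimer->enmState,
                               (uint32_t)enmStateNew, (uint32_t)enmStateOld);
}

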
2125/**
2126 * Schedules the specified timer queue.
2127 *
2128 * @param pVM The VM to run the timers for.
2129 * @param pQueue The queue to schedule.
2130 *
2131 * @remarks Called while owning the lock.
2132 */
2133void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
2134{
2135 TM_ASSERT_LOCK(pVM);
2136
2137 /*
2138 * Dequeue the scheduling list and iterate it.
2139 */
2140 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
2141 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expire=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
2142 if (!offNext)
2143 return;
2144 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
2145 while (pNext)
2146 {
2147 /*
2148 * Unlink the head timer and find the next one.
2149 */
2150 PTMTIMER pTimer = pNext;
2151 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
2152 pTimer->offScheduleNext = 0;
2153
2154 /*
2155 * Do the scheduling.
2156 */
2157 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
2158 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
2159 tmTimerQueueScheduleOne(pQueue, pTimer);
2160 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
2161 } /* foreach timer in current schedule batch. */
2162 Log2(("tmTimerQueueSchedule: u64Expire=%'RU64\n", pQueue->u64Expire));
2163}
2164
2165
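/*
 * For reference, a sketch of the producer side feeding the offSchedule
 * list consumed above (the real linking is done by tmTimerLink elsewhere
 * in this file; this is an illustrative assumption, not its definition).
 * Timers are pushed LIFO using self-relative offsets and an atomic
 * compare-and-exchange, which is why no lock is needed to queue one.
 */
DECLINLINE(void) tmExampleLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    int32_t const offSelf = (int32_t)((intptr_t)pTimer - (intptr_t)pQueue);
    int32_t       offHead;
    do
    {
        offHead = pQueue->offSchedule;
        pTimer->offScheduleNext = offHead
                                ? (int32_t)(((intptr_t)pQueue + offHead) - (intptr_t)pTimer)
                                : 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offSelf, offHead));
}

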
2166#ifdef VBOX_STRICT
2167/**
2168 * Checks that the timer queues are sane.
2169 *
2170 * @param pVM VM handle.
 * @param pszWhere Caller tag used in the assertion messages.
2171 *
2172 * @remarks Called while owning the lock.
2173 */
2174void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
2175{
2176 TM_ASSERT_LOCK(pVM);
2177
2178 /*
2179 * Check the linking of the active lists.
2180 */
2181 for (int i = 0; i < TMCLOCK_MAX; i++)
2182 {
2183 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2184 Assert((int)pQueue->enmClock == i);
2185 PTMTIMER pPrev = NULL;
2186 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
2187 {
2188 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
2189 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
2190 TMTIMERSTATE enmState = pCur->enmState;
2191 switch (enmState)
2192 {
2193 case TMTIMERSTATE_ACTIVE:
2194 AssertMsg( !pCur->offScheduleNext
2195 || pCur->enmState != TMTIMERSTATE_ACTIVE,
2196 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
2197 break;
2198 case TMTIMERSTATE_PENDING_STOP:
2199 case TMTIMERSTATE_PENDING_RESCHEDULE:
2200 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2201 break;
2202 default:
2203 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
2204 break;
2205 }
2206 }
2207 }
2208
2209
2210# ifdef IN_RING3
2211 /*
2212 * Do the big list and check that active timers all are in the active lists.
2213 */
2214 PTMTIMERR3 pPrev = NULL;
2215 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
2216 {
2217 Assert(pCur->pBigPrev == pPrev);
2218 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
2219
2220 TMTIMERSTATE enmState = pCur->enmState;
2221 switch (enmState)
2222 {
2223 case TMTIMERSTATE_ACTIVE:
2224 case TMTIMERSTATE_PENDING_STOP:
2225 case TMTIMERSTATE_PENDING_RESCHEDULE:
2226 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2227 {
2228 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
2229 Assert(pCur->offPrev || pCur == pCurAct);
2230 while (pCurAct && pCurAct != pCur)
2231 pCurAct = TMTIMER_GET_NEXT(pCurAct);
2232 Assert(pCurAct == pCur);
2233 break;
2234 }
2235
2236 case TMTIMERSTATE_PENDING_SCHEDULE:
2237 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2238 case TMTIMERSTATE_STOPPED:
2239 case TMTIMERSTATE_EXPIRED_DELIVER:
2240 {
2241 Assert(!pCur->offNext);
2242 Assert(!pCur->offPrev);
2243 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
2244 pCurAct;
2245 pCurAct = TMTIMER_GET_NEXT(pCurAct))
2246 {
2247 Assert(pCurAct != pCur);
2248 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
2249 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
2250 }
2251 break;
2252 }
2253
2254 /* ignore */
2255 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2256 break;
2257
2258 /* shouldn't get here! */
2259 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2260 case TMTIMERSTATE_DESTROY:
2261 default:
2262 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
2263 break;
2264 }
2265 }
2266# endif /* IN_RING3 */
2267}
2268#endif /* VBOX_STRICT */
2269
2270
2271/**
2272 * Gets the current warp drive percent.
2273 *
2274 * @returns The warp drive percent.
2275 * @param pVM The VM handle.
2276 */
2277VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
2278{
2279 return pVM->tm.s.u32VirtualWarpDrivePercentage;
2280}
2281
2282
2283/**
2284 * Gets the highest frequency hint for all the important timers.
2285 *
2286 * @returns The highest frequency. 0 if no timers care.
2287 * @param pVM The VM handle.
2288 */
2289static uint32_t tmGetFrequencyHint(PVM pVM)
2290{
2291 /*
2292 * Query the value, recalculate it if necessary.
2293 *
2294 * The "right" highest frequency value isn't so important that we'll block
2295 * waiting on the timer semaphore.
2296 */
2297 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2298 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2299 {
2300 if (RT_SUCCESS(tmTimerTryLock(pVM)))
2301 {
2302 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2303
2304 /*
2305 * Loop over the timers associated with each clock.
2306 */
2307 uMaxHzHint = 0;
2308 for (int i = 0; i < TMCLOCK_MAX; i++)
2309 {
2310 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2311 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2312 {
2313 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2314 if (uHzHint > uMaxHzHint)
2315 {
2316 switch (pCur->enmState)
2317 {
2318 case TMTIMERSTATE_ACTIVE:
2319 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2320 case TMTIMERSTATE_EXPIRED_DELIVER:
2321 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2322 case TMTIMERSTATE_PENDING_SCHEDULE:
2323 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2324 case TMTIMERSTATE_PENDING_RESCHEDULE:
2325 uMaxHzHint = uHzHint;
2326 break;
2327
2328 case TMTIMERSTATE_STOPPED:
2329 case TMTIMERSTATE_PENDING_STOP:
2330 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2331 case TMTIMERSTATE_DESTROY:
2332 case TMTIMERSTATE_FREE:
2333 break;
2334 /* no default, want gcc warnings when adding more states. */
2335 }
2336 }
2337 }
2338 }
2339 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2340 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2341 tmTimerUnlock(pVM);
2342 }
2343 }
2344 return uMaxHzHint;
2345}
2346
2347
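/*
 * Illustrative sketch: how a device publishes the hint gathered above.
 * TMTimerSetFrequencyHint is assumed here to be the public setter for
 * uHzHint; a guest programming a 1 kHz periodic interrupt might be
 * mirrored like this.
 */
static void tmExampleHint1kHz(PTMTIMER pTimer)
{
    /* Assumed API: records the expected rate for tmGetFrequencyHint to find. */
    TMTimerSetFrequencyHint(pTimer, 1000 /* Hz */);
}

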
2348/**
2349 * Calculates a host timer frequency that would be suitable for the current
2350 * timer load.
2351 *
2352 * This will take the highest timer frequency, adjust for catch-up and warp
2353 * drive, and finally add a little fudge factor. The caller (VMM) will use
2354 * the result to adjust the per-CPU preemption timer.
2355 *
2356 * @returns The highest frequency. 0 if no important timers around.
2357 * @param pVM The VM handle.
2358 * @param pVCpu The current CPU.
2359 */
2360VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
2361{
2362 uint32_t uHz = tmGetFrequencyHint(pVM);
2363
2364 /* Catch-up: we have to be more aggressive than the % indicates at the
2365 beginning of the effort. */
2366 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2367 {
2368 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2369 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2370 {
2371 if (u32Pct <= 100)
2372 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2373 else if (u32Pct <= 200)
2374 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2375 else if (u32Pct <= 400)
2376 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2377 uHz *= u32Pct + 100;
2378 uHz /= 100;
2379 }
2380 }
2381
2382 /* Warp drive. */
2383 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2384 {
2385 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2386 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2387 {
2388 uHz *= u32Pct;
2389 uHz /= 100;
2390 }
2391 }
2392
2393 /* Fudge factor. */
2394 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2395 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2396 else
2397 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2398 uHz /= 100;
2399
2400 /* Make sure it isn't too high. */
2401 if (uHz > pVM->tm.s.cHostHzMax)
2402 uHz = pVM->tm.s.cHostHzMax;
2403
2404 return uHz;
2405}
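

/*
 * Worked example of the calculation above, with illustrative values for
 * the config fields: highest hint 1000 Hz; catch-up at u32Pct = 100 and
 * cPctHostHzFudgeFactorCatchUp100 = 300 gives u32Pct = 300, so
 * uHz = 1000 * (300 + 100) / 100 = 4000. No warp drive. A timer-CPU
 * fudge factor of 110 then gives 4000 * 110 / 100 = 4400 Hz, which is
 * finally clamped to cHostHzMax.
 */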