VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 37389

Last change on this file since 37389 was 37324, checked in by vboxsync, 14 years ago

TM,Devices: Fixed default critical section screwup and adjusted its usage in the devices.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 83.0 KB
Line 
1/* $Id: TMAll.cpp 37324 2011-06-03 16:28:03Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/mm.h>
25#ifdef IN_RING3
26# include <VBox/vmm/rem.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vmm/vm.h>
30
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <VBox/log.h>
34#include <VBox/sup.h>
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer.
 *
 * The assertion is waived while the VM is being created, reset, or live-reset,
 * since timers may legitimately be touched outside their critical section in
 * those states. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif
69
70
71#ifndef tmTimerLock
72
73/**
74 * Try take the timer lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
75 *
76 * @retval VINF_SUCCESS on success (always in ring-3).
77 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
78 *
79 * @param pVM The VM handle.
80 *
81 * @thread EMTs for the time being.
82 */
83int tmTimerLock(PVM pVM)
84{
85 VM_ASSERT_EMT(pVM);
86 int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
87 return rc;
88}
89
90
91/**
92 * Try take the timer lock, no waiting.
93 *
94 * @retval VINF_SUCCESS on success.
95 * @retval VERR_SEM_BUSY if busy.
96 *
97 * @param pVM The VM handle.
98 */
99int tmTimerTryLock(PVM pVM)
100{
101 int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
102 return rc;
103}
104
105
106/**
107 * Release the EMT/TM lock.
108 *
109 * @param pVM The VM handle.
110 */
111void tmTimerUnlock(PVM pVM)
112{
113 PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
114}
115
116
117/**
118 * Try take the VirtualSync lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
119 *
120 * @retval VINF_SUCCESS on success (always in ring-3).
121 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
122 *
123 * @param pVM The VM handle.
124 */
125int tmVirtualSyncLock(PVM pVM)
126{
127 VM_ASSERT_EMT(pVM);
128 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
129 return rc;
130}
131
132
133/**
134 * Try take the VirtualSync lock, no waiting.
135 *
136 * @retval VINF_SUCCESS on success.
137 * @retval VERR_SEM_BUSY if busy.
138 *
139 * @param pVM The VM handle.
140 */
141int tmVirtualSyncTryLock(PVM pVM)
142{
143 VM_ASSERT_EMT(pVM);
144 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
145 return rc;
146}
147
148
149/**
150 * Release the VirtualSync lock.
151 *
152 * @param pVM The VM handle.
153 */
154void tmVirtualSyncUnlock(PVM pVM)
155{
156 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
157}
158
159#endif /* ! macros */
160
161/**
162 * Notification that execution is about to start.
163 *
164 * This call must always be paired with a TMNotifyEndOfExecution call.
165 *
166 * The function may, depending on the configuration, resume the TSC and future
167 * clocks that only ticks when we're executing guest code.
168 *
169 * @param pVCpu The VMCPU to operate on.
170 */
171VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
172{
173 PVM pVM = pVCpu->CTX_SUFF(pVM);
174
175#ifndef VBOX_WITHOUT_NS_ACCOUNTING
176 pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
177#endif
178 if (pVM->tm.s.fTSCTiedToExecution)
179 tmCpuTickResume(pVM, pVCpu);
180}
181
182
183/**
184 * Notification that execution is about to start.
185 *
186 * This call must always be paired with a TMNotifyStartOfExecution call.
187 *
188 * The function may, depending on the configuration, suspend the TSC and future
189 * clocks that only ticks when we're executing guest code.
190 *
191 * @param pVCpu The VMCPU to operate on.
192 */
193VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
194{
195 PVM pVM = pVCpu->CTX_SUFF(pVM);
196
197 if (pVM->tm.s.fTSCTiedToExecution)
198 tmCpuTickPause(pVM, pVCpu);
199
200#ifndef VBOX_WITHOUT_NS_ACCOUNTING
201 uint64_t const u64NsTs = RTTimeNanoTS();
202 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
203 uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
204 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
205 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;
206
207# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
208 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
209 if (cNsExecutingDelta < 5000)
210 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
211 else if (cNsExecutingDelta < 50000)
212 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
213 else
214 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
215 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
216 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
217 if (cNsOtherNewDelta > 0)
218 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
219# endif
220
221 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
222 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
223 pVCpu->tm.s.cNsTotal = cNsTotalNew;
224 pVCpu->tm.s.cNsOther = cNsOtherNew;
225 pVCpu->tm.s.cPeriodsExecuting++;
226 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
227#endif
228}
229
230
231/**
232 * Notification that the cpu is entering the halt state
233 *
234 * This call must always be paired with a TMNotifyEndOfExecution call.
235 *
236 * The function may, depending on the configuration, resume the TSC and future
237 * clocks that only ticks when we're halted.
238 *
239 * @param pVCpu The VMCPU to operate on.
240 */
241VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
242{
243 PVM pVM = pVCpu->CTX_SUFF(pVM);
244
245#ifndef VBOX_WITHOUT_NS_ACCOUNTING
246 pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
247#endif
248
249 if ( pVM->tm.s.fTSCTiedToExecution
250 && !pVM->tm.s.fTSCNotTiedToHalt)
251 tmCpuTickResume(pVM, pVCpu);
252}
253
254
255/**
256 * Notification that the cpu is leaving the halt state
257 *
258 * This call must always be paired with a TMNotifyStartOfHalt call.
259 *
260 * The function may, depending on the configuration, suspend the TSC and future
261 * clocks that only ticks when we're halted.
262 *
263 * @param pVCpu The VMCPU to operate on.
264 */
265VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
266{
267 PVM pVM = pVCpu->CTX_SUFF(pVM);
268
269 if ( pVM->tm.s.fTSCTiedToExecution
270 && !pVM->tm.s.fTSCNotTiedToHalt)
271 tmCpuTickPause(pVM, pVCpu);
272
273#ifndef VBOX_WITHOUT_NS_ACCOUNTING
274 uint64_t const u64NsTs = RTTimeNanoTS();
275 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
276 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
277 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
278 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;
279
280# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
281 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
282 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
283 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
284 if (cNsOtherNewDelta > 0)
285 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
286# endif
287
288 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
289 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
290 pVCpu->tm.s.cNsTotal = cNsTotalNew;
291 pVCpu->tm.s.cNsOther = cNsOtherNew;
292 pVCpu->tm.s.cPeriodsHalted++;
293 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
294#endif
295}
296
297
298/**
299 * Raise the timer force action flag and notify the dedicated timer EMT.
300 *
301 * @param pVM The VM handle.
302 */
303DECLINLINE(void) tmScheduleNotify(PVM pVM)
304{
305 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
306 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
307 {
308 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
309 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
310#ifdef IN_RING3
311 REMR3NotifyTimerPending(pVM, pVCpuDst);
312 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
313#endif
314 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
315 }
316}
317
318
319/**
320 * Schedule the queue which was changed.
321 */
322DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
323{
324 PVM pVM = pTimer->CTX_SUFF(pVM);
325 if ( VM_IS_EMT(pVM)
326 && RT_SUCCESS(tmTimerTryLock(pVM)))
327 {
328 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
329 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
330 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
331#ifdef VBOX_STRICT
332 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
333#endif
334 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
335 tmTimerUnlock(pVM);
336 }
337 else
338 {
339 TMTIMERSTATE enmState = pTimer->enmState;
340 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
341 tmScheduleNotify(pVM);
342 }
343}
344
345
346/**
347 * Try change the state to enmStateNew from enmStateOld
348 * and link the timer into the scheduling queue.
349 *
350 * @returns Success indicator.
351 * @param pTimer Timer in question.
352 * @param enmStateNew The new timer state.
353 * @param enmStateOld The old timer state.
354 */
355DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
356{
357 /*
358 * Attempt state change.
359 */
360 bool fRc;
361 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
362 return fRc;
363}
364
365
366/**
367 * Links the timer onto the scheduling queue.
368 *
369 * @param pQueue The timer queue the timer belongs to.
370 * @param pTimer The timer.
371 *
372 * @todo FIXME: Look into potential race with the thread running the queues
373 * and stuff.
374 */
375DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
376{
377 Assert(!pTimer->offScheduleNext);
378 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
379 int32_t offHead;
380 do
381 {
382 offHead = pQueue->offSchedule;
383 if (offHead)
384 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
385 else
386 pTimer->offScheduleNext = 0;
387 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
388}
389
390
391/**
392 * Try change the state to enmStateNew from enmStateOld
393 * and link the timer into the scheduling queue.
394 *
395 * @returns Success indicator.
396 * @param pTimer Timer in question.
397 * @param enmStateNew The new timer state.
398 * @param enmStateOld The old timer state.
399 */
400DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
401{
402 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
403 {
404 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
405 return true;
406 }
407 return false;
408}
409
410
411#ifdef VBOX_HIGH_RES_TIMERS_HACK
412
413/**
414 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
415 * EMT is polling.
416 *
417 * @returns See tmTimerPollInternal.
418 * @param pVM Pointer to the shared VM structure.
419 * @param u64Now Current virtual clock timestamp.
420 * @param u64Delta The delta to the next even in ticks of the
421 * virtual clock.
422 * @param pu64Delta Where to return the delta.
423 * @param pCounter The statistics counter to update.
424 */
425DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
426{
427 Assert(!(u64Delta & RT_BIT_64(63)));
428
429 if (!pVM->tm.s.fVirtualWarpDrive)
430 {
431 *pu64Delta = u64Delta;
432 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
433 }
434
435 /*
436 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
437 */
438 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
439 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
440
441 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
442 u64GipTime -= u64Start; /* the start is GIP time. */
443 if (u64GipTime >= u64Delta)
444 {
445 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
446 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
447 }
448 else
449 {
450 u64Delta -= u64GipTime;
451 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
452 u64Delta += u64GipTime;
453 }
454 *pu64Delta = u64Delta;
455 u64GipTime += u64Start;
456 return u64GipTime;
457}
458
459
460/**
461 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
462 * than the one dedicated to timer work.
463 *
464 * @returns See tmTimerPollInternal.
465 * @param pVM Pointer to the shared VM structure.
466 * @param u64Now Current virtual clock timestamp.
467 * @param pu64Delta Where to return the delta.
468 */
469DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
470{
471 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
472 *pu64Delta = s_u64OtherRet;
473 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
474}
475
476
477/**
478 * Worker for tmTimerPollInternal.
479 *
480 * @returns See tmTimerPollInternal.
481 * @param pVM Pointer to the shared VM structure.
482 * @param pVCpu Pointer to the shared VMCPU structure of the
483 * caller.
484 * @param pVCpuDst Pointer to the shared VMCPU structure of the
485 * dedicated timer EMT.
486 * @param u64Now Current virtual clock timestamp.
487 * @param pu64Delta Where to return the delta.
488 * @param pCounter The statistics counter to update.
489 */
490DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
491 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
492{
493 STAM_COUNTER_INC(pCounter);
494 if (pVCpuDst != pVCpu)
495 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
496 *pu64Delta = 0;
497 return 0;
498}
499
500/**
501 * Common worker for TMTimerPollGIP and TMTimerPoll.
502 *
503 * This function is called before FFs are checked in the inner execution EM loops.
504 *
505 * @returns The GIP timestamp of the next event.
506 * 0 if the next event has already expired.
507 *
508 * @param pVM Pointer to the shared VM structure.
509 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
510 * @param pu64Delta Where to store the delta.
511 *
512 * @thread The emulation thread.
513 *
514 * @remarks GIP uses ns ticks.
515 */
516DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
517{
518 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
519 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
520 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
521
522 /*
523 * Return straight away if the timer FF is already set ...
524 */
525 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
526 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
527
528 /*
529 * ... or if timers are being run.
530 */
531 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
532 {
533 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
534 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
535 }
536
537 /*
538 * Check for TMCLOCK_VIRTUAL expiration.
539 */
540 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
541 const int64_t i64Delta1 = u64Expire1 - u64Now;
542 if (i64Delta1 <= 0)
543 {
544 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
545 {
546 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
547 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
548#ifdef IN_RING3
549 REMR3NotifyTimerPending(pVM, pVCpuDst);
550#endif
551 }
552 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
553 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
554 }
555
556 /*
557 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
558 * This isn't quite as straight forward if in a catch-up, not only do
559 * we have to adjust the 'now' but when have to adjust the delta as well.
560 */
561
562 /*
563 * Optimistic lockless approach.
564 */
565 uint64_t u64VirtualSyncNow;
566 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
567 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
568 {
569 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
570 {
571 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
572 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
573 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
574 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
575 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
576 {
577 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
578 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
579 if (i64Delta2 > 0)
580 {
581 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
582 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
583
584 if (pVCpu == pVCpuDst)
585 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
586 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
587 }
588
589 if ( !pVM->tm.s.fRunningQueues
590 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
591 {
592 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
593 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
594#ifdef IN_RING3
595 REMR3NotifyTimerPending(pVM, pVCpuDst);
596#endif
597 }
598
599 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
600 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
601 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
602 }
603 }
604 }
605 else
606 {
607 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
608 LogFlow(("TMTimerPoll: stopped\n"));
609 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
610 }
611
612 /*
613 * Complicated lockless approach.
614 */
615 uint64_t off;
616 uint32_t u32Pct = 0;
617 bool fCatchUp;
618 int cOuterTries = 42;
619 for (;; cOuterTries--)
620 {
621 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
622 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
623 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
624 if (fCatchUp)
625 {
626 /* No changes allowed, try get a consistent set of parameters. */
627 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
628 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
629 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
630 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
631 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
632 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
633 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
634 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
635 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
636 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
637 || cOuterTries <= 0)
638 {
639 uint64_t u64Delta = u64Now - u64Prev;
640 if (RT_LIKELY(!(u64Delta >> 32)))
641 {
642 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
643 if (off > u64Sub + offGivenUp)
644 off -= u64Sub;
645 else /* we've completely caught up. */
646 off = offGivenUp;
647 }
648 else
649 /* More than 4 seconds since last time (or negative), ignore it. */
650 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
651
652 /* Check that we're still running and in catch up. */
653 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
654 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
655 break;
656 }
657 }
658 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
659 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
660 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
661 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
662 break; /* Got an consistent offset */
663
664 /* Repeat the initial checks before iterating. */
665 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
666 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
667 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
668 {
669 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
670 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
671 }
672 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
673 {
674 LogFlow(("TMTimerPoll: stopped\n"));
675 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
676 }
677 if (cOuterTries <= 0)
678 break; /* that's enough */
679 }
680 if (cOuterTries <= 0)
681 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
682 u64VirtualSyncNow = u64Now - off;
683
684 /* Calc delta and see if we've got a virtual sync hit. */
685 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
686 if (i64Delta2 <= 0)
687 {
688 if ( !pVM->tm.s.fRunningQueues
689 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
690 {
691 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
692 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
693#ifdef IN_RING3
694 REMR3NotifyTimerPending(pVM, pVCpuDst);
695#endif
696 }
697 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
698 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
699 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
700 }
701
702 /*
703 * Return the time left to the next event.
704 */
705 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
706 if (pVCpu == pVCpuDst)
707 {
708 if (fCatchUp)
709 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
710 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
711 }
712 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
713}
714
715
716/**
717 * Set FF if we've passed the next virtual event.
718 *
719 * This function is called before FFs are checked in the inner execution EM loops.
720 *
721 * @returns true if timers are pending, false if not.
722 *
723 * @param pVM Pointer to the shared VM structure.
724 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
725 * @thread The emulation thread.
726 */
727VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
728{
729 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
730 uint64_t off = 0;
731 tmTimerPollInternal(pVM, pVCpu, &off);
732 return off == 0;
733}
734
735
736/**
737 * Set FF if we've passed the next virtual event.
738 *
739 * This function is called before FFs are checked in the inner execution EM loops.
740 *
741 * @param pVM Pointer to the shared VM structure.
742 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
743 * @thread The emulation thread.
744 */
745VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
746{
747 uint64_t off;
748 tmTimerPollInternal(pVM, pVCpu, &off);
749}
750
751
752/**
753 * Set FF if we've passed the next virtual event.
754 *
755 * This function is called before FFs are checked in the inner execution EM loops.
756 *
757 * @returns The GIP timestamp of the next event.
758 * 0 if the next event has already expired.
759 * @param pVM Pointer to the shared VM structure.
760 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
761 * @param pu64Delta Where to store the delta.
762 * @thread The emulation thread.
763 */
764VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
765{
766 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
767}
768
769#endif /* VBOX_HIGH_RES_TIMERS_HACK */
770
771/**
772 * Gets the host context ring-3 pointer of the timer.
773 *
774 * @returns HC R3 pointer.
775 * @param pTimer Timer handle as returned by one of the create functions.
776 */
777VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
778{
779 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
780}
781
782
783/**
784 * Gets the host context ring-0 pointer of the timer.
785 *
786 * @returns HC R0 pointer.
787 * @param pTimer Timer handle as returned by one of the create functions.
788 */
789VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
790{
791 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
792}
793
794
795/**
796 * Gets the RC pointer of the timer.
797 *
798 * @returns RC pointer.
799 * @param pTimer Timer handle as returned by one of the create functions.
800 */
801VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
802{
803 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
804}
805
806
807/**
808 * Links a timer into the active list of a timer queue.
809 *
810 * The caller must have taken the TM semaphore before calling this function.
811 *
812 * @param pQueue The queue.
813 * @param pTimer The timer.
814 * @param u64Expire The timer expiration time.
815 */
816DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
817{
818 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
819 if (pCur)
820 {
821 for (;; pCur = TMTIMER_GET_NEXT(pCur))
822 {
823 if (pCur->u64Expire > u64Expire)
824 {
825 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
826 TMTIMER_SET_NEXT(pTimer, pCur);
827 TMTIMER_SET_PREV(pTimer, pPrev);
828 if (pPrev)
829 TMTIMER_SET_NEXT(pPrev, pTimer);
830 else
831 {
832 TMTIMER_SET_HEAD(pQueue, pTimer);
833 pQueue->u64Expire = u64Expire;
834 }
835 TMTIMER_SET_PREV(pCur, pTimer);
836 return;
837 }
838 if (!pCur->offNext)
839 {
840 TMTIMER_SET_NEXT(pCur, pTimer);
841 TMTIMER_SET_PREV(pTimer, pCur);
842 return;
843 }
844 }
845 }
846 else
847 {
848 TMTIMER_SET_HEAD(pQueue, pTimer);
849 pQueue->u64Expire = u64Expire;
850 }
851}
852
853
854/**
855 * Optimized TMTimerSet code path for starting an inactive timer.
856 *
857 * @returns VBox status code.
858 *
859 * @param pVM The VM handle.
860 * @param pTimer The timer handle.
861 * @param u64Expire The new expire time.
862 */
863static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
864{
865 Assert(!pTimer->offPrev);
866 Assert(!pTimer->offNext);
867 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
868
869 /*
870 * Calculate and set the expiration time.
871 */
872 pTimer->u64Expire = u64Expire;
873 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
874
875 /*
876 * Link the timer into the active list.
877 */
878 TMCLOCK const enmClock = pTimer->enmClock;
879 tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
880
881 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
882 tmTimerUnlock(pVM);
883 return VINF_SUCCESS;
884}
885
886
887
888
889
890/**
891 * Arm a timer with a (new) expire time.
892 *
893 * @returns VBox status.
894 * @param pTimer Timer handle as returned by one of the create functions.
895 * @param u64Expire New expire time.
896 */
897VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
898{
899 PVM pVM = pTimer->CTX_SUFF(pVM);
900 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
901 TMTIMER_ASSERT_CRITSECT(pTimer);
902
903#ifdef VBOX_WITH_STATISTICS
904 /* Gather optimization info. */
905 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
906 TMTIMERSTATE enmOrgState = pTimer->enmState;
907 switch (enmOrgState)
908 {
909 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
910 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
911 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
912 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
913 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
914 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
915 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
916 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
917 }
918#endif
919
920 /*
921 * The most common case is setting the timer again during the callback.
922 * The second most common case is starting a timer at some other time.
923 */
924#if 1
925 TMTIMERSTATE enmState1 = pTimer->enmState;
926 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
927 || ( enmState1 == TMTIMERSTATE_STOPPED
928 && pTimer->pCritSect))
929 {
930 /* Try take the TM lock and check the state again. */
931 if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
932 {
933 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
934 {
935 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
936 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
937 return VINF_SUCCESS;
938 }
939 tmTimerUnlock(pVM);
940 }
941 }
942#endif
943
944 /*
945 * Unoptimized code path.
946 */
947 int cRetries = 1000;
948 do
949 {
950 /*
951 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
952 */
953 TMTIMERSTATE enmState = pTimer->enmState;
954 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
955 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
956 switch (enmState)
957 {
958 case TMTIMERSTATE_EXPIRED_DELIVER:
959 case TMTIMERSTATE_STOPPED:
960 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
961 {
962 Assert(!pTimer->offPrev);
963 Assert(!pTimer->offNext);
964 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
965 || pVM->tm.s.fVirtualSyncTicking
966 || u64Expire >= pVM->tm.s.u64VirtualSync,
967 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
968 pTimer->u64Expire = u64Expire;
969 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
970 tmSchedule(pTimer);
971 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
972 return VINF_SUCCESS;
973 }
974 break;
975
976 case TMTIMERSTATE_PENDING_SCHEDULE:
977 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
978 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
979 {
980 pTimer->u64Expire = u64Expire;
981 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
982 tmSchedule(pTimer);
983 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
984 return VINF_SUCCESS;
985 }
986 break;
987
988
989 case TMTIMERSTATE_ACTIVE:
990 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
991 {
992 pTimer->u64Expire = u64Expire;
993 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
994 tmSchedule(pTimer);
995 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
996 return VINF_SUCCESS;
997 }
998 break;
999
1000 case TMTIMERSTATE_PENDING_RESCHEDULE:
1001 case TMTIMERSTATE_PENDING_STOP:
1002 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1003 {
1004 pTimer->u64Expire = u64Expire;
1005 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1006 tmSchedule(pTimer);
1007 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1008 return VINF_SUCCESS;
1009 }
1010 break;
1011
1012
1013 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1014 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1015 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1016#ifdef IN_RING3
1017 if (!RTThreadYield())
1018 RTThreadSleep(1);
1019#else
1020/** @todo call host context and yield after a couple of iterations */
1021#endif
1022 break;
1023
1024 /*
1025 * Invalid states.
1026 */
1027 case TMTIMERSTATE_DESTROY:
1028 case TMTIMERSTATE_FREE:
1029 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1030 return VERR_TM_INVALID_STATE;
1031 default:
1032 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1033 return VERR_TM_UNKNOWN_STATE;
1034 }
1035 } while (cRetries-- > 0);
1036
1037 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1038 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1039 return VERR_INTERNAL_ERROR;
1040}
1041
1042
1043/**
1044 * Return the current time for the specified clock, setting pu64Now if not NULL.
1045 *
1046 * @returns Current time.
1047 * @param pVM The VM handle.
1048 * @param enmClock The clock to query.
1049 * @param pu64Now Optional pointer where to store the return time
1050 */
1051DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1052{
1053 uint64_t u64Now;
1054 switch (enmClock)
1055 {
1056 case TMCLOCK_VIRTUAL_SYNC:
1057 u64Now = TMVirtualSyncGet(pVM);
1058 break;
1059 case TMCLOCK_VIRTUAL:
1060 u64Now = TMVirtualGet(pVM);
1061 break;
1062 case TMCLOCK_REAL:
1063 u64Now = TMRealGet(pVM);
1064 break;
1065 default:
1066 AssertFatalMsgFailed(("%d\n", enmClock));
1067 }
1068
1069 if (pu64Now)
1070 *pu64Now = u64Now;
1071 return u64Now;
1072}
1073
1074
/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 *
 * @remarks The caller must own the TM lock and must already have switched the
 *          timer to TMTIMERSTATE_ACTIVE (see the Asserts below).  This
 *          function releases the TM lock before returning.
 */
static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    /* The timer must be unlinked (not in any active list) and already ACTIVE. */
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    tmTimerUnlock(pVM); /* lock was taken by the caller; released here on the success path */
    return VINF_SUCCESS;
}
1109
1110
/**
 * Arm a timer with a expire time relative to the current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 *
 * @remarks Lock-free state machine: the timer state is advanced with
 *          compare-and-swap (tmTimerTry*) and the whole operation is retried
 *          up to 1000 times if another thread is racing us.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);
    PVM pVM = pTimer->CTX_SUFF(pVM);
    int rc;

#ifdef VBOX_WITH_STATISTICS
    /* Gather optimization info. */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues. As an additional restraint we require the
     * timer to have a critical section associated with to be 100% there aren't
     * concurrent operations on the timer. (This latter isn't necessary any
     * longer as this isn't supported for any timers, critsect or not.)
     *
     * Note! Lock ordering doesn't apply when we only tries to
     * get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            /* Note: the optimized worker releases the TM lock for us. */
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    bool fOwnVirtSyncLock;
    fOwnVirtSyncLock = !fOwnTMLock
                    && enmClock == TMCLOCK_VIRTUAL_SYNC
                    && RT_SUCCESS(tmVirtualSyncTryLock(pVM));
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *        Figure a safe way of activating this timer while the queue is
                     *        being run.
                     *        (99.9% sure this that the assertion is caused by DevAPIC.cpp
                     *        re-starting the timer in response to a initial_count write.) */
                }
                /* fall thru */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    /* Stopped/expired timers must be unlinked. */
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                /* Transient states owned by another thread; back off and retry. */
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            /* Hard failure: still report the current time to the caller. */
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_INTERNAL_ERROR;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
        {
            fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
            if (    !fOwnTMLock
                &&  enmClock == TMCLOCK_VIRTUAL_SYNC
                &&  !fOwnVirtSyncLock)
                fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM));
        }

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnVirtSyncLock)
        tmVirtualSyncUnlock(pVM);
    if (fOwnTMLock)
        tmTimerUnlock(pVM);

    if (    !fOwnTMLock
        &&  !fOwnVirtSyncLock
        &&  enmClock == TMCLOCK_VIRTUAL_SYNC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}
1338
1339
1340/**
1341 * Arm a timer with a (new) expire time relative to current time.
1342 *
1343 * @returns VBox status.
1344 * @param pTimer Timer handle as returned by one of the create functions.
1345 * @param cMilliesToNext Number of milliseconds to the next tick.
1346 */
1347VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
1348{
1349 PVM pVM = pTimer->CTX_SUFF(pVM);
1350 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1351
1352 switch (pTimer->enmClock)
1353 {
1354 case TMCLOCK_VIRTUAL:
1355 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1356 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1357
1358 case TMCLOCK_VIRTUAL_SYNC:
1359 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1360 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1361
1362 case TMCLOCK_REAL:
1363 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1364 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
1365
1366 default:
1367 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1368 return VERR_INTERNAL_ERROR;
1369 }
1370}
1371
1372
1373/**
1374 * Arm a timer with a (new) expire time relative to current time.
1375 *
1376 * @returns VBox status.
1377 * @param pTimer Timer handle as returned by one of the create functions.
1378 * @param cMicrosToNext Number of microseconds to the next tick.
1379 */
1380VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
1381{
1382 PVM pVM = pTimer->CTX_SUFF(pVM);
1383 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1384
1385 switch (pTimer->enmClock)
1386 {
1387 case TMCLOCK_VIRTUAL:
1388 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1389 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1390
1391 case TMCLOCK_VIRTUAL_SYNC:
1392 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1393 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1394
1395 case TMCLOCK_REAL:
1396 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1397 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
1398
1399 default:
1400 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1401 return VERR_INTERNAL_ERROR;
1402 }
1403}
1404
1405
1406/**
1407 * Arm a timer with a (new) expire time relative to current time.
1408 *
1409 * @returns VBox status.
1410 * @param pTimer Timer handle as returned by one of the create functions.
1411 * @param cNanosToNext Number of nanoseconds to the next tick.
1412 */
1413VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
1414{
1415 PVM pVM = pTimer->CTX_SUFF(pVM);
1416 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1417
1418 switch (pTimer->enmClock)
1419 {
1420 case TMCLOCK_VIRTUAL:
1421 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1422 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1423
1424 case TMCLOCK_VIRTUAL_SYNC:
1425 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1426 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1427
1428 case TMCLOCK_REAL:
1429 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1430 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
1431
1432 default:
1433 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1434 return VERR_INTERNAL_ERROR;
1435 }
1436}
1437
1438
1439/**
1440 * Drops a hint about the frequency of the timer.
1441 *
1442 * This is used by TM and the VMM to calculate how often guest execution needs
1443 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1444 *
1445 * @returns VBox status code.
1446 * @param pTimer Timer handle as returned by one of the create
1447 * functions.
1448 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1449 *
1450 * @remarks We're using an integer hertz value here since anything above 1 HZ
1451 * is not going to be any trouble satisfying scheduling wise. The
1452 * range where it makes sense is >= 100 HZ.
1453 */
1454VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1455{
1456 TMTIMER_ASSERT_CRITSECT(pTimer);
1457
1458 uint32_t const uHzOldHint = pTimer->uHzHint;
1459 pTimer->uHzHint = uHzHint;
1460
1461 PVM pVM = pTimer->CTX_SUFF(pVM);
1462 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1463 if ( uHzHint > uMaxHzHint
1464 || uHzOldHint >= uMaxHzHint)
1465 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1466
1467 return VINF_SUCCESS;
1468}
1469
1470
1471/**
1472 * Stop the timer.
1473 * Use TMR3TimerArm() to "un-stop" the timer.
1474 *
1475 * @returns VBox status.
1476 * @param pTimer Timer handle as returned by one of the create functions.
1477 */
1478VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1479{
1480 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1481 TMTIMER_ASSERT_CRITSECT(pTimer);
1482
1483 /* Reset the HZ hint. */
1484 if (pTimer->uHzHint)
1485 {
1486 PVM pVM = pTimer->CTX_SUFF(pVM);
1487 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1488 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1489 pTimer->uHzHint = 0;
1490 }
1491
1492 /** @todo see if this function needs optimizing. */
1493 int cRetries = 1000;
1494 do
1495 {
1496 /*
1497 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1498 */
1499 TMTIMERSTATE enmState = pTimer->enmState;
1500 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1501 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1502 switch (enmState)
1503 {
1504 case TMTIMERSTATE_EXPIRED_DELIVER:
1505 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1506 return VERR_INVALID_PARAMETER;
1507
1508 case TMTIMERSTATE_STOPPED:
1509 case TMTIMERSTATE_PENDING_STOP:
1510 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1511 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1512 return VINF_SUCCESS;
1513
1514 case TMTIMERSTATE_PENDING_SCHEDULE:
1515 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1516 {
1517 tmSchedule(pTimer);
1518 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1519 return VINF_SUCCESS;
1520 }
1521
1522 case TMTIMERSTATE_PENDING_RESCHEDULE:
1523 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1524 {
1525 tmSchedule(pTimer);
1526 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1527 return VINF_SUCCESS;
1528 }
1529 break;
1530
1531 case TMTIMERSTATE_ACTIVE:
1532 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1533 {
1534 tmSchedule(pTimer);
1535 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1536 return VINF_SUCCESS;
1537 }
1538 break;
1539
1540 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1541 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1542 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1543#ifdef IN_RING3
1544 if (!RTThreadYield())
1545 RTThreadSleep(1);
1546#else
1547/**@todo call host and yield cpu after a while. */
1548#endif
1549 break;
1550
1551 /*
1552 * Invalid states.
1553 */
1554 case TMTIMERSTATE_DESTROY:
1555 case TMTIMERSTATE_FREE:
1556 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1557 return VERR_TM_INVALID_STATE;
1558 default:
1559 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1560 return VERR_TM_UNKNOWN_STATE;
1561 }
1562 } while (cRetries-- > 0);
1563
1564 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1565 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1566 return VERR_INTERNAL_ERROR;
1567}
1568
1569
1570/**
1571 * Get the current clock time.
1572 * Handy for calculating the new expire time.
1573 *
1574 * @returns Current clock time.
1575 * @param pTimer Timer handle as returned by one of the create functions.
1576 */
1577VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1578{
1579 uint64_t u64;
1580 PVM pVM = pTimer->CTX_SUFF(pVM);
1581
1582 switch (pTimer->enmClock)
1583 {
1584 case TMCLOCK_VIRTUAL:
1585 u64 = TMVirtualGet(pVM);
1586 break;
1587 case TMCLOCK_VIRTUAL_SYNC:
1588 u64 = TMVirtualSyncGet(pVM);
1589 break;
1590 case TMCLOCK_REAL:
1591 u64 = TMRealGet(pVM);
1592 break;
1593 default:
1594 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1595 return ~(uint64_t)0;
1596 }
1597 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1598 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1599 return u64;
1600}
1601
1602
1603/**
1604 * Get the frequency of the timer clock.
1605 *
1606 * @returns Clock frequency (as Hz of course).
1607 * @param pTimer Timer handle as returned by one of the create functions.
1608 */
1609VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1610{
1611 switch (pTimer->enmClock)
1612 {
1613 case TMCLOCK_VIRTUAL:
1614 case TMCLOCK_VIRTUAL_SYNC:
1615 return TMCLOCK_FREQ_VIRTUAL;
1616
1617 case TMCLOCK_REAL:
1618 return TMCLOCK_FREQ_REAL;
1619
1620 default:
1621 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1622 return 0;
1623 }
1624}
1625
1626
1627/**
1628 * Get the current clock time as nanoseconds.
1629 *
1630 * @returns The timer clock as nanoseconds.
1631 * @param pTimer Timer handle as returned by one of the create functions.
1632 */
1633VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1634{
1635 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1636}
1637
1638
1639/**
1640 * Get the current clock time as microseconds.
1641 *
1642 * @returns The timer clock as microseconds.
1643 * @param pTimer Timer handle as returned by one of the create functions.
1644 */
1645VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1646{
1647 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1648}
1649
1650
1651/**
1652 * Get the current clock time as milliseconds.
1653 *
1654 * @returns The timer clock as milliseconds.
1655 * @param pTimer Timer handle as returned by one of the create functions.
1656 */
1657VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1658{
1659 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1660}
1661
1662
1663/**
1664 * Converts the specified timer clock time to nanoseconds.
1665 *
1666 * @returns nanoseconds.
1667 * @param pTimer Timer handle as returned by one of the create functions.
1668 * @param u64Ticks The clock ticks.
1669 * @remark There could be rounding errors here. We just do a simple integer divide
1670 * without any adjustments.
1671 */
1672VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1673{
1674 switch (pTimer->enmClock)
1675 {
1676 case TMCLOCK_VIRTUAL:
1677 case TMCLOCK_VIRTUAL_SYNC:
1678 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1679 return u64Ticks;
1680
1681 case TMCLOCK_REAL:
1682 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1683 return u64Ticks * 1000000;
1684
1685 default:
1686 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1687 return 0;
1688 }
1689}
1690
1691
1692/**
1693 * Converts the specified timer clock time to microseconds.
1694 *
1695 * @returns microseconds.
1696 * @param pTimer Timer handle as returned by one of the create functions.
1697 * @param u64Ticks The clock ticks.
1698 * @remark There could be rounding errors here. We just do a simple integer divide
1699 * without any adjustments.
1700 */
1701VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1702{
1703 switch (pTimer->enmClock)
1704 {
1705 case TMCLOCK_VIRTUAL:
1706 case TMCLOCK_VIRTUAL_SYNC:
1707 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1708 return u64Ticks / 1000;
1709
1710 case TMCLOCK_REAL:
1711 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1712 return u64Ticks * 1000;
1713
1714 default:
1715 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1716 return 0;
1717 }
1718}
1719
1720
1721/**
1722 * Converts the specified timer clock time to milliseconds.
1723 *
1724 * @returns milliseconds.
1725 * @param pTimer Timer handle as returned by one of the create functions.
1726 * @param u64Ticks The clock ticks.
1727 * @remark There could be rounding errors here. We just do a simple integer divide
1728 * without any adjustments.
1729 */
1730VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1731{
1732 switch (pTimer->enmClock)
1733 {
1734 case TMCLOCK_VIRTUAL:
1735 case TMCLOCK_VIRTUAL_SYNC:
1736 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1737 return u64Ticks / 1000000;
1738
1739 case TMCLOCK_REAL:
1740 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1741 return u64Ticks;
1742
1743 default:
1744 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1745 return 0;
1746 }
1747}
1748
1749
1750/**
1751 * Converts the specified nanosecond timestamp to timer clock ticks.
1752 *
1753 * @returns timer clock ticks.
1754 * @param pTimer Timer handle as returned by one of the create functions.
1755 * @param u64NanoTS The nanosecond value ticks to convert.
1756 * @remark There could be rounding and overflow errors here.
1757 */
1758VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1759{
1760 switch (pTimer->enmClock)
1761 {
1762 case TMCLOCK_VIRTUAL:
1763 case TMCLOCK_VIRTUAL_SYNC:
1764 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1765 return u64NanoTS;
1766
1767 case TMCLOCK_REAL:
1768 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1769 return u64NanoTS / 1000000;
1770
1771 default:
1772 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1773 return 0;
1774 }
1775}
1776
1777
1778/**
1779 * Converts the specified microsecond timestamp to timer clock ticks.
1780 *
1781 * @returns timer clock ticks.
1782 * @param pTimer Timer handle as returned by one of the create functions.
1783 * @param u64MicroTS The microsecond value ticks to convert.
1784 * @remark There could be rounding and overflow errors here.
1785 */
1786VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1787{
1788 switch (pTimer->enmClock)
1789 {
1790 case TMCLOCK_VIRTUAL:
1791 case TMCLOCK_VIRTUAL_SYNC:
1792 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1793 return u64MicroTS * 1000;
1794
1795 case TMCLOCK_REAL:
1796 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1797 return u64MicroTS / 1000;
1798
1799 default:
1800 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1801 return 0;
1802 }
1803}
1804
1805
1806/**
1807 * Converts the specified millisecond timestamp to timer clock ticks.
1808 *
1809 * @returns timer clock ticks.
1810 * @param pTimer Timer handle as returned by one of the create functions.
1811 * @param u64MilliTS The millisecond value ticks to convert.
1812 * @remark There could be rounding and overflow errors here.
1813 */
1814VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1815{
1816 switch (pTimer->enmClock)
1817 {
1818 case TMCLOCK_VIRTUAL:
1819 case TMCLOCK_VIRTUAL_SYNC:
1820 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1821 return u64MilliTS * 1000000;
1822
1823 case TMCLOCK_REAL:
1824 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1825 return u64MilliTS;
1826
1827 default:
1828 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1829 return 0;
1830 }
1831}
1832
1833
/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer.  Returns ~0 (all bits set) when the timer
 *          is not active, in an invalid state, or when a stable state could
 *          not be observed within the retry budget.
 * @param   pTimer  Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    /* The state may be changed concurrently; poll until a stable state is seen. */
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Not armed: no meaningful expire time. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            /* Armed: u64Expire is valid in these states. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            /* Transient states owned by another thread; yield and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}
1894
1895
1896/**
1897 * Checks if a timer is active or not.
1898 *
1899 * @returns True if active.
1900 * @returns False if not active.
1901 * @param pTimer Timer handle as returned by one of the create functions.
1902 */
1903VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1904{
1905 TMTIMERSTATE enmState = pTimer->enmState;
1906 switch (enmState)
1907 {
1908 case TMTIMERSTATE_STOPPED:
1909 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1910 case TMTIMERSTATE_EXPIRED_DELIVER:
1911 case TMTIMERSTATE_PENDING_STOP:
1912 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1913 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1914 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1915 return false;
1916
1917 case TMTIMERSTATE_ACTIVE:
1918 case TMTIMERSTATE_PENDING_RESCHEDULE:
1919 case TMTIMERSTATE_PENDING_SCHEDULE:
1920 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1921 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1922 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1923 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1924 return true;
1925
1926 /*
1927 * Invalid states.
1928 */
1929 case TMTIMERSTATE_DESTROY:
1930 case TMTIMERSTATE_FREE:
1931 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1932 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1933 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1934 return false;
1935 default:
1936 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1937 return false;
1938 }
1939}
1940
1941
1942/**
1943 * Convert state to string.
1944 *
1945 * @returns Readonly status name.
1946 * @param enmState State.
1947 */
1948const char *tmTimerState(TMTIMERSTATE enmState)
1949{
1950 switch (enmState)
1951 {
1952#define CASE(num, state) \
1953 case TMTIMERSTATE_##state: \
1954 AssertCompile(TMTIMERSTATE_##state == (num)); \
1955 return #num "-" #state
1956 CASE( 1,STOPPED);
1957 CASE( 2,ACTIVE);
1958 CASE( 3,EXPIRED_GET_UNLINK);
1959 CASE( 4,EXPIRED_DELIVER);
1960 CASE( 5,PENDING_STOP);
1961 CASE( 6,PENDING_STOP_SCHEDULE);
1962 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
1963 CASE( 8,PENDING_SCHEDULE);
1964 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
1965 CASE(10,PENDING_RESCHEDULE);
1966 CASE(11,DESTROY);
1967 CASE(12,FREE);
1968 default:
1969 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
1970 return "Invalid state!";
1971#undef CASE
1972 }
1973}
1974
1975
1976/**
1977 * Schedules the given timer on the given queue.
1978 *
1979 * @param pQueue The timer queue.
1980 * @param pTimer The timer that needs scheduling.
1981 *
1982 * @remarks Called while owning the lock.
1983 */
1984DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
1985{
1986 /*
1987 * Processing.
1988 */
1989 unsigned cRetries = 2;
1990 do
1991 {
1992 TMTIMERSTATE enmState = pTimer->enmState;
1993 switch (enmState)
1994 {
1995 /*
1996 * Reschedule timer (in the active list).
1997 */
1998 case TMTIMERSTATE_PENDING_RESCHEDULE:
1999 {
2000 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
2001 break; /* retry */
2002
2003 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
2004 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
2005 if (pPrev)
2006 TMTIMER_SET_NEXT(pPrev, pNext);
2007 else
2008 {
2009 TMTIMER_SET_HEAD(pQueue, pNext);
2010 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
2011 }
2012 if (pNext)
2013 TMTIMER_SET_PREV(pNext, pPrev);
2014 pTimer->offNext = 0;
2015 pTimer->offPrev = 0;
2016 /* fall thru */
2017 }
2018
2019 /*
2020 * Schedule timer (insert into the active list).
2021 */
2022 case TMTIMERSTATE_PENDING_SCHEDULE:
2023 {
2024 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
2025 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
2026 break; /* retry */
2027
2028 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
2029 if (pCur)
2030 {
2031 const uint64_t u64Expire = pTimer->u64Expire;
2032 for (;; pCur = TMTIMER_GET_NEXT(pCur))
2033 {
2034 if (pCur->u64Expire > u64Expire)
2035 {
2036 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
2037 TMTIMER_SET_NEXT(pTimer, pCur);
2038 TMTIMER_SET_PREV(pTimer, pPrev);
2039 if (pPrev)
2040 TMTIMER_SET_NEXT(pPrev, pTimer);
2041 else
2042 {
2043 TMTIMER_SET_HEAD(pQueue, pTimer);
2044 pQueue->u64Expire = u64Expire;
2045 }
2046 TMTIMER_SET_PREV(pCur, pTimer);
2047 return;
2048 }
2049 if (!pCur->offNext)
2050 {
2051 TMTIMER_SET_NEXT(pCur, pTimer);
2052 TMTIMER_SET_PREV(pTimer, pCur);
2053 return;
2054 }
2055 }
2056 }
2057 else
2058 {
2059 TMTIMER_SET_HEAD(pQueue, pTimer);
2060 pQueue->u64Expire = pTimer->u64Expire;
2061 }
2062 return;
2063 }
2064
2065 /*
2066 * Stop the timer in active list.
2067 */
2068 case TMTIMERSTATE_PENDING_STOP:
2069 {
2070 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
2071 break; /* retry */
2072
2073 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
2074 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
2075 if (pPrev)
2076 TMTIMER_SET_NEXT(pPrev, pNext);
2077 else
2078 {
2079 TMTIMER_SET_HEAD(pQueue, pNext);
2080 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
2081 }
2082 if (pNext)
2083 TMTIMER_SET_PREV(pNext, pPrev);
2084 pTimer->offNext = 0;
2085 pTimer->offPrev = 0;
2086 /* fall thru */
2087 }
2088
2089 /*
2090 * Stop the timer (not on the active list).
2091 */
2092 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2093 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
2094 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
2095 break;
2096 return;
2097
2098 /*
2099 * The timer is pending destruction by TMR3TimerDestroy, our caller.
2100 * Nothing to do here.
2101 */
2102 case TMTIMERSTATE_DESTROY:
2103 break;
2104
2105 /*
2106 * Postpone these until they get into the right state.
2107 */
2108 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2109 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2110 tmTimerLink(pQueue, pTimer);
2111 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
2112 return;
2113
2114 /*
2115 * None of these can be in the schedule.
2116 */
2117 case TMTIMERSTATE_FREE:
2118 case TMTIMERSTATE_STOPPED:
2119 case TMTIMERSTATE_ACTIVE:
2120 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2121 case TMTIMERSTATE_EXPIRED_DELIVER:
2122 default:
2123 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
2124 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
2125 return;
2126 }
2127 } while (cRetries-- > 0);
2128}
2129
2130
2131/**
2132 * Schedules the specified timer queue.
2133 *
2134 * @param pVM The VM to run the timers for.
2135 * @param pQueue The queue to schedule.
2136 *
2137 * @remarks Called while owning the lock.
2138 */
2139void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
2140{
2141 TM_ASSERT_LOCK(pVM);
2142
2143 /*
2144 * Dequeue the scheduling list and iterate it.
2145 */
2146 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
2147 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
2148 if (!offNext)
2149 return;
2150 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
2151 while (pNext)
2152 {
2153 /*
2154 * Unlink the head timer and find the next one.
2155 */
2156 PTMTIMER pTimer = pNext;
2157 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
2158 pTimer->offScheduleNext = 0;
2159
2160 /*
2161 * Do the scheduling.
2162 */
2163 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
2164 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
2165 tmTimerQueueScheduleOne(pQueue, pTimer);
2166 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
2167 } /* foreach timer in current schedule batch. */
2168 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
2169}
2170
2171
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * @param   pVM         VM handle.
 * @param   pszWhere    Caller tag included in the assertion messages.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Check the linking of the active lists.
     */
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            /* Each entry must belong to this clock and link back to its predecessor. */
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* An ACTIVE timer must not be on the schedule list; the
                       state re-read tolerates a concurrent state change after
                       the snapshot above. */
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            /* These states imply the timer is (still) linked into an active
               list, so it must be reachable from the list head. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            {
                PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                Assert(pCur->offPrev || pCur == pCurAct);
                while (pCurAct && pCurAct != pCur)
                    pCurAct = TMTIMER_GET_NEXT(pCurAct);
                Assert(pCurAct == pCur);
                break;
            }

            /* These states imply the timer is NOT on any active list; verify
               it is neither linked nor referenced by any list entry. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            {
                Assert(!pCur->offNext);
                Assert(!pCur->offPrev);
                for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                      pCurAct;
                      pCurAct = TMTIMER_GET_NEXT(pCurAct))
                {
                    Assert(pCurAct != pCur);
                    Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                    Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                }
                break;
            }

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */
}
#endif /* VBOX_STRICT */
2275
2276
2277/**
2278 * Gets the current warp drive percent.
2279 *
2280 * @returns The warp drive percent.
2281 * @param pVM The VM handle.
2282 */
2283VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
2284{
2285 return pVM->tm.s.u32VirtualWarpDrivePercentage;
2286}
2287
2288
2289/**
2290 * Gets the highest frequency hint for all the important timers.
2291 *
2292 * @returns The highest frequency. 0 if no timers care.
2293 * @param pVM The VM handle.
2294 */
2295static uint32_t tmGetFrequencyHint(PVM pVM)
2296{
2297 /*
2298 * Query the value, recalculate it if necessary.
2299 *
2300 * The "right" highest frequency value isn't so important that we'll block
2301 * waiting on the timer semaphore.
2302 */
2303 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2304 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2305 {
2306 if (RT_SUCCESS(tmTimerTryLock(pVM)))
2307 {
2308 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2309
2310 /*
2311 * Loop over the timers associated with each clock.
2312 */
2313 uMaxHzHint = 0;
2314 for (int i = 0; i < TMCLOCK_MAX; i++)
2315 {
2316 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2317 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2318 {
2319 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2320 if (uHzHint > uMaxHzHint)
2321 {
2322 switch (pCur->enmState)
2323 {
2324 case TMTIMERSTATE_ACTIVE:
2325 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2326 case TMTIMERSTATE_EXPIRED_DELIVER:
2327 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2328 case TMTIMERSTATE_PENDING_SCHEDULE:
2329 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2330 case TMTIMERSTATE_PENDING_RESCHEDULE:
2331 uMaxHzHint = uHzHint;
2332 break;
2333
2334 case TMTIMERSTATE_STOPPED:
2335 case TMTIMERSTATE_PENDING_STOP:
2336 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2337 case TMTIMERSTATE_DESTROY:
2338 case TMTIMERSTATE_FREE:
2339 break;
2340 /* no default, want gcc warnings when adding more states. */
2341 }
2342 }
2343 }
2344 }
2345 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2346 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2347 tmTimerUnlock(pVM);
2348 }
2349 }
2350 return uMaxHzHint;
2351}
2352
2353
2354/**
2355 * Calculates a host timer frequency that would be suitable for the current
2356 * timer load.
2357 *
2358 * This will take the highest timer frequency, adjust for catch-up and warp
2359 * driver, and finally add a little fudge factor. The caller (VMM) will use
2360 * the result to adjust the per-cpu preemption timer.
2361 *
2362 * @returns The highest frequency. 0 if no important timers around.
2363 * @param pVM The VM handle.
2364 * @param pVCpu The current CPU.
2365 */
2366VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
2367{
2368 uint32_t uHz = tmGetFrequencyHint(pVM);
2369
2370 /* Catch up, we have to be more aggressive than the % indicates at the
2371 beginning of the effort. */
2372 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2373 {
2374 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2375 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2376 {
2377 if (u32Pct <= 100)
2378 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2379 else if (u32Pct <= 200)
2380 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2381 else if (u32Pct <= 400)
2382 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2383 uHz *= u32Pct + 100;
2384 uHz /= 100;
2385 }
2386 }
2387
2388 /* Warp drive. */
2389 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2390 {
2391 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2392 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2393 {
2394 uHz *= u32Pct;
2395 uHz /= 100;
2396 }
2397 }
2398
2399 /* Fudge factor. */
2400 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2401 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2402 else
2403 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2404 uHz /= 100;
2405
2406 /* Make sure it isn't too high. */
2407 if (uHz > pVM->tm.s.cHostHzMax)
2408 uHz = pVM->tm.s.cHostHzMax;
2409
2410 return uHz;
2411}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette