VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 32572

Last change on this file since 32572 was 32572, checked in by vboxsync, 14 years ago

VMM,SUPDrv,IPRT: More changes related to the periodic preemption timer. (still disabled)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 81.8 KB
Line 
1/* $Id: TMAll.cpp 32572 2010-09-16 16:18:12Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/tm.h>
24#include <VBox/mm.h>
25#ifdef IN_RING3
26# include <VBox/rem.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vm.h>
30
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <VBox/log.h>
34#include <VBox/sup.h>
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** @def TMTIMER_ASSERT_CRITSECT
48 * Checks that the caller owns the critical section if one is associated with
49 * the timer. */
50#ifdef VBOX_STRICT
51# define TMTIMER_ASSERT_CRITSECT(pTimer) \
52 do { \
53 if ((pTimer)->pCritSect) \
54 { \
55 PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
56 AssertMsg(pCritSect && PDMCritSectIsOwner(pCritSect), \
57 ("pTimer=%p (%s) pCritSect=%p\n", pTimer, R3STRING(pTimer->pszDesc), (pTimer)->pCritSect)); \
58 } \
59 } while (0)
60#else
61# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
62#endif
63
64
65#ifndef tmTimerLock
66
67/**
68 * Try take the timer lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
69 *
70 * @retval VINF_SUCCESS on success (always in ring-3).
71 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
72 *
73 * @param pVM The VM handle.
74 *
75 * @thread EMTs for the time being.
76 */
77int tmTimerLock(PVM pVM)
78{
79 VM_ASSERT_EMT(pVM);
80 int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
81 return rc;
82}
83
84
85/**
86 * Try take the timer lock, no waiting.
87 *
88 * @retval VINF_SUCCESS on success.
89 * @retval VERR_SEM_BUSY if busy.
90 *
91 * @param pVM The VM handle.
92 */
93int tmTimerTryLock(PVM pVM)
94{
95 int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
96 return rc;
97}
98
99
/**
 * Release the EMT/TM lock.
 *
 * Counterpart to a successful tmTimerLock or tmTimerTryLock call.
 *
 * @param pVM The VM handle.
 */
void tmTimerUnlock(PVM pVM)
{
 PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
}
109
110
111/**
112 * Try take the VirtualSync lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
113 *
114 * @retval VINF_SUCCESS on success (always in ring-3).
115 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
116 *
117 * @param pVM The VM handle.
118 */
119int tmVirtualSyncLock(PVM pVM)
120{
121 VM_ASSERT_EMT(pVM);
122 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
123 return rc;
124}
125
126
127/**
128 * Try take the VirtualSync lock, no waiting.
129 *
130 * @retval VINF_SUCCESS on success.
131 * @retval VERR_SEM_BUSY if busy.
132 *
133 * @param pVM The VM handle.
134 */
135int tmVirtualSyncTryLock(PVM pVM)
136{
137 VM_ASSERT_EMT(pVM);
138 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
139 return rc;
140}
141
142
/**
 * Release the VirtualSync lock.
 *
 * Counterpart to a successful tmVirtualSyncLock or tmVirtualSyncTryLock call.
 *
 * @param pVM The VM handle.
 */
void tmVirtualSyncUnlock(PVM pVM)
{
 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
152
153#endif /* ! macros */
154
/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param pVCpu The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
{
 PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
 /* Record when this execution period starts; TMNotifyEndOfExecution uses it
    to compute the executing-time delta. */
 pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
#endif
 /* Only restart the TSC when it is configured to tick with execution. */
 if (pVM->tm.s.fTSCTiedToExecution)
 tmCpuTickResume(pVM, pVCpu);
}
175
176
/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param pVCpu The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
{
 PVM pVM = pVCpu->CTX_SUFF(pVM);

 if (pVM->tm.s.fTSCTiedToExecution)
 tmCpuTickPause(pVM, pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
 /* Account the just-finished execution period: total time since the vCPU
    started, time spent executing, and the 'other' remainder. */
 uint64_t const u64NsTs = RTTimeNanoTS();
 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
 uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
 uint64_t const cNsExecutingNew = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
 uint64_t const cNsOtherNew = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
 if (cNsOtherNewDelta > 0)
 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
# endif

 /* Publish the new values: bump the generation to an odd value while
    updating and make it even again afterwards, so lockless readers can
    detect (and retry across) a torn read. */
 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
 pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
 pVCpu->tm.s.cNsTotal = cNsTotalNew;
 pVCpu->tm.s.cNsOther = cNsOtherNew;
 pVCpu->tm.s.cPeriodsExecuting++;
 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}
217
218
/**
 * Notification that the cpu is entering the halt state
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param pVCpu The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
{
 PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
 /* Record when this halt period starts; TMNotifyEndOfHalt uses it to
    compute the halted-time delta. */
 pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
#endif

 /* Resume the TSC only when it ticks with execution and is additionally
    configured to also tick while halted. */
 if ( pVM->tm.s.fTSCTiedToExecution
 && !pVM->tm.s.fTSCNotTiedToHalt)
 tmCpuTickResume(pVM, pVCpu);
}
241
242
/**
 * Notification that the cpu is leaving the halt state
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param pVCpu The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
{
 PVM pVM = pVCpu->CTX_SUFF(pVM);

 if ( pVM->tm.s.fTSCTiedToExecution
 && !pVM->tm.s.fTSCNotTiedToHalt)
 tmCpuTickPause(pVM, pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
 /* Account the just-finished halt period (mirrors TMNotifyEndOfExecution). */
 uint64_t const u64NsTs = RTTimeNanoTS();
 uint64_t const cNsTotalNew = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
 uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
 uint64_t const cNsHaltedNew = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
 uint64_t const cNsOtherNew = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
 STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
 int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
 if (cNsOtherNewDelta > 0)
 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
# endif

 /* Publish under an odd generation value so lockless readers can detect
    a concurrent update and retry. */
 uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
 pVCpu->tm.s.cNsHalted = cNsHaltedNew;
 pVCpu->tm.s.cNsTotal = cNsTotalNew;
 pVCpu->tm.s.cNsOther = cNsOtherNew;
 pVCpu->tm.s.cPeriodsHalted++;
 ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}
284
285
/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * Does nothing if VMCPU_FF_TIMER is already pending on the timer EMT.
 *
 * @param pVM The VM handle.
 */
DECLINLINE(void) tmScheduleNotify(PVM pVM)
{
 /* The FF is always raised on the EMT dedicated to timer work. */
 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
 {
 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
 /* Ring-3 only: poke REM and wake up the target EMT if it is sleeping. */
 REMR3NotifyTimerPending(pVM, pVCpuDst);
 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
 }
}
305
306
/**
 * Schedule the queue which was changed.
 *
 * If called on an EMT and the timer lock is available, the queue the timer
 * belongs to is scheduled right away; otherwise the work is deferred to the
 * dedicated timer EMT by raising VMCPU_FF_TIMER (when scheduling is pending).
 *
 * @param pTimer The timer whose queue needs (re)scheduling.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
 PVM pVM = pTimer->CTX_SUFF(pVM);
 if ( VM_IS_EMT(pVM)
 && RT_SUCCESS(tmTimerTryLock(pVM)))
 {
 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
 tmTimerUnlock(pVM);
 }
 else
 {
 /* Couldn't do it here; ask the timer EMT to do it if scheduling is pending. */
 TMTIMERSTATE enmState = pTimer->enmState;
 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
 tmScheduleNotify(pVM);
 }
}
332
333
/**
 * Try change the state to enmStateNew from enmStateOld.
 *
 * Pure atomic state transition; unlike tmTimerTryWithLink this does NOT
 * touch the scheduling queue.
 *
 * @returns Success indicator.
 * @param pTimer Timer in question.
 * @param enmStateNew The new timer state.
 * @param enmStateOld The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
 /*
 * Attempt state change.
 */
 bool fRc;
 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
 return fRc;
}
352
353
/**
 * Links the timer onto the scheduling queue.
 *
 * Lock-free LIFO push: the timer is made the new head of the queue's
 * schedule list using a compare-and-swap loop. The list links are stored
 * as self-relative offsets rather than pointers.
 *
 * @param pQueue The timer queue the timer belongs to.
 * @param pTimer The timer.
 *
 * @todo FIXME: Look into potential race with the thread running the queues
 * and stuff.
 */
DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
 Assert(!pTimer->offScheduleNext);
 /* Offset of this timer relative to the queue - the prospective new head value. */
 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
 int32_t offHead;
 do
 {
 /* Point our next-link at the current head (offset made relative to pTimer),
 or terminate the list if the queue is empty. */
 offHead = pQueue->offSchedule;
 if (offHead)
 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
 else
 pTimer->offScheduleNext = 0;
 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}
377
378
/**
 * Try change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * The timer is only linked when the state change succeeds.
 *
 * @returns Success indicator.
 * @param pTimer Timer in question.
 * @param enmStateNew The new timer state.
 * @param enmStateOld The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
 {
 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
 return true;
 }
 return false;
}
397
398
399#ifdef VBOX_HIGH_RES_TIMERS_HACK
400
/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param pVM Pointer to the shared VM structure.
 * @param u64Now Current virtual clock timestamp.
 * @param u64Delta The delta to the next event in ticks of the
 * virtual clock.
 * @param pu64Delta Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
 Assert(!(u64Delta & RT_BIT_64(63)));

 /* No warp drive active: the GIP deadline is simply now + delta + offset. */
 if (!pVM->tm.s.fVirtualWarpDrive)
 {
 *pu64Delta = u64Delta;
 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
 }

 /*
 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
 */
 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;

 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
 u64GipTime -= u64Start; /* the start is GIP time. */
 if (u64GipTime >= u64Delta)
 {
 /* Scale both the deadline and the delta back from warped to real time. */
 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
 }
 else
 {
 /* Only the portion of the delta that falls after the warp start is scaled. */
 u64Delta -= u64GipTime;
 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
 u64Delta += u64GipTime;
 }
 *pu64Delta = u64Delta;
 u64GipTime += u64Start;
 return u64GipTime;
}
446
447
448/**
449 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
450 * than the one dedicated to timer work.
451 *
452 * @returns See tmTimerPollInternal.
453 * @param pVM Pointer to the shared VM structure.
454 * @param u64Now Current virtual clock timestamp.
455 * @param pu64Delta Where to return the delta.
456 */
457DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
458{
459 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
460 *pu64Delta = s_u64OtherRet;
461 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
462}
463
464
465/**
466 * Worker for tmTimerPollInternal.
467 *
468 * @returns See tmTimerPollInternal.
469 * @param pVM Pointer to the shared VM structure.
470 * @param pVCpu Pointer to the shared VMCPU structure of the
471 * caller.
472 * @param pVCpuDst Pointer to the shared VMCPU structure of the
473 * dedicated timer EMT.
474 * @param u64Now Current virtual clock timestamp.
475 * @param pu64Delta Where to return the delta.
476 * @param pCounter The statistics counter to update.
477 */
478DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
479 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
480{
481 STAM_COUNTER_INC(pCounter);
482 if (pVCpuDst != pVCpu)
483 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
484 *pu64Delta = 0;
485 return 0;
486}
487
/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * Checks the TMCLOCK_VIRTUAL and TMCLOCK_VIRTUAL_SYNC queues for expired
 * timers, raising VMCPU_FF_TIMER on the dedicated timer EMT when a deadline
 * has passed. All state is read locklessly; inconsistent reads are retried.
 *
 * @returns The GIP timestamp of the next event.
 * 0 if the next event has already expired.
 *
 * @param pVM Pointer to the shared VM structure.
 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
 * @param pu64Delta Where to store the delta.
 *
 * @thread The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

 /*
 * Return straight away if the timer FF is already set ...
 */
 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

 /*
 * ... or if timers are being run.
 */
 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
 {
 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
 }

 /*
 * Check for TMCLOCK_VIRTUAL expiration.
 */
 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
 const int64_t i64Delta1 = u64Expire1 - u64Now;
 if (i64Delta1 <= 0)
 {
 /* Expired: make sure the FF is raised and the timer EMT notified. */
 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
 {
 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
 REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
 }
 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
 }

 /*
 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
 * This isn't quite as straightforward if in a catch-up, not only do
 * we have to adjust the 'now' but we have to adjust the delta as well.
 */

 /*
 * Optimistic lockless approach: read the offset and expire time, then
 * re-read everything to make sure nothing changed in between.
 */
 uint64_t u64VirtualSyncNow;
 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
 {
 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
 {
 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
 {
 /* Consistent snapshot: the sync clock is virtual time minus the offset. */
 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
 if (i64Delta2 > 0)
 {
 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

 if (pVCpu == pVCpuDst)
 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
 }

 /* Expired: raise the FF unless the queues are already being run. */
 if ( !pVM->tm.s.fRunningQueues
 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
 {
 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
 REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
 }

 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
 }
 }
 }
 else
 {
 /* The sync clock is stopped; treat it as a hit. */
 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
 LogFlow(("TMTimerPoll: stopped\n"));
 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
 }

 /*
 * Complicated lockless approach: loop until a consistent set of the
 * catch-up parameters, offset and expire time has been read (bounded
 * number of retries).
 */
 uint64_t off;
 uint32_t u32Pct = 0;
 bool fCatchUp;
 int cOuterTries = 42;
 for (;; cOuterTries--)
 {
 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
 if (fCatchUp)
 {
 /* No changes allowed, try get a consistent set of parameters. */
 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
 || cOuterTries <= 0)
 {
 /* Reduce the offset by the amount we have caught up since the last
 catch-up reference point, clamping at the given-up offset. */
 uint64_t u64Delta = u64Now - u64Prev;
 if (RT_LIKELY(!(u64Delta >> 32)))
 {
 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
 if (off > u64Sub + offGivenUp)
 off -= u64Sub;
 else /* we've completely caught up. */
 off = offGivenUp;
 }
 else
 /* More than 4 seconds since last time (or negative), ignore it. */
 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

 /* Check that we're still running and in catch up. */
 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
 break;
 }
 }
 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
 break; /* Got a consistent offset */

 /* Repeat the initial checks before iterating. */
 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
 {
 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
 }
 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
 {
 LogFlow(("TMTimerPoll: stopped\n"));
 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
 }
 if (cOuterTries <= 0)
 break; /* that's enough */
 }
 if (cOuterTries <= 0)
 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
 u64VirtualSyncNow = u64Now - off;

 /* Calc delta and see if we've got a virtual sync hit. */
 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
 if (i64Delta2 <= 0)
 {
 if ( !pVM->tm.s.fRunningQueues
 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
 {
 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
 REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
 }
 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
 }

 /*
 * Return the time left to the next event.
 */
 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
 if (pVCpu == pVCpuDst)
 {
 /* In catch-up mode the delta must be stretched back to real time. */
 if (fCatchUp)
 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
 }
 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}
702
703
704/**
705 * Set FF if we've passed the next virtual event.
706 *
707 * This function is called before FFs are checked in the inner execution EM loops.
708 *
709 * @returns true if timers are pending, false if not.
710 *
711 * @param pVM Pointer to the shared VM structure.
712 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
713 * @thread The emulation thread.
714 */
715VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
716{
717 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
718 uint64_t off = 0;
719 tmTimerPollInternal(pVM, pVCpu, &off);
720 return off == 0;
721}
722
723
724/**
725 * Set FF if we've passed the next virtual event.
726 *
727 * This function is called before FFs are checked in the inner execution EM loops.
728 *
729 * @param pVM Pointer to the shared VM structure.
730 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
731 * @thread The emulation thread.
732 */
733VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
734{
735 uint64_t off;
736 tmTimerPollInternal(pVM, pVCpu, &off);
737}
738
739
/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * Thin wrapper around tmTimerPollInternal.
 *
 * @returns The GIP timestamp of the next event.
 * 0 if the next event has already expired.
 * @param pVM Pointer to the shared VM structure.
 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
 * @param pu64Delta Where to store the delta.
 * @thread The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}
756
757#endif /* VBOX_HIGH_RES_TIMERS_HACK */
758
/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * Converts the current-context timer pointer via the MM hypervisor mapping.
 *
 * @returns HC R3 pointer.
 * @param pTimer Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}
769
770
/**
 * Gets the host context ring-0 pointer of the timer.
 *
 * Converts the current-context timer pointer via the MM hypervisor mapping.
 *
 * @returns HC R0 pointer.
 * @param pTimer Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}
781
782
/**
 * Gets the RC pointer of the timer.
 *
 * Converts the current-context timer pointer via the MM hypervisor mapping.
 *
 * @returns RC pointer.
 * @param pTimer Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}
793
794
/**
 * Links a timer into the active list of a timer queue.
 *
 * The list is kept sorted by ascending expiration time; the timer is inserted
 * before the first entry with a later expire time, or appended at the tail.
 * When it becomes the new head, the queue's cached u64Expire is updated too.
 *
 * The caller must have taken the TM semaphore before calling this function.
 *
 * @param pQueue The queue.
 * @param pTimer The timer.
 * @param u64Expire The timer expiration time.
 */
DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
 if (pCur)
 {
 /* Walk forward until we find an entry expiring later, or hit the tail. */
 for (;; pCur = TMTIMER_GET_NEXT(pCur))
 {
 if (pCur->u64Expire > u64Expire)
 {
 /* Insert before pCur. */
 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
 TMTIMER_SET_NEXT(pTimer, pCur);
 TMTIMER_SET_PREV(pTimer, pPrev);
 if (pPrev)
 TMTIMER_SET_NEXT(pPrev, pTimer);
 else
 {
 /* New head: also refresh the queue's next-expire cache. */
 TMTIMER_SET_HEAD(pQueue, pTimer);
 pQueue->u64Expire = u64Expire;
 }
 TMTIMER_SET_PREV(pCur, pTimer);
 return;
 }
 if (!pCur->offNext)
 {
 /* Reached the tail: append. */
 TMTIMER_SET_NEXT(pCur, pTimer);
 TMTIMER_SET_PREV(pTimer, pCur);
 return;
 }
 }
 }
 else
 {
 /* Empty queue: the timer becomes the head and defines the next expire. */
 TMTIMER_SET_HEAD(pQueue, pTimer);
 pQueue->u64Expire = u64Expire;
 }
}
840
841
/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * The caller must own the timer lock (see tmTimerTryLock) and must already
 * have moved the timer to TMTIMERSTATE_ACTIVE; this function releases the
 * lock before returning.
 *
 * @returns VBox status code.
 *
 * @param pVM The VM handle.
 * @param pTimer The timer handle.
 * @param u64Expire The new expire time.
 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
 Assert(!pTimer->offPrev);
 Assert(!pTimer->offNext);
 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

 /*
 * Calculate and set the expiration time.
 */
 pTimer->u64Expire = u64Expire;
 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

 /*
 * Link the timer into the active list.
 */
 TMCLOCK const enmClock = pTimer->enmClock;
 tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
 /* Release the lock taken by the caller. */
 tmTimerUnlock(pVM);
 return VINF_SUCCESS;
}
873
874
875
876
877
878/**
879 * Arm a timer with a (new) expire time.
880 *
881 * @returns VBox status.
882 * @param pTimer Timer handle as returned by one of the create functions.
883 * @param u64Expire New expire time.
884 */
885VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
886{
887 PVM pVM = pTimer->CTX_SUFF(pVM);
888 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
889 TMTIMER_ASSERT_CRITSECT(pTimer);
890
891#ifdef VBOX_WITH_STATISTICS
892 /* Gather optimization info. */
893 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
894 TMTIMERSTATE enmOrgState = pTimer->enmState;
895 switch (enmOrgState)
896 {
897 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
898 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
899 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
900 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
901 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
902 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
903 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
904 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
905 }
906#endif
907
908 /*
909 * The most common case is setting the timer again during the callback.
910 * The second most common case is starting a timer at some other time.
911 */
912#if 1
913 TMTIMERSTATE enmState1 = pTimer->enmState;
914 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
915 || ( enmState1 == TMTIMERSTATE_STOPPED
916 && pTimer->pCritSect))
917 {
918 /* Try take the TM lock and check the state again. */
919 if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
920 {
921 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
922 {
923 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
924 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
925 return VINF_SUCCESS;
926 }
927 tmTimerUnlock(pVM);
928 }
929 }
930#endif
931
932 /*
933 * Unoptimized code path.
934 */
935 int cRetries = 1000;
936 do
937 {
938 /*
939 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
940 */
941 TMTIMERSTATE enmState = pTimer->enmState;
942 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
943 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
944 switch (enmState)
945 {
946 case TMTIMERSTATE_EXPIRED_DELIVER:
947 case TMTIMERSTATE_STOPPED:
948 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
949 {
950 Assert(!pTimer->offPrev);
951 Assert(!pTimer->offNext);
952 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
953 || pVM->tm.s.fVirtualSyncTicking
954 || u64Expire >= pVM->tm.s.u64VirtualSync,
955 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
956 pTimer->u64Expire = u64Expire;
957 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
958 tmSchedule(pTimer);
959 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
960 return VINF_SUCCESS;
961 }
962 break;
963
964 case TMTIMERSTATE_PENDING_SCHEDULE:
965 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
966 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
967 {
968 pTimer->u64Expire = u64Expire;
969 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
970 tmSchedule(pTimer);
971 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
972 return VINF_SUCCESS;
973 }
974 break;
975
976
977 case TMTIMERSTATE_ACTIVE:
978 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
979 {
980 pTimer->u64Expire = u64Expire;
981 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
982 tmSchedule(pTimer);
983 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
984 return VINF_SUCCESS;
985 }
986 break;
987
988 case TMTIMERSTATE_PENDING_RESCHEDULE:
989 case TMTIMERSTATE_PENDING_STOP:
990 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
991 {
992 pTimer->u64Expire = u64Expire;
993 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
994 tmSchedule(pTimer);
995 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
996 return VINF_SUCCESS;
997 }
998 break;
999
1000
1001 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1002 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1003 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1004#ifdef IN_RING3
1005 if (!RTThreadYield())
1006 RTThreadSleep(1);
1007#else
1008/** @todo call host context and yield after a couple of iterations */
1009#endif
1010 break;
1011
1012 /*
1013 * Invalid states.
1014 */
1015 case TMTIMERSTATE_DESTROY:
1016 case TMTIMERSTATE_FREE:
1017 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1018 return VERR_TM_INVALID_STATE;
1019 default:
1020 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1021 return VERR_TM_UNKNOWN_STATE;
1022 }
1023 } while (cRetries-- > 0);
1024
1025 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1026 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
1027 return VERR_INTERNAL_ERROR;
1028}
1029
1030
1031/**
1032 * Return the current time for the specified clock, setting pu64Now if not NULL.
1033 *
1034 * @returns Current time.
1035 * @param pVM The VM handle.
1036 * @param enmClock The clock to query.
1037 * @param pu64Now Optional pointer where to store the return time
1038 */
1039DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1040{
1041 uint64_t u64Now;
1042 switch (enmClock)
1043 {
1044 case TMCLOCK_VIRTUAL_SYNC:
1045 u64Now = TMVirtualSyncGet(pVM);
1046 break;
1047 case TMCLOCK_VIRTUAL:
1048 u64Now = TMVirtualGet(pVM);
1049 break;
1050 case TMCLOCK_REAL:
1051 u64Now = TMRealGet(pVM);
1052 break;
1053 default:
1054 AssertFatalMsgFailed(("%d\n", enmClock));
1055 }
1056
1057 if (pu64Now)
1058 *pu64Now = u64Now;
1059 return u64Now;
1060}
1061
1062
/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 *
 * @remarks Caller owns the TM lock and has already switched the timer into
 *          the ACTIVE state; this helper computes the expire time, links the
 *          timer into the active queue and releases the TM lock.
 */
static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    /* The timer must not be linked anywhere yet (it came from STOPPED or
       EXPIRED_DELIVER, which are both unlinked states). */
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    tmTimerUnlock(pVM);                 /* Taken by the caller (TMTimerSetRelative). */
    return VINF_SUCCESS;
}
1097
1098
/**
 * Arm a timer with an expire time relative to the current time.
 *
 * Lock-free state machine: repeatedly tries to move the timer through a
 * SET_EXPIRE state into PENDING_SCHEDULE / PENDING_RESCHEDULE.  The TM lock
 * (and for virtual-sync timers the virtual sync lock) is only ever
 * try-acquired, never waited on.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.  Note that this is also written on the
 *                          failure paths below.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);
    PVM             pVM = pTimer->CTX_SUFF(pVM);
    int             rc;

#ifdef VBOX_WITH_STATISTICS
    /* Gather optimization info: count which state the timer started out in. */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues. As an additional restraint we require the
     * timer to have a critical section associated with to be 100% there aren't
     * concurrent operations on the timer. (This latter isn't necessary any
     * longer as this isn't supported for any timers, critsect or not.)
     *
     * Note! Lock ordering doesn't apply when we only try to
     *       get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        /* Fast path: STOPPED/EXPIRED_DELIVER -> ACTIVE directly, linking and
           unlocking handled by the optimized worker. */
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    bool fOwnVirtSyncLock;
    fOwnVirtSyncLock = !fOwnTMLock
                    && enmClock == TMCLOCK_VIRTUAL_SYNC
                    && RT_SUCCESS(tmVirtualSyncTryLock(pVM));
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *        Figure a safe way of activating this timer while the queue is
                     *        being run.
                     *        (99.9% sure that the assertion is caused by DevAPIC.cpp
                     *        re-starting the timer in response to an initial_count write.) */
                }
                /* fall thru */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            /* Transitional states: another thread is updating the timer; yield and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            /* Hard failure: still report the current time before bailing out. */
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_INTERNAL_ERROR;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
        {
            fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
            if (    !fOwnTMLock
                &&  enmClock == TMCLOCK_VIRTUAL_SYNC
                &&  !fOwnVirtSyncLock)
                fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM));
        }

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnVirtSyncLock)
        tmVirtualSyncUnlock(pVM);
    if (fOwnTMLock)
        tmTimerUnlock(pVM);

    /* Count the racy (completely lock-free) virtual-sync cases. */
    if (    !fOwnTMLock
        &&  !fOwnVirtSyncLock
        &&  enmClock == TMCLOCK_VIRTUAL_SYNC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}
1326
1327
1328/**
1329 * Arm a timer with a (new) expire time relative to current time.
1330 *
1331 * @returns VBox status.
1332 * @param pTimer Timer handle as returned by one of the create functions.
1333 * @param cMilliesToNext Number of millieseconds to the next tick.
1334 */
1335VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
1336{
1337 PVM pVM = pTimer->CTX_SUFF(pVM);
1338 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1339
1340 switch (pTimer->enmClock)
1341 {
1342 case TMCLOCK_VIRTUAL:
1343 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1344 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1345
1346 case TMCLOCK_VIRTUAL_SYNC:
1347 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1348 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1349
1350 case TMCLOCK_REAL:
1351 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1352 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
1353
1354 default:
1355 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1356 return VERR_INTERNAL_ERROR;
1357 }
1358}
1359
1360
1361/**
1362 * Arm a timer with a (new) expire time relative to current time.
1363 *
1364 * @returns VBox status.
1365 * @param pTimer Timer handle as returned by one of the create functions.
1366 * @param cMicrosToNext Number of microseconds to the next tick.
1367 */
1368VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
1369{
1370 PVM pVM = pTimer->CTX_SUFF(pVM);
1371 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1372
1373 switch (pTimer->enmClock)
1374 {
1375 case TMCLOCK_VIRTUAL:
1376 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1377 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1378
1379 case TMCLOCK_VIRTUAL_SYNC:
1380 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1381 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1382
1383 case TMCLOCK_REAL:
1384 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1385 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
1386
1387 default:
1388 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1389 return VERR_INTERNAL_ERROR;
1390 }
1391}
1392
1393
1394/**
1395 * Arm a timer with a (new) expire time relative to current time.
1396 *
1397 * @returns VBox status.
1398 * @param pTimer Timer handle as returned by one of the create functions.
1399 * @param cNanosToNext Number of nanoseconds to the next tick.
1400 */
1401VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
1402{
1403 PVM pVM = pTimer->CTX_SUFF(pVM);
1404 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1405
1406 switch (pTimer->enmClock)
1407 {
1408 case TMCLOCK_VIRTUAL:
1409 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1410 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1411
1412 case TMCLOCK_VIRTUAL_SYNC:
1413 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1414 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1415
1416 case TMCLOCK_REAL:
1417 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1418 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
1419
1420 default:
1421 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1422 return VERR_INTERNAL_ERROR;
1423 }
1424}
1425
1426
1427/**
1428 * Drops a hint about the frequency of the timer.
1429 *
1430 * This is used by TM and the VMM to calculate how often guest execution needs
1431 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1432 *
1433 * @returns VBox status code.
1434 * @param pTimer Timer handle as returned by one of the create
1435 * functions.
1436 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1437 *
1438 * @remarks We're using an integer hertz value here since anything above 1 HZ
1439 * is not going to be any trouble satisfying scheduling wise. The
1440 * range where it makes sense is >= 100 HZ.
1441 */
1442VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1443{
1444 TMTIMER_ASSERT_CRITSECT(pTimer);
1445 pTimer->uHzHint = uHzHint;
1446 PVM pVM = pTimer->CTX_SUFF(pVM);
1447 if (uHzHint >= pVM->tm.s.uMaxHzHint)
1448 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1449 return VINF_SUCCESS;
1450}
1451
1452
1453/**
1454 * Stop the timer.
1455 * Use TMR3TimerArm() to "un-stop" the timer.
1456 *
1457 * @returns VBox status.
1458 * @param pTimer Timer handle as returned by one of the create functions.
1459 */
1460VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1461{
1462 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1463 TMTIMER_ASSERT_CRITSECT(pTimer);
1464
1465 /* Reset the HZ hint. */
1466 if (pTimer->uHzHint)
1467 {
1468 PVM pVM = pTimer->CTX_SUFF(pVM);
1469 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1470 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1471 pTimer->uHzHint = 0;
1472 }
1473
1474 /** @todo see if this function needs optimizing. */
1475 int cRetries = 1000;
1476 do
1477 {
1478 /*
1479 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1480 */
1481 TMTIMERSTATE enmState = pTimer->enmState;
1482 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1483 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1484 switch (enmState)
1485 {
1486 case TMTIMERSTATE_EXPIRED_DELIVER:
1487 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1488 return VERR_INVALID_PARAMETER;
1489
1490 case TMTIMERSTATE_STOPPED:
1491 case TMTIMERSTATE_PENDING_STOP:
1492 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1493 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1494 return VINF_SUCCESS;
1495
1496 case TMTIMERSTATE_PENDING_SCHEDULE:
1497 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1498 {
1499 tmSchedule(pTimer);
1500 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1501 return VINF_SUCCESS;
1502 }
1503
1504 case TMTIMERSTATE_PENDING_RESCHEDULE:
1505 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1506 {
1507 tmSchedule(pTimer);
1508 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1509 return VINF_SUCCESS;
1510 }
1511 break;
1512
1513 case TMTIMERSTATE_ACTIVE:
1514 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1515 {
1516 tmSchedule(pTimer);
1517 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1518 return VINF_SUCCESS;
1519 }
1520 break;
1521
1522 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1523 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1524 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1525#ifdef IN_RING3
1526 if (!RTThreadYield())
1527 RTThreadSleep(1);
1528#else
1529/**@todo call host and yield cpu after a while. */
1530#endif
1531 break;
1532
1533 /*
1534 * Invalid states.
1535 */
1536 case TMTIMERSTATE_DESTROY:
1537 case TMTIMERSTATE_FREE:
1538 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1539 return VERR_TM_INVALID_STATE;
1540 default:
1541 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1542 return VERR_TM_UNKNOWN_STATE;
1543 }
1544 } while (cRetries-- > 0);
1545
1546 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1547 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1548 return VERR_INTERNAL_ERROR;
1549}
1550
1551
1552/**
1553 * Get the current clock time.
1554 * Handy for calculating the new expire time.
1555 *
1556 * @returns Current clock time.
1557 * @param pTimer Timer handle as returned by one of the create functions.
1558 */
1559VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1560{
1561 uint64_t u64;
1562 PVM pVM = pTimer->CTX_SUFF(pVM);
1563
1564 switch (pTimer->enmClock)
1565 {
1566 case TMCLOCK_VIRTUAL:
1567 u64 = TMVirtualGet(pVM);
1568 break;
1569 case TMCLOCK_VIRTUAL_SYNC:
1570 u64 = TMVirtualSyncGet(pVM);
1571 break;
1572 case TMCLOCK_REAL:
1573 u64 = TMRealGet(pVM);
1574 break;
1575 default:
1576 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1577 return ~(uint64_t)0;
1578 }
1579 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1580 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1581 return u64;
1582}
1583
1584
1585/**
1586 * Get the freqency of the timer clock.
1587 *
1588 * @returns Clock frequency (as Hz of course).
1589 * @param pTimer Timer handle as returned by one of the create functions.
1590 */
1591VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1592{
1593 switch (pTimer->enmClock)
1594 {
1595 case TMCLOCK_VIRTUAL:
1596 case TMCLOCK_VIRTUAL_SYNC:
1597 return TMCLOCK_FREQ_VIRTUAL;
1598
1599 case TMCLOCK_REAL:
1600 return TMCLOCK_FREQ_REAL;
1601
1602 default:
1603 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1604 return 0;
1605 }
1606}
1607
1608
1609/**
1610 * Get the current clock time as nanoseconds.
1611 *
1612 * @returns The timer clock as nanoseconds.
1613 * @param pTimer Timer handle as returned by one of the create functions.
1614 */
1615VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1616{
1617 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1618}
1619
1620
1621/**
1622 * Get the current clock time as microseconds.
1623 *
1624 * @returns The timer clock as microseconds.
1625 * @param pTimer Timer handle as returned by one of the create functions.
1626 */
1627VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1628{
1629 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1630}
1631
1632
1633/**
1634 * Get the current clock time as milliseconds.
1635 *
1636 * @returns The timer clock as milliseconds.
1637 * @param pTimer Timer handle as returned by one of the create functions.
1638 */
1639VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1640{
1641 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1642}
1643
1644
1645/**
1646 * Converts the specified timer clock time to nanoseconds.
1647 *
1648 * @returns nanoseconds.
1649 * @param pTimer Timer handle as returned by one of the create functions.
1650 * @param u64Ticks The clock ticks.
1651 * @remark There could be rounding errors here. We just do a simple integere divide
1652 * without any adjustments.
1653 */
1654VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1655{
1656 switch (pTimer->enmClock)
1657 {
1658 case TMCLOCK_VIRTUAL:
1659 case TMCLOCK_VIRTUAL_SYNC:
1660 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1661 return u64Ticks;
1662
1663 case TMCLOCK_REAL:
1664 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1665 return u64Ticks * 1000000;
1666
1667 default:
1668 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1669 return 0;
1670 }
1671}
1672
1673
1674/**
1675 * Converts the specified timer clock time to microseconds.
1676 *
1677 * @returns microseconds.
1678 * @param pTimer Timer handle as returned by one of the create functions.
1679 * @param u64Ticks The clock ticks.
1680 * @remark There could be rounding errors here. We just do a simple integere divide
1681 * without any adjustments.
1682 */
1683VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1684{
1685 switch (pTimer->enmClock)
1686 {
1687 case TMCLOCK_VIRTUAL:
1688 case TMCLOCK_VIRTUAL_SYNC:
1689 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1690 return u64Ticks / 1000;
1691
1692 case TMCLOCK_REAL:
1693 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1694 return u64Ticks * 1000;
1695
1696 default:
1697 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1698 return 0;
1699 }
1700}
1701
1702
1703/**
1704 * Converts the specified timer clock time to milliseconds.
1705 *
1706 * @returns milliseconds.
1707 * @param pTimer Timer handle as returned by one of the create functions.
1708 * @param u64Ticks The clock ticks.
1709 * @remark There could be rounding errors here. We just do a simple integere divide
1710 * without any adjustments.
1711 */
1712VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1713{
1714 switch (pTimer->enmClock)
1715 {
1716 case TMCLOCK_VIRTUAL:
1717 case TMCLOCK_VIRTUAL_SYNC:
1718 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1719 return u64Ticks / 1000000;
1720
1721 case TMCLOCK_REAL:
1722 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1723 return u64Ticks;
1724
1725 default:
1726 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1727 return 0;
1728 }
1729}
1730
1731
1732/**
1733 * Converts the specified nanosecond timestamp to timer clock ticks.
1734 *
1735 * @returns timer clock ticks.
1736 * @param pTimer Timer handle as returned by one of the create functions.
1737 * @param u64NanoTS The nanosecond value ticks to convert.
1738 * @remark There could be rounding and overflow errors here.
1739 */
1740VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1741{
1742 switch (pTimer->enmClock)
1743 {
1744 case TMCLOCK_VIRTUAL:
1745 case TMCLOCK_VIRTUAL_SYNC:
1746 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1747 return u64NanoTS;
1748
1749 case TMCLOCK_REAL:
1750 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1751 return u64NanoTS / 1000000;
1752
1753 default:
1754 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1755 return 0;
1756 }
1757}
1758
1759
1760/**
1761 * Converts the specified microsecond timestamp to timer clock ticks.
1762 *
1763 * @returns timer clock ticks.
1764 * @param pTimer Timer handle as returned by one of the create functions.
1765 * @param u64MicroTS The microsecond value ticks to convert.
1766 * @remark There could be rounding and overflow errors here.
1767 */
1768VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1769{
1770 switch (pTimer->enmClock)
1771 {
1772 case TMCLOCK_VIRTUAL:
1773 case TMCLOCK_VIRTUAL_SYNC:
1774 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1775 return u64MicroTS * 1000;
1776
1777 case TMCLOCK_REAL:
1778 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1779 return u64MicroTS / 1000;
1780
1781 default:
1782 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1783 return 0;
1784 }
1785}
1786
1787
1788/**
1789 * Converts the specified millisecond timestamp to timer clock ticks.
1790 *
1791 * @returns timer clock ticks.
1792 * @param pTimer Timer handle as returned by one of the create functions.
1793 * @param u64MilliTS The millisecond value ticks to convert.
1794 * @remark There could be rounding and overflow errors here.
1795 */
1796VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1797{
1798 switch (pTimer->enmClock)
1799 {
1800 case TMCLOCK_VIRTUAL:
1801 case TMCLOCK_VIRTUAL_SYNC:
1802 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1803 return u64MilliTS * 1000000;
1804
1805 case TMCLOCK_REAL:
1806 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1807 return u64MilliTS;
1808
1809 default:
1810 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1811 return 0;
1812 }
1813}
1814
1815
/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer; ~0 (UINT64_MAX) when the timer isn't
 *          active, is in an invalid state, or no stable state was observed
 *          within the retry limit.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);
    int cRetries = 1000;    /* Bounded wait while another thread holds the timer in a SET_EXPIRE state. */
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Stopped / expired states: there is no valid expire time. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            /* Stable armed states: u64Expire is valid. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            /* Transitional states: u64Expire is being updated (see
               TMTimerSet/TMTimerSetRelative); yield/sleep and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}
1876
1877
1878/**
1879 * Checks if a timer is active or not.
1880 *
1881 * @returns True if active.
1882 * @returns False if not active.
1883 * @param pTimer Timer handle as returned by one of the create functions.
1884 */
1885VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1886{
1887 TMTIMERSTATE enmState = pTimer->enmState;
1888 switch (enmState)
1889 {
1890 case TMTIMERSTATE_STOPPED:
1891 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1892 case TMTIMERSTATE_EXPIRED_DELIVER:
1893 case TMTIMERSTATE_PENDING_STOP:
1894 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1895 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1896 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1897 return false;
1898
1899 case TMTIMERSTATE_ACTIVE:
1900 case TMTIMERSTATE_PENDING_RESCHEDULE:
1901 case TMTIMERSTATE_PENDING_SCHEDULE:
1902 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1903 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1904 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1905 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1906 return true;
1907
1908 /*
1909 * Invalid states.
1910 */
1911 case TMTIMERSTATE_DESTROY:
1912 case TMTIMERSTATE_FREE:
1913 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1914 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1915 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1916 return false;
1917 default:
1918 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1919 return false;
1920 }
1921}
1922
1923
/**
 * Convert state to string.
 *
 * @returns Readonly status name.
 * @param   enmState    State.
 */
const char *tmTimerState(TMTIMERSTATE enmState)
{
    switch (enmState)
    {
        /* The CASE macro returns the "<num>-<name>" string and also
           compile-time asserts that the enum value equals <num>, keeping the
           strings in sync with the TMTIMERSTATE definition. */
#define CASE(num, state) \
            case TMTIMERSTATE_##state: \
                AssertCompile(TMTIMERSTATE_##state == (num)); \
                return #num "-" #state
        CASE( 1,STOPPED);
        CASE( 2,ACTIVE);
        CASE( 3,EXPIRED_GET_UNLINK);
        CASE( 4,EXPIRED_DELIVER);
        CASE( 5,PENDING_STOP);
        CASE( 6,PENDING_STOP_SCHEDULE);
        CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
        CASE( 8,PENDING_SCHEDULE);
        CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
        CASE(10,PENDING_RESCHEDULE);
        CASE(11,DESTROY);
        CASE(12,FREE);
        default:
            AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
            return "Invalid state!";
#undef CASE
    }
}
1956
1957
/**
 * Schedules the given timer on the given queue.
 *
 * Acts on the timer's pending state: unlinks it from and/or inserts it into
 * the expire-sorted active list of the queue as required.  All state
 * transitions go through tmTimerTry (atomic compare-and-swap) because other
 * threads may change the timer state concurrently; a failed CAS falls out of
 * the switch and re-reads the state, retrying a bounded number of times.
 *
 * @param   pQueue      The timer queue.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    /*
     * Processing.
     */
    unsigned cRetries = 2; /* bounded retries; a concurrent state change costs one retry */
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            {
                /* Claim the timer by moving it to PENDING_SCHEDULE; if someone
                   else changed the state first, retry with the fresh state. */
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */

                /* Unlink from the doubly linked active list; when the timer was
                   the head, also refresh the queue's cached earliest expire. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            {
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */

                /* Insert into the active list, which is kept sorted by
                   ascending expire time; the head defines pQueue->u64Expire. */
                PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
                if (pCur)
                {
                    const uint64_t u64Expire = pTimer->u64Expire;
                    for (;; pCur = TMTIMER_GET_NEXT(pCur))
                    {
                        if (pCur->u64Expire > u64Expire)
                        {
                            /* Found the first later-expiring node: insert before it. */
                            const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                            TMTIMER_SET_NEXT(pTimer, pCur);
                            TMTIMER_SET_PREV(pTimer, pPrev);
                            if (pPrev)
                                TMTIMER_SET_NEXT(pPrev, pTimer);
                            else
                            {
                                TMTIMER_SET_HEAD(pQueue, pTimer);
                                pQueue->u64Expire = u64Expire;
                            }
                            TMTIMER_SET_PREV(pCur, pTimer);
                            return;
                        }
                        if (!pCur->offNext)
                        {
                            /* Reached the tail: append. */
                            TMTIMER_SET_NEXT(pCur, pTimer);
                            TMTIMER_SET_PREV(pTimer, pCur);
                            return;
                        }
                    }
                }
                else
                {
                    /* Empty list: the timer becomes head and sets the queue expire. */
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    pQueue->u64Expire = pTimer->u64Expire;
                }
                return;
            }

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */

                /* Unlink from the active list (same procedure as the
                   reschedule case above). */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                /* The expire time is still being set; relink the timer onto the
                   scheduling list so it is picked up in a later pass. */
                tmTimerLink(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
2111
2112
2113/**
2114 * Schedules the specified timer queue.
2115 *
2116 * @param pVM The VM to run the timers for.
2117 * @param pQueue The queue to schedule.
2118 *
2119 * @remarks Called while owning the lock.
2120 */
2121void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
2122{
2123 TM_ASSERT_LOCK(pVM);
2124
2125 /*
2126 * Dequeue the scheduling list and iterate it.
2127 */
2128 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
2129 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
2130 if (!offNext)
2131 return;
2132 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
2133 while (pNext)
2134 {
2135 /*
2136 * Unlink the head timer and find the next one.
2137 */
2138 PTMTIMER pTimer = pNext;
2139 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
2140 pTimer->offScheduleNext = 0;
2141
2142 /*
2143 * Do the scheduling.
2144 */
2145 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
2146 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
2147 tmTimerQueueScheduleOne(pQueue, pTimer);
2148 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
2149 } /* foreach timer in current schedule batch. */
2150 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
2151}
2152
2153
2154#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * Walks the active list of every clock verifying the doubly linked list
 * invariants and the per-timer states; in ring-3 it additionally walks the
 * big list of created timers, checking that each timer is linked into (or
 * absent from) its clock's active list as its state demands.
 *
 * @param   pVM         VM handle.
 * @param   pszWhere    Caller supplied string identifying the call site; used
 *                      in the assertion messages.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Check the linking of the active lists.
     */
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i); /* the queue array is indexed by clock */
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            /* Each node must belong to this clock, and its prev link must
               point back at the node we just came from. */
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* An ACTIVE timer must not be on the scheduling list.  The
                       state is deliberately re-read: it may legitimately have
                       changed concurrently since the snapshot above. */
                    AssertMsg( !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    /* These may also legitimately sit on the active list. */
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            /* States whose timers must be linked into their clock's active list. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            {
                PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                Assert(pCur->offPrev || pCur == pCurAct);
                while (pCurAct && pCurAct != pCur)
                    pCurAct = TMTIMER_GET_NEXT(pCurAct);
                Assert(pCurAct == pCur); /* must be found in the active list */
                break;
            }

            /* States whose timers must NOT appear anywhere in the active list. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            {
                Assert(!pCur->offNext);
                Assert(!pCur->offPrev);
                for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                     pCurAct;
                     pCurAct = TMTIMER_GET_NEXT(pCurAct))
                {
                    Assert(pCurAct != pCur);
                    Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                    Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                }
                break;
            }

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */
}
#endif /* VBOX_STRICT */
2257
2258
2259/**
2260 * Gets the current warp drive percent.
2261 *
2262 * @returns The warp drive percent.
2263 * @param pVM The VM handle.
2264 */
2265VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
2266{
2267 return pVM->tm.s.u32VirtualWarpDrivePercentage;
2268}
2269
2270
/**
 * Gets the highest frequency hint for all the important timers.
 *
 * Returns a cached value and only recalculates it when the dirty flag
 * (fHzHintNeedsUpdating) is set AND the timer lock can be taken without
 * blocking; otherwise the possibly stale cached value is good enough.
 *
 * @returns The highest frequency.  0 if no timers care.
 * @param   pVM         The VM handle.
 */
static uint32_t tmGetFrequencyHint(PVM pVM)
{
    /*
     * Query the value, recalculate it if necessary.
     *
     * The "right" highest frequency value isn't so important that we'll block
     * waiting on the timer semaphore.
     */
    uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
    if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
    {
        if (RT_SUCCESS(tmTimerTryLock(pVM)))
        {
            /* Clear the flag before scanning so a concurrent hint change will
               re-dirty it and trigger another recalculation later. */
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);

            /*
             * Loop over the timers associated with each clock.
             */
            uMaxHzHint = 0;
            for (int i = 0; i < TMCLOCK_MAX; i++)
            {
                PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
                for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
                {
                    uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
                    if (uHzHint > uMaxHzHint)
                    {
                        /* Only count timers that are active or on their way
                           to/from being active. */
                        switch (pCur->enmState)
                        {
                            case TMTIMERSTATE_ACTIVE:
                            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
                            case TMTIMERSTATE_EXPIRED_DELIVER:
                            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_SCHEDULE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                            case TMTIMERSTATE_PENDING_RESCHEDULE:
                                uMaxHzHint = uHzHint;
                                break;

                            case TMTIMERSTATE_STOPPED:
                            case TMTIMERSTATE_PENDING_STOP:
                            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                            case TMTIMERSTATE_DESTROY:
                            case TMTIMERSTATE_FREE:
                                break;
                            /* no default, want gcc warnings when adding more states. */
                        }
                    }
                }
            }
            /* Publish the new value before dropping the lock. */
            ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
            Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
            tmTimerUnlock(pVM);
        }
    }
    return uMaxHzHint;
}
2334
2335
2336/**
2337 * Calculates a host timer frequency that would be suitable for the current
2338 * timer load.
2339 *
2340 * This will take the highest timer frequency, adjust for catch-up and warp
2341 * driver, and finally add a little fudge factor. The caller (VMM) will use
2342 * the result to adjust the per-cpu preemption timer.
2343 *
2344 * @returns The highest frequency. 0 if no important timers around.
2345 * @param pVM The VM handle.
2346 * @param pVCpu The current CPU.
2347 */
2348VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
2349{
2350 uint32_t uHz = tmGetFrequencyHint(pVM);
2351
2352 /* Catch up. */
2353 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2354 {
2355 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2356 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2357 {
2358 uHz *= u32Pct + 100;
2359 uHz /= 100;
2360 }
2361 }
2362
2363 /* Warp drive */
2364 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2365 {
2366 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2367 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2368 {
2369 uHz *= u32Pct;
2370 uHz /= 100;
2371 }
2372 }
2373
2374 /* Fudge factor. */
2375 /** @todo make this configurable. */
2376#if 0 /* what's wrong with this expression? I end up with uHz = 0 after this multiplication... */
2377 uHz *= 110 + pVCpu->idCpu == pVM->tm.s.idTimerCpu;
2378#else
2379 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2380 uHz *= 111;
2381 else
2382 uHz *= 110;
2383#endif
2384 uHz /= 100;
2385
2386 //LogAlways(("TMCalcHostTimerFrequency->%u\n", uHz));
2387 return uHz;
2388}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette