VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@37503

Last change on this file since 37503 was 37452, checked in by vboxsync, 14 years ago

IOM,PDMCritSect: Extended PDMCritSectEnter to handle rcBusy=VINF_SUCCESS as a request to call ring-3 to acquire a busy lock. Implemented device level locking in the MMIO code.

/* $Id: TMAll.cpp 37452 2011-06-14 18:13:48Z vboxsync $ */
/** @file
 * TM - Timeout Manager, all contexts.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#ifdef IN_RING3
# include <VBox/vmm/rem.h>
#endif
#include "TMInternal.h"
#include <VBox/vmm/vm.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/sup.h>
#include <iprt/time.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/asm-math.h>
#ifdef IN_RING3
# include <iprt/thread.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif


#ifndef tmTimerLock

/**
 * Try to take the timer lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
 *
 * @retval  VINF_SUCCESS on success (always in ring-3).
 * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
 *
 * @param   pVM         The VM handle.
 *
 * @thread  EMTs for the time being.
 */
int tmTimerLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
    return rc;
}


/**
 * Try to take the timer lock, no waiting.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if busy.
 *
 * @param   pVM         The VM handle.
 */
int tmTimerTryLock(PVM pVM)
{
    int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
    return rc;
}


/**
 * Release the EMT/TM lock.
 *
 * @param   pVM         The VM handle.
 */
void tmTimerUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
}


/**
 * Try to take the VirtualSync lock, waiting in ring-3 and returning VERR_SEM_BUSY in R0/RC.
 *
 * @retval  VINF_SUCCESS on success (always in ring-3).
 * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
 *
 * @param   pVM         The VM handle.
 */
int tmVirtualSyncLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
    return rc;
}


/**
 * Try to take the VirtualSync lock, no waiting.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if busy.
 *
 * @param   pVM         The VM handle.
 */
int tmVirtualSyncTryLock(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
    return rc;
}


/**
 * Release the VirtualSync lock.
 *
 * @param   pVM         The VM handle.
 */
void tmVirtualSyncUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}

#endif /* ! macros */

/**
 * Notification that execution is about to start.
 *
 * This call must always be paired with a TMNotifyEndOfExecution call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
#endif
    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVM, pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs           = RTTimeNanoTS();
    uint64_t const cNsTotalNew       = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
    uint64_t const cNsExecutingNew   = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint64_t const cNsOtherNew       = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
# endif

    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cNsTotal     = cNsTotalNew;
    pVCpu->tm.s.cNsOther     = cNsOtherNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}

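/*
 * The uTimesGen bumps above form a seqlock-style protocol: the generation is
 * odd while the accounting fields are being updated and even once they are
 * consistent again.  A minimal lockless reader sketch (hypothetical, not an
 * existing TM API; it only assumes the fields written above):
 *
 *     uint32_t uGen;
 *     uint64_t cNsExecuting, cNsTotal;
 *     do
 *     {
 *         uGen         = ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen);
 *         cNsExecuting = pVCpu->tm.s.cNsExecuting;
 *         cNsTotal     = pVCpu->tm.s.cNsTotal;
 *     } while (   (uGen & 1)
 *              || uGen != ASMAtomicReadU32(&pVCpu->tm.s.uTimesGen));
 *
 * Retrying while the generation is odd or has changed guarantees a consistent
 * snapshot without taking any lock.
 */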

/**
 * Notification that the CPU is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}


/**
 * Notification that the CPU is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only tick when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVM, pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
# endif

    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsHalted = cNsHaltedNew;
    pVCpu->tm.s.cNsTotal  = cNsTotalNew;
    pVCpu->tm.s.cNsOther  = cNsOtherNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}


/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * @param   pVM         The VM handle.
 */
DECLINLINE(void) tmScheduleNotify(PVM pVM)
{
    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        REMR3NotifyTimerPending(pVM, pVCpuDst);
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}


/**
 * Schedule the queue which was changed.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    if (    VM_IS_EMT(pVM)
        &&  RT_SUCCESS(tmTimerTryLock(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        tmTimerUnlock(pVM);
    }
    else
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}


/**
 * Try to change the timer state to enmStateNew from enmStateOld.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}


/**
 * Links the timer onto the scheduling queue.
 *
 * @param   pQueue      The timer queue the timer belongs to.
 * @param   pTimer      The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t       offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}

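/*
 * tmTimerLink is a lockless LIFO push expressed with self-relative offsets so
 * the same list works in ring-3, ring-0 and raw-mode where the mappings
 * differ.  With plain pointers the equivalent logic (a sketch only; the
 * pScheduleHead/pScheduleNext members are hypothetical, TM itself must use
 * offsets) would be the classic CAS push:
 *
 *     PTMTIMER pHead;
 *     do
 *     {
 *         pHead = pQueue->pScheduleHead;
 *         pTimer->pScheduleNext = pHead;
 *     } while (!ASMAtomicCmpXchgPtr(&pQueue->pScheduleHead, pTimer, pHead));
 *
 * The offset form stores the distance from the timer to the old head instead
 * of the old head pointer itself.
 */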

/**
 * Try to change the state to enmStateNew from enmStateOld
 * and link the timer onto the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}


#ifdef VBOX_HIGH_RES_TIMERS_HACK

/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct   = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta   = ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        u64GipTime = ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}

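/*
 * Worked example for the warp-drive adjustment above (numbers are
 * illustrative): with u32Pct = 200 the guest clock runs at twice real time,
 * so a virtual-clock delta of 10 ms to the next timer corresponds to only
 * 10ms * 100 / 200 = 5 ms of GIP (host) time, and the poll must report the
 * shorter host wait:
 *
 *     uint64_t u64Delta = UINT64_C(10000000);
 *     u64Delta = ASMMultU64ByU32DivByU32(u64Delta, 100, 200);
 *     Assert(u64Delta == UINT64_C(5000000));
 */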

/**
 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
 * than the one dedicated to timer work.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    *pu64Delta = s_u64OtherRet;
    return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
}


/**
 * Worker for tmTimerPollInternal.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the
 *                      caller.
 * @param   pVCpuDst    Pointer to the shared VMCPU structure of the
 *                      dedicated timer EMT.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   pu64Delta   Where to return the delta.
 * @param   pCounter    The statistics counter to update.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
                                                 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
{
    STAM_COUNTER_INC(pCounter);
    if (pVCpuDst != pVCpu)
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    *pu64Delta = 0;
    return 0;
}

/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    PVMCPU         pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    const uint64_t u64Now   = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t  i64Delta1  = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if we're in a catch-up: not only do
     * we have to adjust the 'now', but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (    !pVM->tm.s.fRunningQueues
                    &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t    off;
    uint32_t    u32Pct = 0;
    bool        fCatchUp;
    int         cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns true if timers are pending, false if not.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    uint64_t off = 0;
    tmTimerPollInternal(pVM, pVCpu, &off);
    return off == 0;
}

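/*
 * Typical use from an emulation loop (illustrative sketch; the real EM code
 * combines this with many other force-flag checks):
 *
 *     if (TMTimerPollBool(pVM, pVCpu))
 *     {
 *         // VMCPU_FF_TIMER is now set; leave the inner loop and run the
 *         // pending timer queues (TMR3TimerQueuesDo in ring-3).
 *     }
 */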

/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
{
    uint64_t off;
    tmTimerPollInternal(pVM, pVCpu, &off);
}


/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 * @thread  The emulation thread.
 */
VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
}

#endif /* VBOX_HIGH_RES_TIMERS_HACK */

/**
 * Gets the host context ring-3 pointer of the timer.
 *
 * @returns HC R3 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the host context ring-0 pointer of the timer.
 *
 * @returns HC R0 pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
{
    return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Gets the RC pointer of the timer.
 *
 * @returns RC pointer.
 * @param   pTimer      Timer handle as returned by one of the create functions.
 */
VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
{
    return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
}


/**
 * Locks the timer clock.
 *
 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
 *          if the clock does not have a lock.
 * @param   pTimer      The timer which clock lock we wish to take.
 * @param   rcBusy      What to return in ring-0 and raw-mode context if the
 *                      lock is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section thru a ring-3 call if necessary.
 *
 * @remarks Currently only supported on timers using the virtual sync clock.
 */
VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
    return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
}


/**
 * Unlocks a timer clock locked by TMTimerLock.
 *
 * @param   pTimer      The timer which clock to unlock.
 */
VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
    PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}

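/*
 * Usage sketch for the clock lock (hypothetical device code; assumes pTimer
 * uses the virtual sync clock, the only clock with a lock here):
 *
 *     int rc = TMTimerLock(pTimer, VERR_SEM_BUSY);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         uint64_t u64Now = TMTimerGet(pTimer);
 *         TMTimerSet(pTimer, u64Now + TMTimerFromMilli(pTimer, 10));
 *         TMTimerUnlock(pTimer);
 *     }
 *
 * In ring-0 and raw-mode a busy lock yields rcBusy instead, so callers must
 * be prepared to retry or defer the work to ring-3.
 */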

/**
 * Checks if the current thread owns the timer clock lock.
 *
 * @returns @c true if it's the owner, @c false if not.
 * @param   pTimer      The timer handle.
 */
VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
{
    AssertPtr(pTimer);
    AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
    return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
}


/**
 * Links a timer into the active list of a timer queue.
 *
 * The caller must have taken the TM semaphore before calling this function.
 *
 * @param   pQueue          The queue.
 * @param   pTimer          The timer.
 * @param   u64Expire       The timer expiration time.
 */
DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    pQueue->u64Expire = u64Expire;
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                return;
            }
        }
    }
    else
    {
        TMTIMER_SET_HEAD(pQueue, pTimer);
        pQueue->u64Expire = u64Expire;
    }
}

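/*
 * Illustration of the invariant tmTimerActiveLink maintains (hypothetical
 * expire times): inserting a timer with u64Expire = 150 into an active list
 * holding 100 -> 200 -> 300 walks past 100, stops at 200 and leaves
 *
 *     head(100) <-> 150 <-> 200 <-> 300,   pQueue->u64Expire == 100
 *
 * i.e. the list stays sorted by expiry and pQueue->u64Expire always mirrors
 * the head element, so the poll code can check a single word per queue.
 */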

/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pTimer          The timer handle.
 * @param   u64Expire       The new expire time.
 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

    /*
     * Link the timer into the active list.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    tmTimerUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Arm a timer with a (new) expire time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Expire       New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

#ifdef VBOX_WITH_STATISTICS
    /* Gather optimization info. */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        ||  (   enmState1 == TMTIMERSTATE_STOPPED
             && pTimer->pCritSect))
    {
        /* Try to take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            tmTimerUnlock(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    AssertMsg(   pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
                              || pVM->tm.s.fVirtualSyncTicking
                              || u64Expire >= pVM->tm.s.u64VirtualSync,
                              ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_INTERNAL_ERROR;
}

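/*
 * Typical device callback pattern (hypothetical periodic timer; assumes a
 * timer created via one of the TMR3TimerCreate* functions):
 *
 *     static DECLCALLBACK(void) myDevTimerCallback(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
 *     {
 *         // ... deliver the interrupt / do the periodic work ...
 *         // Re-arm one tick out; "setting the timer again during the
 *         // callback" is the optimized path TMTimerSet mentions above.
 *         TMTimerSet(pTimer, TMTimerGet(pTimer) + TMTimerFromMilli(pTimer, 1));
 *     }
 */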

/**
 * Return the current time for the specified clock, setting pu64Now if not NULL.
 *
 * @returns Current time.
 * @param   pVM             The VM handle.
 * @param   enmClock        The clock to query.
 * @param   pu64Now         Optional pointer where to store the return time.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
{
    uint64_t u64Now;
    switch (enmClock)
    {
        case TMCLOCK_VIRTUAL_SYNC:
            u64Now = TMVirtualSyncGet(pVM);
            break;
        case TMCLOCK_VIRTUAL:
            u64Now = TMVirtualGet(pVM);
            break;
        case TMCLOCK_REAL:
            u64Now = TMRealGet(pVM);
            break;
        default:
            AssertFatalMsgFailed(("%d\n", enmClock));
    }

    if (pu64Now)
        *pu64Now = u64Now;
    return u64Now;
}


/**
 * Optimized TMTimerSetRelative code path.
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pTimer          The timer handle.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    TMCLOCK const  enmClock  = pTimer->enmClock;
    uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
    pTimer->u64Expire        = u64Expire;
    Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));

    /*
     * Link the timer into the active list.
     */
    tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
    tmTimerUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Arm a timer with an expire time relative to the current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cTicksToNext    Clock ticks until the next time expiration.
 * @param   pu64Now         Where to return the current time stamp used.
 *                          Optional.
 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);
    PVM pVM = pTimer->CTX_SUFF(pVM);
    int rc;

#ifdef VBOX_WITH_STATISTICS
    /* Gather optimization info. */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues.  As an additional restraint we require the
     * timer to have a critical section associated with it to be 100% sure
     * there aren't concurrent operations on the timer.  (This latter isn't
     * necessary any longer as this isn't supported for any timers, critsect
     * or not.)
     *
     * Note! Lock ordering doesn't apply when we only try to
     *       get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    bool          fOwnVirtSyncLock;
    fOwnVirtSyncLock = !fOwnTMLock
                    && enmClock == TMCLOCK_VIRTUAL_SYNC
                    && RT_SUCCESS(tmVirtualSyncTryLock(pVM));
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *        Figure a safe way of activating this timer while the queue is
                     *        being run.
                     *        (99.9% sure that the assertion is caused by DevAPIC.cpp
                     *        re-starting the timer in response to an initial_count write.) */
                }
                /* fall thru */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_INTERNAL_ERROR;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
        {
            fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
            if (    !fOwnTMLock
                &&  enmClock == TMCLOCK_VIRTUAL_SYNC
                &&  !fOwnVirtSyncLock)
                fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM));
        }

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnVirtSyncLock)
        tmVirtualSyncUnlock(pVM);
    if (fOwnTMLock)
        tmTimerUnlock(pVM);

    if (    !fOwnTMLock
        &&  !fOwnVirtSyncLock
        &&  enmClock == TMCLOCK_VIRTUAL_SYNC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}

/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMilliesToNext  Number of milliseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
{
    PVM    pVM   = pTimer->CTX_SUFF(pVM);
    PVMCPU pVCpu = &pVM->aCpus[0];  /* just take the first VCPU */

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_INTERNAL_ERROR;
    }
}

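/*
 * The TMTimerSetMillies/Micro/Nano wrappers only differ in how they scale the
 * count into clock ticks.  For example (hypothetical values), on a virtual
 * clock timer
 *
 *     TMTimerSetMillies(pTimer, 10);
 *     TMTimerSetMicro(pTimer, UINT64_C(10000));
 *     TMTimerSetNano(pTimer, UINT64_C(10000000));
 *
 * all arm the same 10 ms deadline, since TMCLOCK_FREQ_VIRTUAL is 1 GHz (one
 * tick per nanosecond); on a TMCLOCK_REAL timer a tick is 1 ms instead.
 */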

/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cMicrosToNext   Number of microseconds to the next tick.
 */
VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
{
    PVM    pVM   = pTimer->CTX_SUFF(pVM);
    PVMCPU pVCpu = &pVM->aCpus[0];  /* just take the first VCPU */

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_INTERNAL_ERROR;
    }
}


/**
 * Arm a timer with a (new) expire time relative to current time.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   cNanosToNext    Number of nanoseconds to the next tick.
 */
VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
{
    PVM    pVM   = pTimer->CTX_SUFF(pVM);
    PVMCPU pVCpu = &pVM->aCpus[0];  /* just take the first VCPU */

    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_VIRTUAL_SYNC:
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return TMTimerSetRelative(pTimer, cNanosToNext, NULL);

        case TMCLOCK_REAL:
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return VERR_INTERNAL_ERROR;
    }
}


/**
 * Drops a hint about the frequency of the timer.
 *
 * This is used by TM and the VMM to calculate how often guest execution needs
 * to be interrupted.  The hint is automatically cleared by TMTimerStop.
 *
 * @returns VBox status code.
 * @param   pTimer          Timer handle as returned by one of the create
 *                          functions.
 * @param   uHzHint         The frequency hint.  Pass 0 to clear the hint.
 *
 * @remarks We're using an integer hertz value here since anything above 1 Hz
 *          is not going to be any trouble to satisfy scheduling wise.  The
 *          range where it makes sense is >= 100 Hz.
 */
VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
{
    TMTIMER_ASSERT_CRITSECT(pTimer);

    uint32_t const uHzOldHint = pTimer->uHzHint;
    pTimer->uHzHint = uHzHint;

    PVM            pVM        = pTimer->CTX_SUFF(pVM);
    uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
    if (    uHzHint > uMaxHzHint
        ||  uHzOldHint >= uMaxHzHint)
        ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);

    return VINF_SUCCESS;
}

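/*
 * Usage sketch (hypothetical device arming a 1 kHz periodic timer; the hint
 * lets TM wake the guest often enough to deliver the ticks on time):
 *
 *     TMTimerSetFrequencyHint(pTimer, 1000);
 *     TMTimerSetMillies(pTimer, 1);
 *
 * TMTimerStop clears the hint again, so a device only needs to refresh it
 * when the programmed rate changes.
 */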

/**
 * Stop the timer.
 * Use TMTimerSet() (or one of the other arm functions) to "un-stop" the timer.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    /* Reset the HZ hint. */
    if (pTimer->uHzHint)
    {
        PVM pVM = pTimer->CTX_SUFF(pVM);
        if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
            ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
        pTimer->uHzHint = 0;
    }

    /** @todo see if this function needs optimizing. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED_DELIVER:
                //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
                return VERR_INVALID_PARAMETER;

            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                return VINF_SUCCESS;

            case TMTIMERSTATE_PENDING_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
                {
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host and yield cpu after a while. */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
    return VERR_INTERNAL_ERROR;
}

1617
1618/**
1619 * Get the current clock time.
1620 * Handy for calculating the new expire time.
1621 *
1622 * @returns Current clock time.
1623 * @param pTimer Timer handle as returned by one of the create functions.
1624 */
1625VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1626{
1627 uint64_t u64;
1628 PVM pVM = pTimer->CTX_SUFF(pVM);
1629
1630 switch (pTimer->enmClock)
1631 {
1632 case TMCLOCK_VIRTUAL:
1633 u64 = TMVirtualGet(pVM);
1634 break;
1635 case TMCLOCK_VIRTUAL_SYNC:
1636 u64 = TMVirtualSyncGet(pVM);
1637 break;
1638 case TMCLOCK_REAL:
1639 u64 = TMRealGet(pVM);
1640 break;
1641 default:
1642 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1643 return ~(uint64_t)0;
1644 }
1645 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1646 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1647 return u64;
1648}
1649
1650
1651/**
1652 * Get the frequency of the timer clock.
1653 *
1654 * @returns Clock frequency (as Hz of course).
1655 * @param pTimer Timer handle as returned by one of the create functions.
1656 */
1657VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1658{
1659 switch (pTimer->enmClock)
1660 {
1661 case TMCLOCK_VIRTUAL:
1662 case TMCLOCK_VIRTUAL_SYNC:
1663 return TMCLOCK_FREQ_VIRTUAL;
1664
1665 case TMCLOCK_REAL:
1666 return TMCLOCK_FREQ_REAL;
1667
1668 default:
1669 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1670 return 0;
1671 }
1672}
1673
1674
1675/**
1676 * Get the current clock time as nanoseconds.
1677 *
1678 * @returns The timer clock as nanoseconds.
1679 * @param pTimer Timer handle as returned by one of the create functions.
1680 */
1681VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1682{
1683 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1684}
1685
1686
1687/**
1688 * Get the current clock time as microseconds.
1689 *
1690 * @returns The timer clock as microseconds.
1691 * @param pTimer Timer handle as returned by one of the create functions.
1692 */
1693VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1694{
1695 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1696}
1697
1698
1699/**
1700 * Get the current clock time as milliseconds.
1701 *
1702 * @returns The timer clock as milliseconds.
1703 * @param pTimer Timer handle as returned by one of the create functions.
1704 */
1705VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1706{
1707 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1708}
1709
1710
1711/**
1712 * Converts the specified timer clock time to nanoseconds.
1713 *
1714 * @returns nanoseconds.
1715 * @param pTimer Timer handle as returned by one of the create functions.
1716 * @param u64Ticks The clock ticks.
1717 * @remark There could be rounding errors here. We just do a simple integer divide
1718 * without any adjustments.
1719 */
1720VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1721{
1722 switch (pTimer->enmClock)
1723 {
1724 case TMCLOCK_VIRTUAL:
1725 case TMCLOCK_VIRTUAL_SYNC:
1726 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1727 return u64Ticks;
1728
1729 case TMCLOCK_REAL:
1730 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1731 return u64Ticks * 1000000;
1732
1733 default:
1734 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1735 return 0;
1736 }
1737}
1738
1739
1740/**
1741 * Converts the specified timer clock time to microseconds.
1742 *
1743 * @returns microseconds.
1744 * @param pTimer Timer handle as returned by one of the create functions.
1745 * @param u64Ticks The clock ticks.
1746 * @remark There could be rounding errors here. We just do a simple integer divide
1747 * without any adjustments.
1748 */
1749VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1750{
1751 switch (pTimer->enmClock)
1752 {
1753 case TMCLOCK_VIRTUAL:
1754 case TMCLOCK_VIRTUAL_SYNC:
1755 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1756 return u64Ticks / 1000;
1757
1758 case TMCLOCK_REAL:
1759 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1760 return u64Ticks * 1000;
1761
1762 default:
1763 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1764 return 0;
1765 }
1766}
1767
1768
1769/**
1770 * Converts the specified timer clock time to milliseconds.
1771 *
1772 * @returns milliseconds.
1773 * @param pTimer Timer handle as returned by one of the create functions.
1774 * @param u64Ticks The clock ticks.
1775 * @remark There could be rounding errors here. We just do a simple integer divide
1776 * without any adjustments.
1777 */
1778VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1779{
1780 switch (pTimer->enmClock)
1781 {
1782 case TMCLOCK_VIRTUAL:
1783 case TMCLOCK_VIRTUAL_SYNC:
1784 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1785 return u64Ticks / 1000000;
1786
1787 case TMCLOCK_REAL:
1788 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1789 return u64Ticks;
1790
1791 default:
1792 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1793 return 0;
1794 }
1795}
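
/* Editor's note with a small sketch (not in the original, excluded from the
 * build): as the remarks above say, the To* conversions do a plain truncating
 * integer divide.  For a TMCLOCK_VIRTUAL timer (1 GHz) that means: */
#if 0
    uint64_t const cMs = TMTimerToMilli(pTimer, 1999999 /* ns */);  /* yields 1, not 2 */
    Assert(cMs == 1);
#endif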
1796
1797
1798/**
1799 * Converts the specified nanosecond timestamp to timer clock ticks.
1800 *
1801 * @returns timer clock ticks.
1802 * @param pTimer Timer handle as returned by one of the create functions.
1803 * @param u64NanoTS The nanosecond timestamp to convert.
1804 * @remark There could be rounding and overflow errors here.
1805 */
1806VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1807{
1808 switch (pTimer->enmClock)
1809 {
1810 case TMCLOCK_VIRTUAL:
1811 case TMCLOCK_VIRTUAL_SYNC:
1812 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1813 return u64NanoTS;
1814
1815 case TMCLOCK_REAL:
1816 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1817 return u64NanoTS / 1000000;
1818
1819 default:
1820 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1821 return 0;
1822 }
1823}
1824
1825
1826/**
1827 * Converts the specified microsecond timestamp to timer clock ticks.
1828 *
1829 * @returns timer clock ticks.
1830 * @param pTimer Timer handle as returned by one of the create functions.
1831 * @param u64MicroTS The microsecond timestamp to convert.
1832 * @remark There could be rounding and overflow errors here.
1833 */
1834VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1835{
1836 switch (pTimer->enmClock)
1837 {
1838 case TMCLOCK_VIRTUAL:
1839 case TMCLOCK_VIRTUAL_SYNC:
1840 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1841 return u64MicroTS * 1000;
1842
1843 case TMCLOCK_REAL:
1844 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1845 return u64MicroTS / 1000;
1846
1847 default:
1848 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1849 return 0;
1850 }
1851}
1852
1853
1854/**
1855 * Converts the specified millisecond timestamp to timer clock ticks.
1856 *
1857 * @returns timer clock ticks.
1858 * @param pTimer Timer handle as returned by one of the create functions.
1859 * @param u64MilliTS The millisecond timestamp to convert.
1860 * @remark There could be rounding and overflow errors here.
1861 */
1862VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1863{
1864 switch (pTimer->enmClock)
1865 {
1866 case TMCLOCK_VIRTUAL:
1867 case TMCLOCK_VIRTUAL_SYNC:
1868 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1869 return u64MilliTS * 1000000;
1870
1871 case TMCLOCK_REAL:
1872 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1873 return u64MilliTS;
1874
1875 default:
1876 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1877 return 0;
1878 }
1879}
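
/* Editor's sketch (illustration only, excluded from the build): the From*
 * conversions truncate too, so on a TMCLOCK_REAL timer (1000 Hz) any
 * sub-millisecond fraction is lost and From(To(x)) does not round-trip in
 * general. */
#if 0
    uint64_t const cTicks = TMTimerFromNano(pTimer, 2500000 /* 2.5 ms */);  /* == 2 on TMCLOCK_REAL */
#endif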
1880
1881
1882/**
1883 * Get the expire time of the timer.
1884 * Only valid for active timers.
1885 *
1886 * @returns Expire time of the timer.
1887 * @param pTimer Timer handle as returned by one of the create functions.
1888 */
1889VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1890{
1891 TMTIMER_ASSERT_CRITSECT(pTimer);
1892 int cRetries = 1000;
1893 do
1894 {
1895 TMTIMERSTATE enmState = pTimer->enmState;
1896 switch (enmState)
1897 {
1898 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1899 case TMTIMERSTATE_EXPIRED_DELIVER:
1900 case TMTIMERSTATE_STOPPED:
1901 case TMTIMERSTATE_PENDING_STOP:
1902 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1903 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1904 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1905 return ~(uint64_t)0;
1906
1907 case TMTIMERSTATE_ACTIVE:
1908 case TMTIMERSTATE_PENDING_RESCHEDULE:
1909 case TMTIMERSTATE_PENDING_SCHEDULE:
1910 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1911 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1912 return pTimer->u64Expire;
1913
1914 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1915 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1916#ifdef IN_RING3
1917 if (!RTThreadYield())
1918 RTThreadSleep(1);
1919#endif
1920 break;
1921
1922 /*
1923 * Invalid states.
1924 */
1925 case TMTIMERSTATE_DESTROY:
1926 case TMTIMERSTATE_FREE:
1927 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1928 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1929 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1930 return ~(uint64_t)0;
1931 default:
1932 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1933 return ~(uint64_t)0;
1934 }
1935 } while (cRetries-- > 0);
1936
1937 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1938 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1939 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1940 return ~(uint64_t)0;
1941}
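
/* Editor's sketch (not part of the original, excluded from the build):
 * computing the time left until an active timer fires, honouring the ~0
 * "not active" return value documented above.  Note the expire time may
 * already be in the past if delivery is pending. */
#if 0
    uint64_t const u64Expire = TMTimerGetExpire(pTimer);
    if (u64Expire != ~(uint64_t)0)
    {
        uint64_t const u64Now = TMTimerGet(pTimer);
        if (u64Expire > u64Now)
            Log(("%'RU64 ns until expiry\n", TMTimerToNano(pTimer, u64Expire - u64Now)));
    }
#endif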
1942
1943
1944/**
1945 * Checks if a timer is active or not.
1946 *
1947 * @returns True if active.
1948 * @returns False if not active.
1949 * @param pTimer Timer handle as returned by one of the create functions.
1950 */
1951VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1952{
1953 TMTIMERSTATE enmState = pTimer->enmState;
1954 switch (enmState)
1955 {
1956 case TMTIMERSTATE_STOPPED:
1957 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1958 case TMTIMERSTATE_EXPIRED_DELIVER:
1959 case TMTIMERSTATE_PENDING_STOP:
1960 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1961 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1962 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1963 return false;
1964
1965 case TMTIMERSTATE_ACTIVE:
1966 case TMTIMERSTATE_PENDING_RESCHEDULE:
1967 case TMTIMERSTATE_PENDING_SCHEDULE:
1968 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1969 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1970 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1971 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1972 return true;
1973
1974 /*
1975 * Invalid states.
1976 */
1977 case TMTIMERSTATE_DESTROY:
1978 case TMTIMERSTATE_FREE:
1979 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1980 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1981 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1982 return false;
1983 default:
1984 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1985 return false;
1986 }
1987}
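
/* Editor's note with a hypothetical sketch (excluded from the build): the
 * state is sampled once, so the answer can be stale by the time the caller
 * acts on it.  A check-then-act sequence like the one below is inherently
 * racy unless the timer's critical section is held. */
#if 0
    if (TMTimerIsActive(pTimer))
        TMTimerStop(pTimer);
#endif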
1988
1989
1990/**
1991 * Convert state to string.
1992 *
1993 * @returns Read-only state name.
1994 * @param enmState State.
1995 */
1996const char *tmTimerState(TMTIMERSTATE enmState)
1997{
1998 switch (enmState)
1999 {
2000#define CASE(num, state) \
2001 case TMTIMERSTATE_##state: \
2002 AssertCompile(TMTIMERSTATE_##state == (num)); \
2003 return #num "-" #state
2004 CASE( 1,STOPPED);
2005 CASE( 2,ACTIVE);
2006 CASE( 3,EXPIRED_GET_UNLINK);
2007 CASE( 4,EXPIRED_DELIVER);
2008 CASE( 5,PENDING_STOP);
2009 CASE( 6,PENDING_STOP_SCHEDULE);
2010 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2011 CASE( 8,PENDING_SCHEDULE);
2012 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2013 CASE(10,PENDING_RESCHEDULE);
2014 CASE(11,DESTROY);
2015 CASE(12,FREE);
2016 default:
2017 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2018 return "Invalid state!";
2019#undef CASE
2020 }
2021}
2022
2023
2024/**
2025 * Schedules the given timer on the given queue.
2026 *
2027 * @param pQueue The timer queue.
2028 * @param pTimer The timer that needs scheduling.
2029 *
2030 * @remarks Called while owning the lock.
2031 */
2032DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
2033{
2034 /*
2035 * Processing.
2036 */
2037 unsigned cRetries = 2;
2038 do
2039 {
2040 TMTIMERSTATE enmState = pTimer->enmState;
2041 switch (enmState)
2042 {
2043 /*
2044 * Reschedule timer (in the active list).
2045 */
2046 case TMTIMERSTATE_PENDING_RESCHEDULE:
2047 {
2048 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
2049 break; /* retry */
2050
2051 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
2052 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
2053 if (pPrev)
2054 TMTIMER_SET_NEXT(pPrev, pNext);
2055 else
2056 {
2057 TMTIMER_SET_HEAD(pQueue, pNext);
2058 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
2059 }
2060 if (pNext)
2061 TMTIMER_SET_PREV(pNext, pPrev);
2062 pTimer->offNext = 0;
2063 pTimer->offPrev = 0;
2064 /* fall thru */
2065 }
2066
2067 /*
2068 * Schedule timer (insert into the active list).
2069 */
2070 case TMTIMERSTATE_PENDING_SCHEDULE:
2071 {
2072 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
2073 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
2074 break; /* retry */
2075
2076 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
2077 if (pCur)
2078 {
2079 const uint64_t u64Expire = pTimer->u64Expire;
2080 for (;; pCur = TMTIMER_GET_NEXT(pCur))
2081 {
2082 if (pCur->u64Expire > u64Expire)
2083 {
2084 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
2085 TMTIMER_SET_NEXT(pTimer, pCur);
2086 TMTIMER_SET_PREV(pTimer, pPrev);
2087 if (pPrev)
2088 TMTIMER_SET_NEXT(pPrev, pTimer);
2089 else
2090 {
2091 TMTIMER_SET_HEAD(pQueue, pTimer);
2092 pQueue->u64Expire = u64Expire;
2093 }
2094 TMTIMER_SET_PREV(pCur, pTimer);
2095 return;
2096 }
2097 if (!pCur->offNext)
2098 {
2099 TMTIMER_SET_NEXT(pCur, pTimer);
2100 TMTIMER_SET_PREV(pTimer, pCur);
2101 return;
2102 }
2103 }
2104 }
2105 else
2106 {
2107 TMTIMER_SET_HEAD(pQueue, pTimer);
2108 pQueue->u64Expire = pTimer->u64Expire;
2109 }
2110 return;
2111 }
2112
2113 /*
2114 * Stop the timer in active list.
2115 */
2116 case TMTIMERSTATE_PENDING_STOP:
2117 {
2118 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
2119 break; /* retry */
2120
2121 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
2122 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
2123 if (pPrev)
2124 TMTIMER_SET_NEXT(pPrev, pNext);
2125 else
2126 {
2127 TMTIMER_SET_HEAD(pQueue, pNext);
2128 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
2129 }
2130 if (pNext)
2131 TMTIMER_SET_PREV(pNext, pPrev);
2132 pTimer->offNext = 0;
2133 pTimer->offPrev = 0;
2134 /* fall thru */
2135 }
2136
2137 /*
2138 * Stop the timer (not on the active list).
2139 */
2140 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2141 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
2142 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
2143 break;
2144 return;
2145
2146 /*
2147 * The timer is pending destruction by TMR3TimerDestroy, our caller.
2148 * Nothing to do here.
2149 */
2150 case TMTIMERSTATE_DESTROY:
2151 break;
2152
2153 /*
2154 * Postpone these until they get into the right state.
2155 */
2156 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2157 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2158 tmTimerLink(pQueue, pTimer);
2159 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
2160 return;
2161
2162 /*
2163 * None of these can be in the schedule.
2164 */
2165 case TMTIMERSTATE_FREE:
2166 case TMTIMERSTATE_STOPPED:
2167 case TMTIMERSTATE_ACTIVE:
2168 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2169 case TMTIMERSTATE_EXPIRED_DELIVER:
2170 default:
2171 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
2172 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
2173 return;
2174 }
2175 } while (cRetries-- > 0);
2176}
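
/* Editor's summary (added comment): tmTimerQueueScheduleOne drives these
 * transitions, each guarded by an atomic tmTimerTry so that a concurrent
 * state change makes the switch retry instead of corrupting the list:
 *   PENDING_RESCHEDULE    -> PENDING_SCHEDULE      (unlink from active list)
 *   PENDING_SCHEDULE      -> ACTIVE                (insert sorted by u64Expire)
 *   PENDING_STOP          -> PENDING_STOP_SCHEDULE (unlink from active list)
 *   PENDING_STOP_SCHEDULE -> STOPPED
 * The *_SET_EXPIRE states are re-linked via tmTimerLink and retried later. */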
2177
2178
2179/**
2180 * Schedules the specified timer queue.
2181 *
2182 * @param pVM The VM to run the timers for.
2183 * @param pQueue The queue to schedule.
2184 *
2185 * @remarks Called while owning the lock.
2186 */
2187void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
2188{
2189 TM_ASSERT_LOCK(pVM);
2190
2191 /*
2192 * Dequeue the scheduling list and iterate it.
2193 */
2194 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
2195 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expire=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
2196 if (!offNext)
2197 return;
2198 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
2199 while (pNext)
2200 {
2201 /*
2202 * Unlink the head timer and find the next one.
2203 */
2204 PTMTIMER pTimer = pNext;
2205 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
2206 pTimer->offScheduleNext = 0;
2207
2208 /*
2209 * Do the scheduling.
2210 */
2211 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
2212 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
2213 tmTimerQueueScheduleOne(pQueue, pTimer);
2214 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
2215 } /* foreach timer in current schedule batch. */
2216 Log2(("tmTimerQueueSchedule: u64Expire=%'RU64\n", pQueue->u64Expire));
2217}
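
/* Editor's note (added comment): the schedule list is linked with
 * self-relative offsets (offSchedule / offScheduleNext) rather than pointers,
 * so the very same structures can be walked from ring-3, ring-0 and raw-mode
 * context even though the mappings have different base addresses there. */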
2218
2219
2220#ifdef VBOX_STRICT
2221/**
2222 * Checks that the timer queues are sane.
2223 *
2224 * @param pVM The VM handle.
 * @param pszWhere Caller identification used in the assertion messages.
2225 *
2226 * @remarks Called while owning the lock.
2227 */
2228void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
2229{
2230 TM_ASSERT_LOCK(pVM);
2231
2232 /*
2233 * Check the linking of the active lists.
2234 */
2235 for (int i = 0; i < TMCLOCK_MAX; i++)
2236 {
2237 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2238 Assert((int)pQueue->enmClock == i);
2239 PTMTIMER pPrev = NULL;
2240 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
2241 {
2242 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
2243 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
2244 TMTIMERSTATE enmState = pCur->enmState;
2245 switch (enmState)
2246 {
2247 case TMTIMERSTATE_ACTIVE:
2248 AssertMsg( !pCur->offScheduleNext
2249 || pCur->enmState != TMTIMERSTATE_ACTIVE,
2250 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
2251 break;
2252 case TMTIMERSTATE_PENDING_STOP:
2253 case TMTIMERSTATE_PENDING_RESCHEDULE:
2254 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2255 break;
2256 default:
2257 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
2258 break;
2259 }
2260 }
2261 }
2262
2263
2264# ifdef IN_RING3
2265 /*
2266 * Do the big list and check that active timers all are in the active lists.
2267 */
2268 PTMTIMERR3 pPrev = NULL;
2269 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
2270 {
2271 Assert(pCur->pBigPrev == pPrev);
2272 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
2273
2274 TMTIMERSTATE enmState = pCur->enmState;
2275 switch (enmState)
2276 {
2277 case TMTIMERSTATE_ACTIVE:
2278 case TMTIMERSTATE_PENDING_STOP:
2279 case TMTIMERSTATE_PENDING_RESCHEDULE:
2280 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2281 {
2282 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
2283 Assert(pCur->offPrev || pCur == pCurAct);
2284 while (pCurAct && pCurAct != pCur)
2285 pCurAct = TMTIMER_GET_NEXT(pCurAct);
2286 Assert(pCurAct == pCur);
2287 break;
2288 }
2289
2290 case TMTIMERSTATE_PENDING_SCHEDULE:
2291 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2292 case TMTIMERSTATE_STOPPED:
2293 case TMTIMERSTATE_EXPIRED_DELIVER:
2294 {
2295 Assert(!pCur->offNext);
2296 Assert(!pCur->offPrev);
2297 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
2298 pCurAct;
2299 pCurAct = TMTIMER_GET_NEXT(pCurAct))
2300 {
2301 Assert(pCurAct != pCur);
2302 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
2303 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
2304 }
2305 break;
2306 }
2307
2308 /* ignore */
2309 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2310 break;
2311
2312 /* shouldn't get here! */
2313 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2314 case TMTIMERSTATE_DESTROY:
2315 default:
2316 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
2317 break;
2318 }
2319 }
2320# endif /* IN_RING3 */
2321}
2322#endif /* VBOX_STRICT */
2323
2324
2325/**
2326 * Gets the current warp drive percent.
2327 *
2328 * @returns The warp drive percent.
2329 * @param pVM The VM handle.
2330 */
2331VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
2332{
2333 return pVM->tm.s.u32VirtualWarpDrivePercentage;
2334}
2335
2336
2337/**
2338 * Gets the highest frequency hint for all the important timers.
2339 *
2340 * @returns The highest frequency. 0 if no timers care.
2341 * @param pVM The VM handle.
2342 */
2343static uint32_t tmGetFrequencyHint(PVM pVM)
2344{
2345 /*
2346 * Query the value, recalculate it if necessary.
2347 *
2348 * The "right" highest frequency value isn't so important that we'll block
2349 * waiting on the timer semaphore.
2350 */
2351 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2352 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2353 {
2354 if (RT_SUCCESS(tmTimerTryLock(pVM)))
2355 {
2356 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2357
2358 /*
2359 * Loop over the timers associated with each clock.
2360 */
2361 uMaxHzHint = 0;
2362 for (int i = 0; i < TMCLOCK_MAX; i++)
2363 {
2364 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2365 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2366 {
2367 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2368 if (uHzHint > uMaxHzHint)
2369 {
2370 switch (pCur->enmState)
2371 {
2372 case TMTIMERSTATE_ACTIVE:
2373 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2374 case TMTIMERSTATE_EXPIRED_DELIVER:
2375 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2376 case TMTIMERSTATE_PENDING_SCHEDULE:
2377 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2378 case TMTIMERSTATE_PENDING_RESCHEDULE:
2379 uMaxHzHint = uHzHint;
2380 break;
2381
2382 case TMTIMERSTATE_STOPPED:
2383 case TMTIMERSTATE_PENDING_STOP:
2384 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2385 case TMTIMERSTATE_DESTROY:
2386 case TMTIMERSTATE_FREE:
2387 break;
2388 /* no default, want gcc warnings when adding more states. */
2389 }
2390 }
2391 }
2392 }
2393 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2394 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2395 tmTimerUnlock(pVM);
2396 }
2397 }
2398 return uMaxHzHint;
2399}
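
/* Editor's note (added comment): the function is deliberately lossy.  If the
 * try-lock fails, the possibly stale uMaxHzHint is returned and
 * fHzHintNeedsUpdating stays set, so a later caller will redo the scan.  An
 * approximate hint is acceptable here since it only tunes the preemption
 * timer frequency. */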
2400
2401
2402/**
2403 * Calculates a host timer frequency that would be suitable for the current
2404 * timer load.
2405 *
2406 * This will take the highest timer frequency, adjust for catch-up and warp
2407 * drive, and finally add a little fudge factor. The caller (VMM) will use
2408 * the result to adjust the per-CPU preemption timer.
2409 *
2410 * @returns The highest frequency. 0 if no important timers around.
2411 * @param pVM The VM handle.
2412 * @param pVCpu The current CPU.
2413 */
2414VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
2415{
2416 uint32_t uHz = tmGetFrequencyHint(pVM);
2417
2418 /* Catch-up: we have to be more aggressive than the % indicates at the
2419 beginning of the effort. */
2420 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2421 {
2422 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2423 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2424 {
2425 if (u32Pct <= 100)
2426 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2427 else if (u32Pct <= 200)
2428 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2429 else if (u32Pct <= 400)
2430 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2431 uHz *= u32Pct + 100;
2432 uHz /= 100;
2433 }
2434 }
2435
2436 /* Warp drive. */
2437 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2438 {
2439 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2440 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2441 {
2442 uHz *= u32Pct;
2443 uHz /= 100;
2444 }
2445 }
2446
2447 /* Fudge factor. */
2448 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2449 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2450 else
2451 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2452 uHz /= 100;
2453
2454 /* Make sure it isn't too high. */
2455 if (uHz > pVM->tm.s.cHostHzMax)
2456 uHz = pVM->tm.s.cHostHzMax;
2457
2458 return uHz;
2459}
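
/* Editor's worked example (added comment; the fudge value is hypothetical):
 * with a 1000 Hz hint, a 150% catch-up percentage and
 * cPctHostHzFudgeFactorCatchUp200 at, say, 300, the catch-up step above gives
 *   u32Pct = 150 * 300 / 100 = 450
 *   uHz    = 1000 * (450 + 100) / 100 = 5500 Hz
 * before the warp-drive scaling, per-CPU fudge and cHostHzMax clamp apply. */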