VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 37518

Last change on this file since 37518 was 37517, checked in by vboxsync, 14 years ago

TM: Simplified the virtual sync timers by requiring changes to be done while holding the virtual sync lock. This means we can skip all the pending states and move timers on and off the active list immediately, avoiding the problems with timers being on the pending-scheduling list. Also made u64VirtualSync keep track of the last time stamp all the time (when under the lock) and thus really making sure time does not jump backwards.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 91.1 KB
Line 
1/* $Id: TMAll.cpp 37517 2011-06-16 19:24:00Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2011 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/dbgftrace.h>
26#ifdef IN_RING3
27# include <VBox/vmm/rem.h>
28#endif
29#include "TMInternal.h"
30#include <VBox/vmm/vm.h>
31
32#include <VBox/param.h>
33#include <VBox/err.h>
34#include <VBox/log.h>
35#include <VBox/sup.h>
36#include <iprt/time.h>
37#include <iprt/assert.h>
38#include <iprt/asm.h>
39#include <iprt/asm-math.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44#include "TMInline.h"
45
46
47/*******************************************************************************
48* Defined Constants And Macros *
49*******************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer.
 *
 * Ownership is not required while the VM is in the CREATING, RESETTING or
 * RESETTING_LS states (see the enmVMState checks below).  Compiles to nothing
 * unless VBOX_STRICT is defined. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            VMSTATE      enmState; \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(   pCritSect \
                      && (   PDMCritSectIsOwner(pCritSect) \
                          || (enmState = (pTimer)->CTX_SUFF(pVM)->enmVMState) == VMSTATE_CREATING \
                          || enmState == VMSTATE_RESETTING \
                          || enmState == VMSTATE_RESETTING_LS ),\
                      ("pTimer=%p (%s) pCritSect=%p (%s)\n", pTimer, R3STRING(pTimer->pszDesc), \
                       (pTimer)->pCritSect, R3STRING(PDMR3CritSectName((pTimer)->pCritSect)) )); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif
72
73
74#ifndef tmTimerLock
75
76/**
77 * Try take the timer lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
78 *
79 * @retval VINF_SUCCESS on success (always in ring-3).
80 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
81 *
82 * @param pVM The VM handle.
83 *
84 * @thread EMTs for the time being.
85 */
86int tmTimerLock(PVM pVM)
87{
88 VM_ASSERT_EMT(pVM);
89 int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
90 return rc;
91}
92
93
94/**
95 * Try take the timer lock, no waiting.
96 *
97 * @retval VINF_SUCCESS on success.
98 * @retval VERR_SEM_BUSY if busy.
99 *
100 * @param pVM The VM handle.
101 */
102int tmTimerTryLock(PVM pVM)
103{
104 int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
105 return rc;
106}
107
108
109/**
110 * Release the EMT/TM lock.
111 *
112 * @param pVM The VM handle.
113 */
114void tmTimerUnlock(PVM pVM)
115{
116 PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
117}
118
119
120/**
121 * Try take the VirtualSync lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
122 *
123 * @retval VINF_SUCCESS on success (always in ring-3).
124 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
125 *
126 * @param pVM The VM handle.
127 */
128int tmVirtualSyncLock(PVM pVM)
129{
130 VM_ASSERT_EMT(pVM);
131 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
132 return rc;
133}
134
135
136/**
137 * Try take the VirtualSync lock, no waiting.
138 *
139 * @retval VINF_SUCCESS on success.
140 * @retval VERR_SEM_BUSY if busy.
141 *
142 * @param pVM The VM handle.
143 */
144int tmVirtualSyncTryLock(PVM pVM)
145{
146 VM_ASSERT_EMT(pVM);
147 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
148 return rc;
149}
150
151
152/**
153 * Release the VirtualSync lock.
154 *
155 * @param pVM The VM handle.
156 */
157void tmVirtualSyncUnlock(PVM pVM)
158{
159 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
160}
161
162#endif /* ! macros */
163
164/**
165 * Notification that execution is about to start.
166 *
167 * This call must always be paired with a TMNotifyEndOfExecution call.
168 *
169 * The function may, depending on the configuration, resume the TSC and future
170 * clocks that only ticks when we're executing guest code.
171 *
172 * @param pVCpu The VMCPU to operate on.
173 */
174VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
175{
176 PVM pVM = pVCpu->CTX_SUFF(pVM);
177
178#ifndef VBOX_WITHOUT_NS_ACCOUNTING
179 pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
180#endif
181 if (pVM->tm.s.fTSCTiedToExecution)
182 tmCpuTickResume(pVM, pVCpu);
183}
184
185
/**
 * Notification that execution has ended.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVM, pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Work out the new totals: elapsed wall clock total, the executing delta
       for this period, and the 'other' remainder derived from those and the
       accumulated halted time. */
    uint64_t const u64NsTs           = RTTimeNanoTS();
    uint64_t const cNsTotalNew       = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsExecutingDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
    uint64_t const cNsExecutingNew   = pVCpu->tm.s.cNsExecuting + cNsExecutingDelta;
    uint64_t const cNsOtherNew       = cNsTotalNew - cNsExecutingNew - pVCpu->tm.s.cNsHalted;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    /* Bucket the execution period by length (tiny < 5us, short < 50us). */
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecuting, cNsExecutingDelta);
    if (cNsExecutingDelta < 5000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecTiny, cNsExecutingDelta);
    else if (cNsExecutingDelta < 50000)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecShort, cNsExecutingDelta);
    else
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsExecLong, cNsExecutingDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before execution) */
# endif

    /* Publish the new values; the generation counter is odd while the update
       is in progress and is bumped to the next even value afterwards
       (seqlock-style write). */
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsExecuting = cNsExecutingNew;
    pVCpu->tm.s.cNsTotal     = cNsTotalNew;
    pVCpu->tm.s.cNsOther     = cNsOtherNew;
    pVCpu->tm.s.cPeriodsExecuting++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}
232
233
/**
 * Notification that the cpu is entering the halt state.
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Stamp the start of the halt period for the ns accounting. */
    pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}
256
257
/**
 * Notification that the cpu is leaving the halt state.
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        && !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVM, pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Work out the new totals: elapsed wall clock total, the halted delta for
       this period, and the 'other' remainder derived from those and the
       accumulated executing time. */
    uint64_t const u64NsTs        = RTTimeNanoTS();
    uint64_t const cNsTotalNew    = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    uint64_t const cNsHaltedDelta = u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
    uint64_t const cNsHaltedNew   = pVCpu->tm.s.cNsHalted + cNsHaltedDelta;
    uint64_t const cNsOtherNew    = cNsTotalNew - pVCpu->tm.s.cNsExecuting - cNsHaltedNew;

# if defined(VBOX_WITH_STATISTICS) || defined(VBOX_WITH_NS_ACCOUNTING_STATS)
    STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsHalted, cNsHaltedDelta);
    STAM_REL_COUNTER_ADD(&pVCpu->tm.s.StatNsTotal, cNsTotalNew - pVCpu->tm.s.cNsTotal);
    int64_t const cNsOtherNewDelta = cNsOtherNew - pVCpu->tm.s.cNsOther;
    if (cNsOtherNewDelta > 0)
        STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->tm.s.StatNsOther, cNsOtherNewDelta); /* (the period before halting) */
# endif

    /* Publish the new values; the generation counter is odd while the update
       is in progress and is bumped to the next even value afterwards
       (seqlock-style write). */
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    pVCpu->tm.s.cNsHalted = cNsHaltedNew;
    pVCpu->tm.s.cNsTotal  = cNsTotalNew;
    pVCpu->tm.s.cNsOther  = cNsOtherNew;
    pVCpu->tm.s.cPeriodsHalted++;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}
299
300
/**
 * Raise the timer force action flag and notify the dedicated timer EMT.
 *
 * Only sets the flag (and pokes the EMT) when it is currently clear, so
 * repeated calls are cheap.
 *
 * @param   pVM         The VM handle.
 */
DECLINLINE(void) tmScheduleNotify(PVM pVM)
{
    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    {
        Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        /* In ring-3 we can tell REM about the pending timer and wake up the
           target EMT right away. */
        REMR3NotifyTimerPending(pVM, pVCpuDst);
        VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
        STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
    }
}
320
321
/**
 * Schedule the queue which was changed.
 *
 * If we are on an EMT and can take the timer lock without waiting, the queue
 * the timer belongs to is processed right here; otherwise the dedicated timer
 * EMT is notified via the force action flag when the timer is in a
 * pending-scheduling state.
 *
 * @param   pTimer      The timer whose queue needs scheduling.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    if (    VM_IS_EMT(pVM)
        &&  RT_SUCCESS(tmTimerTryLock(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        tmTimerUnlock(pVM);
    }
    else
    {
        /* Couldn't do it here; leave it to the dedicated timer EMT. */
        TMTIMERSTATE enmState = pTimer->enmState;
        if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
            tmScheduleNotify(pVM);
    }
}
347
348
/**
 * Try change the state to enmStateNew from enmStateOld.
 *
 * Note that unlike tmTimerTryWithLink, this does NOT link the timer onto the
 * scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}
367
368
/**
 * Links the timer onto the scheduling queue.
 *
 * Lock-free LIFO push: the timer is made the new head of the queue's schedule
 * list via a compare-and-swap loop.  All links are stored as self-relative
 * offsets (relative to the queue for the head, to the timer for the next
 * pointer) so they are valid in all contexts (R3/R0/RC).
 *
 * @param   pQueue  The timer queue the timer belongs to.
 * @param   pTimer  The timer.
 *
 * @todo    FIXME: Look into potential race with the thread running the queues
 *          and stuff.
 */
DECLINLINE(void) tmTimerLinkSchedule(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t offHead;
    do
    {
        /* Point the timer at the current head, then try make it the new head. */
        offHead = pQueue->offSchedule;
        if (offHead)
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}
392
393
/**
 * Try change the state to enmStateNew from enmStateOld
 * and link the timer into the scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /* Only link the timer when the state change actually succeeded. */
    if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
    {
        tmTimerLinkSchedule(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
        return true;
    }
    return false;
}
412
413
/**
 * Links a timer into the active list of a timer queue.
 *
 * The active list is a doubly linked list sorted by expiration time in
 * ascending order, so this does a linear scan for the insertion point.  The
 * queue's cached u64Expire is updated whenever the new timer becomes the
 * head (i.e. the next timer to expire).
 *
 * @param   pQueue          The queue.
 * @param   pTimer          The timer.
 * @param   u64Expire       The timer expiration time.
 *
 * @remarks Called while owning the relevant queue lock.
 */
DECL_FORCE_INLINE(void) tmTimerQueueLinkActive(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offNext);
    Assert(!pTimer->offPrev);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE || pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC); /* (active is not a stable state) */

    PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
    if (pCur)
    {
        for (;; pCur = TMTIMER_GET_NEXT(pCur))
        {
            if (pCur->u64Expire > u64Expire)
            {
                /* Insert before pCur. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                TMTIMER_SET_NEXT(pTimer, pCur);
                TMTIMER_SET_PREV(pTimer, pPrev);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pTimer);
                else
                {
                    /* New head: also publish the new next-expiration time. */
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
                    DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive head", R3STRING(pTimer->pszDesc));
                }
                TMTIMER_SET_PREV(pCur, pTimer);
                return;
            }
            if (!pCur->offNext)
            {
                /* Reached the tail; append. */
                TMTIMER_SET_NEXT(pCur, pTimer);
                TMTIMER_SET_PREV(pTimer, pCur);
                DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive tail", R3STRING(pTimer->pszDesc));
                return;
            }
        }
    }
    else
    {
        /* Empty list: the timer becomes the head and next expiration. */
        TMTIMER_SET_HEAD(pQueue, pTimer);
        ASMAtomicWriteU64(&pQueue->u64Expire, u64Expire);
        DBGFTRACE_U64_TAG2(pTimer->CTX_SUFF(pVM), u64Expire, "tmTimerQueueLinkActive empty", R3STRING(pTimer->pszDesc));
    }
}
466
467
468
/**
 * Schedules the given timer on the given queue.
 *
 * Performs one pending state transition (schedule, reschedule or stop) on the
 * timer, retrying up to two more times when an atomic state change loses a
 * race.  The fallthroughs in the switch are deliberate: a reschedule is an
 * unlink followed by the schedule case, and a pending-stop is an unlink
 * followed by the stop-schedule case.
 *
 * @param   pQueue      The timer queue.
 * @param   pTimer      The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(pQueue->enmClock != TMCLOCK_VIRTUAL_SYNC);

    /*
     * Processing.
     */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                /* fall thru */

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */
                tmTimerQueueLinkActive(pQueue, pTimer, pTimer->u64Expire);
                return;

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */
                tmTimerQueueUnlinkActive(pQueue, pTimer);
                /* fall thru */

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLinkSchedule(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
558
559
/**
 * Schedules the specified timer queue.
 *
 * Atomically detaches the whole pending-schedule list from the queue and then
 * processes each timer on it in turn via tmTimerQueueScheduleOne.
 *
 * @param   pVM         The VM to run the timers for.
 * @param   pQueue      The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
    Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
}
599
600
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * Walks the active list of each clock's queue verifying the prev/next links
 * and per-state invariants, and (in ring-3) cross-checks the big created
 * timers list against the active lists.  The virtual sync queue is only
 * examined when its lock can be taken without waiting.
 *
 * @param   pVM         VM handle.
 * @param   pszWhere    Caller identification included in assertion messages.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Check the linking of the active lists.
     */
    bool fHaveVirtualSyncLock = false;
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        if (pQueue->enmClock == TMCLOCK_VIRTUAL_SYNC)
        {
            /* Skip the virtual sync queue if we'd have to wait for its lock. */
            if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) != VINF_SUCCESS)
                continue;
            fHaveVirtualSyncLock = true;
        }
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* (Re-reads enmState to tolerate a racing state change.) */
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                /* These states must be linked into their queue's active list. */
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                    Assert(pCur->offPrev || pCur == pCurAct);
                    while (pCurAct && pCurAct != pCur)
                        pCurAct = TMTIMER_GET_NEXT(pCurAct);
                    Assert(pCurAct == pCur);
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
                /* These states must NOT appear anywhere in the active list. */
                if (fHaveVirtualSyncLock || pCur->enmClock != TMCLOCK_VIRTUAL_SYNC)
                {
                    Assert(!pCur->offNext);
                    Assert(!pCur->offPrev);
                    for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                         pCurAct;
                         pCurAct = TMTIMER_GET_NEXT(pCurAct))
                    {
                        Assert(pCurAct != pCur);
                        Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                        Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                    }
                }
                break;

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */

    if (fHaveVirtualSyncLock)
        PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
}
#endif /* VBOX_STRICT */
716
717#ifdef VBOX_HIGH_RES_TIMERS_HACK
718
/**
 * Worker for tmTimerPollInternal that handles misses when the dedicated timer
 * EMT is polling.
 *
 * @returns See tmTimerPollInternal.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   u64Now      Current virtual clock timestamp.
 * @param   u64Delta    The delta to the next event in ticks of the
 *                      virtual clock.
 * @param   pu64Delta   Where to return the delta.
 */
DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
{
    Assert(!(u64Delta & RT_BIT_64(63)));

    if (!pVM->tm.s.fVirtualWarpDrive)
    {
        *pu64Delta = u64Delta;
        return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    }

    /*
     * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
     */
    uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
    uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;

    uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
    u64GipTime -= u64Start; /* the start is GIP time. */
    if (u64GipTime >= u64Delta)
    {
        /* NOTE(review): the results of these two calls appear to be discarded;
           verify against the IPRT ASMMultU64ByU32DivByU32 contract whether the
           scaled values should be assigned back here. */
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
    }
    else
    {
        u64Delta -= u64GipTime;
        ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
        u64Delta += u64GipTime;
    }
    *pu64Delta = u64Delta;
    u64GipTime += u64Start;
    return u64GipTime;
}
764
765
766/**
767 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
768 * than the one dedicated to timer work.
769 *
770 * @returns See tmTimerPollInternal.
771 * @param pVM Pointer to the shared VM structure.
772 * @param u64Now Current virtual clock timestamp.
773 * @param pu64Delta Where to return the delta.
774 */
775DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
776{
777 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
778 *pu64Delta = s_u64OtherRet;
779 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
780}
781
782
783/**
784 * Worker for tmTimerPollInternal.
785 *
786 * @returns See tmTimerPollInternal.
787 * @param pVM Pointer to the shared VM structure.
788 * @param pVCpu Pointer to the shared VMCPU structure of the
789 * caller.
790 * @param pVCpuDst Pointer to the shared VMCPU structure of the
791 * dedicated timer EMT.
792 * @param u64Now Current virtual clock timestamp.
793 * @param pu64Delta Where to return the delta.
794 * @param pCounter The statistics counter to update.
795 */
796DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
797 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
798{
799 STAM_COUNTER_INC(pCounter);
800 if (pVCpuDst != pVCpu)
801 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
802 *pu64Delta = 0;
803 return 0;
804}
805
806/**
807 * Common worker for TMTimerPollGIP and TMTimerPoll.
808 *
809 * This function is called before FFs are checked in the inner execution EM loops.
810 *
811 * @returns The GIP timestamp of the next event.
812 * 0 if the next event has already expired.
813 *
814 * @param pVM Pointer to the shared VM structure.
815 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
816 * @param pu64Delta Where to store the delta.
817 *
818 * @thread The emulation thread.
819 *
820 * @remarks GIP uses ns ticks.
821 */
822DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
823{
824 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
825 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
826 STAM_COUNTER_INC(&pVM->tm.s.StatPoll);
827
828 /*
829 * Return straight away if the timer FF is already set ...
830 */
831 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
832 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
833
834 /*
835 * ... or if timers are being run.
836 */
837 if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
838 {
839 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
840 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
841 }
842
843 /*
844 * Check for TMCLOCK_VIRTUAL expiration.
845 */
846 const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
847 const int64_t i64Delta1 = u64Expire1 - u64Now;
848 if (i64Delta1 <= 0)
849 {
850 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
851 {
852 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
853 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
854#ifdef IN_RING3
855 REMR3NotifyTimerPending(pVM, pVCpuDst);
856#endif
857 }
858 LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
859 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
860 }
861
862 /*
863 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
864 * This isn't quite as straight forward if in a catch-up, not only do
865 * we have to adjust the 'now' but when have to adjust the delta as well.
866 */
867
868 /*
869 * Optimistic lockless approach.
870 */
871 uint64_t u64VirtualSyncNow;
872 uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
873 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
874 {
875 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
876 {
877 u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
878 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
879 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
880 && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
881 && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
882 {
883 u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
884 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
885 if (i64Delta2 > 0)
886 {
887 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
888 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
889
890 if (pVCpu == pVCpuDst)
891 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
892 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
893 }
894
895 if ( !pVM->tm.s.fRunningQueues
896 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
897 {
898 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
899 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
900#ifdef IN_RING3
901 REMR3NotifyTimerPending(pVM, pVCpuDst);
902#endif
903 }
904
905 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
906 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
907 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
908 }
909 }
910 }
911 else
912 {
913 STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
914 LogFlow(("TMTimerPoll: stopped\n"));
915 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
916 }
917
918 /*
919 * Complicated lockless approach.
920 */
921 uint64_t off;
922 uint32_t u32Pct = 0;
923 bool fCatchUp;
924 int cOuterTries = 42;
925 for (;; cOuterTries--)
926 {
927 fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
928 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
929 u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
930 if (fCatchUp)
931 {
932 /* No changes allowed, try get a consistent set of parameters. */
933 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
934 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
935 u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
936 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
937 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
938 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
939 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
940 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
941 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
942 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
943 || cOuterTries <= 0)
944 {
945 uint64_t u64Delta = u64Now - u64Prev;
946 if (RT_LIKELY(!(u64Delta >> 32)))
947 {
948 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
949 if (off > u64Sub + offGivenUp)
950 off -= u64Sub;
951 else /* we've completely caught up. */
952 off = offGivenUp;
953 }
954 else
955 /* More than 4 seconds since last time (or negative), ignore it. */
956 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
957
958 /* Check that we're still running and in catch up. */
959 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
960 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
961 break;
962 }
963 }
964 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
965 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
966 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
967 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
968 break; /* Got an consistent offset */
969
970 /* Repeat the initial checks before iterating. */
971 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
972 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
973 if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
974 {
975 STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
976 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
977 }
978 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
979 {
980 LogFlow(("TMTimerPoll: stopped\n"));
981 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
982 }
983 if (cOuterTries <= 0)
984 break; /* that's enough */
985 }
986 if (cOuterTries <= 0)
987 STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
988 u64VirtualSyncNow = u64Now - off;
989
990 /* Calc delta and see if we've got a virtual sync hit. */
991 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
992 if (i64Delta2 <= 0)
993 {
994 if ( !pVM->tm.s.fRunningQueues
995 && !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
996 {
997 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
998 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
999#ifdef IN_RING3
1000 REMR3NotifyTimerPending(pVM, pVCpuDst);
1001#endif
1002 }
1003 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
1004 LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
1005 return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
1006 }
1007
1008 /*
1009 * Return the time left to the next event.
1010 */
1011 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
1012 if (pVCpu == pVCpuDst)
1013 {
1014 if (fCatchUp)
1015 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
1016 return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
1017 }
1018 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
1019}
1020
1021
1022/**
1023 * Set FF if we've passed the next virtual event.
1024 *
1025 * This function is called before FFs are checked in the inner execution EM loops.
1026 *
1027 * @returns true if timers are pending, false if not.
1028 *
1029 * @param pVM Pointer to the shared VM structure.
1030 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
1031 * @thread The emulation thread.
1032 */
1033VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
1034{
1035 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1036 uint64_t off = 0;
1037 tmTimerPollInternal(pVM, pVCpu, &off);
1038 return off == 0;
1039}
1040
1041
1042/**
1043 * Set FF if we've passed the next virtual event.
1044 *
1045 * This function is called before FFs are checked in the inner execution EM loops.
1046 *
1047 * @param pVM Pointer to the shared VM structure.
1048 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
1049 * @thread The emulation thread.
1050 */
1051VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
1052{
1053 uint64_t off;
1054 tmTimerPollInternal(pVM, pVCpu, &off);
1055}
1056
1057
1058/**
1059 * Set FF if we've passed the next virtual event.
1060 *
1061 * This function is called before FFs are checked in the inner execution EM loops.
1062 *
1063 * @returns The GIP timestamp of the next event.
1064 * 0 if the next event has already expired.
1065 * @param pVM Pointer to the shared VM structure.
1066 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
1067 * @param pu64Delta Where to store the delta.
1068 * @thread The emulation thread.
1069 */
1070VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
1071{
1072 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
1073}
1074
1075#endif /* VBOX_HIGH_RES_TIMERS_HACK */
1076
1077/**
1078 * Gets the host context ring-3 pointer of the timer.
1079 *
1080 * @returns HC R3 pointer.
1081 * @param pTimer Timer handle as returned by one of the create functions.
1082 */
1083VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
1084{
1085 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
1086}
1087
1088
1089/**
1090 * Gets the host context ring-0 pointer of the timer.
1091 *
1092 * @returns HC R0 pointer.
1093 * @param pTimer Timer handle as returned by one of the create functions.
1094 */
1095VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
1096{
1097 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
1098}
1099
1100
1101/**
1102 * Gets the RC pointer of the timer.
1103 *
1104 * @returns RC pointer.
1105 * @param pTimer Timer handle as returned by one of the create functions.
1106 */
1107VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
1108{
1109 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
1110}
1111
1112
1113/**
1114 * Locks the timer clock.
1115 *
1116 * @returns VINF_SUCCESS on success, @a rcBusy if busy, and VERR_NOT_SUPPORTED
1117 * if the clock does not have a lock.
1118 * @param pTimer The timer which clock lock we wish to take.
1119 * @param rcBusy What to return in ring-0 and raw-mode context
1120 * if the lock is busy. Pass VINF_SUCCESS to
 *                      acquire the critical section thru a ring-3
 *                      call if necessary.
1123 *
1124 * @remarks Currently only supported on timers using the virtual sync clock.
1125 */
1126VMMDECL(int) TMTimerLock(PTMTIMER pTimer, int rcBusy)
1127{
1128 AssertPtr(pTimer);
1129 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, VERR_NOT_SUPPORTED);
1130 return PDMCritSectEnter(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock, rcBusy);
1131}
1132
1133
1134/**
1135 * Unlocks a timer clock locked by TMTimerLock.
1136 *
1137 * @param pTimer The timer which clock to unlock.
1138 */
1139VMMDECL(void) TMTimerUnlock(PTMTIMER pTimer)
1140{
1141 AssertPtr(pTimer);
1142 AssertReturnVoid(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC);
1143 PDMCritSectLeave(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1144}
1145
1146
1147/**
1148 * Checks if the current thread owns the timer clock lock.
1149 *
1150 * @returns @c true if its the owner, @c false if not.
1151 * @param pTimer The timer handle.
1152 */
1153VMMDECL(bool) TMTimerIsLockOwner(PTMTIMER pTimer)
1154{
1155 AssertPtr(pTimer);
1156 AssertReturn(pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC, false);
1157 return PDMCritSectIsOwner(&pTimer->CTX_SUFF(pVM)->tm.s.VirtualSyncLock);
1158}
1159
1160
1161/**
1162 * Optimized TMTimerSet code path for starting an inactive timer.
1163 *
1164 * @returns VBox status code.
1165 *
1166 * @param pVM The VM handle.
1167 * @param pTimer The timer handle.
1168 * @param u64Expire The new expire time.
1169 */
1170static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
1171{
1172 Assert(!pTimer->offPrev);
1173 Assert(!pTimer->offNext);
1174 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1175
1176 TMCLOCK const enmClock = pTimer->enmClock;
1177
1178 /*
1179 * Calculate and set the expiration time.
1180 */
1181 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1182 {
1183 uint64_t u64Last = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
1184 AssertMsgStmt(u64Expire >= u64Last,
1185 ("exp=%#llx last=%#llx\n", u64Expire, u64Last),
1186 u64Expire = u64Last);
1187 }
1188 ASMAtomicWriteU64(&pTimer->u64Expire, u64Expire);
1189 Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));
1190
1191 /*
1192 * Link the timer into the active list.
1193 */
1194 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1195
1196 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
1197 tmTimerUnlock(pVM);
1198 return VINF_SUCCESS;
1199}
1200
1201
1202/**
1203 * TMTimerSet for the virtual sync timer queue.
1204 *
1205 * This employs a greatly simplified state machine by always acquiring the
1206 * queue lock and bypassing the scheduling list.
1207 *
1208 * @returns VBox status code
1209 * @param pVM The VM handle.
1210 * @param pTimer The timer handle.
1211 * @param u64Expire The expiration time.
1212 */
1213static int tmTimerVirtualSyncSet(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
1214{
1215 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1216 VM_ASSERT_EMT(pVM);
1217 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1218 AssertRCReturn(rc, rc);
1219
1220 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1221 TMTIMERSTATE enmState = pTimer->enmState;
1222 switch (enmState)
1223 {
1224 case TMTIMERSTATE_EXPIRED_DELIVER:
1225 case TMTIMERSTATE_STOPPED:
1226 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1227 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStExpDeliver);
1228 else
1229 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStStopped);
1230
1231 AssertMsg(u64Expire >= pVM->tm.s.u64VirtualSync,
1232 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
1233 pTimer->u64Expire = u64Expire;
1234 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1235 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1236 rc = VINF_SUCCESS;
1237 break;
1238
1239 case TMTIMERSTATE_ACTIVE:
1240 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetVsStActive);
1241 tmTimerQueueUnlinkActive(pQueue, pTimer);
1242 pTimer->u64Expire = u64Expire;
1243 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1244 rc = VINF_SUCCESS;
1245 break;
1246
1247 case TMTIMERSTATE_PENDING_RESCHEDULE:
1248 case TMTIMERSTATE_PENDING_STOP:
1249 case TMTIMERSTATE_PENDING_SCHEDULE:
1250 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1251 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1252 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1253 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1254 case TMTIMERSTATE_DESTROY:
1255 case TMTIMERSTATE_FREE:
1256 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1257 rc = VERR_TM_INVALID_STATE;
1258 break;
1259
1260 default:
1261 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1262 rc = VERR_TM_UNKNOWN_STATE;
1263 break;
1264 }
1265
1266 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetVs), a);
1267 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1268 return rc;
1269}
1270
1271
1272/**
1273 * Arm a timer with a (new) expire time.
1274 *
1275 * @returns VBox status.
1276 * @param pTimer Timer handle as returned by one of the create functions.
1277 * @param u64Expire New expire time.
1278 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially: they are modified while holding
       the virtual sync lock and bypass the lock-free state machine below. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSet(pVM, pTimer, u64Expire);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, u64Expire, "TMTimerSet", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
    }
#endif

    /*
     * The most common case is setting the timer again during the callback.
     * The second most common case is starting a timer at some other time.
     */
#if 1
    TMTIMERSTATE enmState1 = pTimer->enmState;
    if (    enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
        || (   enmState1 == TMTIMERSTATE_STOPPED
            && pTimer->pCritSect))
    {
        /* Try take the TM lock and check the state again. */
        if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
        {
            if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
            {
                /* We own the lock and won the state race; link the timer
                   directly onto the active list (also unlocks). */
                tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
                STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                return VINF_SUCCESS;
            }
            tmTimerUnlock(pVM);
        }
    }
#endif

    /*
     * Unoptimized code path.
     */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            /* Inactive timer: claim it, set the expire time and queue it for scheduling. */
            case TMTIMERSTATE_EXPIRED_DELIVER:
            case TMTIMERSTATE_STOPPED:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Already queued for scheduling: just update the expire time. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            /* Active timer: request a reschedule with the new expire time. */
            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            /* Pending stop/reschedule: convert it into a reschedule with the new time. */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            /* Transient states: another thread is operating on the timer; back off and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_INTERNAL_ERROR;
}
1428
1429
1430/**
1431 * Return the current time for the specified clock, setting pu64Now if not NULL.
1432 *
1433 * @returns Current time.
1434 * @param pVM The VM handle.
1435 * @param enmClock The clock to query.
1436 * @param pu64Now Optional pointer where to store the return time
1437 */
1438DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1439{
1440 uint64_t u64Now;
1441 switch (enmClock)
1442 {
1443 case TMCLOCK_VIRTUAL_SYNC:
1444 u64Now = TMVirtualSyncGet(pVM);
1445 break;
1446 case TMCLOCK_VIRTUAL:
1447 u64Now = TMVirtualGet(pVM);
1448 break;
1449 case TMCLOCK_REAL:
1450 u64Now = TMRealGet(pVM);
1451 break;
1452 default:
1453 AssertFatalMsgFailed(("%d\n", enmClock));
1454 }
1455
1456 if (pu64Now)
1457 *pu64Now = u64Now;
1458 return u64Now;
1459}
1460
1461
1462/**
1463 * Optimized TMTimerSetRelative code path.
1464 *
1465 * @returns VBox status code.
1466 *
1467 * @param pVM The VM handle.
1468 * @param pTimer The timer handle.
1469 * @param cTicksToNext Clock ticks until the next time expiration.
1470 * @param pu64Now Where to return the current time stamp used.
1471 * Optional.
1472 */
1473static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1474{
1475 Assert(!pTimer->offPrev);
1476 Assert(!pTimer->offNext);
1477 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1478
1479 /*
1480 * Calculate and set the expiration time.
1481 */
1482 TMCLOCK const enmClock = pTimer->enmClock;
1483 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1484 pTimer->u64Expire = u64Expire;
1485 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));
1486
1487 /*
1488 * Link the timer into the active list.
1489 */
1490 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", R3STRING(pTimer->pszDesc));
1491 tmTimerQueueLinkActive(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1492
1493 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1494 tmTimerUnlock(pVM);
1495 return VINF_SUCCESS;
1496}
1497
1498
1499/**
1500 * TMTimerSetRelative for the virtual sync timer queue.
1501 *
1502 * This employs a greatly simplified state machine by always acquiring the
1503 * queue lock and bypassing the scheduling list.
1504 *
1505 * @returns VBox status code
1506 * @param pVM The VM handle.
 * @param pTimer The timer handle.
 * @param cTicksToNext Clock ticks until the next time expiration.
1508 * @param pu64Now Where to return the current time stamp used.
1509 * Optional.
1510 */
1511static int tmTimerVirtualSyncSetRelative(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1512{
1513 STAM_PROFILE_START(pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1514 VM_ASSERT_EMT(pVM);
1515 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1516 AssertRCReturn(rc, rc);
1517
1518 /* Calculate the expiration tick. */
1519 uint64_t u64Expire = TMVirtualSyncGetNoCheck(pVM);
1520 if (pu64Now)
1521 *pu64Now = u64Expire;
1522 u64Expire += cTicksToNext;
1523
1524 /* Update the timer. */
1525 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1526 TMTIMERSTATE enmState = pTimer->enmState;
1527 switch (enmState)
1528 {
1529 case TMTIMERSTATE_EXPIRED_DELIVER:
1530 case TMTIMERSTATE_STOPPED:
1531 if (enmState == TMTIMERSTATE_EXPIRED_DELIVER)
1532 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStExpDeliver);
1533 else
1534 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStStopped);
1535 pTimer->u64Expire = u64Expire;
1536 TM_SET_STATE(pTimer, TMTIMERSTATE_ACTIVE);
1537 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1538 rc = VINF_SUCCESS;
1539 break;
1540
1541 case TMTIMERSTATE_ACTIVE:
1542 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeVsStActive);
1543 tmTimerQueueUnlinkActive(pQueue, pTimer);
1544 pTimer->u64Expire = u64Expire;
1545 tmTimerQueueLinkActive(pQueue, pTimer, u64Expire);
1546 rc = VINF_SUCCESS;
1547 break;
1548
1549 case TMTIMERSTATE_PENDING_RESCHEDULE:
1550 case TMTIMERSTATE_PENDING_STOP:
1551 case TMTIMERSTATE_PENDING_SCHEDULE:
1552 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1553 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1554 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1555 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1556 case TMTIMERSTATE_DESTROY:
1557 case TMTIMERSTATE_FREE:
1558 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1559 rc = VERR_TM_INVALID_STATE;
1560 break;
1561
1562 default:
1563 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1564 rc = VERR_TM_UNKNOWN_STATE;
1565 break;
1566 }
1567
1568 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelativeVs), a);
1569 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1570 return rc;
1571}
1572
1573
1574/**
1575 * Arm a timer with a expire time relative to the current time.
1576 *
1577 * @returns VBox status.
1578 * @param pTimer Timer handle as returned by one of the create functions.
1579 * @param cTicksToNext Clock ticks until the next time expiration.
1580 * @param pu64Now Where to return the current time stamp used.
1581 * Optional.
1582 */
VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);

    /* Treat virtual sync timers specially: they are modified while holding
       the virtual sync lock and bypass the lock-free state machine below. */
    if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
        return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now);

    STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    TMTIMER_ASSERT_CRITSECT(pTimer);

    DBGFTRACE_U64_TAG2(pVM, cTicksToNext, "TMTimerSetRelative", R3STRING(pTimer->pszDesc));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Gather optimization info.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
    TMTIMERSTATE enmOrgState = pTimer->enmState;
    switch (enmOrgState)
    {
        case TMTIMERSTATE_STOPPED:                  STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
        case TMTIMERSTATE_EXPIRED_DELIVER:          STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
        case TMTIMERSTATE_ACTIVE:                   STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
        case TMTIMERSTATE_PENDING_STOP:             STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
        case TMTIMERSTATE_PENDING_SCHEDULE:         STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
        case TMTIMERSTATE_PENDING_RESCHEDULE:       STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
        default:                                    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
    }
#endif

    /*
     * Try to take the TM lock and optimize the common cases.
     *
     * With the TM lock we can safely make optimizations like immediate
     * scheduling and we can also be 100% sure that we're not racing the
     * running of the timer queues. As an additional restraint we require the
     * timer to have a critical section associated with it so we can be 100%
     * certain there aren't concurrent operations on the timer. (This latter
     * isn't necessary any longer as this isn't supported for any timers,
     * critsect or not.)
     *
     * Note! Lock ordering doesn't apply when we only try to
     *       get the innermost locks.
     */
    bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
#if 1
    if (    fOwnTMLock
        &&  pTimer->pCritSect)
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        if (RT_LIKELY(  (   enmState == TMTIMERSTATE_EXPIRED_DELIVER
                         || enmState == TMTIMERSTATE_STOPPED)
                      && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
        {
            /* We own the lock and won the state race; link the timer
               directly onto the active list (also unlocks). */
            tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
            STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
            return VINF_SUCCESS;
        }

        /* Optimize other states when it becomes necessary. */
    }
#endif

    /*
     * Unoptimized path.
     */
    int rc;
    TMCLOCK const enmClock = pTimer->enmClock;
    for (int cRetries = 1000; ; cRetries--)
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            case TMTIMERSTATE_STOPPED:
                if (enmClock == TMCLOCK_VIRTUAL_SYNC)
                {
                    /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
                     *        Figure a safe way of activating this timer while the queue is
                     *        being run.
                     *        (99.9% sure that the assertion is caused by DevAPIC.cpp
                     *        re-starting the timer in response to a initial_count write.) */
                }
                /* fall thru */
            case TMTIMERSTATE_EXPIRED_DELIVER:
                /* Inactive timer: claim it, compute and set the expire time, queue for scheduling. */
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            /* Already queued for scheduling: just update the expire time. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            /* Active timer: request a reschedule with the new expire time. */
            case TMTIMERSTATE_ACTIVE:
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;

            /* Pending stop/reschedule: convert it into a reschedule with the new time. */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
                    Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
                          pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_TRY_AGAIN;
                break;


            /* Transient states: another thread is operating on the timer; back off and retry. */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                rc = VERR_TRY_AGAIN;
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_INVALID_STATE;
                break;

            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                rc = VERR_TM_UNKNOWN_STATE;
                break;
        }

        /* switch + loop is tedious to break out of. */
        if (rc == VINF_SUCCESS)
            break;

        if (rc != VERR_TRY_AGAIN)
        {
            /* Hard failure: still report the current clock value via pu64Now. */
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }
        if (cRetries <= 0)
        {
            AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
            rc = VERR_INTERNAL_ERROR;
            tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
            break;
        }

        /*
         * Retry to gain locks.
         */
        if (!fOwnTMLock)
            fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));

    } /* for (;;) */

    /*
     * Clean up and return.
     */
    if (fOwnTMLock)
        tmTimerUnlock(pVM);

    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
    return rc;
}
1793
1794
1795/**
1796 * Arm a timer with a (new) expire time relative to current time.
1797 *
1798 * @returns VBox status.
1799 * @param pTimer Timer handle as returned by one of the create functions.
1800 * @param cMilliesToNext Number of milliseconds to the next tick.
1801 */
1802VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
1803{
1804 PVM pVM = pTimer->CTX_SUFF(pVM);
1805 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1806
1807 switch (pTimer->enmClock)
1808 {
1809 case TMCLOCK_VIRTUAL:
1810 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1811 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1812
1813 case TMCLOCK_VIRTUAL_SYNC:
1814 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1815 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1816
1817 case TMCLOCK_REAL:
1818 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1819 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
1820
1821 default:
1822 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1823 return VERR_INTERNAL_ERROR;
1824 }
1825}
1826
1827
1828/**
1829 * Arm a timer with a (new) expire time relative to current time.
1830 *
1831 * @returns VBox status.
1832 * @param pTimer Timer handle as returned by one of the create functions.
1833 * @param cMicrosToNext Number of microseconds to the next tick.
1834 */
1835VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
1836{
1837 PVM pVM = pTimer->CTX_SUFF(pVM);
1838 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1839
1840 switch (pTimer->enmClock)
1841 {
1842 case TMCLOCK_VIRTUAL:
1843 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1844 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1845
1846 case TMCLOCK_VIRTUAL_SYNC:
1847 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1848 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1849
1850 case TMCLOCK_REAL:
1851 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1852 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
1853
1854 default:
1855 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1856 return VERR_INTERNAL_ERROR;
1857 }
1858}
1859
1860
1861/**
1862 * Arm a timer with a (new) expire time relative to current time.
1863 *
1864 * @returns VBox status.
1865 * @param pTimer Timer handle as returned by one of the create functions.
1866 * @param cNanosToNext Number of nanoseconds to the next tick.
1867 */
1868VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
1869{
1870 PVM pVM = pTimer->CTX_SUFF(pVM);
1871 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1872
1873 switch (pTimer->enmClock)
1874 {
1875 case TMCLOCK_VIRTUAL:
1876 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1877 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1878
1879 case TMCLOCK_VIRTUAL_SYNC:
1880 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1881 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1882
1883 case TMCLOCK_REAL:
1884 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1885 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
1886
1887 default:
1888 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1889 return VERR_INTERNAL_ERROR;
1890 }
1891}
1892
1893
1894/**
1895 * Drops a hint about the frequency of the timer.
1896 *
1897 * This is used by TM and the VMM to calculate how often guest execution needs
1898 * to be interrupted. The hint is automatically cleared by TMTimerStop.
1899 *
1900 * @returns VBox status code.
1901 * @param pTimer Timer handle as returned by one of the create
1902 * functions.
1903 * @param uHzHint The frequency hint. Pass 0 to clear the hint.
1904 *
1905 * @remarks We're using an integer hertz value here since anything above 1 HZ
1906 * is not going to be any trouble satisfying scheduling wise. The
1907 * range where it makes sense is >= 100 HZ.
1908 */
1909VMMDECL(int) TMTimerSetFrequencyHint(PTMTIMER pTimer, uint32_t uHzHint)
1910{
1911 TMTIMER_ASSERT_CRITSECT(pTimer);
1912
1913 uint32_t const uHzOldHint = pTimer->uHzHint;
1914 pTimer->uHzHint = uHzHint;
1915
1916 PVM pVM = pTimer->CTX_SUFF(pVM);
1917 uint32_t const uMaxHzHint = pVM->tm.s.uMaxHzHint;
1918 if ( uHzHint > uMaxHzHint
1919 || uHzOldHint >= uMaxHzHint)
1920 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1921
1922 return VINF_SUCCESS;
1923}
1924
1925
1926/**
1927 * TMTimerStop for the virtual sync timer queue.
1928 *
1929 * This employs a greatly simplified state machine by always acquiring the
1930 * queue lock and bypassing the scheduling list.
1931 *
1932 * @returns VBox status code
1933 * @param pVM The VM handle.
1934 * @param pTimer The timer handle.
1935 */
1936static int tmTimerVirtualSyncStop(PVM pVM, PTMTIMER pTimer)
1937{
1938 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1939 VM_ASSERT_EMT(pVM);
1940 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VINF_SUCCESS);
1941 AssertRCReturn(rc, rc);
1942
1943 /* Reset the HZ hint. */
1944 if (pTimer->uHzHint)
1945 {
1946 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
1947 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
1948 pTimer->uHzHint = 0;
1949 }
1950
1951 /* Update the timer state. */
1952 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC];
1953 TMTIMERSTATE enmState = pTimer->enmState;
1954 switch (enmState)
1955 {
1956 case TMTIMERSTATE_ACTIVE:
1957 tmTimerQueueUnlinkActive(pQueue, pTimer);
1958 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1959 rc = VINF_SUCCESS;
1960 break;
1961
1962 case TMTIMERSTATE_EXPIRED_DELIVER:
1963 TM_SET_STATE(pTimer, TMTIMERSTATE_STOPPED);
1964 rc = VINF_SUCCESS;
1965 break;
1966
1967 case TMTIMERSTATE_STOPPED:
1968 rc = VINF_SUCCESS;
1969 break;
1970
1971 case TMTIMERSTATE_PENDING_RESCHEDULE:
1972 case TMTIMERSTATE_PENDING_STOP:
1973 case TMTIMERSTATE_PENDING_SCHEDULE:
1974 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1975 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1976 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1977 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1978 case TMTIMERSTATE_DESTROY:
1979 case TMTIMERSTATE_FREE:
1980 AssertLogRelMsgFailed(("Invalid timer state %s: %s\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1981 rc = VERR_TM_INVALID_STATE;
1982 break;
1983
1984 default:
1985 AssertMsgFailed(("Unknown timer state %d: %s\n", enmState, R3STRING(pTimer->pszDesc)));
1986 rc = VERR_TM_UNKNOWN_STATE;
1987 break;
1988 }
1989
1990 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStopVs), a);
1991 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
1992 return rc;
1993}
1994
1995
1996/**
1997 * Stop the timer.
1998 * Use TMR3TimerArm() to "un-stop" the timer.
1999 *
2000 * @returns VBox status.
2001 * @param pTimer Timer handle as returned by one of the create functions.
2002 */
2003VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
2004{
2005 PVM pVM = pTimer->CTX_SUFF(pVM);
2006
2007 /* Treat virtual sync timers specially. */
2008 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC)
2009 return tmTimerVirtualSyncStop(pVM, pTimer);
2010
2011 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2012 TMTIMER_ASSERT_CRITSECT(pTimer);
2013
2014 /*
2015 * Reset the HZ hint.
2016 */
2017 if (pTimer->uHzHint)
2018 {
2019 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint)
2020 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);
2021 pTimer->uHzHint = 0;
2022 }
2023
2024 /** @todo see if this function needs optimizing. */
2025 int cRetries = 1000;
2026 do
2027 {
2028 /*
2029 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
2030 */
2031 TMTIMERSTATE enmState = pTimer->enmState;
2032 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
2033 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
2034 switch (enmState)
2035 {
2036 case TMTIMERSTATE_EXPIRED_DELIVER:
2037 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
2038 return VERR_INVALID_PARAMETER;
2039
2040 case TMTIMERSTATE_STOPPED:
2041 case TMTIMERSTATE_PENDING_STOP:
2042 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2043 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2044 return VINF_SUCCESS;
2045
2046 case TMTIMERSTATE_PENDING_SCHEDULE:
2047 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
2048 {
2049 tmSchedule(pTimer);
2050 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2051 return VINF_SUCCESS;
2052 }
2053
2054 case TMTIMERSTATE_PENDING_RESCHEDULE:
2055 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2056 {
2057 tmSchedule(pTimer);
2058 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2059 return VINF_SUCCESS;
2060 }
2061 break;
2062
2063 case TMTIMERSTATE_ACTIVE:
2064 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
2065 {
2066 tmSchedule(pTimer);
2067 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2068 return VINF_SUCCESS;
2069 }
2070 break;
2071
2072 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2073 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2074 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2075#ifdef IN_RING3
2076 if (!RTThreadYield())
2077 RTThreadSleep(1);
2078#else
2079/**@todo call host and yield cpu after a while. */
2080#endif
2081 break;
2082
2083 /*
2084 * Invalid states.
2085 */
2086 case TMTIMERSTATE_DESTROY:
2087 case TMTIMERSTATE_FREE:
2088 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2089 return VERR_TM_INVALID_STATE;
2090 default:
2091 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2092 return VERR_TM_UNKNOWN_STATE;
2093 }
2094 } while (cRetries-- > 0);
2095
2096 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
2097 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerStop), a);
2098 return VERR_INTERNAL_ERROR;
2099}
2100
2101
2102/**
2103 * Get the current clock time.
2104 * Handy for calculating the new expire time.
2105 *
2106 * @returns Current clock time.
2107 * @param pTimer Timer handle as returned by one of the create functions.
2108 */
2109VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
2110{
2111 PVM pVM = pTimer->CTX_SUFF(pVM);
2112
2113 uint64_t u64;
2114 switch (pTimer->enmClock)
2115 {
2116 case TMCLOCK_VIRTUAL:
2117 u64 = TMVirtualGet(pVM);
2118 break;
2119 case TMCLOCK_VIRTUAL_SYNC:
2120 u64 = TMVirtualSyncGet(pVM);
2121 break;
2122 case TMCLOCK_REAL:
2123 u64 = TMRealGet(pVM);
2124 break;
2125 default:
2126 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2127 return UINT64_MAX;
2128 }
2129 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2130 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2131 return u64;
2132}
2133
2134
2135/**
2136 * Get the frequency of the timer clock.
2137 *
2138 * @returns Clock frequency (as Hz of course).
2139 * @param pTimer Timer handle as returned by one of the create functions.
2140 */
2141VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
2142{
2143 switch (pTimer->enmClock)
2144 {
2145 case TMCLOCK_VIRTUAL:
2146 case TMCLOCK_VIRTUAL_SYNC:
2147 return TMCLOCK_FREQ_VIRTUAL;
2148
2149 case TMCLOCK_REAL:
2150 return TMCLOCK_FREQ_REAL;
2151
2152 default:
2153 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2154 return 0;
2155 }
2156}
2157
2158
2159/**
2160 * Get the current clock time as nanoseconds.
2161 *
2162 * @returns The timer clock as nanoseconds.
2163 * @param pTimer Timer handle as returned by one of the create functions.
2164 */
2165VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
2166{
2167 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
2168}
2169
2170
2171/**
2172 * Get the current clock time as microseconds.
2173 *
2174 * @returns The timer clock as microseconds.
2175 * @param pTimer Timer handle as returned by one of the create functions.
2176 */
2177VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
2178{
2179 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
2180}
2181
2182
2183/**
2184 * Get the current clock time as milliseconds.
2185 *
2186 * @returns The timer clock as milliseconds.
2187 * @param pTimer Timer handle as returned by one of the create functions.
2188 */
2189VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
2190{
2191 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
2192}
2193
2194
2195/**
2196 * Converts the specified timer clock time to nanoseconds.
2197 *
2198 * @returns nanoseconds.
2199 * @param pTimer Timer handle as returned by one of the create functions.
2200 * @param u64Ticks The clock ticks.
2201 * @remark There could be rounding errors here. We just do a simple integer divide
2202 * without any adjustments.
2203 */
2204VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
2205{
2206 switch (pTimer->enmClock)
2207 {
2208 case TMCLOCK_VIRTUAL:
2209 case TMCLOCK_VIRTUAL_SYNC:
2210 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2211 return u64Ticks;
2212
2213 case TMCLOCK_REAL:
2214 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2215 return u64Ticks * 1000000;
2216
2217 default:
2218 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2219 return 0;
2220 }
2221}
2222
2223
2224/**
2225 * Converts the specified timer clock time to microseconds.
2226 *
2227 * @returns microseconds.
2228 * @param pTimer Timer handle as returned by one of the create functions.
2229 * @param u64Ticks The clock ticks.
2230 * @remark There could be rounding errors here. We just do a simple integer divide
2231 * without any adjustments.
2232 */
2233VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
2234{
2235 switch (pTimer->enmClock)
2236 {
2237 case TMCLOCK_VIRTUAL:
2238 case TMCLOCK_VIRTUAL_SYNC:
2239 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2240 return u64Ticks / 1000;
2241
2242 case TMCLOCK_REAL:
2243 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2244 return u64Ticks * 1000;
2245
2246 default:
2247 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2248 return 0;
2249 }
2250}
2251
2252
2253/**
2254 * Converts the specified timer clock time to milliseconds.
2255 *
2256 * @returns milliseconds.
2257 * @param pTimer Timer handle as returned by one of the create functions.
2258 * @param u64Ticks The clock ticks.
2259 * @remark There could be rounding errors here. We just do a simple integer divide
2260 * without any adjustments.
2261 */
2262VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
2263{
2264 switch (pTimer->enmClock)
2265 {
2266 case TMCLOCK_VIRTUAL:
2267 case TMCLOCK_VIRTUAL_SYNC:
2268 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2269 return u64Ticks / 1000000;
2270
2271 case TMCLOCK_REAL:
2272 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2273 return u64Ticks;
2274
2275 default:
2276 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2277 return 0;
2278 }
2279}
2280
2281
2282/**
2283 * Converts the specified nanosecond timestamp to timer clock ticks.
2284 *
2285 * @returns timer clock ticks.
2286 * @param pTimer Timer handle as returned by one of the create functions.
2287 * @param cNanoSecs The nanosecond value ticks to convert.
2288 * @remark There could be rounding and overflow errors here.
2289 */
2290VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t cNanoSecs)
2291{
2292 switch (pTimer->enmClock)
2293 {
2294 case TMCLOCK_VIRTUAL:
2295 case TMCLOCK_VIRTUAL_SYNC:
2296 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2297 return cNanoSecs;
2298
2299 case TMCLOCK_REAL:
2300 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2301 return cNanoSecs / 1000000;
2302
2303 default:
2304 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2305 return 0;
2306 }
2307}
2308
2309
2310/**
2311 * Converts the specified microsecond timestamp to timer clock ticks.
2312 *
2313 * @returns timer clock ticks.
2314 * @param pTimer Timer handle as returned by one of the create functions.
2315 * @param cMicroSecs The microsecond value ticks to convert.
2316 * @remark There could be rounding and overflow errors here.
2317 */
2318VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t cMicroSecs)
2319{
2320 switch (pTimer->enmClock)
2321 {
2322 case TMCLOCK_VIRTUAL:
2323 case TMCLOCK_VIRTUAL_SYNC:
2324 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2325 return cMicroSecs * 1000;
2326
2327 case TMCLOCK_REAL:
2328 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2329 return cMicroSecs / 1000;
2330
2331 default:
2332 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2333 return 0;
2334 }
2335}
2336
2337
2338/**
2339 * Converts the specified millisecond timestamp to timer clock ticks.
2340 *
2341 * @returns timer clock ticks.
2342 * @param pTimer Timer handle as returned by one of the create functions.
2343 * @param cMilliSecs The millisecond value ticks to convert.
2344 * @remark There could be rounding and overflow errors here.
2345 */
2346VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t cMilliSecs)
2347{
2348 switch (pTimer->enmClock)
2349 {
2350 case TMCLOCK_VIRTUAL:
2351 case TMCLOCK_VIRTUAL_SYNC:
2352 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
2353 return cMilliSecs * 1000000;
2354
2355 case TMCLOCK_REAL:
2356 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
2357 return cMilliSecs;
2358
2359 default:
2360 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
2361 return 0;
2362 }
2363}
2364
2365
2366/**
2367 * Get the expire time of the timer.
2368 * Only valid for active timers.
2369 *
2370 * @returns Expire time of the timer.
2371 * @param pTimer Timer handle as returned by one of the create functions.
2372 */
2373VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
2374{
2375 TMTIMER_ASSERT_CRITSECT(pTimer);
2376 int cRetries = 1000;
2377 do
2378 {
2379 TMTIMERSTATE enmState = pTimer->enmState;
2380 switch (enmState)
2381 {
2382 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2383 case TMTIMERSTATE_EXPIRED_DELIVER:
2384 case TMTIMERSTATE_STOPPED:
2385 case TMTIMERSTATE_PENDING_STOP:
2386 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2387 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2388 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2389 return ~(uint64_t)0;
2390
2391 case TMTIMERSTATE_ACTIVE:
2392 case TMTIMERSTATE_PENDING_RESCHEDULE:
2393 case TMTIMERSTATE_PENDING_SCHEDULE:
2394 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2395 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2396 return pTimer->u64Expire;
2397
2398 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2399 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2400#ifdef IN_RING3
2401 if (!RTThreadYield())
2402 RTThreadSleep(1);
2403#endif
2404 break;
2405
2406 /*
2407 * Invalid states.
2408 */
2409 case TMTIMERSTATE_DESTROY:
2410 case TMTIMERSTATE_FREE:
2411 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2412 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2413 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2414 return ~(uint64_t)0;
2415 default:
2416 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2417 return ~(uint64_t)0;
2418 }
2419 } while (cRetries-- > 0);
2420
2421 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
2422 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2423 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2424 return ~(uint64_t)0;
2425}
2426
2427
2428/**
2429 * Checks if a timer is active or not.
2430 *
2431 * @returns True if active.
2432 * @returns False if not active.
2433 * @param pTimer Timer handle as returned by one of the create functions.
2434 */
2435VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
2436{
2437 TMTIMERSTATE enmState = pTimer->enmState;
2438 switch (enmState)
2439 {
2440 case TMTIMERSTATE_STOPPED:
2441 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2442 case TMTIMERSTATE_EXPIRED_DELIVER:
2443 case TMTIMERSTATE_PENDING_STOP:
2444 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2445 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2446 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2447 return false;
2448
2449 case TMTIMERSTATE_ACTIVE:
2450 case TMTIMERSTATE_PENDING_RESCHEDULE:
2451 case TMTIMERSTATE_PENDING_SCHEDULE:
2452 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2453 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2454 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2455 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2456 return true;
2457
2458 /*
2459 * Invalid states.
2460 */
2461 case TMTIMERSTATE_DESTROY:
2462 case TMTIMERSTATE_FREE:
2463 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
2464 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
2465 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
2466 return false;
2467 default:
2468 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
2469 return false;
2470 }
2471}
2472
2473
2474/**
2475 * Gets the current warp drive percent.
2476 *
2477 * @returns The warp drive percent.
2478 * @param pVM The VM handle.
2479 */
2480VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
2481{
2482 return pVM->tm.s.u32VirtualWarpDrivePercentage;
2483}
2484
2485
2486/**
2487 * Convert state to string.
2488 *
2489 * @returns Readonly status name.
2490 * @param enmState State.
2491 */
2492const char *tmTimerState(TMTIMERSTATE enmState)
2493{
2494 switch (enmState)
2495 {
2496#define CASE(num, state) \
2497 case TMTIMERSTATE_##state: \
2498 AssertCompile(TMTIMERSTATE_##state == (num)); \
2499 return #num "-" #state
2500 CASE( 1,STOPPED);
2501 CASE( 2,ACTIVE);
2502 CASE( 3,EXPIRED_GET_UNLINK);
2503 CASE( 4,EXPIRED_DELIVER);
2504 CASE( 5,PENDING_STOP);
2505 CASE( 6,PENDING_STOP_SCHEDULE);
2506 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
2507 CASE( 8,PENDING_SCHEDULE);
2508 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
2509 CASE(10,PENDING_RESCHEDULE);
2510 CASE(11,DESTROY);
2511 CASE(12,FREE);
2512 default:
2513 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
2514 return "Invalid state!";
2515#undef CASE
2516 }
2517}
2518
2519
2520/**
2521 * Gets the highest frequency hint for all the important timers.
2522 *
2523 * @returns The highest frequency. 0 if no timers care.
2524 * @param pVM The VM handle.
2525 */
2526static uint32_t tmGetFrequencyHint(PVM pVM)
2527{
2528 /*
2529 * Query the value, recalculate it if necessary.
2530 *
2531 * The "right" highest frequency value isn't so important that we'll block
2532 * waiting on the timer semaphore.
2533 */
2534 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint);
2535 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating)))
2536 {
2537 if (RT_SUCCESS(tmTimerTryLock(pVM)))
2538 {
2539 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false);
2540
2541 /*
2542 * Loop over the timers associated with each clock.
2543 */
2544 uMaxHzHint = 0;
2545 for (int i = 0; i < TMCLOCK_MAX; i++)
2546 {
2547 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
2548 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pCur = TMTIMER_GET_NEXT(pCur))
2549 {
2550 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint);
2551 if (uHzHint > uMaxHzHint)
2552 {
2553 switch (pCur->enmState)
2554 {
2555 case TMTIMERSTATE_ACTIVE:
2556 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2557 case TMTIMERSTATE_EXPIRED_DELIVER:
2558 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2559 case TMTIMERSTATE_PENDING_SCHEDULE:
2560 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2561 case TMTIMERSTATE_PENDING_RESCHEDULE:
2562 uMaxHzHint = uHzHint;
2563 break;
2564
2565 case TMTIMERSTATE_STOPPED:
2566 case TMTIMERSTATE_PENDING_STOP:
2567 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2568 case TMTIMERSTATE_DESTROY:
2569 case TMTIMERSTATE_FREE:
2570 break;
2571 /* no default, want gcc warnings when adding more states. */
2572 }
2573 }
2574 }
2575 }
2576 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint);
2577 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint));
2578 tmTimerUnlock(pVM);
2579 }
2580 }
2581 return uMaxHzHint;
2582}
2583
2584
2585/**
2586 * Calculates a host timer frequency that would be suitable for the current
2587 * timer load.
2588 *
2589 * This will take the highest timer frequency, adjust for catch-up and warp
2590 * driver, and finally add a little fudge factor. The caller (VMM) will use
2591 * the result to adjust the per-cpu preemption timer.
2592 *
2593 * @returns The highest frequency. 0 if no important timers around.
2594 * @param pVM The VM handle.
2595 * @param pVCpu The current CPU.
2596 */
2597VMM_INT_DECL(uint32_t) TMCalcHostTimerFrequency(PVM pVM, PVMCPU pVCpu)
2598{
2599 uint32_t uHz = tmGetFrequencyHint(pVM);
2600
2601 /* Catch up, we have to be more aggressive than the % indicates at the
2602 beginning of the effort. */
2603 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2604 {
2605 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
2606 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
2607 {
2608 if (u32Pct <= 100)
2609 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp100 / 100;
2610 else if (u32Pct <= 200)
2611 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp200 / 100;
2612 else if (u32Pct <= 400)
2613 u32Pct = u32Pct * pVM->tm.s.cPctHostHzFudgeFactorCatchUp400 / 100;
2614 uHz *= u32Pct + 100;
2615 uHz /= 100;
2616 }
2617 }
2618
2619 /* Warp drive. */
2620 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualWarpDrive))
2621 {
2622 uint32_t u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualWarpDrivePercentage);
2623 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualWarpDrive))
2624 {
2625 uHz *= u32Pct;
2626 uHz /= 100;
2627 }
2628 }
2629
2630 /* Fudge factor. */
2631 if (pVCpu->idCpu == pVM->tm.s.idTimerCpu)
2632 uHz *= pVM->tm.s.cPctHostHzFudgeFactorTimerCpu;
2633 else
2634 uHz *= pVM->tm.s.cPctHostHzFudgeFactorOtherCpu;
2635 uHz /= 100;
2636
2637 /* Make sure it isn't too high. */
2638 if (uHz > pVM->tm.s.cHostHzMax)
2639 uHz = pVM->tm.s.cHostHzMax;
2640
2641 return uHz;
2642}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette