VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@19602

Last change on this file since 19602 was 19538, checked in by vboxsync, 16 years ago

TM: Moved TMR3TimerDestroy from TMAll.cpp to TM.cpp.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 50.2 KB
1/* $Id: TMAll.cpp 19538 2009-05-08 18:12:33Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_TM
27#include <VBox/tm.h>
28#include <VBox/mm.h>
29#ifdef IN_RING3
30# include <VBox/rem.h>
31#endif
32#include "TMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#ifdef IN_RING3
43# include <iprt/thread.h>
44#endif
45
46
47#ifndef tmLock
48
49/**
50 * Try to take the EMT/TM lock: wait in ring-3, return VERR_SEM_BUSY in R0/RC.
51 *
52 * @retval VINF_SUCCESS on success (always in ring-3).
53 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
54 *
55 * @param pVM The VM handle.
56 */
57int tmLock(PVM pVM)
58{
59 VM_ASSERT_EMT(pVM);
60 int rc = PDMCritSectEnter(&pVM->tm.s.EmtLock, VERR_SEM_BUSY);
61 return rc;
62}
63
64
65/**
66 * Try to take the EMT/TM lock without waiting.
67 *
68 * @retval VINF_SUCCESS on success.
69 * @retval VERR_SEM_BUSY if busy.
70 *
71 * @param pVM The VM handle.
72 */
73int tmTryLock(PVM pVM)
74{
75 VM_ASSERT_EMT(pVM);
76 int rc = PDMCritSectTryEnter(&pVM->tm.s.EmtLock);
77 return rc;
78}
79
80
81/**
82 * Release EMT/TM lock.
83 *
84 * @param pVM The VM handle.
85 */
86void tmUnlock(PVM pVM)
87{
88 PDMCritSectLeave(&pVM->tm.s.EmtLock);
89}
90
91#endif /* ! macros */
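/*
 * Note on the locking convention: tmLock() blocks in ring-3 but only tries the
 * critical section in ring-0 and raw-mode context, returning VERR_SEM_BUSY
 * there.  Callers that cannot wait (see tmSchedule() below) fall back to
 * setting VM_FF_TIMER so the work is picked up by the EMT in ring-3.
 */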
92
93/**
94 * Notification that execution is about to start.
95 *
96 * This call must always be paired with a TMNotifyEndOfExecution call.
97 *
98 * The function may, depending on the configuration, resume the TSC and future
99 * clocks that only tick when we're executing guest code.
100 *
101 * @param pVCpu The VMCPU to operate on.
102 */
103VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
104{
105 PVM pVM = pVCpu->CTX_SUFF(pVM);
106
107 if (pVM->tm.s.fTSCTiedToExecution)
108 tmCpuTickResume(pVM, pVCpu);
109}
110
111
112/**
113 * Notification that execution has ended.
114 *
115 * This call must always be paired with a TMNotifyStartOfExecution call.
116 *
117 * The function may, depending on the configuration, suspend the TSC and future
118 * clocks that only tick when we're executing guest code.
119 *
120 * @param pVCpu The VMCPU to operate on.
121 */
122VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
123{
124 PVM pVM = pVCpu->CTX_SUFF(pVM);
125
126 if (pVM->tm.s.fTSCTiedToExecution)
127 tmCpuTickPause(pVM, pVCpu);
128}
129
130
131/**
132 * Notification that the CPU is entering the halt state.
133 *
134 * This call must always be paired with a TMNotifyEndOfHalt call.
135 *
136 * The function may, depending on the configuration, resume the TSC and future
137 * clocks that only tick when we're halted.
138 *
139 * @param pVCpu The VMCPU to operate on.
140 */
141VMMDECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
142{
143 PVM pVM = pVCpu->CTX_SUFF(pVM);
144
145 if ( pVM->tm.s.fTSCTiedToExecution
146 && !pVM->tm.s.fTSCNotTiedToHalt)
147 tmCpuTickResume(pVM, pVCpu);
148}
149
150
151/**
152 * Notification that the CPU is leaving the halt state.
153 *
154 * This call must always be paired with a TMNotifyStartOfHalt call.
155 *
156 * The function may, depending on the configuration, suspend the TSC and future
157 * clocks that only tick when we're halted.
158 *
159 * @param pVCpu The VMCPU to operate on.
160 */
161VMMDECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
162{
163 PVM pVM = pVCpu->CTX_SUFF(pVM);
164
165 if ( pVM->tm.s.fTSCTiedToExecution
166 && !pVM->tm.s.fTSCNotTiedToHalt)
167 tmCpuTickPause(pVM, pVCpu);
168}
169
170
171/**
172 * Schedule the queue which was changed.
173 */
174DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
175{
176 PVM pVM = pTimer->CTX_SUFF(pVM);
177 if ( VM_IS_EMT(pVM)
178 && RT_SUCCESS(tmTryLock(pVM)))
179 {
180 STAM_PROFILE_START(&pVM->tm.s.CTXALLSUFF(StatScheduleOne), a);
181 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock];
182 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
183 tmTimerQueueSchedule(pVM, pQueue);
184#ifdef VBOX_STRICT
185 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
186#endif
187 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
188 tmUnlock(pVM);
189 }
190 else if (!VM_FF_ISSET(pVM, VM_FF_TIMER)) /**@todo only do this when arming the timer. */
191 {
192 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
193 VM_FF_SET(pVM, VM_FF_TIMER);
194#ifdef IN_RING3
195 REMR3NotifyTimerPending(pVM);
196 VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
197#endif
198 }
199}
200
201
202/**
203 * Try to change the state to enmStateNew from enmStateOld,
204 * without linking the timer into the scheduling queue.
205 *
206 * @returns Success indicator.
207 * @param pTimer Timer in question.
208 * @param enmStateNew The new timer state.
209 * @param enmStateOld The old timer state.
210 */
211DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
212{
213 /*
214 * Attempt state change.
215 */
216 bool fRc;
217 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
218 return fRc;
219}
220
221
222/**
223 * Links the timer onto the scheduling queue.
224 *
225 * @param pQueue The timer queue the timer belongs to.
226 * @param pTimer The timer.
227 */
228DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
229{
230 Assert(!pTimer->offScheduleNext);
231 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
232 int32_t offHead;
233 do
234 {
235 offHead = pQueue->offSchedule;
236 if (offHead)
237 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
238 else
239 pTimer->offScheduleNext = 0;
240 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
241}
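/*
 * Note: the schedule list is a lock-free LIFO of self-relative offsets.
 * offHeadNew is the new head expressed as an offset from the queue, and
 * offScheduleNext points from this timer to the previous head.  The
 * ASMAtomicCmpXchgS32 loop retries if another thread pushed a timer in
 * between.  Offsets rather than pointers are used so the same structures
 * remain valid in ring-3, ring-0 and raw-mode context.
 */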
242
243
244/**
245 * Try to change the state to enmStateNew from enmStateOld
246 * and link the timer into the scheduling queue.
247 *
248 * @returns Success indicator.
249 * @param pTimer Timer in question.
250 * @param enmStateNew The new timer state.
251 * @param enmStateOld The old timer state.
252 */
253DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
254{
255 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
256 {
257 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
258 return true;
259 }
260 return false;
261}
262
263
264#ifdef VBOX_HIGH_RES_TIMERS_HACK
265/**
266 * Set FF if we've passed the next virtual event.
267 *
268 * This function is called before FFs are checked in the inner execution EM loops.
269 *
270 * @returns Virtual timer ticks to the next event.
271 * @param pVM Pointer to the shared VM structure.
272 * @thread The emulation thread.
273 */
274VMMDECL(uint64_t) TMTimerPoll(PVM pVM)
275{
276 int rc = tmLock(pVM); /* play safe for now */
277
278 /*
279 * Return straight away if the timer FF is already set.
280 */
281 if (VM_FF_ISSET(pVM, VM_FF_TIMER))
282 {
283 STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
284#ifndef IN_RING3
285 if (RT_SUCCESS(rc))
286#endif
287 tmUnlock(pVM);
288 return 0;
289 }
290
291 /*
292 * Get current time and check the expire times of the two relevant queues.
293 */
294 const uint64_t u64Now = TMVirtualGet(pVM);
295
296 /*
297 * TMCLOCK_VIRTUAL
298 */
299 const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
300 const int64_t i64Delta1 = u64Expire1 - u64Now;
301 if (i64Delta1 <= 0)
302 {
303 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
304 LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
305#ifndef IN_RING3
306 if (RT_SUCCESS(rc))
307#endif
308 tmUnlock(pVM);
309 VM_FF_SET(pVM, VM_FF_TIMER);
310#ifdef IN_RING3
311 REMR3NotifyTimerPending(pVM);
312#endif
313 return 0;
314 }
315
316 /*
317 * TMCLOCK_VIRTUAL_SYNC
318 * This isn't quite as straightforward when in a catch-up: not only do
319 * we have to adjust the 'now', but we have to adjust the delta as well.
320 */
321 const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
322 uint64_t u64VirtualSyncNow;
323 if (!pVM->tm.s.fVirtualSyncTicking)
324 u64VirtualSyncNow = pVM->tm.s.u64VirtualSync;
325 else
326 {
327 if (!pVM->tm.s.fVirtualSyncCatchUp)
328 u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync;
329 else
330 {
331 uint64_t off = pVM->tm.s.offVirtualSync;
332 uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev;
333 if (RT_LIKELY(!(u64Delta >> 32)))
334 {
335 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
336 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
337 off -= u64Sub;
338 else
339 off = pVM->tm.s.offVirtualSyncGivenUp;
340 }
341 u64VirtualSyncNow = u64Now - off;
342 }
343 }
344 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
345 if (i64Delta2 <= 0)
346 {
347 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
348#ifndef IN_RING3
349 if (RT_SUCCESS(rc))
350#endif
351 tmUnlock(pVM);
352 LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
353 VM_FF_SET(pVM, VM_FF_TIMER);
354#ifdef IN_RING3
355 REMR3NotifyTimerPending(pVM);
356#endif
357 return 0;
358 }
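    /*
     * Note: while catching up, the virtual sync clock runs at
     * (100 + u32VirtualSyncCatchUpPercentage)% of the virtual clock rate in
     * order to reduce offVirtualSync.  A delta measured on the sync clock is
     * therefore scaled by 100 / (100 + pct) below to get virtual clock ticks;
     * e.g. at a 100% catch-up rate a 2ms sync delta is only 1ms away.
     */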
359 if (pVM->tm.s.fVirtualSyncCatchUp)
360 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
361
362 /*
363 * Return the time left to the next event.
364 */
365 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
366#ifndef IN_RING3
367 if (RT_SUCCESS(rc))
368#endif
369 tmUnlock(pVM);
370 return RT_MIN(i64Delta1, i64Delta2);
371}
372
373
374/**
375 * Set FF if we've passed the next virtual event.
376 *
377 * This function is called before FFs are checked in the inner execution EM loops.
378 *
379 * @returns The GIP timestamp of the next event.
380 * 0 if the next event has already expired.
381 * @param pVM Pointer to the shared VM structure.
383 * @param pu64Delta Where to store the delta.
384 * @thread The emulation thread.
385 */
386VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, uint64_t *pu64Delta)
387{
388 int rc = tmLock(pVM); /* play safe for now. */
389
390 /*
391 * Return straight away if the timer FF is already set.
392 */
393 if (VM_FF_ISSET(pVM, VM_FF_TIMER))
394 {
395 STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
396#ifndef IN_RING3
397 if (RT_SUCCESS(rc))
398#endif
399 tmUnlock(pVM);
400 *pu64Delta = 0;
401 return 0;
402 }
403
404 /*
405 * Get current time and check the expire times of the two relevant queues.
406 */
407 const uint64_t u64Now = TMVirtualGet(pVM);
408
409 /*
410 * TMCLOCK_VIRTUAL
411 */
412 const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
413 const int64_t i64Delta1 = u64Expire1 - u64Now;
414 if (i64Delta1 <= 0)
415 {
416 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
417#ifndef IN_RING3
418 if (RT_SUCCESS(rc))
419#endif
420 tmUnlock(pVM);
421 LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
422 VM_FF_SET(pVM, VM_FF_TIMER);
423#ifdef IN_RING3
424 REMR3NotifyTimerPending(pVM);
425#endif
426 *pu64Delta = 0;
427 return 0;
428 }
429
430 /*
431 * TMCLOCK_VIRTUAL_SYNC
432 * This isn't quite as straightforward when in a catch-up: not only do
433 * we have to adjust the 'now', but we have to adjust the delta as well.
434 */
435 const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
436 uint64_t u64VirtualSyncNow;
437 if (!pVM->tm.s.fVirtualSyncTicking)
438 u64VirtualSyncNow = pVM->tm.s.u64VirtualSync;
439 else
440 {
441 if (!pVM->tm.s.fVirtualSyncCatchUp)
442 u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync;
443 else
444 {
445 uint64_t off = pVM->tm.s.offVirtualSync;
446 uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev;
447 if (RT_LIKELY(!(u64Delta >> 32)))
448 {
449 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
450 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
451 off -= u64Sub;
452 else
453 off = pVM->tm.s.offVirtualSyncGivenUp;
454 }
455 u64VirtualSyncNow = u64Now - off;
456 }
457 }
458
459 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
460 if (i64Delta2 <= 0)
461 {
462 STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
463#ifndef IN_RING3
464 if (RT_SUCCESS(rc))
465#endif
466 tmUnlock(pVM);
467 LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
468 VM_FF_SET(pVM, VM_FF_TIMER);
469#ifdef IN_RING3
470 REMR3NotifyTimerPending(pVM);
471#endif
472 *pu64Delta = 0;
473 return 0;
474 }
475 if (pVM->tm.s.fVirtualSyncCatchUp)
476 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
477
478 /*
479 * Return the GIP time of the next event.
480 * This is the reverse of what tmVirtualGetRaw is doing.
481 */
482 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
483 uint64_t u64GipTime = RT_MIN(i64Delta1, i64Delta2);
484 *pu64Delta = u64GipTime;
485 u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
486 if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
487 {
488 u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
489 u64GipTime *= 100;
490 u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
491 u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
492 }
493
494#ifndef IN_RING3
495 if (RT_SUCCESS(rc))
496#endif
497 tmUnlock(pVM);
498 return u64GipTime;
499}
500#endif
501
502
503/**
504 * Gets the host context ring-3 pointer of the timer.
505 *
506 * @returns HC R3 pointer.
507 * @param pTimer Timer handle as returned by one of the create functions.
508 */
509VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
510{
511 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
512}
513
514
515/**
516 * Gets the host context ring-0 pointer of the timer.
517 *
518 * @returns HC R0 pointer.
519 * @param pTimer Timer handle as returned by one of the create functions.
520 */
521VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
522{
523 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
524}
525
526
527/**
528 * Gets the RC pointer of the timer.
529 *
530 * @returns RC pointer.
531 * @param pTimer Timer handle as returned by one of the create functions.
532 */
533VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
534{
535 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
536}
537
538
539/**
540 * Arm a timer with a (new) expire time.
541 *
542 * @returns VBox status.
543 * @param pTimer Timer handle as returned by one of the create functions.
544 * @param u64Expire New expire time.
545 */
546VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
547{
548 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerSet), a);
549
550 /** @todo find the most frequently used paths and make them skip tmSchedule and tmTimerTryWithLink. */
551 int cRetries = 1000;
552 do
553 {
554 /*
555 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
556 */
557 TMTIMERSTATE enmState = pTimer->enmState;
558 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%llu\n",
559 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
560 switch (enmState)
561 {
562 case TMTIMERSTATE_EXPIRED:
563 case TMTIMERSTATE_STOPPED:
564 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
565 {
566 Assert(!pTimer->offPrev);
567 Assert(!pTimer->offNext);
568 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
569 || pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
570 || u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
571 ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
572 pTimer->u64Expire = u64Expire;
573 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
574 tmSchedule(pTimer);
575 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
576 return VINF_SUCCESS;
577 }
578 break;
579
580 case TMTIMERSTATE_PENDING_SCHEDULE:
581 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
582 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
583 {
584 pTimer->u64Expire = u64Expire;
585 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
586 tmSchedule(pTimer);
587 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
588 return VINF_SUCCESS;
589 }
590 break;
591
592
593 case TMTIMERSTATE_ACTIVE:
594 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
595 {
596 pTimer->u64Expire = u64Expire;
597 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
598 tmSchedule(pTimer);
599 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
600 return VINF_SUCCESS;
601 }
602 break;
603
604 case TMTIMERSTATE_PENDING_RESCHEDULE:
605 case TMTIMERSTATE_PENDING_STOP:
606 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
607 {
608 pTimer->u64Expire = u64Expire;
609 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
610 tmSchedule(pTimer);
611 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
612 return VINF_SUCCESS;
613 }
614 break;
615
616
617 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
618 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
619#ifdef IN_RING3
620 if (!RTThreadYield())
621 RTThreadSleep(1);
622#else
623/** @todo call host context and yield after a couple of iterations */
624#endif
625 break;
626
627 /*
628 * Invalid states.
629 */
630 case TMTIMERSTATE_DESTROY:
631 case TMTIMERSTATE_FREE:
632 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
633 return VERR_TM_INVALID_STATE;
634 default:
635 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
636 return VERR_TM_UNKNOWN_STATE;
637 }
638 } while (cRetries-- > 0);
639
640 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
641 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
642 return VERR_INTERNAL_ERROR;
643}
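/*
 * Usage sketch (illustrative, not from this file): a device would typically
 * arm a timer with an absolute expire time, e.g.
 *     TMTimerSet(pTimer, TMTimerGet(pTimer) + TMTimerFromMilli(pTimer, 10));
 * or simply call one of the TMTimerSetMillies/Micro/Nano helpers below, which
 * perform the equivalent get/convert/add sequence for each clock type.
 */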
644
645
646/**
647 * Arm a timer with a (new) expire time relative to current time.
648 *
649 * @returns VBox status.
650 * @param pTimer Timer handle as returned by one of the create functions.
651 * @param cMilliesToNext Number of milliseconds to the next tick.
652 */
653VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
654{
655 PVM pVM = pTimer->CTX_SUFF(pVM);
656 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
657
658 switch (pTimer->enmClock)
659 {
660 case TMCLOCK_VIRTUAL:
661 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualGet(pVM));
662 case TMCLOCK_VIRTUAL_SYNC:
663 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualSyncGet(pVM));
664 case TMCLOCK_REAL:
665 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
666 return TMTimerSet(pTimer, cMilliesToNext + TMRealGet(pVM));
667 case TMCLOCK_TSC:
668 return TMTimerSet(pTimer, cMilliesToNext * pVM->tm.s.cTSCTicksPerSecond / 1000 + TMCpuTickGet(pVCpu));
669
670 default:
671 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
672 return VERR_INTERNAL_ERROR;
673 }
674}
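/*
 * Conversion example for the helper above: arming a TMCLOCK_VIRTUAL timer 10ms
 * out computes 10 * TMCLOCK_FREQ_VIRTUAL / 1000 = 10 * 1000000000 / 1000
 * = 10000000 ticks past TMVirtualGet() (the virtual clocks count nanoseconds),
 * while TMCLOCK_REAL already counts milliseconds and TMCLOCK_TSC scales by
 * cTSCTicksPerSecond.
 */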
675
676
677/**
678 * Arm a timer with a (new) expire time relative to current time.
679 *
680 * @returns VBox status.
681 * @param pTimer Timer handle as returned by one of the create functions.
682 * @param cMicrosToNext Number of microseconds to the next tick.
683 */
684VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
685{
686 PVM pVM = pTimer->CTX_SUFF(pVM);
687 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
688
689 switch (pTimer->enmClock)
690 {
691 case TMCLOCK_VIRTUAL:
692 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
693 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualGet(pVM));
694
695 case TMCLOCK_VIRTUAL_SYNC:
696 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
697 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualSyncGet(pVM));
698
699 case TMCLOCK_REAL:
700 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
701 return TMTimerSet(pTimer, cMicrosToNext / 1000 + TMRealGet(pVM));
702
703 case TMCLOCK_TSC:
704 return TMTimerSet(pTimer, TMTimerFromMicro(pTimer, cMicrosToNext) + TMCpuTickGet(pVCpu));
705
706 default:
707 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
708 return VERR_INTERNAL_ERROR;
709 }
710}
711
712
713/**
714 * Arm a timer with a (new) expire time relative to current time.
715 *
716 * @returns VBox status.
717 * @param pTimer Timer handle as returned by one of the create functions.
718 * @param cNanosToNext Number of nanoseconds to the next tick.
719 */
720VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
721{
722 PVM pVM = pTimer->CTX_SUFF(pVM);
723 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
724
725 switch (pTimer->enmClock)
726 {
727 case TMCLOCK_VIRTUAL:
728 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
729 return TMTimerSet(pTimer, cNanosToNext + TMVirtualGet(pVM));
730
731 case TMCLOCK_VIRTUAL_SYNC:
732 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
733 return TMTimerSet(pTimer, cNanosToNext + TMVirtualSyncGet(pVM));
734
735 case TMCLOCK_REAL:
736 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
737 return TMTimerSet(pTimer, cNanosToNext / 1000000 + TMRealGet(pVM));
738
739 case TMCLOCK_TSC:
740 return TMTimerSet(pTimer, TMTimerFromNano(pTimer, cNanosToNext) + TMCpuTickGet(pVCpu));
741
742 default:
743 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
744 return VERR_INTERNAL_ERROR;
745 }
746}
747
748
749/**
750 * Stop the timer.
751 * Use TMR3TimerArm() to "un-stop" the timer.
752 *
753 * @returns VBox status.
754 * @param pTimer Timer handle as returned by one of the create functions.
755 */
756VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
757{
758 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerStop), a);
759 /** @todo see if this function needs optimizing. */
760 int cRetries = 1000;
761 do
762 {
763 /*
764 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
765 */
766 TMTIMERSTATE enmState = pTimer->enmState;
767 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
768 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
769 switch (enmState)
770 {
771 case TMTIMERSTATE_EXPIRED:
772 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
773 return VERR_INVALID_PARAMETER;
774
775 case TMTIMERSTATE_STOPPED:
776 case TMTIMERSTATE_PENDING_STOP:
777 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
778 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
779 return VINF_SUCCESS;
780
781 case TMTIMERSTATE_PENDING_SCHEDULE:
782 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
783 {
784 tmSchedule(pTimer);
785 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
786 return VINF_SUCCESS;
787 }
788
789 case TMTIMERSTATE_PENDING_RESCHEDULE:
790 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
791 {
792 tmSchedule(pTimer);
793 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
794 return VINF_SUCCESS;
795 }
796 break;
797
798 case TMTIMERSTATE_ACTIVE:
799 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
800 {
801 tmSchedule(pTimer);
802 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
803 return VINF_SUCCESS;
804 }
805 break;
806
807 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
808 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
809#ifdef IN_RING3
810 if (!RTThreadYield())
811 RTThreadSleep(1);
812#else
813/**@todo call host and yield cpu after a while. */
814#endif
815 break;
816
817 /*
818 * Invalid states.
819 */
820 case TMTIMERSTATE_DESTROY:
821 case TMTIMERSTATE_FREE:
822 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
823 return VERR_TM_INVALID_STATE;
824 default:
825 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
826 return VERR_TM_UNKNOWN_STATE;
827 }
828 } while (cRetries-- > 0);
829
830 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
831 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
832 return VERR_INTERNAL_ERROR;
833}
834
835
836/**
837 * Get the current clock time.
838 * Handy for calculating the new expire time.
839 *
840 * @returns Current clock time.
841 * @param pTimer Timer handle as returned by one of the create functions.
842 */
843VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
844{
845 uint64_t u64;
846 PVM pVM = pTimer->CTX_SUFF(pVM);
847
848 switch (pTimer->enmClock)
849 {
850 case TMCLOCK_VIRTUAL:
851 u64 = TMVirtualGet(pVM);
852 break;
853 case TMCLOCK_VIRTUAL_SYNC:
854 u64 = TMVirtualSyncGet(pVM);
855 break;
856 case TMCLOCK_REAL:
857 u64 = TMRealGet(pVM);
858 break;
859 case TMCLOCK_TSC:
860 {
861 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
862 u64 = TMCpuTickGet(pVCpu);
863 break;
864 }
865 default:
866 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
867 return ~(uint64_t)0;
868 }
869 //Log2(("TMTimerGet: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
870 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
871 return u64;
872}
873
874
875/**
876 * Get the frequency of the timer clock.
877 *
878 * @returns Clock frequency (as Hz of course).
879 * @param pTimer Timer handle as returned by one of the create functions.
880 */
881VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
882{
883 switch (pTimer->enmClock)
884 {
885 case TMCLOCK_VIRTUAL:
886 case TMCLOCK_VIRTUAL_SYNC:
887 return TMCLOCK_FREQ_VIRTUAL;
888
889 case TMCLOCK_REAL:
890 return TMCLOCK_FREQ_REAL;
891
892 case TMCLOCK_TSC:
893 return TMCpuTicksPerSecond(pTimer->CTX_SUFF(pVM));
894
895 default:
896 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
897 return 0;
898 }
899}
900
901
902/**
903 * Get the current clock time as nanoseconds.
904 *
905 * @returns The timer clock as nanoseconds.
906 * @param pTimer Timer handle as returned by one of the create functions.
907 */
908VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
909{
910 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
911}
912
913
914/**
915 * Get the current clock time as microseconds.
916 *
917 * @returns The timer clock as microseconds.
918 * @param pTimer Timer handle as returned by one of the create functions.
919 */
920VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
921{
922 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
923}
924
925
926/**
927 * Get the current clock time as milliseconds.
928 *
929 * @returns The timer clock as milliseconds.
930 * @param pTimer Timer handle as returned by one of the create functions.
931 */
932VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
933{
934 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
935}
936
937
938/**
939 * Converts the specified timer clock time to nanoseconds.
940 *
941 * @returns nanoseconds.
942 * @param pTimer Timer handle as returned by one of the create functions.
943 * @param u64Ticks The clock ticks.
944 * @remark There could be rounding errors here. We just do a simple integer divide
945 * without any adjustments.
946 */
947VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
948{
949 switch (pTimer->enmClock)
950 {
951 case TMCLOCK_VIRTUAL:
952 case TMCLOCK_VIRTUAL_SYNC:
953 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
954 return u64Ticks;
955
956 case TMCLOCK_REAL:
957 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
958 return u64Ticks * 1000000;
959
960 case TMCLOCK_TSC:
961 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
962 return 0;
963
964 default:
965 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
966 return 0;
967 }
968}
969
970
971/**
972 * Converts the specified timer clock time to microseconds.
973 *
974 * @returns microseconds.
975 * @param pTimer Timer handle as returned by one of the create functions.
976 * @param u64Ticks The clock ticks.
977 * @remark There could be rounding errors here. We just do a simple integer divide
978 * without any adjustments.
979 */
980VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
981{
982 switch (pTimer->enmClock)
983 {
984 case TMCLOCK_VIRTUAL:
985 case TMCLOCK_VIRTUAL_SYNC:
986 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
987 return u64Ticks / 1000;
988
989 case TMCLOCK_REAL:
990 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
991 return u64Ticks * 1000;
992
993 case TMCLOCK_TSC:
994 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
995 return 0;
996
997 default:
998 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
999 return 0;
1000 }
1001}
1002
1003
1004/**
1005 * Converts the specified timer clock time to milliseconds.
1006 *
1007 * @returns milliseconds.
1008 * @param pTimer Timer handle as returned by one of the create functions.
1009 * @param u64Ticks The clock ticks.
1010 * @remark There could be rounding errors here. We just do a simple integer divide
1011 * without any adjustments.
1012 */
1013VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1014{
1015 switch (pTimer->enmClock)
1016 {
1017 case TMCLOCK_VIRTUAL:
1018 case TMCLOCK_VIRTUAL_SYNC:
1019 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1020 return u64Ticks / 1000000;
1021
1022 case TMCLOCK_REAL:
1023 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1024 return u64Ticks;
1025
1026 case TMCLOCK_TSC:
1027 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1028 return 0;
1029
1030 default:
1031 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1032 return 0;
1033 }
1034}
1035
1036
1037/**
1038 * Converts the specified nanosecond timestamp to timer clock ticks.
1039 *
1040 * @returns timer clock ticks.
1041 * @param pTimer Timer handle as returned by one of the create functions.
1042 * @param u64NanoTS The nanosecond value ticks to convert.
1043 * @remark There could be rounding and overflow errors here.
1044 */
1045VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1046{
1047 switch (pTimer->enmClock)
1048 {
1049 case TMCLOCK_VIRTUAL:
1050 case TMCLOCK_VIRTUAL_SYNC:
1051 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1052 return u64NanoTS;
1053
1054 case TMCLOCK_REAL:
1055 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1056 return u64NanoTS / 1000000;
1057
1058 case TMCLOCK_TSC:
1059 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1060 return 0;
1061
1062 default:
1063 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1064 return 0;
1065 }
1066}
1067
1068
1069/**
1070 * Converts the specified microsecond timestamp to timer clock ticks.
1071 *
1072 * @returns timer clock ticks.
1073 * @param pTimer Timer handle as returned by one of the create functions.
1074 * @param u64MicroTS The microsecond value to convert.
1075 * @remark There could be rounding and overflow errors here.
1076 */
1077VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1078{
1079 switch (pTimer->enmClock)
1080 {
1081 case TMCLOCK_VIRTUAL:
1082 case TMCLOCK_VIRTUAL_SYNC:
1083 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1084 return u64MicroTS * 1000;
1085
1086 case TMCLOCK_REAL:
1087 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1088 return u64MicroTS / 1000;
1089
1090 case TMCLOCK_TSC:
1091 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1092 return 0;
1093
1094 default:
1095 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1096 return 0;
1097 }
1098}
1099
1100
1101/**
1102 * Converts the specified millisecond timestamp to timer clock ticks.
1103 *
1104 * @returns timer clock ticks.
1105 * @param pTimer Timer handle as returned by one of the create functions.
1106 * @param u64MilliTS The millisecond value to convert.
1107 * @remark There could be rounding and overflow errors here.
1108 */
1109VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1110{
1111 switch (pTimer->enmClock)
1112 {
1113 case TMCLOCK_VIRTUAL:
1114 case TMCLOCK_VIRTUAL_SYNC:
1115 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1116 return u64MilliTS * 1000000;
1117
1118 case TMCLOCK_REAL:
1119 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1120 return u64MilliTS;
1121
1122 case TMCLOCK_TSC:
1123 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1124 return 0;
1125
1126 default:
1127 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1128 return 0;
1129 }
1130}
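/*
 * Summary of the conversions above: VIRTUAL and VIRTUAL_SYNC ticks are
 * nanoseconds, REAL ticks are milliseconds, and TSC conversions are not
 * implemented.  The divisions truncate, so e.g. TMTimerFromNano() on a
 * TMCLOCK_REAL timer turns 999999 ns into 0 ticks.
 */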
1131
1132
1133/**
1134 * Get the expire time of the timer.
1135 * Only valid for active timers.
1136 *
1137 * @returns Expire time of the timer.
1138 * @param pTimer Timer handle as returned by one of the create functions.
1139 */
1140VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1141{
1142 int cRetries = 1000;
1143 do
1144 {
1145 TMTIMERSTATE enmState = pTimer->enmState;
1146 switch (enmState)
1147 {
1148 case TMTIMERSTATE_EXPIRED:
1149 case TMTIMERSTATE_STOPPED:
1150 case TMTIMERSTATE_PENDING_STOP:
1151 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1152 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1153 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1154 return ~(uint64_t)0;
1155
1156 case TMTIMERSTATE_ACTIVE:
1157 case TMTIMERSTATE_PENDING_RESCHEDULE:
1158 case TMTIMERSTATE_PENDING_SCHEDULE:
1159 Log2(("TMTimerGetExpire: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1160 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1161 return pTimer->u64Expire;
1162
1163 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1164 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1165#ifdef IN_RING3
1166 if (!RTThreadYield())
1167 RTThreadSleep(1);
1168#endif
1169 break;
1170
1171 /*
1172 * Invalid states.
1173 */
1174 case TMTIMERSTATE_DESTROY:
1175 case TMTIMERSTATE_FREE:
1176 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1177 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1178 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1179 return ~(uint64_t)0;
1180 default:
1181 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1182 return ~(uint64_t)0;
1183 }
1184 } while (cRetries-- > 0);
1185
1186 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1187 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1188 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1189 return ~(uint64_t)0;
1190}
1191
1192
1193/**
1194 * Checks if a timer is active or not.
1195 *
1196 * @returns True if active.
1197 * @returns False if not active.
1198 * @param pTimer Timer handle as returned by one of the create functions.
1199 */
1200VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1201{
1202 TMTIMERSTATE enmState = pTimer->enmState;
1203 switch (enmState)
1204 {
1205 case TMTIMERSTATE_STOPPED:
1206 case TMTIMERSTATE_EXPIRED:
1207 case TMTIMERSTATE_PENDING_STOP:
1208 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1209 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1210 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1211 return false;
1212
1213 case TMTIMERSTATE_ACTIVE:
1214 case TMTIMERSTATE_PENDING_RESCHEDULE:
1215 case TMTIMERSTATE_PENDING_SCHEDULE:
1216 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1217 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1218 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1219 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1220 return true;
1221
1222 /*
1223 * Invalid states.
1224 */
1225 case TMTIMERSTATE_DESTROY:
1226 case TMTIMERSTATE_FREE:
1227 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1228 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1229 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1230 return false;
1231 default:
1232 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1233 return false;
1234 }
1235}
1236
1237
1238/**
1239 * Convert state to string.
1240 *
1241 * @returns Readonly status name.
1242 * @param enmState State.
1243 */
1244const char *tmTimerState(TMTIMERSTATE enmState)
1245{
1246 switch (enmState)
1247 {
1248#define CASE(num, state) \
1249 case TMTIMERSTATE_##state: \
1250 AssertCompile(TMTIMERSTATE_##state == (num)); \
1251 return #num "-" #state
1252 CASE( 1,STOPPED);
1253 CASE( 2,ACTIVE);
1254 CASE( 3,EXPIRED);
1255 CASE( 4,PENDING_STOP);
1256 CASE( 5,PENDING_STOP_SCHEDULE);
1257 CASE( 6,PENDING_SCHEDULE_SET_EXPIRE);
1258 CASE( 7,PENDING_SCHEDULE);
1259 CASE( 8,PENDING_RESCHEDULE_SET_EXPIRE);
1260 CASE( 9,PENDING_RESCHEDULE);
1261 CASE(10,DESTROY);
1262 CASE(11,FREE);
1263 default:
1264 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
1265 return "Invalid state!";
1266#undef CASE
1267 }
1268}
1269
1270
1271/**
1272 * Schedules the given timer on the given queue.
1273 *
1274 * @param pQueue The timer queue.
1275 * @param pTimer The timer that needs scheduling.
1276 *
1277 * @remarks Called while owning the lock.
1278 */
1279DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
1280{
1281 /*
1282 * Processing.
1283 */
1284 unsigned cRetries = 2;
1285 do
1286 {
1287 TMTIMERSTATE enmState = pTimer->enmState;
1288 switch (enmState)
1289 {
1290 /*
1291 * Reschedule timer (in the active list).
1292 */
1293 case TMTIMERSTATE_PENDING_RESCHEDULE:
1294 {
1295 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
1296 break; /* retry */
1297
1298 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1299 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1300 if (pPrev)
1301 TMTIMER_SET_NEXT(pPrev, pNext);
1302 else
1303 {
1304 TMTIMER_SET_HEAD(pQueue, pNext);
1305 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1306 }
1307 if (pNext)
1308 TMTIMER_SET_PREV(pNext, pPrev);
1309 pTimer->offNext = 0;
1310 pTimer->offPrev = 0;
1311 /* fall thru */
1312 }
1313
1314 /*
1315 * Schedule timer (insert into the active list).
1316 */
1317 case TMTIMERSTATE_PENDING_SCHEDULE:
1318 {
1319 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
1320 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
1321 break; /* retry */
1322
1323 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
1324 if (pCur)
1325 {
1326 const uint64_t u64Expire = pTimer->u64Expire;
1327 for (;; pCur = TMTIMER_GET_NEXT(pCur))
1328 {
1329 if (pCur->u64Expire > u64Expire)
1330 {
1331 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
1332 TMTIMER_SET_NEXT(pTimer, pCur);
1333 TMTIMER_SET_PREV(pTimer, pPrev);
1334 if (pPrev)
1335 TMTIMER_SET_NEXT(pPrev, pTimer);
1336 else
1337 {
1338 TMTIMER_SET_HEAD(pQueue, pTimer);
1339 pQueue->u64Expire = u64Expire;
1340 }
1341 TMTIMER_SET_PREV(pCur, pTimer);
1342 return;
1343 }
1344 if (!pCur->offNext)
1345 {
1346 TMTIMER_SET_NEXT(pCur, pTimer);
1347 TMTIMER_SET_PREV(pTimer, pCur);
1348 return;
1349 }
1350 }
1351 }
1352 else
1353 {
1354 TMTIMER_SET_HEAD(pQueue, pTimer);
1355 pQueue->u64Expire = pTimer->u64Expire;
1356 }
1357 return;
1358 }
1359
1360 /*
1361 * Stop the timer in active list.
1362 */
1363 case TMTIMERSTATE_PENDING_STOP:
1364 {
1365 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
1366 break; /* retry */
1367
1368 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1369 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1370 if (pPrev)
1371 TMTIMER_SET_NEXT(pPrev, pNext);
1372 else
1373 {
1374 TMTIMER_SET_HEAD(pQueue, pNext);
1375 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1376 }
1377 if (pNext)
1378 TMTIMER_SET_PREV(pNext, pPrev);
1379 pTimer->offNext = 0;
1380 pTimer->offPrev = 0;
1381 /* fall thru */
1382 }
1383
1384 /*
1385 * Stop the timer (not on the active list).
1386 */
1387 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1388 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
1389 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
1390 break;
1391 return;
1392
1393 /*
1394 * The timer is pending destruction by TMR3TimerDestroy, our caller.
1395 * Nothing to do here.
1396 */
1397 case TMTIMERSTATE_DESTROY:
1398 break;
1399
1400 /*
1401 * Postpone these until they get into the right state.
1402 */
1403 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1404 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1405 tmTimerLink(pQueue, pTimer);
1406 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
1407 return;
1408
1409 /*
1410 * None of these can be in the schedule.
1411 */
1412 case TMTIMERSTATE_FREE:
1413 case TMTIMERSTATE_STOPPED:
1414 case TMTIMERSTATE_ACTIVE:
1415 case TMTIMERSTATE_EXPIRED:
1416 default:
1417 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
1418 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
1419 return;
1420 }
1421 } while (cRetries-- > 0);
1422}
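/*
 * Note: tmTimerQueueScheduleOne keeps each clock's active list sorted by
 * expire time and mirrors the head timer's expire time in pQueue->u64Expire,
 * which is what TMTimerPoll compares against.  Timers caught in a transient
 * *_SET_EXPIRE state are re-linked onto the schedule list and handled on a
 * later pass.
 */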
1423
1424
1425/**
1426 * Schedules the specified timer queue.
1427 *
1428 * @param pVM The VM to run the timers for.
1429 * @param pQueue The queue to schedule.
1430 *
1431 * @remarks Called while owning the lock.
1432 */
1433void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
1434{
1435 TM_ASSERT_EMT_LOCK(pVM);
1436
1437 /*
1438 * Dequeue the scheduling list and iterate it.
1439 */
1440 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
1441 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32}\n", pQueue, pQueue->enmClock, offNext));
1442 if (!offNext)
1443 return;
1444 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
1445 while (pNext)
1446 {
1447 /*
1448 * Unlink the head timer and find the next one.
1449 */
1450 PTMTIMER pTimer = pNext;
1451 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
1452 pTimer->offScheduleNext = 0;
1453
1454 /*
1455 * Do the scheduling.
1456 */
1457 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
1458 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
1459 tmTimerQueueScheduleOne(pQueue, pTimer);
1460 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
1461 } /* foreach timer in current schedule batch. */
1462}
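/*
 * Note: tmTimerQueueSchedule detaches the whole pending batch with a single
 * ASMAtomicXchgS32, so other threads can keep appending timers to a fresh
 * schedule list while this batch is being processed.
 */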
1463
1464
1465#ifdef VBOX_STRICT
1466/**
1467 * Checks that the timer queues are sane.
1468 *
1469 * @param pVM VM handle.
1470 * @param pszWhere Caller location for the assertion messages.
1471 * @remarks Called while owning the lock.
1472 */
1473void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
1474{
1475 TM_ASSERT_EMT_LOCK(pVM);
1476
1477 /*
1478 * Check the linking of the active lists.
1479 */
1480 for (int i = 0; i < TMCLOCK_MAX; i++)
1481 {
1482 PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
1483 Assert((int)pQueue->enmClock == i);
1484 PTMTIMER pPrev = NULL;
1485 for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
1486 {
1487 AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
1488 AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
1489 TMTIMERSTATE enmState = pCur->enmState;
1490 switch (enmState)
1491 {
1492 case TMTIMERSTATE_ACTIVE:
1493 AssertMsg( !pCur->offScheduleNext
1494 || pCur->enmState != TMTIMERSTATE_ACTIVE,
1495 ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
1496 break;
1497 case TMTIMERSTATE_PENDING_STOP:
1498 case TMTIMERSTATE_PENDING_RESCHEDULE:
1499 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1500 break;
1501 default:
1502 AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
1503 break;
1504 }
1505 }
1506 }
1507
1508
1509# ifdef IN_RING3
1510 /*
1511 * Do the big list and check that active timers all are in the active lists.
1512 */
1513 PTMTIMERR3 pPrev = NULL;
1514 for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
1515 {
1516 Assert(pCur->pBigPrev == pPrev);
1517 Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);
1518
1519 TMTIMERSTATE enmState = pCur->enmState;
1520 switch (enmState)
1521 {
1522 case TMTIMERSTATE_ACTIVE:
1523 case TMTIMERSTATE_PENDING_STOP:
1524 case TMTIMERSTATE_PENDING_RESCHEDULE:
1525 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1526 {
1527 PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
1528 Assert(pCur->offPrev || pCur == pCurAct);
1529 while (pCurAct && pCurAct != pCur)
1530 pCurAct = TMTIMER_GET_NEXT(pCurAct);
1531 Assert(pCurAct == pCur);
1532 break;
1533 }
1534
1535 case TMTIMERSTATE_PENDING_SCHEDULE:
1536 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1537 case TMTIMERSTATE_STOPPED:
1538 case TMTIMERSTATE_EXPIRED:
1539 {
1540 Assert(!pCur->offNext);
1541 Assert(!pCur->offPrev);
1542 for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
1543 pCurAct;
1544 pCurAct = TMTIMER_GET_NEXT(pCurAct))
1545 {
1546 Assert(pCurAct != pCur);
1547 Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
1548 Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
1549 }
1550 break;
1551 }
1552
1553 /* ignore */
1554 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1555 break;
1556
1557 /* shouldn't get here! */
1558 case TMTIMERSTATE_DESTROY:
1559 default:
1560 AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
1561 break;
1562 }
1563 }
1564# endif /* IN_RING3 */
1565}
1566#endif /* VBOX_STRICT */
1567
1568
1569/**
1570 * Gets the current warp drive percent.
1571 *
1572 * @returns The warp drive percent.
1573 * @param pVM The VM handle.
1574 */
1575VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
1576{
1577 return pVM->tm.s.u32VirtualWarpDrivePercentage;
1578}
1579