VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 19667

Last change on this file since 19667 was 19660, checked in by vboxsync, 16 years ago

TM+affected: SMP changes in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 52.9 KB
Line 
1/* $Id: TMAll.cpp 19660 2009-05-13 14:09:15Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_TM
27#include <VBox/tm.h>
28#include <VBox/mm.h>
29#ifdef IN_RING3
30# include <VBox/rem.h>
31#endif
32#include "TMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/param.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <VBox/sup.h>
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#ifdef IN_RING3
43# include <iprt/thread.h>
44#endif
45
46
47#ifndef tmLock
48
49/**
50 * Try take the EMT/TM lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
51 *
52 * @retval VINF_SUCCESS on success (always in ring-3).
53 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
54 *
55 * @param pVM The VM handle.
56 */
57int tmLock(PVM pVM)
58{
59 VM_ASSERT_EMT(pVM);
60 int rc = PDMCritSectEnter(&pVM->tm.s.EmtLock, VERR_SEM_BUSY);
61 return rc;
62}
63
64
65/**
66 * Try take the EMT/TM lock, no waiting.
67 *
68 * @retval VINF_SUCCESS on success.
69 * @retval VERR_SEM_BUSY if busy.
70 *
71 * @param pVM The VM handle.
72 */
73int tmTryLock(PVM pVM)
74{
75 VM_ASSERT_EMT(pVM);
76 int rc = PDMCritSectTryEnter(&pVM->tm.s.EmtLock);
77 return rc;
78}
79
80
81/**
82 * Release EMT/TM lock.
83 *
84 * @param pVM The VM handle.
85 */
86void tmUnlock(PVM pVM)
87{
88 PDMCritSectLeave(&pVM->tm.s.EmtLock);
89}
90
91#endif /* ! macros */
92
93/**
94 * Notification that execution is about to start.
95 *
96 * This call must always be paired with a TMNotifyEndOfExecution call.
97 *
98 * The function may, depending on the configuration, resume the TSC and future
99 * clocks that only ticks when we're executing guest code.
100 *
101 * @param pVCpu The VMCPU to operate on.
102 */
103VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
104{
105 PVM pVM = pVCpu->CTX_SUFF(pVM);
106
107 if (pVM->tm.s.fTSCTiedToExecution)
108 tmCpuTickResume(pVM, pVCpu);
109}
110
111
112/**
113 * Notification that execution is about to start.
114 *
115 * This call must always be paired with a TMNotifyStartOfExecution call.
116 *
117 * The function may, depending on the configuration, suspend the TSC and future
118 * clocks that only ticks when we're executing guest code.
119 *
120 * @param pVCpu The VMCPU to operate on.
121 */
122VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
123{
124 PVM pVM = pVCpu->CTX_SUFF(pVM);
125
126 if (pVM->tm.s.fTSCTiedToExecution)
127 tmCpuTickPause(pVM, pVCpu);
128}
129
130
131/**
132 * Notification that the cpu is entering the halt state
133 *
134 * This call must always be paired with a TMNotifyEndOfExecution call.
135 *
136 * The function may, depending on the configuration, resume the TSC and future
137 * clocks that only ticks when we're halted.
138 *
139 * @param pVCpu The VMCPU to operate on.
140 */
141VMMDECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
142{
143 PVM pVM = pVCpu->CTX_SUFF(pVM);
144
145 if ( pVM->tm.s.fTSCTiedToExecution
146 && !pVM->tm.s.fTSCNotTiedToHalt)
147 tmCpuTickResume(pVM, pVCpu);
148}
149
150
151/**
152 * Notification that the cpu is leaving the halt state
153 *
154 * This call must always be paired with a TMNotifyStartOfHalt call.
155 *
156 * The function may, depending on the configuration, suspend the TSC and future
157 * clocks that only ticks when we're halted.
158 *
159 * @param pVCpu The VMCPU to operate on.
160 */
161VMMDECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
162{
163 PVM pVM = pVCpu->CTX_SUFF(pVM);
164
165 if ( pVM->tm.s.fTSCTiedToExecution
166 && !pVM->tm.s.fTSCNotTiedToHalt)
167 tmCpuTickPause(pVM, pVCpu);
168}
169
170
/**
 * Schedule the queue which was changed.
 *
 * When called on the EMT and the TM lock can be grabbed without waiting, the
 * timer's queue is scheduled right away (and sanity checked in strict
 * builds).  Otherwise the work is deferred by raising VMCPU_FF_TIMER on the
 * dedicated timer VCPU (idTimerCpu), whose next timer poll does the actual
 * scheduling.
 *
 * @param   pTimer      The timer whose queue needs (re)scheduling.
 */
DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
{
    PVM pVM = pTimer->CTX_SUFF(pVM);
    if (    VM_IS_EMT(pVM)
        &&  RT_SUCCESS(tmTryLock(pVM)))
    {
        STAM_PROFILE_START(&pVM->tm.s.CTXALLSUFF(StatScheduleOne), a);
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock];
        Log3(("tmSchedule: tmTimerQueueSchedule\n"));
        tmTimerQueueSchedule(pVM, pQueue);
#ifdef VBOX_STRICT
        tmTimerQueuesSanityChecks(pVM, "tmSchedule");
#endif
        STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
        tmUnlock(pVM);
    }
    else
    {
        /* Not on the EMT or the lock is contended: defer to the timer VCPU. */
        /** @todo FIXME: don't use FF for scheduling! */
        PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER)) /**@todo only do this when arming the timer. */
        {
            Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
            VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
#endif
            STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
        }
    }
}
206
207
/**
 * Try change the timer state to enmStateNew from enmStateOld using a single
 * atomic compare-and-exchange.
 *
 * Note: unlike tmTimerTryWithLink, this does NOT link the timer onto the
 * scheduling queue.
 *
 * @returns Success indicator.
 * @param   pTimer          Timer in question.
 * @param   enmStateNew     The new timer state.
 * @param   enmStateOld     The old timer state.
 */
DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
{
    /*
     * Attempt state change.
     */
    bool fRc;
    TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
    return fRc;
}
226
227
/**
 * Links the timer onto the head of the queue's schedule list using a
 * lock-free compare-and-exchange loop.
 *
 * The list is singly linked via self-relative offsets (0 == end of list) so
 * the same representation is valid in R3, R0 and RC.
 *
 * @param   pQueue  The timer queue the timer belongs to.
 * @param   pTimer  The timer.
 */
DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    Assert(!pTimer->offScheduleNext);
    /* The timer's offset relative to the queue; this will become the new head. */
    const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
    int32_t offHead;
    do
    {
        offHead = pQueue->offSchedule;
        if (offHead)
            /* Rebase the queue-relative head offset to be timer-relative. */
            pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
        else
            pTimer->offScheduleNext = 0;
    } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
}
248
249
250/**
251 * Try change the state to enmStateNew from enmStateOld
252 * and link the timer into the scheduling queue.
253 *
254 * @returns Success indicator.
255 * @param pTimer Timer in question.
256 * @param enmStateNew The new timer state.
257 * @param enmStateOld The old timer state.
258 */
259DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
260{
261 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
262 {
263 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
264 return true;
265 }
266 return false;
267}
268
269
270#ifdef VBOX_HIGH_RES_TIMERS_HACK
/**
 * Set FF if we've passed the next virtual event.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * Both the TMCLOCK_VIRTUAL and TMCLOCK_VIRTUAL_SYNC queues are checked; an
 * expired head timer raises VMCPU_FF_TIMER on the dedicated timer VCPU
 * (idTimerCpu).  Non-timer EMTs always get a large constant back so they do
 * not spin on timer work that isn't theirs.
 *
 * @returns Virtual timer ticks to the next event. (I.e. 0 means that an timer
 *          has expired or some important rescheduling is pending.)
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @thread  The emulation thread.
 */
VMMDECL(uint64_t) TMTimerPoll(PVM pVM, PVMCPU pVCpu)
{
    static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    }

    /*
     * ... or if timers are being run.
     */
    if (pVM->tm.s.fRunningQueues)
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return s_u64OtherRet;
    }

    /*
     * Get current time and check the expire times of the two relevant queues.
     * Note: in R0/RC the lock attempt may fail (rc = VERR_SEM_BUSY); we carry
     * on regardless and only unlock when rc says we actually own the lock.
     */
    int rc = tmLock(pVM); /** @todo FIXME: Stop playing safe here... */
    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);

    /*
     * TMCLOCK_VIRTUAL
     */
    const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
    const int64_t i64Delta1 = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        /* Head timer expired: raise the FF and (in R3) tell REM about it. */
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
        LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
#ifndef IN_RING3
        if (RT_SUCCESS(rc))
#endif
            tmUnlock(pVM);
        Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
        VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
        REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    }

    /*
     * TMCLOCK_VIRTUAL_SYNC
     * This isn't quite as straightforward if in a catch-up: not only do
     * we have to adjust the 'now', but the delta needs adjusting as well.
     */
    const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
    uint64_t u64VirtualSyncNow;
    if (!pVM->tm.s.fVirtualSyncTicking)
        u64VirtualSyncNow = pVM->tm.s.u64VirtualSync; /* clock paused: use the frozen value. */
    else
    {
        if (!pVM->tm.s.fVirtualSyncCatchUp)
            u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync;
        else
        {
            /* Catching up: shrink the lag offset by the catch-up percentage of
               the time elapsed since the last check, clamped at the given-up
               amount.  Deltas >= 2^32 skip the adjustment (multiply helper is
               32-bit). */
            uint64_t off = pVM->tm.s.offVirtualSync;
            uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev;
            if (RT_LIKELY(!(u64Delta >> 32)))
            {
                uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
                if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
                    off -= u64Sub;
                else
                    off = pVM->tm.s.offVirtualSyncGivenUp;
            }
            u64VirtualSyncNow = u64Now - off;
        }
    }
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
#ifndef IN_RING3
        if (RT_SUCCESS(rc))
#endif
            tmUnlock(pVM);
        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    }
    if (pVM->tm.s.fVirtualSyncCatchUp)
        /* Scale the delta down to virtual-sync pace so we don't oversleep while catching up. */
        i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
#ifndef IN_RING3
    if (RT_SUCCESS(rc))
#endif
        tmUnlock(pVM);
    return RT_MIN(i64Delta1, i64Delta2);
}
394
395
396/**
397 * Set FF if we've passed the next virtual event.
398 *
399 * This function is called before FFs are checked in the inner execution EM loops.
400 *
401 * @returns The GIP timestamp of the next event.
402 * 0 if the next event has already expired.
403 * @param pVM Pointer to the shared VM structure.
404 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
405 * @param pu64Delta Where to store the delta.
406 * @thread The emulation thread.
407 */
408VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
409{
410 static const uint64_t s_u64OtherRet = 500000000; /* 500 million GIP ticks for non-timer EMTs. */
411 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
412 const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
413 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIP);
414
415 /*
416 * Return straight away if the timer FF is already set ...
417 */
418 if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
419 {
420 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPAlreadySet);
421 if (pVCpuDst == pVCpu)
422 {
423 *pu64Delta = 0;
424 return 0;
425 }
426 *pu64Delta = s_u64OtherRet;
427 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
428 }
429
430 /*
431 * ... or if timers are being run.
432 */
433 if (pVM->tm.s.fRunningQueues)
434 {
435 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPRunning);
436 *pu64Delta = s_u64OtherRet;
437 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
438 }
439
440 int rc = tmLock(pVM); /** @todo FIXME: Stop playin safe... */
441
442 /*
443 * Check for TMCLOCK_VIRTUAL expiration.
444 */
445 const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
446 const int64_t i64Delta1 = u64Expire1 - u64Now;
447 if (i64Delta1 <= 0)
448 {
449 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtual);
450 Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
451 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
452#ifdef IN_RING3
453 REMR3NotifyTimerPending(pVM, pVCpuDst);
454#endif
455#ifndef IN_RING3
456 if (RT_SUCCESS(rc))
457#endif
458 tmUnlock(pVM);
459 LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
460 if (pVCpuDst == pVCpu)
461 {
462 *pu64Delta = 0;
463 return 0;
464 }
465 *pu64Delta = s_u64OtherRet;
466 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
467 }
468
469 /*
470 * Check for TMCLOCK_VIRTUAL_SYNC expiration.
471 * This isn't quite as stright forward if in a catch-up, not only do
472 * we have to adjust the 'now' but when have to adjust the delta as well.
473 */
474 const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
475 uint64_t u64VirtualSyncNow;
476 if (!pVM->tm.s.fVirtualSyncTicking)
477 u64VirtualSyncNow = pVM->tm.s.u64VirtualSync;
478 else
479 {
480 if (!pVM->tm.s.fVirtualSyncCatchUp)
481 u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync;
482 else
483 {
484 uint64_t off = pVM->tm.s.offVirtualSync;
485 uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev;
486 if (RT_LIKELY(!(u64Delta >> 32)))
487 {
488 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
489 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
490 off -= u64Sub;
491 else
492 off = pVM->tm.s.offVirtualSyncGivenUp;
493 }
494 u64VirtualSyncNow = u64Now - off;
495 }
496 }
497
498 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
499 if (i64Delta2 <= 0)
500 {
501 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TIMER))
502 {
503 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
504 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER /** @todo poke */);
505#ifdef IN_RING3
506 REMR3NotifyTimerPending(pVM, pVCpuDst);
507#endif
508 }
509 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPVirtualSync);
510
511#ifndef IN_RING3
512 if (RT_SUCCESS(rc))
513#endif
514 tmUnlock(pVM);
515 LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
516 if (pVCpuDst == pVCpu)
517 {
518 *pu64Delta = 0;
519 return 0;
520 }
521 *pu64Delta = s_u64OtherRet;
522 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
523 }
524 if (pVM->tm.s.fVirtualSyncCatchUp)
525 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
526
527 uint64_t u64GipTime;
528 if (pVCpuDst == pVCpu)
529 {
530 /*
531 * Return the GIP time of the next event.
532 * This is the reverse of what tmVirtualGetRaw is doing.
533 */
534 STAM_COUNTER_INC(&pVM->tm.s.StatPollGIPMiss);
535 u64GipTime = RT_MIN(i64Delta1, i64Delta2);
536 *pu64Delta = u64GipTime;
537 u64GipTime += u64Now + pVM->tm.s.u64VirtualOffset;
538 if (RT_UNLIKELY(!pVM->tm.s.fVirtualWarpDrive))
539 {
540 u64GipTime -= pVM->tm.s.u64VirtualWarpDriveStart; /* the start is GIP time. */
541 u64GipTime *= 100;
542 u64GipTime /= pVM->tm.s.u32VirtualWarpDrivePercentage;
543 u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
544 }
545 }
546 else
547 {
548 *pu64Delta = s_u64OtherRet;
549 u64GipTime = u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
550 }
551#ifndef IN_RING3
552 if (RT_SUCCESS(rc))
553#endif
554 tmUnlock(pVM);
555 return u64GipTime;
556}
557#endif
558
559
560/**
561 * Gets the host context ring-3 pointer of the timer.
562 *
563 * @returns HC R3 pointer.
564 * @param pTimer Timer handle as returned by one of the create functions.
565 */
566VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
567{
568 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
569}
570
571
572/**
573 * Gets the host context ring-0 pointer of the timer.
574 *
575 * @returns HC R0 pointer.
576 * @param pTimer Timer handle as returned by one of the create functions.
577 */
578VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
579{
580 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
581}
582
583
584/**
585 * Gets the RC pointer of the timer.
586 *
587 * @returns RC pointer.
588 * @param pTimer Timer handle as returned by one of the create functions.
589 */
590VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
591{
592 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
593}
594
595
/**
 * Arm a timer with a (new) expire time.
 *
 * The loop below retries (up to 1000 times) until the timer is caught in a
 * stable state that permits setting a new expire time atomically; the actual
 * queue manipulation is then deferred to tmSchedule.
 *
 * @returns VBox status.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64Expire       New expire time.
 */
VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
{
    STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerSet), a);

    /** @todo find the most frequently used paths and make them skip tmSchedule and tmTimerTryWithLink. */
    int cRetries = 1000;
    do
    {
        /*
         * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
         */
        TMTIMERSTATE enmState = pTimer->enmState;
        Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%llu\n",
              pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
        switch (enmState)
        {
            case TMTIMERSTATE_EXPIRED:
            case TMTIMERSTATE_STOPPED:
                /* Not linked anywhere yet: must also go onto the schedule list. */
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    Assert(!pTimer->offPrev);
                    Assert(!pTimer->offNext);
                    AssertMsg(    pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
                              ||  pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
                              ||  u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
                              ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                /* Already on the schedule list: only the expire time needs updating. */
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_ACTIVE:
                /* On the active list: link onto the schedule list for rescheduling. */
                if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;

            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_STOP:
                /* Already on the schedule list (still linked on active too). */
                if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
                {
                    pTimer->u64Expire = u64Expire;
                    TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
                    tmSchedule(pTimer);
                    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
                    return VINF_SUCCESS;
                }
                break;


            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                /* Another thread is in the middle of setting the expire time; back off and retry. */
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#else
/** @todo call host context and yield after a couple of iterations */
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_INVALID_STATE;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return VERR_TM_UNKNOWN_STATE;
        }
    } while (cRetries-- > 0);

    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSet), a);
    return VERR_INTERNAL_ERROR;
}
701
702
703/**
704 * Arm a timer with a (new) expire time relative to current time.
705 *
706 * @returns VBox status.
707 * @param pTimer Timer handle as returned by one of the create functions.
708 * @param cMilliesToNext Number of millieseconds to the next tick.
709 */
710VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
711{
712 PVM pVM = pTimer->CTX_SUFF(pVM);
713 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
714
715 switch (pTimer->enmClock)
716 {
717 case TMCLOCK_VIRTUAL:
718 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualGet(pVM));
719 case TMCLOCK_VIRTUAL_SYNC:
720 return TMTimerSet(pTimer, cMilliesToNext * (uint64_t)TMCLOCK_FREQ_VIRTUAL / 1000 + TMVirtualSyncGet(pVM));
721 case TMCLOCK_REAL:
722 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
723 return TMTimerSet(pTimer, cMilliesToNext + TMRealGet(pVM));
724 case TMCLOCK_TSC:
725 return TMTimerSet(pTimer, cMilliesToNext * pVM->tm.s.cTSCTicksPerSecond / 1000 + TMCpuTickGet(pVCpu));
726
727 default:
728 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
729 return VERR_INTERNAL_ERROR;
730 }
731}
732
733
734/**
735 * Arm a timer with a (new) expire time relative to current time.
736 *
737 * @returns VBox status.
738 * @param pTimer Timer handle as returned by one of the create functions.
739 * @param cMicrosToNext Number of microseconds to the next tick.
740 */
741VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
742{
743 PVM pVM = pTimer->CTX_SUFF(pVM);
744 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
745
746 switch (pTimer->enmClock)
747 {
748 case TMCLOCK_VIRTUAL:
749 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
750 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualGet(pVM));
751
752 case TMCLOCK_VIRTUAL_SYNC:
753 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
754 return TMTimerSet(pTimer, cMicrosToNext * 1000 + TMVirtualSyncGet(pVM));
755
756 case TMCLOCK_REAL:
757 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
758 return TMTimerSet(pTimer, cMicrosToNext / 1000 + TMRealGet(pVM));
759
760 case TMCLOCK_TSC:
761 return TMTimerSet(pTimer, TMTimerFromMicro(pTimer, cMicrosToNext) + TMCpuTickGet(pVCpu));
762
763 default:
764 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
765 return VERR_INTERNAL_ERROR;
766 }
767}
768
769
770/**
771 * Arm a timer with a (new) expire time relative to current time.
772 *
773 * @returns VBox status.
774 * @param pTimer Timer handle as returned by one of the create functions.
775 * @param cNanosToNext Number of nanoseconds to the next tick.
776 */
777VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
778{
779 PVM pVM = pTimer->CTX_SUFF(pVM);
780 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
781
782 switch (pTimer->enmClock)
783 {
784 case TMCLOCK_VIRTUAL:
785 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
786 return TMTimerSet(pTimer, cNanosToNext + TMVirtualGet(pVM));
787
788 case TMCLOCK_VIRTUAL_SYNC:
789 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
790 return TMTimerSet(pTimer, cNanosToNext + TMVirtualSyncGet(pVM));
791
792 case TMCLOCK_REAL:
793 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
794 return TMTimerSet(pTimer, cNanosToNext / 1000000 + TMRealGet(pVM));
795
796 case TMCLOCK_TSC:
797 return TMTimerSet(pTimer, TMTimerFromNano(pTimer, cNanosToNext) + TMCpuTickGet(pVCpu));
798
799 default:
800 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
801 return VERR_INTERNAL_ERROR;
802 }
803}
804
805
806/**
807 * Stop the timer.
808 * Use TMR3TimerArm() to "un-stop" the timer.
809 *
810 * @returns VBox status.
811 * @param pTimer Timer handle as returned by one of the create functions.
812 */
813VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
814{
815 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTXALLSUFF(StatTimerStop), a);
816 /** @todo see if this function needs optimizing. */
817 int cRetries = 1000;
818 do
819 {
820 /*
821 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
822 */
823 TMTIMERSTATE enmState = pTimer->enmState;
824 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
825 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
826 switch (enmState)
827 {
828 case TMTIMERSTATE_EXPIRED:
829 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
830 return VERR_INVALID_PARAMETER;
831
832 case TMTIMERSTATE_STOPPED:
833 case TMTIMERSTATE_PENDING_STOP:
834 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
835 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
836 return VINF_SUCCESS;
837
838 case TMTIMERSTATE_PENDING_SCHEDULE:
839 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
840 {
841 tmSchedule(pTimer);
842 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
843 return VINF_SUCCESS;
844 }
845
846 case TMTIMERSTATE_PENDING_RESCHEDULE:
847 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
848 {
849 tmSchedule(pTimer);
850 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
851 return VINF_SUCCESS;
852 }
853 break;
854
855 case TMTIMERSTATE_ACTIVE:
856 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
857 {
858 tmSchedule(pTimer);
859 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
860 return VINF_SUCCESS;
861 }
862 break;
863
864 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
865 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
866#ifdef IN_RING3
867 if (!RTThreadYield())
868 RTThreadSleep(1);
869#else
870/**@todo call host and yield cpu after a while. */
871#endif
872 break;
873
874 /*
875 * Invalid states.
876 */
877 case TMTIMERSTATE_DESTROY:
878 case TMTIMERSTATE_FREE:
879 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
880 return VERR_TM_INVALID_STATE;
881 default:
882 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
883 return VERR_TM_UNKNOWN_STATE;
884 }
885 } while (cRetries-- > 0);
886
887 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
888 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
889 return VERR_INTERNAL_ERROR;
890}
891
892
893/**
894 * Get the current clock time.
895 * Handy for calculating the new expire time.
896 *
897 * @returns Current clock time.
898 * @param pTimer Timer handle as returned by one of the create functions.
899 */
900VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
901{
902 uint64_t u64;
903 PVM pVM = pTimer->CTX_SUFF(pVM);
904
905 switch (pTimer->enmClock)
906 {
907 case TMCLOCK_VIRTUAL:
908 u64 = TMVirtualGet(pVM);
909 break;
910 case TMCLOCK_VIRTUAL_SYNC:
911 u64 = TMVirtualSyncGet(pVM);
912 break;
913 case TMCLOCK_REAL:
914 u64 = TMRealGet(pVM);
915 break;
916 case TMCLOCK_TSC:
917 {
918 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
919 u64 = TMCpuTickGet(pVCpu);
920 break;
921 }
922 default:
923 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
924 return ~(uint64_t)0;
925 }
926 //Log2(("TMTimerGet: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
927 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
928 return u64;
929}
930
931
932/**
933 * Get the freqency of the timer clock.
934 *
935 * @returns Clock frequency (as Hz of course).
936 * @param pTimer Timer handle as returned by one of the create functions.
937 */
938VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
939{
940 switch (pTimer->enmClock)
941 {
942 case TMCLOCK_VIRTUAL:
943 case TMCLOCK_VIRTUAL_SYNC:
944 return TMCLOCK_FREQ_VIRTUAL;
945
946 case TMCLOCK_REAL:
947 return TMCLOCK_FREQ_REAL;
948
949 case TMCLOCK_TSC:
950 return TMCpuTicksPerSecond(pTimer->CTX_SUFF(pVM));
951
952 default:
953 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
954 return 0;
955 }
956}
957
958
959/**
960 * Get the current clock time as nanoseconds.
961 *
962 * @returns The timer clock as nanoseconds.
963 * @param pTimer Timer handle as returned by one of the create functions.
964 */
965VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
966{
967 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
968}
969
970
971/**
972 * Get the current clock time as microseconds.
973 *
974 * @returns The timer clock as microseconds.
975 * @param pTimer Timer handle as returned by one of the create functions.
976 */
977VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
978{
979 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
980}
981
982
983/**
984 * Get the current clock time as milliseconds.
985 *
986 * @returns The timer clock as milliseconds.
987 * @param pTimer Timer handle as returned by one of the create functions.
988 */
989VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
990{
991 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
992}
993
994
995/**
996 * Converts the specified timer clock time to nanoseconds.
997 *
998 * @returns nanoseconds.
999 * @param pTimer Timer handle as returned by one of the create functions.
1000 * @param u64Ticks The clock ticks.
1001 * @remark There could be rounding errors here. We just do a simple integere divide
1002 * without any adjustments.
1003 */
1004VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1005{
1006 switch (pTimer->enmClock)
1007 {
1008 case TMCLOCK_VIRTUAL:
1009 case TMCLOCK_VIRTUAL_SYNC:
1010 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1011 return u64Ticks;
1012
1013 case TMCLOCK_REAL:
1014 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1015 return u64Ticks * 1000000;
1016
1017 case TMCLOCK_TSC:
1018 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1019 return 0;
1020
1021 default:
1022 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1023 return 0;
1024 }
1025}
1026
1027
1028/**
1029 * Converts the specified timer clock time to microseconds.
1030 *
1031 * @returns microseconds.
1032 * @param pTimer Timer handle as returned by one of the create functions.
1033 * @param u64Ticks The clock ticks.
1034 * @remark There could be rounding errors here. We just do a simple integere divide
1035 * without any adjustments.
1036 */
1037VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1038{
1039 switch (pTimer->enmClock)
1040 {
1041 case TMCLOCK_VIRTUAL:
1042 case TMCLOCK_VIRTUAL_SYNC:
1043 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1044 return u64Ticks / 1000;
1045
1046 case TMCLOCK_REAL:
1047 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1048 return u64Ticks * 1000;
1049
1050 case TMCLOCK_TSC:
1051 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1052 return 0;
1053
1054 default:
1055 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1056 return 0;
1057 }
1058}
1059
1060
1061/**
1062 * Converts the specified timer clock time to milliseconds.
1063 *
1064 * @returns milliseconds.
1065 * @param pTimer Timer handle as returned by one of the create functions.
1066 * @param u64Ticks The clock ticks.
1067 * @remark There could be rounding errors here. We just do a simple integere divide
1068 * without any adjustments.
1069 */
1070VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1071{
1072 switch (pTimer->enmClock)
1073 {
1074 case TMCLOCK_VIRTUAL:
1075 case TMCLOCK_VIRTUAL_SYNC:
1076 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1077 return u64Ticks / 1000000;
1078
1079 case TMCLOCK_REAL:
1080 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1081 return u64Ticks;
1082
1083 case TMCLOCK_TSC:
1084 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1085 return 0;
1086
1087 default:
1088 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1089 return 0;
1090 }
1091}
1092
1093
/**
 * Converts the specified nanosecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64NanoTS       The nanosecond value ticks to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            /* The virtual clocks tick at 1 GHz; nanoseconds map 1:1 to ticks. */
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64NanoTS;

        case TMCLOCK_REAL:
            /* The real clock ticks in milliseconds; the sub-millisecond remainder is truncated. */
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64NanoTS / 1000000;

        case TMCLOCK_TSC:
            /* No fixed frequency relation is available here, so this is a hard failure. */
            AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
            return 0;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}
1124
1125
1126/**
1127 * Converts the specified microsecond timestamp to timer clock ticks.
1128 *
1129 * @returns timer clock ticks.
1130 * @param pTimer Timer handle as returned by one of the create functions.
1131 * @param u64MicroTS The microsecond value ticks to convert.
1132 * @remark There could be rounding and overflow errors here.
1133 */
1134VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1135{
1136 switch (pTimer->enmClock)
1137 {
1138 case TMCLOCK_VIRTUAL:
1139 case TMCLOCK_VIRTUAL_SYNC:
1140 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1141 return u64MicroTS * 1000;
1142
1143 case TMCLOCK_REAL:
1144 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1145 return u64MicroTS / 1000;
1146
1147 case TMCLOCK_TSC:
1148 AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
1149 return 0;
1150
1151 default:
1152 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1153 return 0;
1154 }
1155}
1156
1157
/**
 * Converts the specified millisecond timestamp to timer clock ticks.
 *
 * @returns timer clock ticks.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 * @param   u64MilliTS      The millisecond value ticks to convert.
 * @remark  There could be rounding and overflow errors here.
 */
VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
{
    switch (pTimer->enmClock)
    {
        case TMCLOCK_VIRTUAL:
        case TMCLOCK_VIRTUAL_SYNC:
            /* The virtual clocks tick at 1 GHz; scale milliseconds up to nanoseconds.
               NOTE(review): can overflow for very large millisecond values. */
            AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
            return u64MilliTS * 1000000;

        case TMCLOCK_REAL:
            /* The real clock already ticks in milliseconds. */
            AssertCompile(TMCLOCK_FREQ_REAL == 1000);
            return u64MilliTS;

        case TMCLOCK_TSC:
            /* No fixed frequency relation is available here, so this is a hard failure. */
            AssertReleaseMsgFailed(("TMCLOCK_TSC conversions are not implemented\n"));
            return 0;

        default:
            AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
            return 0;
    }
}
1188
1189
/**
 * Get the expire time of the timer.
 * Only valid for active timers.
 *
 * @returns Expire time of the timer; ~(uint64_t)0 if the timer is not active
 *          or is in an invalid/transient state that never stabilized.
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
{
    /* The timer state may be changed concurrently by other threads, so spin a
       bounded number of times waiting for a state where u64Expire is readable. */
    int cRetries = 1000;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /* Not active: there is no meaningful expire time. */
            case TMTIMERSTATE_EXPIRED:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;

            /* Active (or queued to become active): u64Expire is valid. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_SCHEDULE:
                Log2(("TMTimerGetExpire: returns %llu (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return pTimer->u64Expire;

            /* Another thread is in the middle of setting u64Expire; back off
               (ring-3 only — yield, or sleep 1 ms if yielding did nothing) and retry. */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
#ifdef IN_RING3
                if (!RTThreadYield())
                    RTThreadSleep(1);
#endif
                break;

            /*
             * Invalid states.
             */
            case TMTIMERSTATE_DESTROY:
            case TMTIMERSTATE_FREE:
                AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                      pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
            default:
                AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
                return ~(uint64_t)0;
        }
    } while (cRetries-- > 0);

    /* Gave up waiting for the state to settle. */
    AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
    Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
          pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
    return ~(uint64_t)0;
}
1248
1249
/**
 * Checks if a timer is active or not.
 *
 * @returns True if active (including the pending schedule/reschedule states).
 * @returns False if not active (stopped, expired, or pending stop); also
 *          false for the invalid destroy/free states (asserted in strict builds).
 * @param   pTimer          Timer handle as returned by one of the create functions.
 */
VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
{
    /* Single racy snapshot of the state; callers must tolerate concurrent changes. */
    TMTIMERSTATE enmState = pTimer->enmState;
    switch (enmState)
    {
        case TMTIMERSTATE_STOPPED:
        case TMTIMERSTATE_EXPIRED:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;

        case TMTIMERSTATE_ACTIVE:
        case TMTIMERSTATE_PENDING_RESCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return true;

        /*
         * Invalid states.
         */
        case TMTIMERSTATE_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
            Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
                  pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
            return false;
        default:
            AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
            return false;
    }
}
1293
1294
/**
 * Convert state to string.
 *
 * @returns Readonly status name, formatted as "<number>-<NAME>"
 *          (e.g. "2-ACTIVE"); "Invalid state!" for unknown values.
 * @param   enmState        State.
 */
const char *tmTimerState(TMTIMERSTATE enmState)
{
    switch (enmState)
    {
        /* The CASE macro also compile-time checks that the enum value matches
           the literal number baked into the returned string. */
#define CASE(num, state) \
        case TMTIMERSTATE_##state: \
            AssertCompile(TMTIMERSTATE_##state == (num)); \
            return #num "-" #state
        CASE( 1,STOPPED);
        CASE( 2,ACTIVE);
        CASE( 3,EXPIRED);
        CASE( 4,PENDING_STOP);
        CASE( 5,PENDING_STOP_SCHEDULE);
        CASE( 6,PENDING_SCHEDULE_SET_EXPIRE);
        CASE( 7,PENDING_SCHEDULE);
        CASE( 8,PENDING_RESCHEDULE_SET_EXPIRE);
        CASE( 9,PENDING_RESCHEDULE);
        CASE(10,DESTROY);
        CASE(11,FREE);
        default:
            AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
            return "Invalid state!";
#undef CASE
    }
}
1326
1327
/**
 * Schedules the given timer on the given queue.
 *
 * Takes the timer out of whatever pending state it is in and either links it
 * into the queue's active (expire-sorted) list, unlinks it, or leaves it for
 * a later pass, depending on the pending state.
 *
 * @param   pQueue          The timer queue.
 * @param   pTimer          The timer that needs scheduling.
 *
 * @remarks Called while owning the lock.
 */
DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
{
    /*
     * Processing.
     */
    /* Other threads may still change the timer state concurrently (tmTimerTry
       can fail); retry the state switch a couple of times before giving up. */
    unsigned cRetries = 2;
    do
    {
        TMTIMERSTATE enmState = pTimer->enmState;
        switch (enmState)
        {
            /*
             * Reschedule timer (in the active list).
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
                    break; /* retry */

                /* Unlink from the active list; when removing the head, the
                   queue's cached next-expire must be refreshed as well. */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Schedule timer (insert into the active list).
             */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            {
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
                    break; /* retry */

                /* Walk the expire-sorted list and insert before the first
                   entry expiring later, or append at the tail. */
                PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
                if (pCur)
                {
                    const uint64_t u64Expire = pTimer->u64Expire;
                    for (;; pCur = TMTIMER_GET_NEXT(pCur))
                    {
                        if (pCur->u64Expire > u64Expire)
                        {
                            const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
                            TMTIMER_SET_NEXT(pTimer, pCur);
                            TMTIMER_SET_PREV(pTimer, pPrev);
                            if (pPrev)
                                TMTIMER_SET_NEXT(pPrev, pTimer);
                            else
                            {
                                /* New head: update the queue's cached next-expire. */
                                TMTIMER_SET_HEAD(pQueue, pTimer);
                                pQueue->u64Expire = u64Expire;
                            }
                            TMTIMER_SET_PREV(pCur, pTimer);
                            return;
                        }
                        if (!pCur->offNext)
                        {
                            /* Reached the tail: append. */
                            TMTIMER_SET_NEXT(pCur, pTimer);
                            TMTIMER_SET_PREV(pTimer, pCur);
                            return;
                        }
                    }
                }
                else
                {
                    /* Empty list: the timer becomes the sole entry and head. */
                    TMTIMER_SET_HEAD(pQueue, pTimer);
                    pQueue->u64Expire = pTimer->u64Expire;
                }
                return;
            }

            /*
             * Stop the timer in active list.
             */
            case TMTIMERSTATE_PENDING_STOP:
            {
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
                    break; /* retry */

                /* Unlink from the active list (same pattern as the reschedule case above). */
                const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
                const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
                if (pPrev)
                    TMTIMER_SET_NEXT(pPrev, pNext);
                else
                {
                    TMTIMER_SET_HEAD(pQueue, pNext);
                    pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
                }
                if (pNext)
                    TMTIMER_SET_PREV(pNext, pPrev);
                pTimer->offNext = 0;
                pTimer->offPrev = 0;
                /* fall thru */
            }

            /*
             * Stop the timer (not on the active list).
             */
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
                Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
                if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
                    break;
                return;

            /*
             * The timer is pending destruction by TMR3TimerDestroy, our caller.
             * Nothing to do here.
             */
            case TMTIMERSTATE_DESTROY:
                break;

            /*
             * Postpone these until they get into the right state.
             */
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                tmTimerLink(pQueue, pTimer);
                STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
                return;

            /*
             * None of these can be in the schedule.
             */
            case TMTIMERSTATE_FREE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_EXPIRED:
            default:
                AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
                                 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
                return;
        }
    } while (cRetries-- > 0);
}
1480
1481
/**
 * Schedules the specified timer queue.
 *
 * Atomically detaches the queue's pending-schedule list and processes each
 * timer on it via tmTimerQueueScheduleOne.
 *
 * @param   pVM             The VM to run the timers for.
 * @param   pQueue          The queue to schedule.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
{
    TM_ASSERT_EMT_LOCK(pVM);

    /*
     * Dequeue the scheduling list and iterate it.
     */
    /* The list is linked with self-relative byte offsets (valid in all
       contexts: R3/R0/RC); an offset of 0 means empty / end of list. */
    int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
    Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32}\n", pQueue, pQueue->enmClock, offNext));
    if (!offNext)
        return;
    PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
    while (pNext)
    {
        /*
         * Unlink the head timer and find the next one.
         */
        PTMTIMER pTimer = pNext;
        pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
        pTimer->offScheduleNext = 0;

        /*
         * Do the scheduling.
         */
        Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
              pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
        tmTimerQueueScheduleOne(pQueue, pTimer);
        Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
    } /* foreach timer in current schedule batch. */
}
1520
1521
#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * Verifies the doubly linked active lists (clock tags, prev/next symmetry,
 * plausible states) and, in ring-3, cross-checks every created timer's state
 * against its presence or absence in the active lists.
 *
 * @param   pVM             VM handle.
 * @param   pszWhere        Caller identification used in assertion messages.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_EMT_LOCK(pVM);

    /*
     * Check the linking of the active lists.
     */
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* An ACTIVE timer must not sit on the schedule list; the
                       enmState re-read tolerates a concurrent state change
                       between reading offScheduleNext and asserting. */
                    AssertMsg(  !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            /* These states must appear in their clock's active list. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            {
                PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                Assert(pCur->offPrev || pCur == pCurAct);
                while (pCurAct && pCurAct != pCur)
                    pCurAct = TMTIMER_GET_NEXT(pCurAct);
                Assert(pCurAct == pCur);
                break;
            }

            /* These states must NOT appear in any active list. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED:
            {
                Assert(!pCur->offNext);
                Assert(!pCur->offPrev);
                for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                     pCurAct;
                     pCurAct = TMTIMER_GET_NEXT(pCurAct))
                {
                    Assert(pCurAct != pCur);
                    Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                    Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                }
                break;
            }

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */
}
#endif /* VBOX_STRICT */
1624
1625
/**
 * Gets the current warp drive percent.
 *
 * @returns The warp drive percent (100 means the virtual clock runs at
 *          normal speed).
 * @param   pVM             The VM handle.
 */
VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
{
    /* Plain read of the configured percentage; no locking needed for a 32-bit read. */
    return pVM->tm.s.u32VirtualWarpDrivePercentage;
}
1636
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette