VirtualBox

source: vbox/trunk/src/VBox/VMM/TM.cpp@2828

Last change on this file was r2828, checked in by vboxsync on 2007-05-23:

If the TSC isn't fixed, spin instead of sleeping when reading out the TSC frequency.

1/* $Id: TM.cpp 2828 2007-05-23 16:20:24Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/** @page pg_tm TM - The Time Manager
24 *
25 * The Time Manager abstracts the CPU clocks and manages timers used by the VMM,
26 * device and drivers.
27 *
28 *
29 * @section sec_tm_clocks Clocks
30 *
31 * There are currently 4 clocks:
32 * - Virtual (guest).
33 * - Synchronous virtual (guest).
34 * - CPU Tick (TSC) (guest). Its only current use is rdtsc emulation. Usually a
35 * function of the virtual clock.
36 * - Real (host). The only current use is display updates, for no particularly
37 * good reason...
38 *
39 * The interesting clocks are the first two, the virtual and the synchronous
40 * virtual clock. The synchronous virtual clock is tied to the virtual clock except
41 * that it takes into account timer delivery lag caused by host scheduling. It will
42 * normally never advance beyond the head timer, and when lagging too far behind
43 * it will gradually speed up to catch up with the virtual clock.
44 *
45 * The CPU tick (TSC) is normally virtualized as a function of the virtual time,
46 * where the frequency defaults to the host CPU frequency (as we measure it). It
47 * can also use the host TSC as source and either present it with an offset or
48 * unmodified. It is of course possible to configure the TSC frequency and mode
49 * of operation.
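 *
 * As a rough sketch of the default (virtualized) mode - assuming the guest TSC is
 * derived purely from the virtual clock; the helper name below is hypothetical,
 * while the fields and IPRT helpers are the ones used elsewhere in this file:
 * @code
 *      // Guest TSC when virtualized as a function of virtual time (sketch only).
 *      uint64_t tmSketchVirtualTSC(PVM pVM)
 *      {
 *          uint64_t u64Nano = TMVirtualGet(pVM); // guest virtual time in nanoseconds
 *          // ticks = ns * cTSCTicksPerSecond / 1e9; the frequency is kept below 4G
 *          // elsewhere in TMR3Init, so a 64x32/32 multiply-divide suffices.
 *          return ASMMultU64ByU32DivByU32(u64Nano, (uint32_t)pVM->tm.s.cTSCTicksPerSecond, 1000000000);
 *      }
 * @endcode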
50 *
51 * @subsection subsec_tm_timesync Guest Time Sync / UTC time
52 *
53 * Guest time syncing is primarily taken care of by the VMM device. The principle
54 * is very simple: the guest additions periodically ask the VMM device what the
55 * current UTC time is and make adjustments accordingly. Now, because the
56 * synchronous virtual clock might be catching up and we would therefore be
57 * delivering time at more than the normal rate for a little while, some adjusting
58 * of the UTC time is required before passing it on to the guest. This is why TM
59 * provides an API for querying the current UTC time.
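 *
 * A hypothetical sketch of that adjustment (the helper name is made up; offUTC,
 * offVirtualSync and offVirtualSyncGivenUp are the real TM fields, and their
 * difference is the catch-up lag mentioned above):
 * @code
 *      // Host UTC shifted by the configured offset and reduced by the current
 *      // catch-up lag, so guest UTC doesn't run ahead of the virtual sync clock.
 *      PRTTIMESPEC tmSketchUTCNow(PVM pVM, PRTTIMESPEC pTime)
 *      {
 *          int64_t i64Lag = (int64_t)(pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp);
 *          RTTimeNow(pTime); // host UTC
 *          return RTTimeSpecAddNano(pTime, pVM->tm.s.offUTC - i64Lag);
 *      }
 * @endcode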
60 *
61 *
62 * @section sec_tm_timers Timers
63 *
64 * The timers can use any of the TM clocks described in the previous section. Each
65 * clock has its own scheduling facility, or timer queue if you like. There are
66 * a few factors which make it a bit complex. First there is the usual R0 vs. R3
67 * vs. GC thing. Then there are multiple threads, and then there is the timer thread
68 * that periodically checks whether any timers have expired without EMT noticing. On
69 * the API level, all but the create and save APIs must be multithreaded. EMT will
70 * always run the timers.
71 *
72 * The design uses a doubly linked list of active timers which is ordered
73 * by expire date. This list is only modified by the EMT thread. Updates to the
74 * list are batched in a singly linked list, which is then processed by the EMT
75 * thread at the first opportunity (immediately, the next time EMT modifies a timer
76 * on that clock, or the next timer timeout). Both lists are offset based, and all
77 * the elements are therefore allocated from the hyper heap.
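 *
 * "Offset based" here means the next/prev links store byte offsets rather than
 * pointers, so the very same list nodes work in R3, R0 and GC. A sketch of the
 * idea (the real code uses the TMTIMER_GET_NEXT and TMTIMER_GET_PREV macros;
 * this helper name is made up):
 * @code
 *      // Follow an offset based 'next' link; an offset of 0 marks the list end.
 *      static PTMTIMER tmSketchGetNext(PTMTIMER pTimer)
 *      {
 *          return pTimer->offNext
 *               ? (PTMTIMER)((intptr_t)pTimer + pTimer->offNext)
 *               : NULL;
 *      }
 * @endcode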
78 *
79 * For figuring out when timers need to be scheduled and run, TM will:
80 * - Poll whenever somebody queries the virtual clock.
81 * - Poll the virtual clocks from the EM and REM loops.
82 * - Poll the virtual clocks from the trap exit path.
83 * - Poll the virtual clocks and calculate first timeout from the halt loop.
84 * - Employ a thread which periodically (100Hz) polls all the timer queues.
85 *
86 *
87 * @section sec_tm_timer Logging
88 *
89 * Level 2: Logs most of the timer state transitions and queue servicing.
90 * Level 3: Logs a few oddments.
91 * Level 4: Logs TMCLOCK_VIRTUAL_SYNC catch-up events.
92 *
93 */
94
95
96
97
98/*******************************************************************************
99* Header Files *
100*******************************************************************************/
101#define LOG_GROUP LOG_GROUP_TM
102#include <VBox/tm.h>
103#include <VBox/vmm.h>
104#include <VBox/mm.h>
105#include <VBox/ssm.h>
106#include <VBox/dbgf.h>
107#include <VBox/rem.h>
108#include "TMInternal.h"
109#include <VBox/vm.h>
110
111#include <VBox/param.h>
112#include <VBox/err.h>
113
114#include <VBox/log.h>
115#include <iprt/asm.h>
116#include <iprt/assert.h>
117#include <iprt/thread.h>
118#include <iprt/time.h>
119#include <iprt/timer.h>
120#include <iprt/semaphore.h>
121#include <iprt/string.h>
122#include <iprt/env.h>
123
124
125/*******************************************************************************
126* Defined Constants And Macros *
127*******************************************************************************/
128/** The current saved state version.*/
129#define TM_SAVED_STATE_VERSION 3
130
131
132/*******************************************************************************
133* Internal Functions *
134*******************************************************************************/
135static bool tmR3HasFixedTSC(void);
136static uint64_t tmR3CalibrateTSC(void);
137static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM);
138static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
139static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser);
140static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue);
141static void tmR3TimerQueueRunVirtualSync(PVM pVM);
142static DECLCALLBACK(void) tmR3TimerInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
143static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
144static DECLCALLBACK(void) tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
145
146
147/**
148 * Internal function for getting the clock time.
149 *
150 * @returns clock time.
151 * @param pVM The VM handle.
152 * @param enmClock The clock.
153 */
154DECLINLINE(uint64_t) tmClock(PVM pVM, TMCLOCK enmClock)
155{
156 switch (enmClock)
157 {
158 case TMCLOCK_VIRTUAL: return TMVirtualGet(pVM);
159 case TMCLOCK_VIRTUAL_SYNC: return TMVirtualSyncGet(pVM);
160 case TMCLOCK_REAL: return TMRealGet(pVM);
161 case TMCLOCK_TSC: return TMCpuTickGet(pVM);
162 default:
163 AssertMsgFailed(("enmClock=%d\n", enmClock));
164 return ~(uint64_t)0;
165 }
166}
167
168
169/**
170 * Initializes the TM.
171 *
172 * @returns VBox status code.
173 * @param pVM The VM to operate on.
174 */
175TMR3DECL(int) TMR3Init(PVM pVM)
176{
177 LogFlow(("TMR3Init:\n"));
178
179 /*
180 * Assert alignment and sizes.
181 */
182 AssertRelease(!(RT_OFFSETOF(VM, tm.s) & 31));
183 AssertRelease(sizeof(pVM->tm.s) <= sizeof(pVM->tm.padding));
184
185 /*
186 * Init the structure.
187 */
188 void *pv;
189 int rc = MMHyperAlloc(pVM, sizeof(pVM->tm.s.paTimerQueuesR3[0]) * TMCLOCK_MAX, 0, MM_TAG_TM, &pv);
190 AssertRCReturn(rc, rc);
191 pVM->tm.s.paTimerQueuesR3 = (PTMTIMERQUEUE)pv;
192
193 pVM->tm.s.offVM = RT_OFFSETOF(VM, tm.s);
194 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].enmClock = TMCLOCK_VIRTUAL;
195 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].u64Expire = INT64_MAX;
196 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].enmClock = TMCLOCK_VIRTUAL_SYNC;
197 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].u64Expire = INT64_MAX;
198 pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].enmClock = TMCLOCK_REAL;
199 pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].u64Expire = INT64_MAX;
200 pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].enmClock = TMCLOCK_TSC;
201 pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].u64Expire = INT64_MAX;
202
203 /*
204 * We indirectly - through RTTimeNanoTS and RTTimeMilliTS - use the global
205 * info page (GIP) for both the virtual and the real clock. By mapping
206 * the GIP into guest context we get just as accurate time there as well.
207 * All that's required is that the g_pSUPGlobalInfoPage symbol is available
208 * to the GC Runtime.
209 */
210 pVM->tm.s.pvGIPR3 = (void *)g_pSUPGlobalInfoPage;
211 AssertMsgReturn(pVM->tm.s.pvGIPR3, ("GIP support is now required!\n"), VERR_INTERNAL_ERROR);
212 RTHCPHYS HCPhysGIP;
213 rc = SUPGipGetPhys(&HCPhysGIP);
214 AssertMsgRCReturn(rc, ("Failed to get GIP physical address!\n"), rc);
215
216 rc = MMR3HyperMapHCPhys(pVM, pVM->tm.s.pvGIPR3, HCPhysGIP, PAGE_SIZE, "GIP", &pVM->tm.s.pvGIPGC);
217 if (VBOX_FAILURE(rc))
218 {
219 AssertMsgFailed(("Failed to map GIP into GC, rc=%Vrc!\n", rc));
220 return rc;
221 }
222 LogFlow(("TMR3Init: HCPhysGIP=%RHp at %VGv\n", HCPhysGIP, pVM->tm.s.pvGIPGC));
223 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
224
225 /*
226 * Get our CFGM node, create it if necessary.
227 */
228 PCFGMNODE pCfgHandle = CFGMR3GetChild(CFGMR3GetRoot(pVM), "TM");
229 if (!pCfgHandle)
230 {
231 rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "TM", &pCfgHandle);
232 AssertRCReturn(rc, rc);
233 }
234
235 /*
236 * Determine the TSC configuration and frequency.
237 */
238 /* mode */
239 rc = CFGMR3QueryBool(pCfgHandle, "TSCVirtualized", &pVM->tm.s.fTSCVirtualized);
240 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
241 pVM->tm.s.fTSCVirtualized = true; /* trap rdtsc */
242 else if (VBOX_FAILURE(rc))
243 return VMSetError(pVM, rc, RT_SRC_POS,
244 N_("Configuration error: Failed to querying bool value \"UseRealTSC\". (%Vrc)"), rc);
245
246 /* source */
247 rc = CFGMR3QueryBool(pCfgHandle, "UseRealTSC", &pVM->tm.s.fTSCUseRealTSC);
248 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
249 pVM->tm.s.fTSCUseRealTSC = false; /* use virtual time */
250 else if (VBOX_FAILURE(rc))
251 return VMSetError(pVM, rc, RT_SRC_POS,
252 N_("Configuration error: Failed to querying bool value \"UseRealTSC\". (%Vrc)"), rc);
253 if (!pVM->tm.s.fTSCUseRealTSC)
254 pVM->tm.s.fTSCVirtualized = true;
255
256 /* TSC reliability */
257 rc = CFGMR3QueryBool(pCfgHandle, "MaybeUseOffsettedHostTSC", &pVM->tm.s.fMaybeUseOffsettedHostTSC);
258 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
259 {
260 if (!pVM->tm.s.fTSCUseRealTSC)
261 pVM->tm.s.fMaybeUseOffsettedHostTSC = tmR3HasFixedTSC();
262 else
263 pVM->tm.s.fMaybeUseOffsettedHostTSC = true;
264 }
265
266 /* frequency */
267 rc = CFGMR3QueryU64(pCfgHandle, "TSCTicksPerSecond", &pVM->tm.s.cTSCTicksPerSecond);
268 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
269 {
270 pVM->tm.s.cTSCTicksPerSecond = tmR3CalibrateTSC();
271 if ( !pVM->tm.s.fTSCUseRealTSC
272 && pVM->tm.s.cTSCTicksPerSecond >= _4G)
273 {
274 pVM->tm.s.cTSCTicksPerSecond = _4G - 1; /* (A limitation of our math code) */
275 pVM->tm.s.fMaybeUseOffsettedHostTSC = false;
276 }
277 }
278 else if (VBOX_FAILURE(rc))
279 return VMSetError(pVM, rc, RT_SRC_POS,
280 N_("Configuration error: Failed to querying uint64_t value \"TSCTicksPerSecond\". (%Vrc)"), rc);
281 else if ( pVM->tm.s.cTSCTicksPerSecond < _1M
282 || pVM->tm.s.cTSCTicksPerSecond >= _4G)
283 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
284 N_("Configuration error: \"TSCTicksPerSecond\" = %RI64 is not in the range 1MHz..4GHz-1!"),
285 pVM->tm.s.cTSCTicksPerSecond);
286 else
287 {
288 pVM->tm.s.fTSCUseRealTSC = pVM->tm.s.fMaybeUseOffsettedHostTSC = false;
289 pVM->tm.s.fTSCVirtualized = true;
290 }
291
292 /* setup and report */
293 if (pVM->tm.s.fTSCVirtualized)
294 CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD);
295 else
296 CPUMR3SetCR4Feature(pVM, 0, ~X86_CR4_TSD);
297 LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool fMaybeUseOffsettedHostTSC=%RTbool\n",
298 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized,
299 pVM->tm.s.fTSCUseRealTSC, pVM->tm.s.fMaybeUseOffsettedHostTSC));
300
301 /*
302 * Configure the timer synchronous virtual time.
303 */
304 rc = CFGMR3QueryU32(pCfgHandle, "ScheduleSlack", &pVM->tm.s.u32VirtualSyncScheduleSlack);
305 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
306 pVM->tm.s.u32VirtualSyncScheduleSlack = 100000; /* 0.100ms (ASSUMES virtual time is nanoseconds) */
307 else if (VBOX_FAILURE(rc))
308 return VMSetError(pVM, rc, RT_SRC_POS,
309 N_("Configuration error: Failed to querying 32-bit integer value \"ScheduleSlack\". (%Vrc)"), rc);
310
311 rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStopThreshold", &pVM->tm.s.u64VirtualSyncCatchUpStopThreshold);
312 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
313 pVM->tm.s.u64VirtualSyncCatchUpStopThreshold = 500000; /* 0.5ms */
314 else if (VBOX_FAILURE(rc))
315 return VMSetError(pVM, rc, RT_SRC_POS,
316 N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpStopThreshold\". (%Vrc)"), rc);
317
318 rc = CFGMR3QueryU64(pCfgHandle, "CatchUpGiveUpThreshold", &pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold);
319 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
320 pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold = UINT64_C(60000000000); /* 60 sec */
321 else if (VBOX_FAILURE(rc))
322 return VMSetError(pVM, rc, RT_SRC_POS,
323 N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpGiveUpThreshold\". (%Vrc)"), rc);
324
325
326#define TM_CFG_PERIOD(iPeriod, DefStart, DefPct) \
327 do \
328 { \
329 uint64_t u64; \
330 rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStartThreshold" #iPeriod, &u64); \
331 if (rc == VERR_CFGM_VALUE_NOT_FOUND) \
332 u64 = UINT64_C(DefStart); \
333 else if (VBOX_FAILURE(rc)) \
334 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to query 64-bit integer value \"CatchUpStartThreshold" #iPeriod "\". (%Vrc)"), rc); \
335 if ( (iPeriod > 0 && u64 <= pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod - 1].u64Start) \
336 || u64 >= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold) \
337 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %RU64\n"), u64); \
338 pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u64Start = u64; \
339 rc = CFGMR3QueryU32(pCfgHandle, "CatchUpPrecentage" #iPeriod, &pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage); \
340 if (rc == VERR_CFGM_VALUE_NOT_FOUND) \
341 pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage = (DefPct); \
342 else if (VBOX_FAILURE(rc)) \
343 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to query 32-bit integer value \"CatchUpPrecentage" #iPeriod "\". (%Vrc)"), rc); \
344 } while (0)
345 /* This needs more tuning. Not sure if we really need so many periods or need to be this gentle. */
346 TM_CFG_PERIOD(0, 750000, 5); /* 0.75ms at 1.05x */
347 TM_CFG_PERIOD(1, 1500000, 10); /* 1.50ms at 1.10x */
348 TM_CFG_PERIOD(2, 8000000, 25); /* 8ms at 1.25x */
349 TM_CFG_PERIOD(3, 30000000, 50); /* 30ms at 1.50x */
350 TM_CFG_PERIOD(4, 100000000, 75); /* 100ms at 1.75x */
351 TM_CFG_PERIOD(5, 175000000, 100); /* 175ms at 2x */
352 TM_CFG_PERIOD(6, 500000000, 200); /* 500ms at 3x */
353 TM_CFG_PERIOD(7, 3000000000, 300); /* 3s at 4x */
354 TM_CFG_PERIOD(8,30000000000, 400); /* 30s at 5x */
355 TM_CFG_PERIOD(9,55000000000, 500); /* 55s at 6x */
356 AssertCompile(RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) == 10);
357#undef TM_CFG_PERIOD
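    /* A quick worked example of the table above (just the arithmetic, not authoritative):
       with a lag between 8ms and 30ms the 25% period applies, so for every 4 ns of
       elapsed virtual time the virtual sync clock advances 5 ns (elapsed + elapsed * 25 / 100),
       i.e. it runs at 1.25x until the lag drops below CatchUpStopThreshold or crosses
       into another period. */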
358
359 /*
360 * Configure real world time (UTC).
361 */
362 rc = CFGMR3QueryS64(pCfgHandle, "UTCOffset", &pVM->tm.s.offUTC);
363 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
364 pVM->tm.s.offUTC = 0; /* ns */
365 else if (VBOX_FAILURE(rc))
366 return VMSetError(pVM, rc, RT_SRC_POS,
367 N_("Configuration error: Failed to querying 64-bit integer value \"UTCOffset\". (%Vrc)"), rc);
368
369 /*
370 * Setup the warp drive.
371 */
372 rc = CFGMR3QueryU32(pCfgHandle, "WarpDrivePercentage", &pVM->tm.s.u32VirtualWarpDrivePercentage);
373 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
374 rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "WarpDrivePercentage", &pVM->tm.s.u32VirtualWarpDrivePercentage); /* legacy */
375 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
376 pVM->tm.s.u32VirtualWarpDrivePercentage = 100;
377 else if (VBOX_FAILURE(rc))
378 return VMSetError(pVM, rc, RT_SRC_POS,
379 N_("Configuration error: Failed to querying uint32_t value \"WarpDrivePercent\". (%Vrc)"), rc);
380 else if ( pVM->tm.s.u32VirtualWarpDrivePercentage < 2
381 || pVM->tm.s.u32VirtualWarpDrivePercentage > 20000)
382 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
383 N_("Configuration error: \"WarpDrivePercent\" = %RI32 is not in the range 2..20000!"),
384 pVM->tm.s.u32VirtualWarpDrivePercentage);
385 pVM->tm.s.fVirtualWarpDrive = pVM->tm.s.u32VirtualWarpDrivePercentage != 100;
386 if (pVM->tm.s.fVirtualWarpDrive)
387 LogRel(("TM: u32VirtualWarpDrivePercentage=%RI32\n", pVM->tm.s.u32VirtualWarpDrivePercentage));
388
389 /*
390 * Start the timer (guard against REM not yielding).
391 */
392 uint32_t u32Millies;
393 rc = CFGMR3QueryU32(pCfgHandle, "TimerMillies", &u32Millies);
394 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
395 u32Millies = 10;
396 else if (VBOX_FAILURE(rc))
397 return VMSetError(pVM, rc, RT_SRC_POS,
398 N_("Configuration error: Failed to query uint32_t value \"TimerMillies\", rc=%Vrc.\n"), rc);
399 rc = RTTimerCreate(&pVM->tm.s.pTimer, u32Millies, tmR3TimerCallback, pVM);
400 if (VBOX_FAILURE(rc))
401 {
402 AssertMsgFailed(("Failed to create timer, u32Millies=%d rc=%Vrc.\n", u32Millies, rc));
403 return rc;
404 }
405 Log(("TM: Created timer %p firing every %d millieseconds\n", pVM->tm.s.pTimer, u32Millies));
406 pVM->tm.s.u32TimerMillies = u32Millies;
407
408 /*
409 * Register saved state.
410 */
411 rc = SSMR3RegisterInternal(pVM, "tm", 1, TM_SAVED_STATE_VERSION, sizeof(uint64_t) * 8,
412 NULL, tmR3Save, NULL,
413 NULL, tmR3Load, NULL);
414 if (VBOX_FAILURE(rc))
415 return rc;
416
417#ifdef VBOX_WITH_STATISTICS
418 /*
419 * Register statistics.
420 */
421 STAM_REG(pVM, &pVM->tm.s.StatDoQueues, STAMTYPE_PROFILE, "/TM/DoQueues", STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo.");
422 STAM_REG(pVM, &pVM->tm.s.StatDoQueuesSchedule, STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Schedule",STAMUNIT_TICKS_PER_CALL, "The scheduling part.");
423 STAM_REG(pVM, &pVM->tm.s.StatDoQueuesRun, STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Run", STAMUNIT_TICKS_PER_CALL, "The run part.");
424
425 STAM_REG(pVM, &pVM->tm.s.StatPollAlreadySet, STAMTYPE_COUNTER, "/TM/PollAlreadySet", STAMUNIT_OCCURENCES, "TMTimerPoll calls where the FF was already set.");
426 STAM_REG(pVM, &pVM->tm.s.StatPollVirtual, STAMTYPE_COUNTER, "/TM/PollHitsVirtual", STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
427 STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync, STAMTYPE_COUNTER, "/TM/PollHitsVirtualSync",STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
428 STAM_REG(pVM, &pVM->tm.s.StatPollMiss, STAMTYPE_COUNTER, "/TM/PollMiss", STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
429
430 STAM_REG(pVM, &pVM->tm.s.StatPostponedR3, STAMTYPE_COUNTER, "/TM/PostponedR3", STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in ring-3.");
431 STAM_REG(pVM, &pVM->tm.s.StatPostponedR0, STAMTYPE_COUNTER, "/TM/PostponedR0", STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in ring-0.");
432 STAM_REG(pVM, &pVM->tm.s.StatPostponedGC, STAMTYPE_COUNTER, "/TM/PostponedGC", STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in GC.");
433
434 STAM_REG(pVM, &pVM->tm.s.StatScheduleOneGC, STAMTYPE_PROFILE, "/TM/ScheduleOneGC", STAMUNIT_TICKS_PER_CALL, "Profiling the scheduling of one queue during a TMTimer* call in EMT.\n");
435 STAM_REG(pVM, &pVM->tm.s.StatScheduleOneR0, STAMTYPE_PROFILE, "/TM/ScheduleOneR0", STAMUNIT_TICKS_PER_CALL, "Profiling the scheduling of one queue during a TMTimer* call in EMT.\n");
436 STAM_REG(pVM, &pVM->tm.s.StatScheduleOneR3, STAMTYPE_PROFILE, "/TM/ScheduleOneR3", STAMUNIT_TICKS_PER_CALL, "Profiling the scheduling of one queue during a TMTimer* call in EMT.\n");
437 STAM_REG(pVM, &pVM->tm.s.StatScheduleSetFF, STAMTYPE_COUNTER, "/TM/ScheduleSetFF", STAMUNIT_OCCURENCES, "The number of times the timer FF was set instead of doing scheduling.");
438
439 STAM_REG(pVM, &pVM->tm.s.StatTimerSetGC, STAMTYPE_PROFILE, "/TM/TimerSetGC", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in GC.");
440 STAM_REG(pVM, &pVM->tm.s.StatTimerSetR0, STAMTYPE_PROFILE, "/TM/TimerSetR0", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in ring-0.");
441 STAM_REG(pVM, &pVM->tm.s.StatTimerSetR3, STAMTYPE_PROFILE, "/TM/TimerSetR3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in ring-3.");
442
443 STAM_REG(pVM, &pVM->tm.s.StatTimerStopGC, STAMTYPE_PROFILE, "/TM/TimerStopGC", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerStop calls made in GC.");
444 STAM_REG(pVM, &pVM->tm.s.StatTimerStopR0, STAMTYPE_PROFILE, "/TM/TimerStopR0", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerStop calls made in ring-0.");
445 STAM_REG(pVM, &pVM->tm.s.StatTimerStopR3, STAMTYPE_PROFILE, "/TM/TimerStopR3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerStop calls made in ring-3.");
446
447 STAM_REG(pVM, &pVM->tm.s.StatVirtualGet, STAMTYPE_COUNTER, "/TM/VirtualGet", STAMUNIT_OCCURENCES, "The number of times TMTimerGet was called when the clock was running.");
448 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF, STAMTYPE_COUNTER, "/TM/VirtualGetSetFF", STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGet.");
449 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSync, STAMTYPE_COUNTER, "/TM/VirtualGetSync", STAMUNIT_OCCURENCES, "The number of times TMTimerGetSync was called when the clock was running.");
450 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSyncSetFF,STAMTYPE_COUNTER, "/TM/VirtualGetSyncSetFF",STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGetSync.");
451 STAM_REG(pVM, &pVM->tm.s.StatVirtualPause, STAMTYPE_COUNTER, "/TM/VirtualPause", STAMUNIT_OCCURENCES, "The number of times TMR3TimerPause was called.");
452 STAM_REG(pVM, &pVM->tm.s.StatVirtualResume, STAMTYPE_COUNTER, "/TM/VirtualResume", STAMUNIT_OCCURENCES, "The number of times TMR3TimerResume was called.");
453
454 STAM_REG(pVM, &pVM->tm.s.StatTimerCallbackSetFF,STAMTYPE_COUNTER, "/TM/CallbackSetFF", STAMUNIT_OCCURENCES, "The number of times the timer callback set FF.");
455
456
457 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncCatchup, STAMTYPE_PROFILE_ADV, "/TM/VirtualSync/CatchUp", STAMUNIT_TICKS_PER_OCCURENCE, "Counting and measuring the times spent catching up.");
458 STAM_REG(pVM, (void *)&pVM->tm.s.fVirtualSyncCatchUp, STAMTYPE_U8, "/TM/VirtualSync/CatchUpActive", STAMUNIT_NONE, "Catch-Up active indicator.");
459 STAM_REG(pVM, (void *)&pVM->tm.s.u32VirtualSyncCatchUpPercentage, STAMTYPE_U32, "/TM/VirtualSync/CatchUpPercentage", STAMUNIT_PCT, "The catch-up percentage. (+100/100 to get clock multiplier)");
460 STAM_REG(pVM, (void *)&pVM->tm.s.offVirtualSync, STAMTYPE_U64, "/TM/VirtualSync/CurrentOffset", STAMUNIT_NS, "The current offset. (subtract GivenUp to get the lag)");
461 STAM_REG(pVM, (void *)&pVM->tm.s.offVirtualSyncGivenUp, STAMTYPE_U64, "/TM/VirtualSync/GivenUp", STAMUNIT_NS, "Nanoseconds of the 'CurrentOffset' that have been given up and will never be caught up with.");
462 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUp, STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUp", STAMUNIT_OCCURENCES, "Times the catch-up was abandoned.");
463 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting,STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUpBeforeStarting", STAMUNIT_OCCURENCES, "Times the catch-up was abandoned before even starting. (Typically debugging++.)");
464 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRun, STAMTYPE_COUNTER, "/TM/VirtualSync/Run", STAMUNIT_OCCURENCES, "Times the virtual sync timer queue was considered.");
465 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunRestart, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/Restarts", STAMUNIT_OCCURENCES, "Times the clock was restarted after a run.");
466 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStop, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/Stop", STAMUNIT_OCCURENCES, "Times the clock was stopped when calculating the current time before examining the timers.");
467 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStoppedAlready, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/StoppedAlready", STAMUNIT_OCCURENCES, "Times the clock was already stopped elsewhere (TMVirtualSyncGet).");
468 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunSlack, STAMTYPE_PROFILE, "/TM/VirtualSync/Run/Slack", STAMUNIT_NS_PER_OCCURENCE, "The scheduling slack. (Catch-up handed out when running timers.)");
469 for (unsigned i = 0; i < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods); i++)
470 {
471 STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "The catch-up percentage.", "/TM/VirtualSync/Periods/%u", i);
472 STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupAdjust[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times adjusted to this period.", "/TM/VirtualSync/Periods/%u/Adjust", i);
473 STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupInitial[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times started in this period.", "/TM/VirtualSync/Periods/%u/Initial", i);
474 STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u64Start, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Start of this period (lag).", "/TM/VirtualSync/Periods/%u/Start", i);
475 }
476
477#endif /* VBOX_WITH_STATISTICS */
478
479 /*
480 * Register info handlers.
481 */
482 DBGFR3InfoRegisterInternalEx(pVM, "timers", "Dumps all timers. No arguments.", tmR3TimerInfo, DBGFINFO_FLAGS_RUN_ON_EMT);
483 DBGFR3InfoRegisterInternalEx(pVM, "activetimers", "Dumps active all timers. No arguments.", tmR3TimerInfoActive, DBGFINFO_FLAGS_RUN_ON_EMT);
484 DBGFR3InfoRegisterInternalEx(pVM, "clocks", "Display the time of the various clocks.", tmR3InfoClocks, DBGFINFO_FLAGS_RUN_ON_EMT);
485
486 return VINF_SUCCESS;
487}
488
489
490/**
491 * Checks if the host CPU has a fixed TSC frequency.
492 *
493 * @returns true if it has, false if it hasn't.
494 *
495 * @remark This test doesn't bother with very old CPUs that don't do power
496 * management or any other stuff that might influence the TSC rate.
497 * This isn't currently relevant.
498 */
499static bool tmR3HasFixedTSC(void)
500{
501 if (ASMHasCpuId())
502 {
503 uint32_t uEAX, uEBX, uECX, uEDX;
504 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
505 if ( uEAX >= 1
506 && uEBX == 0x68747541
507 && uECX == 0x444d4163
508 && uEDX == 0x69746e65)
509 {
510 /*
511 * AuthenticAMD - Check for APM support and that TscInvariant is set.
512 *
513 * This test isn't correct with respect to fixed/non-fixed TSC and
514 * older models, but this isn't relevant since the result is currently
515 * only used for making a decision on AMD-V models.
516 */
517 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
518 if (uEAX >= 0x80000007)
519 {
520 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
521 if (uEDX & BIT(8) /* TscInvariant */)
522 return true;
523 }
524 }
525 else if ( uEAX >= 1
526 && uEBX == 0x756e6547
527 && uECX == 0x6c65746e
528 && uEDX == 0x49656e69)
529 {
530 /*
531 * GenuineIntel - Check the model number.
532 *
533 * This test is lacking in the same way and for the same reasons
534 * as the AMD test above.
535 */
536 ASMCpuId(1, &uEAX, &uEBX, &uECX, &uEDX);
537 unsigned uModel = (uEAX >> 4) & 0x0f;
538 unsigned uFamily = (uEAX >> 8) & 0x0f;
539 if (uFamily == 0x0f)
540 uFamily += (uEAX >> 20) & 0xff;
541 if (uFamily >= 0x06)
542 uModel += ((uEAX >> 16) & 0x0f) << 4;
543 if ( (uFamily == 0x0f /*P4*/ && uModel >= 0x03)
544 || (uFamily == 0x06 /*P2/P3*/ && uModel >= 0x0e))
545 return true;
546 }
547 }
548 return false;
549}
550
551
552/**
553 * Calibrate the CPU tick.
554 *
555 * @returns Number of ticks per second.
556 */
557static uint64_t tmR3CalibrateTSC(void)
558{
559 /*
560 * Use GIP when available.
561 */
562 uint64_t u64Hz;
563 PCSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
564 if ( pGip
565 && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC)
566 {
567 unsigned iCpu = pGip->u32Mode != SUPGIPMODE_ASYNC_TSC ? 0 : ASMGetApicId();
568 if (iCpu >= RT_ELEMENTS(pGip->aCPUs))
569 AssertReleaseMsgFailed(("iCpu=%d - the ApicId is too high. send VBox.log and hardware specs!\n", iCpu));
570 else
571 {
572 if (tmR3HasFixedTSC())
573 /* Sleep a bit to get a more reliable CpuHz value. */
574 RTThreadSleep(32);
575 else
576 {
577 /* Spin for 40ms to try to push up the CPU frequency and get a more reliable CpuHz value. */
578 const uint64_t u64 = RTTimeMilliTS();
579 while ((RTTimeMilliTS() - u64) < 40 /*ms*/)
580 /* nothing */;
581 }
582
583 pGip = g_pSUPGlobalInfoPage;
584 if ( pGip
585 && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC
586 && (u64Hz = pGip->aCPUs[iCpu].u64CpuHz)
587 && u64Hz != ~(uint64_t)0)
588 return u64Hz;
589 }
590 }
591
592 /* call this once first to make sure it's initialized. */
593 RTTimeNanoTS();
594
595 /*
596 * Yield the CPU to increase our chances of getting
597 * a correct value.
598 */
599 RTThreadYield(); /* Try to avoid interruptions between the TSC and NanoTS samplings. */
600 static const unsigned s_auSleep[5] = { 50, 30, 30, 40, 40 };
601 uint64_t au64Samples[5];
602 unsigned i;
603 for (i = 0; i < ELEMENTS(au64Samples); i++)
604 {
605 unsigned cMillies;
606 int cTries = 5;
607 uint64_t u64Start = ASMReadTSC();
608 uint64_t u64End;
609 uint64_t StartTS = RTTimeNanoTS();
610 uint64_t EndTS;
611 do
612 {
613 RTThreadSleep(s_auSleep[i]);
614 u64End = ASMReadTSC();
615 EndTS = RTTimeNanoTS();
616 cMillies = (unsigned)((EndTS - StartTS + 500000) / 1000000);
617 } while ( cMillies == 0 /* the sleep may be interrupted... */
618 || (cMillies < 20 && --cTries > 0));
619 uint64_t u64Diff = u64End - u64Start;
620
621 au64Samples[i] = (u64Diff * 1000) / cMillies;
622 AssertMsg(cTries > 0, ("cMillies=%d i=%d\n", cMillies, i));
623 }
624
625 /*
626 * Discard the highest and lowest results and calculate the average.
627 */
628 unsigned iHigh = 0;
629 unsigned iLow = 0;
630 for (i = 1; i < ELEMENTS(au64Samples); i++)
631 {
632 if (au64Samples[i] < au64Samples[iLow])
633 iLow = i;
634 if (au64Samples[i] > au64Samples[iHigh])
635 iHigh = i;
636 }
637 au64Samples[iLow] = 0;
638 au64Samples[iHigh] = 0;
639
640 u64Hz = au64Samples[0];
641 for (i = 1; i < ELEMENTS(au64Samples); i++)
642 u64Hz += au64Samples[i];
643 u64Hz /= ELEMENTS(au64Samples) - 2;
644
645 return u64Hz;
646}
647
648
649/**
650 * Applies relocations to data and code managed by this
651 * component. This function will be called at init and
652 * whenever the VMM needs to relocate itself inside the GC.
653 *
654 * @param pVM The VM.
655 * @param offDelta Relocation delta relative to old location.
656 */
657TMR3DECL(void) TMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
658{
659 LogFlow(("TMR3Relocate\n"));
660 pVM->tm.s.pvGIPGC = MMHyperR3ToGC(pVM, pVM->tm.s.pvGIPR3);
661 pVM->tm.s.paTimerQueuesGC = MMHyperR3ToGC(pVM, pVM->tm.s.paTimerQueuesR3);
662 pVM->tm.s.paTimerQueuesR0 = MMHyperR3ToR0(pVM, pVM->tm.s.paTimerQueuesR3);
663
664 /*
665 * Iterate the timers updating the pVMGC pointers.
666 */
667 for (PTMTIMER pTimer = pVM->tm.s.pCreated; pTimer; pTimer = pTimer->pBigNext)
668 {
669 pTimer->pVMGC = pVM->pVMGC;
670 pTimer->pVMR0 = (PVMR0)pVM->pVMHC; /// @todo pTimer->pVMR0 = pVM->pVMR0;
671 }
672}
673
674
675/**
676 * Terminates the TM.
677 *
678 * Termination means cleaning up and freeing all resources;
679 * the VM itself is at this point powered off or suspended.
680 *
681 * @returns VBox status code.
682 * @param pVM The VM to operate on.
683 */
684TMR3DECL(int) TMR3Term(PVM pVM)
685{
686 AssertMsg(pVM->tm.s.offVM, ("bad init order!\n"));
687 if (pVM->tm.s.pTimer)
688 {
689 int rc = RTTimerDestroy(pVM->tm.s.pTimer);
690 AssertRC(rc);
691 pVM->tm.s.pTimer = NULL;
692 }
693
694 return VINF_SUCCESS;
695}
696
697
698/**
699 * The VM is being reset.
700 *
701 * For the TM component this means that a rescheduling is performed and
702 * the FF is cleared, but without running the queues. We'll have to
703 * check whether this makes sense, but it seems like a good idea for now....
704 *
705 * @param pVM VM handle.
706 */
707TMR3DECL(void) TMR3Reset(PVM pVM)
708{
709 LogFlow(("TMR3Reset:\n"));
710 VM_ASSERT_EMT(pVM);
711
712 /*
713 * Process the queues.
714 */
715 for (int i = 0; i < TMCLOCK_MAX; i++)
716 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[i]);
717#ifdef VBOX_STRICT
718 tmTimerQueuesSanityChecks(pVM, "TMR3Reset");
719#endif
720 VM_FF_CLEAR(pVM, VM_FF_TIMER);
721}
722
723
724/**
725 * Resolve a builtin GC symbol.
726 * Called by PDM when loading or relocating GC modules.
727 *
728 * @returns VBox status
729 * @param pVM VM Handle.
730 * @param pszSymbol Symbol to resolve.
731 * @param pGCPtrValue Where to store the symbol value.
732 * @remark This has to work before TMR3Relocate() is called.
733 */
734TMR3DECL(int) TMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
735{
736 if (!strcmp(pszSymbol, "g_pSUPGlobalInfoPage"))
737 *pGCPtrValue = MMHyperHC2GC(pVM, &pVM->tm.s.pvGIPGC);
738 //else if (..)
739 else
740 return VERR_SYMBOL_NOT_FOUND;
741 return VINF_SUCCESS;
742}
743
744
745/**
746 * Execute state save operation.
747 *
748 * @returns VBox status code.
749 * @param pVM VM Handle.
750 * @param pSSM SSM operation handle.
751 */
752static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM)
753{
754 LogFlow(("tmR3Save:\n"));
755 Assert(!pVM->tm.s.fTSCTicking);
756 Assert(!pVM->tm.s.fVirtualTicking);
757 Assert(!pVM->tm.s.fVirtualSyncTicking);
758
759 /*
760 * Save the virtual clocks.
761 */
762 /* the virtual clock. */
763 SSMR3PutU64(pSSM, TMCLOCK_FREQ_VIRTUAL);
764 SSMR3PutU64(pSSM, pVM->tm.s.u64Virtual);
765
766 /* the virtual timer synchronous clock. */
767 SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSync);
768 SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSync);
769 SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSyncGivenUp);
770 SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSyncCatchUpPrev);
771 SSMR3PutBool(pSSM, pVM->tm.s.fVirtualSyncCatchUp);
772
773 /* real time clock */
774 SSMR3PutU64(pSSM, TMCLOCK_FREQ_REAL);
775
776 /* the cpu tick clock. */
777 SSMR3PutU64(pSSM, TMCpuTickGet(pVM));
778 return SSMR3PutU64(pSSM, pVM->tm.s.cTSCTicksPerSecond);
779}
780
781
782/**
783 * Execute state load operation.
784 *
785 * @returns VBox status code.
786 * @param pVM VM Handle.
787 * @param pSSM SSM operation handle.
788 * @param u32Version Data layout version.
789 */
790static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
791{
792 LogFlow(("tmR3Load:\n"));
793 Assert(!pVM->tm.s.fTSCTicking);
794 Assert(!pVM->tm.s.fVirtualTicking);
795 Assert(!pVM->tm.s.fVirtualSyncTicking);
796
797 /*
798 * Validate version.
799 */
800 if (u32Version != TM_SAVED_STATE_VERSION)
801 {
802 Log(("tmR3Load: Invalid version u32Version=%d!\n", u32Version));
803 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
804 }
805
806 /*
807 * Load the virtual clock.
808 */
809 pVM->tm.s.fVirtualTicking = false;
810 /* the virtual clock. */
811 uint64_t u64Hz;
812 int rc = SSMR3GetU64(pSSM, &u64Hz);
813 if (VBOX_FAILURE(rc))
814 return rc;
815 if (u64Hz != TMCLOCK_FREQ_VIRTUAL)
816 {
817 AssertMsgFailed(("The virtual clock frequency differs! Saved: %RU64 Binary: %RU64\n",
818 u64Hz, TMCLOCK_FREQ_VIRTUAL));
819 return VERR_SSM_VIRTUAL_CLOCK_HZ;
820 }
821 SSMR3GetU64(pSSM, &pVM->tm.s.u64Virtual);
822 pVM->tm.s.u64VirtualOffset = 0;
823
824 /* the virtual timer synchronous clock. */
825 pVM->tm.s.fVirtualSyncTicking = false;
826 uint64_t u64;
827 SSMR3GetU64(pSSM, &u64);
828 pVM->tm.s.u64VirtualSync = u64;
829 SSMR3GetU64(pSSM, &u64);
830 pVM->tm.s.offVirtualSync = u64;
831 SSMR3GetU64(pSSM, &u64);
832 pVM->tm.s.offVirtualSyncGivenUp = u64;
833 SSMR3GetU64(pSSM, &u64);
834 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
835 bool f;
836 SSMR3GetBool(pSSM, &f);
837 pVM->tm.s.fVirtualSyncCatchUp = f;
838
839 /* the real clock */
840 rc = SSMR3GetU64(pSSM, &u64Hz);
841 if (VBOX_FAILURE(rc))
842 return rc;
843 if (u64Hz != TMCLOCK_FREQ_REAL)
844 {
845 AssertMsgFailed(("The real clock frequency differs! Saved: %RU64 Binary: %RU64\n",
846 u64Hz, TMCLOCK_FREQ_REAL));
847 return VERR_SSM_VIRTUAL_CLOCK_HZ; /* misleading... */
848 }
849
850 /* the cpu tick clock. */
851 pVM->tm.s.fTSCTicking = false;
852 SSMR3GetU64(pSSM, &pVM->tm.s.u64TSC);
853 rc = SSMR3GetU64(pSSM, &u64Hz);
854 if (VBOX_FAILURE(rc))
855 return rc;
856 if (pVM->tm.s.fTSCUseRealTSC)
857 pVM->tm.s.u64TSCOffset = 0; /** @todo TSC restore stuff and HWACC. */
858 else
859 pVM->tm.s.cTSCTicksPerSecond = u64Hz;
860 LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool (state load)\n",
861 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC));
862
863 /*
864 * Make sure timers get rescheduled immediately.
865 */
866 VM_FF_SET(pVM, VM_FF_TIMER);
867
868 return VINF_SUCCESS;
869}
870
871
872/**
873 * Internal TMR3TimerCreate worker.
874 *
875 * @returns VBox status code.
876 * @param pVM The VM handle.
877 * @param enmClock The timer clock.
878 * @param pszDesc The timer description.
879 * @param ppTimer Where to store the timer pointer on success.
880 */
881static int tmr3TimerCreate(PVM pVM, TMCLOCK enmClock, const char *pszDesc, PPTMTIMERR3 ppTimer)
882{
883 VM_ASSERT_EMT(pVM);
884
885 /*
886 * Allocate the timer.
887 */
888 PTMTIMERHC pTimer = NULL;
889 if (pVM->tm.s.pFree && VM_IS_EMT(pVM))
890 {
891 pTimer = pVM->tm.s.pFree;
892 pVM->tm.s.pFree = pTimer->pBigNext;
893 Log3(("TM: Recycling timer %p, new free head %p.\n", pTimer, pTimer->pBigNext));
894 }
895
896 if (!pTimer)
897 {
898 int rc = MMHyperAlloc(pVM, sizeof(*pTimer), 0, MM_TAG_TM, (void **)&pTimer);
899 if (VBOX_FAILURE(rc))
900 return rc;
901 Log3(("TM: Allocated new timer %p\n", pTimer));
902 }
903
904 /*
905 * Initialize it.
906 */
907 pTimer->u64Expire = 0;
908 pTimer->enmClock = enmClock;
909 pTimer->pVMR3 = pVM;
910 pTimer->pVMR0 = pVM->pVMR0;
911 pTimer->pVMGC = pVM->pVMGC;
912 pTimer->enmState = TMTIMERSTATE_STOPPED;
913 pTimer->offScheduleNext = 0;
914 pTimer->offNext = 0;
915 pTimer->offPrev = 0;
916 pTimer->pszDesc = pszDesc;
917
918 /* insert into the list of created timers. */
919 pTimer->pBigPrev = NULL;
920 pTimer->pBigNext = pVM->tm.s.pCreated;
921 pVM->tm.s.pCreated = pTimer;
922 if (pTimer->pBigNext)
923 pTimer->pBigNext->pBigPrev = pTimer;
924#ifdef VBOX_STRICT
925 tmTimerQueuesSanityChecks(pVM, "tmR3TimerCreate");
926#endif
927
928 *ppTimer = pTimer;
929 return VINF_SUCCESS;
930}
931
932
933/**
934 * Creates a device timer.
935 *
936 * @returns VBox status.
937 * @param pVM The VM to create the timer in.
938 * @param pDevIns Device instance.
939 * @param enmClock The clock to use on this timer.
940 * @param pfnCallback Callback function.
941 * @param pszDesc Pointer to description string which must stay around
942 * until the timer is fully destroyed (i.e. a bit after TMTimerDestroy()).
943 * @param ppTimer Where to store the timer on success.
944 */
945TMR3DECL(int) TMR3TimerCreateDevice(PVM pVM, PPDMDEVINS pDevIns, TMCLOCK enmClock, PFNTMTIMERDEV pfnCallback, const char *pszDesc, PPTMTIMERHC ppTimer)
946{
947 /*
948 * Allocate and init stuff.
949 */
950 int rc = tmr3TimerCreate(pVM, enmClock, pszDesc, ppTimer);
951 if (VBOX_SUCCESS(rc))
952 {
953 (*ppTimer)->enmType = TMTIMERTYPE_DEV;
954 (*ppTimer)->u.Dev.pfnTimer = pfnCallback;
955 (*ppTimer)->u.Dev.pDevIns = pDevIns;
956 Log(("TM: Created device timer %p clock %d callback %p '%s'\n", (*ppTimer), enmClock, pfnCallback, pszDesc));
957 }
958
959 return rc;
960}
961
962
963/**
964 * Creates a driver timer.
965 *
966 * @returns VBox status.
967 * @param pVM The VM to create the timer in.
968 * @param pDrvIns Driver instance.
969 * @param enmClock The clock to use on this timer.
970 * @param pfnCallback Callback function.
971 * @param pszDesc Pointer to description string which must stay around
972 * until the timer is fully destroyed (i.e. a bit after TMTimerDestroy()).
973 * @param ppTimer Where to store the timer on success.
974 */
975TMR3DECL(int) TMR3TimerCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, TMCLOCK enmClock, PFNTMTIMERDRV pfnCallback, const char *pszDesc, PPTMTIMERHC ppTimer)
976{
977 /*
978 * Allocate and init stuff.
979 */
980 int rc = tmr3TimerCreate(pVM, enmClock, pszDesc, ppTimer);
981 if (VBOX_SUCCESS(rc))
982 {
983 (*ppTimer)->enmType = TMTIMERTYPE_DRV;
984 (*ppTimer)->u.Drv.pfnTimer = pfnCallback;
985 (*ppTimer)->u.Drv.pDrvIns = pDrvIns;
986 Log(("TM: Created device timer %p clock %d callback %p '%s'\n", (*ppTimer), enmClock, pfnCallback, pszDesc));
987 }
988
989 return rc;
990}
991
992
993/**
994 * Creates an internal timer.
995 *
996 * @returns VBox status.
997 * @param pVM The VM to create the timer in.
998 * @param enmClock The clock to use on this timer.
999 * @param pfnCallback Callback function.
1000 * @param pvUser User argument to be passed to the callback.
1001 * @param pszDesc Pointer to description string which must stay around
1002 * until the timer is fully destroyed (i.e. a bit after TMTimerDestroy()).
1003 * @param ppTimer Where to store the timer on success.
1004 */
1005TMR3DECL(int) TMR3TimerCreateInternal(PVM pVM, TMCLOCK enmClock, PFNTMTIMERINT pfnCallback, void *pvUser, const char *pszDesc, PPTMTIMERHC ppTimer)
1006{
1007 /*
1008 * Allocate and init stuff.
1009 */
1010 PTMTIMER pTimer;
1011 int rc = tmr3TimerCreate(pVM, enmClock, pszDesc, &pTimer);
1012 if (VBOX_SUCCESS(rc))
1013 {
1014 pTimer->enmType = TMTIMERTYPE_INTERNAL;
1015 pTimer->u.Internal.pfnTimer = pfnCallback;
1016 pTimer->u.Internal.pvUser = pvUser;
1017 *ppTimer = pTimer;
1018 Log(("TM: Created internal timer %p clock %d callback %p '%s'\n", pTimer, enmClock, pfnCallback, pszDesc));
1019 }
1020
1021 return rc;
1022}
1023
1024/**
1025 * Creates an external timer.
1026 *
1027 * @returns Timer handle on success.
1028 * @returns NULL on failure.
1029 * @param pVM The VM to create the timer in.
1030 * @param enmClock The clock to use on this timer.
1031 * @param pfnCallback Callback function.
1032 * @param pvUser User argument.
1033 * @param pszDesc Pointer to description string which must stay around
1034 * until the timer is fully destroyed (i.e. a bit after TMTimerDestroy()).
1035 */
1036TMR3DECL(PTMTIMERHC) TMR3TimerCreateExternal(PVM pVM, TMCLOCK enmClock, PFNTMTIMEREXT pfnCallback, void *pvUser, const char *pszDesc)
1037{
1038 /*
1039 * Allocate and init stuff.
1040 */
1041 PTMTIMERHC pTimer;
1042 int rc = tmr3TimerCreate(pVM, enmClock, pszDesc, &pTimer);
1043 if (VBOX_SUCCESS(rc))
1044 {
1045 pTimer->enmType = TMTIMERTYPE_EXTERNAL;
1046 pTimer->u.External.pfnTimer = pfnCallback;
1047 pTimer->u.External.pvUser = pvUser;
1048 Log(("TM: Created external timer %p clock %d callback %p '%s'\n", pTimer, enmClock, pfnCallback, pszDesc));
1049 return pTimer;
1050 }
1051
1052 return NULL;
1053}
1054
1055
1056/**
1057 * Destroy all timers owned by a device.
1058 *
1059 * @returns VBox status.
1060 * @param pVM VM handle.
1061 * @param pDevIns Device which timers should be destroyed.
1062 */
1063TMR3DECL(int) TMR3TimerDestroyDevice(PVM pVM, PPDMDEVINS pDevIns)
1064{
1065 LogFlow(("TMR3TimerDestroyDevice: pDevIns=%p\n", pDevIns));
1066 if (!pDevIns)
1067 return VERR_INVALID_PARAMETER;
1068
1069 PTMTIMER pCur = pVM->tm.s.pCreated;
1070 while (pCur)
1071 {
1072 PTMTIMER pDestroy = pCur;
1073 pCur = pDestroy->pBigNext;
1074 if ( pDestroy->enmType == TMTIMERTYPE_DEV
1075 && pDestroy->u.Dev.pDevIns == pDevIns)
1076 {
1077 int rc = TMTimerDestroy(pDestroy);
1078 AssertRC(rc);
1079 }
1080 }
1081 LogFlow(("TMR3TimerDestroyDevice: returns VINF_SUCCESS\n"));
1082 return VINF_SUCCESS;
1083}
1084
1085
1086/**
1087 * Destroy all timers owned by a driver.
1088 *
1089 * @returns VBox status.
1090 * @param pVM VM handle.
1091 * @param pDrvIns Driver which timers should be destroyed.
1092 */
1093TMR3DECL(int) TMR3TimerDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns)
1094{
1095 LogFlow(("TMR3TimerDestroyDriver: pDrvIns=%p\n", pDrvIns));
1096 if (!pDrvIns)
1097 return VERR_INVALID_PARAMETER;
1098
1099 PTMTIMER pCur = pVM->tm.s.pCreated;
1100 while (pCur)
1101 {
1102 PTMTIMER pDestroy = pCur;
1103 pCur = pDestroy->pBigNext;
1104 if ( pDestroy->enmType == TMTIMERTYPE_DRV
1105 && pDestroy->u.Drv.pDrvIns == pDrvIns)
1106 {
1107 int rc = TMTimerDestroy(pDestroy);
1108 AssertRC(rc);
1109 }
1110 }
1111 LogFlow(("TMR3TimerDestroyDriver: returns VINF_SUCCESS\n"));
1112 return VINF_SUCCESS;
1113}
1114
1115
1116/**
1117 * Checks if the given timer queue has one or more expired timers.
1118 *
1119 * @returns true / false.
1120 *
1121 * @param pVM The VM handle.
1122 * @param enmClock The queue.
1123 */
1124DECLINLINE(bool) tmR3HasExpiredTimer(PVM pVM, TMCLOCK enmClock)
1125{
1126 const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[enmClock].u64Expire;
1127 return u64Expire != INT64_MAX && u64Expire <= tmClock(pVM, enmClock);
1128}
1129
1130
1131/**
1132 * Checks for expired timers in all the queues.
1133 *
1134 * @returns true / false.
1135 * @param pVM The VM handle.
1136 */
1137DECLINLINE(bool) tmR3AnyExpiredTimers(PVM pVM)
1138{
1139 /*
1140 * Combine the time calculation for the first two since we're not on EMT;
1141 * TMVirtualSyncGet only permits EMT.
1142 */
1143 uint64_t u64Now = TMVirtualGet(pVM);
1144 if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64Now)
1145 return true;
1146 u64Now = pVM->tm.s.fVirtualSyncTicking
1147 ? u64Now - pVM->tm.s.offVirtualSync
1148 : pVM->tm.s.u64VirtualSync;
1149 if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64Now)
1150 return true;
1151
1152 /*
1153 * The remaining timers.
1154 */
1155 if (tmR3HasExpiredTimer(pVM, TMCLOCK_REAL))
1156 return true;
1157 if (tmR3HasExpiredTimer(pVM, TMCLOCK_TSC))
1158 return true;
1159 return false;
1160}
1161
1162
1163/**
1164 * Scheduling timer callback.
1165 *
1166 * @param pTimer Timer handle.
1167 * @param pvUser VM handle.
1168 * @thread Timer thread.
1169 *
1170 * @remark We cannot do the scheduling and queue running from a timer handler
1171 * since it's not executing in EMT, and even if it was it would be async
1172 * and we wouldn't know the state of affairs.
1173 * So, we'll just raise the timer FF and force any REM execution to exit.
1174 */
1175static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser)
1176{
1177 PVM pVM = (PVM)pvUser;
1178 AssertCompile(TMCLOCK_MAX == 4);
1179#ifdef DEBUG_Sander /* very annoying, keep it private. */
1180 if (VM_FF_ISSET(pVM, VM_FF_TIMER))
1181 Log(("tmR3TimerCallback: timer event still pending!!\n"));
1182#endif
1183 if ( !VM_FF_ISSET(pVM, VM_FF_TIMER)
1184 && ( pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule
1185 || pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule
1186 || pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].offSchedule
1187 || pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].offSchedule
1188 || tmR3AnyExpiredTimers(pVM)
1189 )
1190 && !VM_FF_ISSET(pVM, VM_FF_TIMER)
1191 )
1192 {
1193 VM_FF_SET(pVM, VM_FF_TIMER);
1194 REMR3NotifyTimerPending(pVM);
1195 VMR3NotifyFF(pVM, true);
1196 STAM_COUNTER_INC(&pVM->tm.s.StatTimerCallbackSetFF);
1197 }
1198}
1199
1200
1201/**
1202 * Schedules and runs any pending timers.
1203 *
1204 * This is normally called from a forced action handler in EMT.
1205 *
1206 * @param pVM The VM to run the timers for.
1207 */
1208TMR3DECL(void) TMR3TimerQueuesDo(PVM pVM)
1209{
1210 STAM_PROFILE_START(&pVM->tm.s.StatDoQueues, a);
1211 Log2(("TMR3TimerQueuesDo:\n"));
1212
1213 /*
1214 * Process the queues.
1215 */
1216 AssertCompile(TMCLOCK_MAX == 4);
1217
1218 /* TMCLOCK_VIRTUAL_SYNC */
1219 STAM_PROFILE_ADV_START(&pVM->tm.s.StatDoQueuesSchedule, s1);
1220 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC]);
1221 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s1);
1222 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesRun, r1);
1223 tmR3TimerQueueRunVirtualSync(pVM);
1224 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r1);
1225
1226 /* TMCLOCK_VIRTUAL */
1227 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s1);
1228 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
1229 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s2);
1230 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r1);
1231 tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
1232 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r2);
1233
1234#if 0 /** @todo if ever used, remove this and fix the stam prefixes on TMCLOCK_REAL below. */
1235 /* TMCLOCK_TSC */
1236 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s2);
1237 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
1238 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s3);
1239 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r2);
1240 tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
1241 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r3);
1242#endif
1243
1244 /* TMCLOCK_REAL */
1245 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s2);
1246 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL]);
1247 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesSchedule, s3);
1248 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r2);
1249 tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL]);
1250 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesRun, r3);
1251
1252 /* done. */
1253 VM_FF_CLEAR(pVM, VM_FF_TIMER);
1254
1255#ifdef VBOX_STRICT
1256 /* check that we didn't screwup. */
1257 tmTimerQueuesSanityChecks(pVM, "TMR3TimerQueuesDo");
1258#endif
1259
1260 Log2(("TMR3TimerQueuesDo: returns void\n"));
1261 STAM_PROFILE_STOP(&pVM->tm.s.StatDoQueues, a);
1262}
1263
1264
1265/**
1266 * Schedules and runs any pending timers in the specified queue.
1267 *
1268 * This is normally called from a forced action handler in EMT.
1269 *
1270 * @param pVM The VM to run the timers for.
1271 * @param pQueue The queue to run.
1272 */
1273static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue)
1274{
1275 VM_ASSERT_EMT(pVM);
1276
1277 /*
1278 * Run timers.
1279 *
1280 * We check the clock once and run all timers which are ACTIVE
1281 * and have an expire time less than or equal to the time we read.
1282 *
1283 * N.B. A generic unlink must be applied since other threads
1284 * are allowed to mess with any active timer at any time.
1285 * However, we only allow EMT to handle EXPIRED_PENDING
1286 * timers, thus enabling the timer handler function to
1287 * arm the timer again.
1288 */
1289 PTMTIMER pNext = TMTIMER_GET_HEAD(pQueue);
1290 if (!pNext)
1291 return;
1292 const uint64_t u64Now = tmClock(pVM, pQueue->enmClock);
1293 while (pNext && pNext->u64Expire <= u64Now)
1294 {
1295 PTMTIMER pTimer = pNext;
1296 pNext = TMTIMER_GET_NEXT(pTimer);
1297 Log2(("tmR3TimerQueueRun: pTimer=%p:{.enmState=%s, .enmClock=%d, .enmType=%d, u64Expire=%llx (now=%llx) .pszDesc=%s}\n",
1298 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
1299 bool fRc;
1300 TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
1301 if (fRc)
1302 {
1303 Assert(!pTimer->offScheduleNext); /* this can trigger falsely */
1304
1305 /* unlink */
1306 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1307 if (pPrev)
1308 TMTIMER_SET_NEXT(pPrev, pNext);
1309 else
1310 {
1311 TMTIMER_SET_HEAD(pQueue, pNext);
1312 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1313 }
1314 if (pNext)
1315 TMTIMER_SET_PREV(pNext, pPrev);
1316 pTimer->offNext = 0;
1317 pTimer->offPrev = 0;
1318
1319
1320 /* fire */
1321 switch (pTimer->enmType)
1322 {
1323 case TMTIMERTYPE_DEV: pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer); break;
1324 case TMTIMERTYPE_DRV: pTimer->u.Drv.pfnTimer(pTimer->u.Drv.pDrvIns, pTimer); break;
1325 case TMTIMERTYPE_INTERNAL: pTimer->u.Internal.pfnTimer(pVM, pTimer, pTimer->u.Internal.pvUser); break;
1326 case TMTIMERTYPE_EXTERNAL: pTimer->u.External.pfnTimer(pTimer->u.External.pvUser); break;
1327 default:
1328 AssertMsgFailed(("Invalid timer type %d (%s)\n", pTimer->enmType, pTimer->pszDesc));
1329 break;
1330 }
1331
1332 /* change the state if it wasn't changed already in the handler. */
1333 TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
1334 Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
1335 }
1336 } /* run loop */
1337}
1338
1339
1340/**
1341 * Schedules and runs any pending timers in the timer queue for the
1342 * synchronous virtual clock.
1343 *
1344 * This scheduling is a bit different from the other queues as it needs
1345 * to implement the special requirements of the timer synchronous virtual
1346 * clock, thus this second queue-run function.
1347 *
1348 * @param pVM The VM to run the timers for.
1349 */
1350static void tmR3TimerQueueRunVirtualSync(PVM pVM)
1351{
1352 PTMTIMERQUEUE const pQueue = &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC];
1353 VM_ASSERT_EMT(pVM);
1354
1355 /*
1356 * Any timers?
1357 */
1358 PTMTIMER pNext = TMTIMER_GET_HEAD(pQueue);
1359 if (RT_UNLIKELY(!pNext))
1360 {
1361 Assert(pVM->tm.s.fVirtualSyncTicking || !pVM->tm.s.fVirtualTicking);
1362 return;
1363 }
1364 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRun);
1365
1366 /*
1367 * Calculate the time frame for which we will dispatch timers.
1368 *
1369 * We use a time frame ranging from the current sync time (which is most likely the
1370 * same as the head timer) plus a configurable slack period (100000 ns), capped by the
1371 * current virtual time. This period might also need to be restricted by the catch-up
1372 * rate so frequent calls to this function won't accelerate the time too much; however,
1373 * this will be implemented at a later point if necessary.
1374 *
1375 * Without this frame we would 1) have to run timers much more frequently
1376 * and 2) lag behind at a steady rate.
1377 */
1378 const uint64_t u64VirtualNow = TMVirtualGetEx(pVM, false /* don't check timers */);
1379 uint64_t u64Now;
1380uint64_t off = 0, u64Delta = 0, u64Sub = 0; /* debugging - to be removed */
1381bool fWasInCatchup = false; /* debugging - to be removed */
1382bool fWasTicking = pVM->tm.s.fVirtualSyncTicking; /* debugging - to be removed*/
1383 if (!pVM->tm.s.fVirtualSyncTicking)
1384 {
1385 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStoppedAlready);
1386 u64Now = pVM->tm.s.u64VirtualSync;
1387 Assert(u64Now <= pNext->u64Expire);
1388 }
1389 else
1390 {
1391 /* Calc 'now'. (update order doesn't really matter here) */
1392 /*uint64_t*/ off = pVM->tm.s.offVirtualSync;
1393 if (pVM->tm.s.fVirtualSyncCatchUp)
1394 {
1395fWasInCatchup = pVM->tm.s.fVirtualSyncCatchUp; /* debugging - to be removed */
1396 /*uint64_t*/ u64Delta = u64VirtualNow - pVM->tm.s.u64VirtualSyncCatchUpPrev;
1397 if (RT_LIKELY(!(u64Delta >> 32)))
1398 {
1399 /*uint64_t*/ u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
1400 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
1401 {
1402 off -= u64Sub;
1403 Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
1404 }
1405 else
1406 {
1407 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
1408 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
1409 off = pVM->tm.s.offVirtualSyncGivenUp;
1410 Log4(("TM: %RU64/0: caught up (run)\n", u64VirtualNow));
1411 }
1412 }
1413 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
1414 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow;
1415 }
1416 u64Now = u64VirtualNow - off;
1417
1418 /* Check if stopped by expired timer. */
1419 if (u64Now >= pNext->u64Expire)
1420 {
1421 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop);
1422 u64Now = pNext->u64Expire;
1423 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64Now);
1424 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
1425 Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - pVM->tm.s.offVirtualSyncGivenUp));
1426
1427 }
1428 }
1429
1430 /* calc end of frame. */
1431 uint64_t u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
1432 if (u64Max > u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp)
1433 u64Max = u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp;
1434
1435 /* assert sanity */
1436if (RT_UNLIKELY( !(u64Now <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp)
1437 || !(u64Max <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp)
1438 || !(u64Now <= u64Max)))
1439{
1440 LogRel(("TM: Add the following to defect #1414:\n"
1441 " u64Now=%016RX64\n"
1442 " u64Max=%016RX64\n"
1443 " pNext->u64Expire=%016RX64\n"
1444 " u64VirtualSync=%016RX64\n"
1445 " u64VirtualNow=%016RX64\n"
1446 " off=%016RX64\n"
1447 " u64Delta=%016RX64\n"
1448 " u64Sub=%016RX64\n"
1449 " offVirtualSync=%016RX64\n"
1450 " offVirtualSyncGivenUp=%016RX64\n"
1451 " u64VirtualSyncCatchUpPrev=%016RX64\n"
1452 " u64VirtualSyncStoppedTS=%016RX64\n"
1453 "u32VirtualSyncCatchUpPercentage=%08RX32\n"
1454 " fVirtualSyncTicking=%RTbool (prev=%RTbool)\n"
1455 " fVirtualSyncCatchUp=%RTbool (prev=%RTbool)\n",
1456 u64Now,
1457 u64Max,
1458 pNext->u64Expire,
1459 pVM->tm.s.u64VirtualSync,
1460 u64VirtualNow,
1461 off,
1462 u64Delta,
1463 u64Sub,
1464 pVM->tm.s.offVirtualSync,
1465 pVM->tm.s.offVirtualSyncGivenUp,
1466 pVM->tm.s.u64VirtualSyncCatchUpPrev,
1467 pVM->tm.s.u64VirtualSyncStoppedTS,
1468 pVM->tm.s.u32VirtualSyncCatchUpPercentage,
1469 pVM->tm.s.fVirtualSyncTicking, fWasTicking,
1470 pVM->tm.s.fVirtualSyncCatchUp, fWasInCatchup));
1471 Assert(u64Now <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
1472 Assert(u64Max <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
1473 Assert(u64Now <= u64Max);
1474}
1475
1476 /*
1477 * Process the expired timers moving the clock along as we progress.
1478 */
1479#ifdef VBOX_STRICT
1480 uint64_t u64Prev = u64Now; NOREF(u64Prev);
1481#endif
1482 while (pNext && pNext->u64Expire <= u64Max)
1483 {
1484 PTMTIMER pTimer = pNext;
1485 pNext = TMTIMER_GET_NEXT(pTimer);
1486        Log2(("tmR3TimerQueueRunVirtualSync: pTimer=%p:{.enmState=%s, .enmClock=%d, .enmType=%d, u64Expire=%llx (now=%llx) .pszDesc=%s}\n",
1487 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
1488 bool fRc;
1489 TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
1490 if (fRc)
1491 {
1492 /* unlink */
1493 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1494 if (pPrev)
1495 TMTIMER_SET_NEXT(pPrev, pNext);
1496 else
1497 {
1498 TMTIMER_SET_HEAD(pQueue, pNext);
1499 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1500 }
1501 if (pNext)
1502 TMTIMER_SET_PREV(pNext, pPrev);
1503 pTimer->offNext = 0;
1504 pTimer->offPrev = 0;
1505
1506 /* advance the clock - don't permit timers to be out of order or armed in the 'past'. */
1507#ifdef VBOX_STRICT
1508 AssertMsg(pTimer->u64Expire >= u64Prev, ("%RU64 < %RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
1509 u64Prev = pTimer->u64Expire;
1510#endif
1511 ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncTicking, false);
1512 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, pTimer->u64Expire);
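            /* With the clock stopped and pinned to the expiration time, the callback below
               presumably reads exactly the time the timer was armed for. */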
1513
1514 /* fire */
1515 switch (pTimer->enmType)
1516 {
1517 case TMTIMERTYPE_DEV: pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer); break;
1518 case TMTIMERTYPE_DRV: pTimer->u.Drv.pfnTimer(pTimer->u.Drv.pDrvIns, pTimer); break;
1519 case TMTIMERTYPE_INTERNAL: pTimer->u.Internal.pfnTimer(pVM, pTimer, pTimer->u.Internal.pvUser); break;
1520 case TMTIMERTYPE_EXTERNAL: pTimer->u.External.pfnTimer(pTimer->u.External.pvUser); break;
1521 default:
1522 AssertMsgFailed(("Invalid timer type %d (%s)\n", pTimer->enmType, pTimer->pszDesc));
1523 break;
1524 }
1525
1526 /* change the state if it wasn't changed already in the handler. */
1527 TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
1528            Log2(("tmR3TimerQueueRunVirtualSync: new state %s\n", tmTimerState(pTimer->enmState)));
1529 }
1530 } /* run loop */
1531
1532 /*
1533 * Restart the clock if it was stopped to serve any timers,
1534 * and start/adjust catch-up if necessary.
1535 */
1536 if ( !pVM->tm.s.fVirtualSyncTicking
1537 && pVM->tm.s.fVirtualTicking)
1538 {
1539 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunRestart);
1540
1541 /* calc the slack we've handed out. */
1542 const uint64_t u64VirtualNow2 = TMVirtualGetEx(pVM, false /* don't check timers */);
1543 Assert(u64VirtualNow2 >= u64VirtualNow);
1544if (RT_UNLIKELY(u64VirtualNow2 < u64VirtualNow)) LogRel(("TM: u64VirtualNow2=%#RX64 < u64VirtualNow=%#RX64\n", u64VirtualNow2, u64VirtualNow)); /* debugging - to be removed. */
1545 AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%RU64 < %RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
1546 const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now;
1547if (RT_UNLIKELY(offSlack & BIT64(63))) LogRel(("TM: pVM->tm.s.u64VirtualSync=%#RX64 - u64Now=%#RX64 -> %#RX64\n", pVM->tm.s.u64VirtualSync, u64Now, offSlack)); /* debugging - to be removed. */
1548 STAM_STATS({
1549 if (offSlack)
1550 {
1551 PSTAMPROFILE p = &pVM->tm.s.StatVirtualSyncRunSlack;
1552 p->cPeriods++;
1553 p->cTicks += offSlack;
1554 if (p->cTicksMax < offSlack) p->cTicksMax = offSlack;
1555 if (p->cTicksMin > offSlack) p->cTicksMin = offSlack;
1556 }
1557 });
1558
1559 /* Let the time run a little bit while we were busy running timers(?). */
1560 uint64_t u64Elapsed;
1561#define MAX_ELAPSED 30000 /* ns */
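        /* In other words: credit at most MAX_ELAPSED ns of the host time that passed while
           the timers ran, reduced by the slack handed out above (and nothing at all if that
           slack alone already exceeds MAX_ELAPSED). */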
1562 if (offSlack > MAX_ELAPSED)
1563 u64Elapsed = 0;
1564 else
1565 {
1566 u64Elapsed = u64VirtualNow2 - u64VirtualNow;
1567 if (u64Elapsed > MAX_ELAPSED)
1568 u64Elapsed = MAX_ELAPSED;
1569 u64Elapsed = u64Elapsed > offSlack ? u64Elapsed - offSlack : 0;
1570 }
1571#undef MAX_ELAPSED
1572
1573 /* Calc the current offset. */
1574 uint64_t offNew = u64VirtualNow2 - pVM->tm.s.u64VirtualSync - u64Elapsed;
1575 Assert(!(offNew & RT_BIT_64(63)));
1576 uint64_t offLag = offNew - pVM->tm.s.offVirtualSyncGivenUp;
1577 Assert(!(offLag & RT_BIT_64(63)));
1578
1579 /*
1580 * Deal with starting, adjusting and stopping catchup.
1581 */
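        /* In short: once the lag is at or below the stop threshold we leave catch-up mode;
           while it stays at or below the give-up threshold the catch-up percentage is
           (re)selected from the configured period table; beyond that the remaining lag is
           written off as given up. */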
1582 if (pVM->tm.s.fVirtualSyncCatchUp)
1583 {
1584 if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpStopThreshold)
1585 {
1586 /* stop */
1587 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
1588 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
1589 Log4(("TM: %RU64/%RU64: caught up\n", u64VirtualNow2 - offNew, offLag));
1590 }
1591 else if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
1592 {
1593 /* adjust */
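                /* Pick the highest configured period whose start threshold the lag has reached. */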
1594 unsigned i = 0;
1595 while ( i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
1596 && offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
1597 i++;
1598 if (pVM->tm.s.u32VirtualSyncCatchUpPercentage < pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage)
1599 {
1600 STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
1601 ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
1602 Log4(("TM: %RU64/%RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
1603 }
1604 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2;
1605 }
1606 else
1607 {
1608 /* give up */
1609 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUp);
1610 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
1611 ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
1612 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
1613 Log4(("TM: %RU64/%RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
1614 LogRel(("TM: Giving up catch-up attempt at a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));
1615 }
1616 }
1617 else if (offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[0].u64Start)
1618 {
1619 if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
1620 {
1621 /* start */
1622 STAM_PROFILE_ADV_START(&pVM->tm.s.StatVirtualSyncCatchup, c);
1623 unsigned i = 0;
1624 while ( i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
1625 && offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
1626 i++;
1627 STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupInitial[i]);
1628 ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
1629 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
1630 Log4(("TM: %RU64/%RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
1631 }
1632 else
1633 {
1634 /* don't bother */
1635 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
1636 ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
1637 Log4(("TM: %RU64/%RU64: give up\n", u64VirtualNow2 - offNew, offLag));
1638                LogRel(("TM: Not bothering to attempt catching up a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));
1639 }
1640 }
1641
1642 /*
1643 * Update the offset and restart the clock.
1644 */
1645 Assert(!(offNew & RT_BIT_64(63)));
1646 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, offNew);
1647 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, true);
1648 }
1649}
1650
1651
1652/**
1653 * Saves the state of a timer to a saved state.
1654 *
1655 * @returns VBox status.
1656 * @param pTimer Timer to save.
1657 * @param pSSM Save State Manager handle.
1658 */
1659TMR3DECL(int) TMR3TimerSave(PTMTIMERHC pTimer, PSSMHANDLE pSSM)
1660{
1661 LogFlow(("TMR3TimerSave: pTimer=%p:{enmState=%s, .pszDesc={%s}} pSSM=%p\n", pTimer, tmTimerState(pTimer->enmState), pTimer->pszDesc, pSSM));
1662 switch (pTimer->enmState)
1663 {
1664 case TMTIMERSTATE_STOPPED:
1665 case TMTIMERSTATE_PENDING_STOP:
1666 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1667 return SSMR3PutU8(pSSM, (uint8_t)TMTIMERSTATE_PENDING_STOP);
1668
1669 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1670 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1671 AssertMsgFailed(("u64Expire is being updated! (%s)\n", pTimer->pszDesc));
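            /* Another thread is presumably in the middle of updating u64Expire; yield (or
               sleep briefly) to let it finish before falling through and reading the value. */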
1672 if (!RTThreadYield())
1673 RTThreadSleep(1);
1674 /* fall thru */
1675 case TMTIMERSTATE_ACTIVE:
1676 case TMTIMERSTATE_PENDING_SCHEDULE:
1677 case TMTIMERSTATE_PENDING_RESCHEDULE:
1678 SSMR3PutU8(pSSM, (uint8_t)TMTIMERSTATE_PENDING_SCHEDULE);
1679 return SSMR3PutU64(pSSM, pTimer->u64Expire);
1680
1681 case TMTIMERSTATE_EXPIRED:
1682 case TMTIMERSTATE_PENDING_DESTROY:
1683 case TMTIMERSTATE_PENDING_STOP_DESTROY:
1684 case TMTIMERSTATE_FREE:
1685 AssertMsgFailed(("Invalid timer state %d %s (%s)\n", pTimer->enmState, tmTimerState(pTimer->enmState), pTimer->pszDesc));
1686 return SSMR3HandleSetStatus(pSSM, VERR_TM_INVALID_STATE);
1687 }
1688
1689 AssertMsgFailed(("Unknown timer state %d (%s)\n", pTimer->enmState, pTimer->pszDesc));
1690 return SSMR3HandleSetStatus(pSSM, VERR_TM_UNKNOWN_STATE);
1691}
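
/* Saved-state layout produced by TMR3TimerSave above: a single uint8_t pending state,
 * followed by a uint64_t expiration time only when that state is
 * TMTIMERSTATE_PENDING_SCHEDULE. TMR3TimerLoad below consumes exactly this layout. */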
1692
1693
1694/**
1695 * Loads the state of a timer from a saved state.
1696 *
1697 * @returns VBox status.
1698 * @param pTimer Timer to restore.
1699 * @param pSSM Save State Manager handle.
1700 */
1701TMR3DECL(int) TMR3TimerLoad(PTMTIMERHC pTimer, PSSMHANDLE pSSM)
1702{
1703 Assert(pTimer); Assert(pSSM); VM_ASSERT_EMT(pTimer->pVMR3);
1704 LogFlow(("TMR3TimerLoad: pTimer=%p:{enmState=%s, .pszDesc={%s}} pSSM=%p\n", pTimer, tmTimerState(pTimer->enmState), pTimer->pszDesc, pSSM));
1705
1706 /*
1707 * Load the state and validate it.
1708 */
1709 uint8_t u8State;
1710 int rc = SSMR3GetU8(pSSM, &u8State);
1711 if (VBOX_FAILURE(rc))
1712 return rc;
1713 TMTIMERSTATE enmState = (TMTIMERSTATE)u8State;
1714 if ( enmState != TMTIMERSTATE_PENDING_STOP
1715 && enmState != TMTIMERSTATE_PENDING_SCHEDULE
1716 && enmState != TMTIMERSTATE_PENDING_STOP_SCHEDULE)
1717 {
1718 AssertMsgFailed(("enmState=%d %s\n", enmState, tmTimerState(enmState)));
1719 return SSMR3HandleSetStatus(pSSM, VERR_TM_LOAD_STATE);
1720 }
1721
1722 if (enmState == TMTIMERSTATE_PENDING_SCHEDULE)
1723 {
1724 /*
1725 * Load the expire time.
1726 */
1727 uint64_t u64Expire;
1728 rc = SSMR3GetU64(pSSM, &u64Expire);
1729 if (VBOX_FAILURE(rc))
1730 return rc;
1731
1732 /*
1733 * Set it.
1734 */
1735 Log(("enmState=%d %s u64Expire=%llu\n", enmState, tmTimerState(enmState), u64Expire));
1736 rc = TMTimerSet(pTimer, u64Expire);
1737 }
1738 else
1739 {
1740 /*
1741 * Stop it.
1742 */
1743 Log(("enmState=%d %s\n", enmState, tmTimerState(enmState)));
1744 rc = TMTimerStop(pTimer);
1745 }
1746
1747 /*
1748 * On failure set SSM status.
1749 */
1750 if (VBOX_FAILURE(rc))
1751 rc = SSMR3HandleSetStatus(pSSM, rc);
1752 return rc;
1753}
1754
1755
1756/**
1757 * Get the real world UTC time adjusted for VM lag.
1758 *
1759 * @returns pTime.
1760 * @param pVM The VM instance.
1761 * @param pTime Where to store the time.
1762 */
1763TMR3DECL(PRTTIMESPEC) TMR3UTCNow(PVM pVM, PRTTIMESPEC pTime)
1764{
1765 RTTimeNow(pTime);
1766 RTTimeSpecSubNano(pTime, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp);
1767 RTTimeSpecAddNano(pTime, pVM->tm.s.offUTC);
1768 return pTime;
1769}
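
/* Usage sketch (hypothetical caller, not part of this file): the returned value is the host
 * UTC time minus the virtual sync lag we still intend to catch up, plus the configured UTC
 * offset, presumably so the guest-visible wall clock does not jump while timers are lagging.
 *
 *     RTTIMESPEC Time;
 *     TMR3UTCNow(pVM, &Time);
 *     int64_t i64NanoTS = RTTimeSpecGetNano(&Time);
 */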
1770
1771
1772/**
1773 * Display all timers.
1774 *
1775 * @param pVM VM Handle.
1776 * @param pHlp The info helpers.
1777 * @param pszArgs Arguments, ignored.
1778 */
1779static DECLCALLBACK(void) tmR3TimerInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1780{
1781 NOREF(pszArgs);
1782 pHlp->pfnPrintf(pHlp,
1783 "Timers (pVM=%p)\n"
1784 "%.*s %.*s %.*s %.*s Clock %-18s %-18s %-25s Description\n",
1785 pVM,
1786 sizeof(RTR3PTR) * 2, "pTimerR3 ",
1787 sizeof(int32_t) * 2, "offNext ",
1788 sizeof(int32_t) * 2, "offPrev ",
1789 sizeof(int32_t) * 2, "offSched ",
1790 "Time",
1791 "Expire",
1792 "State");
1793 for (PTMTIMERHC pTimer = pVM->tm.s.pCreated; pTimer; pTimer = pTimer->pBigNext)
1794 {
1795 pHlp->pfnPrintf(pHlp,
1796 "%p %08RX32 %08RX32 %08RX32 %s %18RU64 %18RU64 %-25s %s\n",
1797 pTimer,
1798 pTimer->offNext,
1799 pTimer->offPrev,
1800 pTimer->offScheduleNext,
1801 pTimer->enmClock == TMCLOCK_REAL ? "Real " : "Virt ",
1802 TMTimerGet(pTimer),
1803 pTimer->u64Expire,
1804 tmTimerState(pTimer->enmState),
1805 pTimer->pszDesc);
1806 }
1807}
1808
1809
1810/**
1811 * Display all active timers.
1812 *
1813 * @param pVM VM Handle.
1814 * @param pHlp The info helpers.
1815 * @param pszArgs Arguments, ignored.
1816 */
1817static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1818{
1819 NOREF(pszArgs);
1820 pHlp->pfnPrintf(pHlp,
1821 "Active Timers (pVM=%p)\n"
1822 "%.*s %.*s %.*s %.*s Clock %-18s %-18s %-25s Description\n",
1823 pVM,
1824 sizeof(RTR3PTR) * 2, "pTimerR3 ",
1825 sizeof(int32_t) * 2, "offNext ",
1826 sizeof(int32_t) * 2, "offPrev ",
1827 sizeof(int32_t) * 2, "offSched ",
1828 "Time",
1829 "Expire",
1830 "State");
1831 for (unsigned iQueue = 0; iQueue < TMCLOCK_MAX; iQueue++)
1832 {
1833 for (PTMTIMERHC pTimer = TMTIMER_GET_HEAD(&pVM->tm.s.paTimerQueuesR3[iQueue]);
1834 pTimer;
1835 pTimer = TMTIMER_GET_NEXT(pTimer))
1836 {
1837 pHlp->pfnPrintf(pHlp,
1838 "%p %08RX32 %08RX32 %08RX32 %s %18RU64 %18RU64 %-25s %s\n",
1839 pTimer,
1840 pTimer->offNext,
1841 pTimer->offPrev,
1842 pTimer->offScheduleNext,
1843 pTimer->enmClock == TMCLOCK_REAL
1844 ? "Real "
1845 : pTimer->enmClock == TMCLOCK_VIRTUAL
1846 ? "Virt "
1847 : pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC
1848 ? "VrSy "
1849 : "TSC ",
1850 TMTimerGet(pTimer),
1851 pTimer->u64Expire,
1852 tmTimerState(pTimer->enmState),
1853 pTimer->pszDesc);
1854 }
1855 }
1856}
1857
1858
1859/**
1860 * Display all clocks.
1861 *
1862 * @param pVM VM Handle.
1863 * @param pHlp The info helpers.
1864 * @param pszArgs Arguments, ignored.
1865 */
1866static DECLCALLBACK(void) tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1867{
1868 NOREF(pszArgs);
1869
1870 /*
1871 * Read the times first to avoid more than necessary time variation.
1872     * Read the times first to avoid unnecessary time variation.
1873 const uint64_t u64TSC = TMCpuTickGet(pVM);
1874 const uint64_t u64Virtual = TMVirtualGet(pVM);
1875 const uint64_t u64VirtualSync = TMVirtualSyncGet(pVM);
1876 const uint64_t u64Real = TMRealGet(pVM);
1877
1878 /*
1879 * TSC
1880 */
1881 pHlp->pfnPrintf(pHlp,
1882 "Cpu Tick: %18RU64 (%#016RX64) %RU64Hz %s%s",
1883 u64TSC, u64TSC, TMCpuTicksPerSecond(pVM),
1884 pVM->tm.s.fTSCTicking ? "ticking" : "paused",
1885 pVM->tm.s.fTSCVirtualized ? " - virtualized" : "");
1886 if (pVM->tm.s.fTSCUseRealTSC)
1887 {
1888 pHlp->pfnPrintf(pHlp, " - real tsc");
1889 if (pVM->tm.s.u64TSCOffset)
1890 pHlp->pfnPrintf(pHlp, "\n offset %RU64", pVM->tm.s.u64TSCOffset);
1891 }
1892 else
1893 pHlp->pfnPrintf(pHlp, " - virtual clock");
1894 pHlp->pfnPrintf(pHlp, "\n");
1895
1896 /*
1897 * virtual
1898 */
1899 pHlp->pfnPrintf(pHlp,
1900 " Virtual: %18RU64 (%#016RX64) %RU64Hz %s",
1901 u64Virtual, u64Virtual, TMVirtualGetFreq(pVM),
1902 pVM->tm.s.fVirtualTicking ? "ticking" : "paused");
1903 if (pVM->tm.s.fVirtualWarpDrive)
1904 pHlp->pfnPrintf(pHlp, " WarpDrive %RU32 %%", pVM->tm.s.u32VirtualWarpDrivePercentage);
1905 pHlp->pfnPrintf(pHlp, "\n");
1906
1907 /*
1908 * virtual sync
1909 */
1910 pHlp->pfnPrintf(pHlp,
1911 "VirtSync: %18RU64 (%#016RX64) %s%s",
1912 u64VirtualSync, u64VirtualSync,
1913 pVM->tm.s.fVirtualSyncTicking ? "ticking" : "paused",
1914 pVM->tm.s.fVirtualSyncCatchUp ? " - catchup" : "");
1915 if (pVM->tm.s.offVirtualSync)
1916 {
1917 pHlp->pfnPrintf(pHlp, "\n offset %RU64", pVM->tm.s.offVirtualSync);
1918 if (pVM->tm.s.u32VirtualSyncCatchUpPercentage)
1919 pHlp->pfnPrintf(pHlp, " catch-up rate %u %%", pVM->tm.s.u32VirtualSyncCatchUpPercentage);
1920 }
1921 pHlp->pfnPrintf(pHlp, "\n");
1922
1923 /*
1924 * real
1925 */
1926 pHlp->pfnPrintf(pHlp,
1927 " Real: %18RU64 (%#016RX64) %RU64Hz\n",
1928 u64Real, u64Real, TMRealGetFreq(pVM));
1929}
1930