VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp@ 52343

Last change on this file since 52343 was 51959, checked in by vboxsync, 11 years ago

TM: Set pVM->tm.s.u64LastPausedTSC to the highest pVCpu->tm.s.u64TSC value on saved state restore. Cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 17.4 KB
Line 
1/* $Id: TMAllCpu.cpp 51959 2014-07-09 15:18:00Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, CPU Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/vmm/tm.h>
24#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
25#include "TMInternal.h"
26#include <VBox/vmm/vm.h>
27#include <VBox/vmm/gim.h>
28#include <VBox/sup.h>
29
30#include <VBox/param.h>
31#include <VBox/err.h>
32#include <iprt/asm-math.h>
33#include <iprt/assert.h>
34#include <VBox/log.h>
35
36
/**
 * Gets the raw cpu tick from current virtual time.
 *
 * Reads the virtual sync clock (nanoseconds, TMCLOCK_FREQ_VIRTUAL) and
 * rescales it to virtual TSC ticks using cTSCTicksPerSecond.
 *
 * @returns The current virtual-time based TSC value.
 * @param   pVM             Pointer to the VM.
 * @param   fCheckTimers    Whether reading the clock may also check for and
 *                          schedule pending timers (TMVirtualSyncGet vs.
 *                          TMVirtualSyncGetNoCheck).
 */
DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVM pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (fCheckTimers)
        u64 = TMVirtualSyncGet(pVM);
    else
        u64 = TMVirtualSyncGetNoCheck(pVM);
    /* NOTE(review): the purpose of the test below is unclear (the original
       author flagged it too).  Skipping the conversion when
       u64 == TMCLOCK_FREQ_VIRTUAL only yields the same result when
       cTSCTicksPerSecond == TMCLOCK_FREQ_VIRTUAL — TODO confirm intent. */
    if (u64 != TMCLOCK_FREQ_VIRTUAL) /* what's the use of this test, document! */
        u64 = ASMMultU64ByU32DivByU32(u64, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
    return u64;
}
51
52
53/**
54 * Resumes the CPU timestamp counter ticking.
55 *
56 * @returns VBox status code.
57 * @param pVM Pointer to the VM.
58 * @param pVCpu Pointer to the VMCPU.
59 * @internal
60 */
61int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
62{
63 if (!pVCpu->tm.s.fTSCTicking)
64 {
65 pVCpu->tm.s.fTSCTicking = true;
66 if (pVM->tm.s.fTSCVirtualized)
67 {
68 /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
69 * unpaused before the virtual time and stopped after it. */
70 if (pVM->tm.s.fTSCUseRealTSC)
71 pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVCpu->tm.s.u64TSC;
72 else
73 pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
74 - pVCpu->tm.s.u64TSC;
75 }
76 return VINF_SUCCESS;
77 }
78 AssertFailed();
79 return VERR_TM_TSC_ALREADY_TICKING;
80}
81
82
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * Locked variant used when the VCPUs are resumed as a group: it maintains the
 * global count of ticking TSCs (cTSCsTicking).  The first VCPU to resume
 * rebases its offset on u64LastPausedTSC (the highest TSC seen when pausing)
 * and publishes the delta in offTSCPause so the remaining VCPUs can apply the
 * same shift, keeping the guest TSCs in sync across the pause/resume cycle.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VCPU.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        if (pVM->tm.s.fTSCVirtualized)
        {
            uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
            AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
            if (c == 1)
            {
                /* The first VCPU to resume. */
                uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

                STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

                /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
                if (pVM->tm.s.fTSCUseRealTSC)
                    pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVM->tm.s.u64LastPausedTSC;
                else
                    pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                             - pVM->tm.s.u64LastPausedTSC;

                /* Calculate the offset for other VCPUs to use. */
                pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
            }
            else
            {
                /* All other VCPUs (if any). */
                pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
            }
        }
    }
    return VINF_SUCCESS;
}
126
127
128/**
129 * Pauses the CPU timestamp counter ticking.
130 *
131 * @returns VBox status code.
132 * @param pVCpu Pointer to the VMCPU.
133 * @internal
134 */
135int tmCpuTickPause(PVMCPU pVCpu)
136{
137 if (pVCpu->tm.s.fTSCTicking)
138 {
139 pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
140 pVCpu->tm.s.fTSCTicking = false;
141 return VINF_SUCCESS;
142 }
143 AssertFailed();
144 return VERR_TM_TSC_ALREADY_PAUSED;
145}
146
147
/**
 * Pauses the CPU timestamp counter ticking.
 *
 * Locked variant used when the VCPUs are paused as a group: it maintains the
 * global count of ticking TSCs (cTSCsTicking) and, when the last VCPU stops,
 * records its TSC in u64LastPausedTSC so a later resume can rebase without
 * the guest TSC appearing to go backwards.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_TM_TSC_ALREADY_PAUSED or
 *          VERR_TM_VIRTUAL_TICKING_IPE, the failures asserted).
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @internal
 */
int tmCpuTickPauseLocked(PVM pVM, PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        /* Snapshot the tick while still ticking, then stop. */
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;

        uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 0)
        {
            /* When the last TSC stops, remember the value. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCPause);
            pVM->tm.s.u64LastPausedTSC = pVCpu->tm.s.u64TSC;
        }
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}
176
177
178/**
179 * Record why we refused to use offsetted TSC.
180 *
181 * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.
182 *
183 * @param pVM Pointer to the VM.
184 * @param pVCpu The current CPU.
185 */
186DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
187{
188
189 /* Sample the reason for refusing. */
190 if (!pVM->tm.s.fMaybeUseOffsettedHostTSC)
191 STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
192 else if (!pVCpu->tm.s.fTSCTicking)
193 STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
194 else if (!pVM->tm.s.fTSCUseRealTSC)
195 {
196 if (pVM->tm.s.fVirtualSyncCatchUp)
197 {
198 if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
199 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
200 else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
201 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
202 else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
203 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
204 else
205 STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
206 }
207 else if (!pVM->tm.s.fVirtualSyncTicking)
208 STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
209 else if (pVM->tm.s.fVirtualWarpDrive)
210 STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
211 }
212}
213
214
215/**
216 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
217 *
218 * @returns true/false accordingly.
219 * @param pVCpu Pointer to the VMCPU.
220 * @param poffRealTSC The offset against the TSC of the current CPU.
221 * Can be NULL.
222 * @param pfParavirtTsc Where to store whether paravirt. TSC can be used or
223 * not.
224 * @thread EMT(pVCpu).
225 */
226VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc)
227{
228 PVM pVM = pVCpu->CTX_SUFF(pVM);
229 bool fParavirtTsc = false;
230
231 /*
232 * We require:
233 * 1. Use of a paravirtualized TSC is enabled by the guest.
234 * (OR)
235 * 1. A fixed TSC, this is checked at init time.
236 * 2. That the TSC is ticking (we shouldn't be here if it isn't)
237 * 3. Either that we're using the real TSC as time source or
238 * a) we don't have any lag to catch up, and
239 * b) the virtual sync clock hasn't been halted by an expired timer, and
240 * c) we're not using warp drive (accelerated virtual guest time).
241 */
242 if ( (*pfParavirtTsc = GIMIsParavirtTscEnabled(pVM)) == true
243 || ( pVM->tm.s.fMaybeUseOffsettedHostTSC
244 && RT_LIKELY(pVCpu->tm.s.fTSCTicking)
245 && ( pVM->tm.s.fTSCUseRealTSC
246 || ( !pVM->tm.s.fVirtualSyncCatchUp
247 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
248 && !pVM->tm.s.fVirtualWarpDrive))))
249 {
250 if (!pVM->tm.s.fTSCUseRealTSC)
251 {
252 /* The source is the timer synchronous virtual clock. */
253 Assert(pVM->tm.s.fTSCVirtualized);
254
255 if (poffRealTSC)
256 {
257 uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
258 - pVCpu->tm.s.offTSCRawSrc;
259 /** @todo When we start collecting statistics on how much time we spend executing
260 * guest code before exiting, we should check this against the next virtual sync
261 * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
262 * the chance that we'll get interrupted right after the timer expired. */
263 *poffRealTSC = u64Now - ASMReadTSC();
264 }
265 }
266 else if (poffRealTSC)
267 {
268 /* The source is the real TSC. */
269 if (pVM->tm.s.fTSCVirtualized)
270 *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
271 else
272 *poffRealTSC = 0;
273 }
274 /** @todo count this? */
275 return true;
276 }
277
278#ifdef VBOX_WITH_STATISTICS
279 tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
280#endif
281 return false;
282}
283
284
285/**
286 * Calculates the number of host CPU ticks till the next virtual sync deadline.
287 *
288 * @note To save work, this function will not bother calculating the accurate
289 * tick count for deadlines that are more than a second ahead.
290 *
291 * @returns The number of host cpu ticks to the next deadline. Max one second.
292 * @param cNsToDeadline The number of nano seconds to the next virtual
293 * sync deadline.
294 */
295DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(uint64_t cNsToDeadline)
296{
297 AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
298 if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
299 return SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
300 uint64_t cTicks = ASMMultU64ByU32DivByU32(SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage),
301 cNsToDeadline,
302 TMCLOCK_FREQ_VIRTUAL);
303 if (cTicks > 4000)
304 cTicks -= 4000; /* fudge to account for overhead */
305 else
306 cTicks >>= 1;
307 return cTicks;
308}
309
310
/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVCpu           The current CPU.
 * @param   pfOffsettedTsc  Where to store whether an offsetted (or paravirt.)
 *                          TSC may be used; mirrors TMCpuTickCanUseRealTSC's
 *                          return value.
 * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
 *                          not.
 * @param   poffRealTSC     The offset against the TSC of the current CPU.
 *
 * @thread  EMT(pVCpu).
 * @remarks Superset of TMCpuTickCanUseRealTSC().
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc,
                                                        uint64_t *poffRealTSC)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    uint64_t cTicksToDeadline;

    /*
     * We require:
     *     1. Use of a paravirtualized TSC is enabled by the guest.
     *     (OR)
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   (*pfParavirtTsc = GIMIsParavirtTscEnabled(pVM)) == true
        || (   pVM->tm.s.fMaybeUseOffsettedHostTSC
            && RT_LIKELY(pVCpu->tm.s.fTSCTicking)
            && (   pVM->tm.s.fTSCUseRealTSC
                || (   !pVM->tm.s.fVirtualSyncCatchUp
                    && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                    && !pVM->tm.s.fVirtualWarpDrive))))
    {
        *pfOffsettedTsc = true;
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);

            /* Read clock and deadline in one call, then convert ns -> TSC ticks
               (same scaling as tmCpuTickGetRawVirtual). */
            uint64_t cNsToDeadline;
            uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
            /* NOTE(review): the != TMCLOCK_FREQ_VIRTUAL test mirrors the one in
               tmCpuTickGetRawVirtual; its purpose is equally unclear there. */
            uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
                            ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                            : u64NowVirtSync;
            u64Now -= pVCpu->tm.s.offTSCRawSrc;
            *poffRealTSC = u64Now - ASMReadTSC();
            cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
        }
        else
        {
            /* The source is the real TSC. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
            cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
        }
    }
    else
    {
#ifdef VBOX_WITH_STATISTICS
        tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
        *pfOffsettedTsc = false;
        *poffRealTSC = 0;
        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    }

    return cTicksToDeadline;
}
386
387
/**
 * Read the current CPU timestamp counter.
 *
 * Worker for TMCpuTickGet / TMCpuTickGetNoCheck.  Picks the raw source (real
 * TSC or virtual sync clock) per the TM configuration, applies the per-VCPU
 * offset, and enforces that the value never goes below what the guest has
 * already seen (u64TSCLastSeen).
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   fCheckTimers    Whether reading the virtual clock may check for
 *                          (and schedule) pending timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.fTSCVirtualized)
        {
            if (pVM->tm.s.fTSCUseRealTSC)
                u64 = ASMReadTSC();
            else
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
            u64 -= pVCpu->tm.s.offTSCRawSrc;
        }
        else
            u64 = ASMReadTSC();

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            /* Underflow: nudge the last-seen value forward so time still advances. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /* @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        /* Paused: report the frozen snapshot taken at pause time. */
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
426
427
/**
 * Read the current CPU timestamp counter.
 *
 * This variant may check for and schedule pending timers while reading the
 * virtual clock; see TMCpuTickGetNoCheck for the non-checking variant.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMDECL(uint64_t) TMCpuTickGet(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, true /* fCheckTimers */);
}
438
439
/**
 * Read the current CPU timestamp counter, don't check for expired timers.
 *
 * Same as TMCpuTickGet except the virtual clock is read without the pending
 * timer check.
 *
 * @returns Gets the CPU tsc.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, false /* fCheckTimers */);
}
450
451
452/**
453 * Sets the current CPU timestamp counter.
454 *
455 * @returns VBox status code.
456 * @param pVM Pointer to the VM.
457 * @param pVCpu Pointer to the VMCPU.
458 * @param u64Tick The new timestamp value.
459 *
460 * @thread EMT which TSC is to be set.
461 */
462VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick)
463{
464 VMCPU_ASSERT_EMT(pVCpu);
465 STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);
466
467 /*
468 * This is easier to do when the TSC is paused since resume will
469 * do all the calculations for us. Actually, we don't need to
470 * call tmCpuTickPause here since we overwrite u64TSC anyway.
471 */
472 bool fTSCTicking = pVCpu->tm.s.fTSCTicking;
473 pVCpu->tm.s.fTSCTicking = false;
474 pVCpu->tm.s.u64TSC = u64Tick;
475 pVCpu->tm.s.u64TSCLastSeen = u64Tick;
476 if (fTSCTicking)
477 tmCpuTickResume(pVM, pVCpu);
478 /** @todo Try help synchronizing it better among the virtual CPUs? */
479
480 return VINF_SUCCESS;
481}
482
483/**
484 * Sets the last seen CPU timestamp counter.
485 *
486 * @returns VBox status code.
487 * @param pVCpu Pointer to the VMCPU.
488 * @param u64LastSeenTick The last seen timestamp value.
489 *
490 * @thread EMT which TSC is to be set.
491 */
492VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick)
493{
494 VMCPU_ASSERT_EMT(pVCpu);
495
496 LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
497 if (pVCpu->tm.s.u64TSCLastSeen < u64LastSeenTick)
498 pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
499 return VINF_SUCCESS;
500}
501
/**
 * Gets the last seen CPU timestamp counter of the guest.
 *
 * @returns the last seen TSC.
 * @param   pVCpu   Pointer to the VMCPU.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    return pVCpu->tm.s.u64TSCLastSeen;
}
516
517
518/**
519 * Get the timestamp frequency.
520 *
521 * @returns Number of ticks per second.
522 * @param pVM The VM.
523 */
524VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
525{
526 if (pVM->tm.s.fTSCUseRealTSC)
527 {
528 uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
529 if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
530 return cTSCTicksPerSecond;
531 }
532 return pVM->tm.s.cTSCTicksPerSecond;
533}
534
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle · Support · Privacy / Do Not Sell My Info · Terms of Use · Trademark Policy · Automated Access Etiquette