VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp@51686

Last change on this file since 51686 was 51643, checked in by vboxsync, 11 years ago

VMM/GIM: More bits for Hyper-V implementation.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 14.9 KB
/* $Id: TMAllCpu.cpp 51643 2014-06-18 11:06:06Z vboxsync $ */
/** @file
 * TM - Timeout Manager, CPU Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/vmm/tm.h>
#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
#include "TMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/gim.h>
#include <VBox/sup.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <VBox/log.h>


/**
 * Gets the raw CPU tick from the current virtual time.
 */
DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVM pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (fCheckTimers)
        u64 = TMVirtualSyncGet(pVM);
    else
        u64 = TMVirtualSyncGetNoCheck(pVM);
    if (u64 != TMCLOCK_FREQ_VIRTUAL) /* what's the use of this test, document! */
        u64 = ASMMultU64ByU32DivByU32(u64, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
    return u64;
}


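/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * scaling above converts the virtual sync clock, which counts nanoseconds
 * (TMCLOCK_FREQ_VIRTUAL), into guest TSC ticks at cTSCTicksPerSecond.  The
 * standalone snippet below mirrors that arithmetic with plain standard C
 * types; the overflow-safe ASMMultU64ByU32DivByU32 is approximated with
 * unsigned __int128, which assumes a GCC/Clang style compiler.
 */
#if 0 /* illustration only, never compiled */
#include <stdint.h>
#include <stdio.h>

static uint64_t exampleVirtualToTsc(uint64_t cNsVirtual, uint32_t cTscTicksPerSec, uint32_t cVirtFreq)
{
    /* Same formula as ASMMultU64ByU32DivByU32(u64, cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL). */
    return (uint64_t)(((unsigned __int128)cNsVirtual * cTscTicksPerSec) / cVirtFreq);
}

int main(void)
{
    /* 1.5 seconds of virtual time on a hypothetical 2.5 GHz guest TSC -> 3 750 000 000 ticks. */
    printf("%llu\n", (unsigned long long)exampleVirtualToTsc(1500000000ULL, 2500000000U, 1000000000U));
    return 0;
}
#endif

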
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @internal
 */
int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.fTSCTicking = true;
        if (pVM->tm.s.fTSCVirtualized)
        {
            /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
             *        unpaused before the virtual time and stopped after it.) */
            if (pVM->tm.s.fTSCUseRealTSC)
                pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVCpu->tm.s.u64TSC;
            else
                pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                         - pVCpu->tm.s.u64TSC;
        }
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_TICKING;
}


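/*
 * Illustrative sketch (hypothetical, not part of the original file): resuming
 * recomputes offTSCRawSrc so that "raw source - offTSCRawSrc" continues
 * exactly from the guest TSC value that tmCpuTickPause() saved in u64TSC.
 * A minimal model of that invariant, with made-up numbers:
 */
#if 0 /* illustration only, never compiled */
#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint64_t uGuestTscAtPause = 1000;   /* what tmCpuTickPause() stored in u64TSC         */
    uint64_t uRawSrcAtResume  = 5000;   /* real TSC / virtual sync reading at resume time */

    uint64_t offTSCRawSrc = uRawSrcAtResume - uGuestTscAtPause;  /* tmCpuTickResume()     */
    uint64_t uGuestTscNow = uRawSrcAtResume - offTSCRawSrc;      /* later guest TSC reads */

    assert(uGuestTscNow == uGuestTscAtPause);   /* no jump visible across pause/resume    */
    return 0;
}
#endif

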
/**
 * Pauses the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVCpu   Pointer to the VMCPU.
 * @internal
 */
int tmCpuTickPause(PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}

/**
 * Record why we refused to use offsetted TSC.
 *
 * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   The current CPU.
 */
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
{

    /* Sample the reason for refusing. */
    if (!pVM->tm.s.fMaybeUseOffsettedHostTSC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
    else if (!pVCpu->tm.s.fTSCTicking)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
    else if (!pVM->tm.s.fTSCUseRealTSC)
    {
        if (pVM->tm.s.fVirtualSyncCatchUp)
        {
            if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
        }
        else if (!pVM->tm.s.fVirtualSyncTicking)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
        else if (pVM->tm.s.fVirtualWarpDrive)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
    }
}


/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true/false accordingly.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   poffRealTSC     The offset against the TSC of the current CPU.
 *                          Can be NULL.
 * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
 *                          not.
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    bool fParavirtTsc = false;

    /*
     * We require:
     *     1. Use of a paravirtualized TSC is enabled by the guest.
     *    (OR)
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (    (*pfParavirtTsc = GIMIsParavirtTscEnabled(pVM)) == true
        || (   pVM->tm.s.fMaybeUseOffsettedHostTSC
            && RT_LIKELY(pVCpu->tm.s.fTSCTicking)
            && (   pVM->tm.s.fTSCUseRealTSC
                || (   !pVM->tm.s.fVirtualSyncCatchUp
                    && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                    && !pVM->tm.s.fVirtualWarpDrive))))
    {
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);

            if (poffRealTSC)
            {
                uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                - pVCpu->tm.s.offTSCRawSrc;
                /** @todo When we start collecting statistics on how much time we spend executing
                 * guest code before exiting, we should check this against the next virtual sync
                 * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
                 * the chance that we'll get interrupted right after the timer expired. */
                *poffRealTSC = u64Now - ASMReadTSC();
            }
        }
        else if (poffRealTSC)
        {
            /* The source is the real TSC. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
        }
        /** @todo count this? */
        return true;
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    return false;
}


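/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * offset returned above is the amount by which the virtualized guest TSC
 * leads (or, modulo 2^64, trails) the host TSC, i.e. it is meant to be added
 * to host TSC reads, for instance as the VT-x/AMD-V TSC offset, so that a
 * guest RDTSC observes the virtualized value.  Minimal model with made-up
 * numbers:
 */
#if 0 /* illustration only, never compiled */
#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint64_t uHostTsc   = 9000;                 /* ASMReadTSC() at computation time        */
    uint64_t uGuestNow  = 4000;                 /* virtualized guest TSC value "now"       */
    uint64_t offRealTSC = uGuestNow - uHostTsc; /* wraps modulo 2^64, which is intentional */

    assert((uint64_t)(uHostTsc + offRealTSC) == uGuestNow); /* guest sees the virtual TSC  */
    return 0;
}
#endif

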
/**
 * Calculates the number of host CPU ticks till the next virtual sync deadline.
 *
 * @note    To save work, this function will not bother calculating the accurate
 *          tick count for deadlines that are more than a second ahead.
 *
 * @returns The number of host CPU ticks to the next deadline.  Max one second.
 * @param   cNsToDeadline   The number of nanoseconds to the next virtual
 *                          sync deadline.
 */
DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(uint64_t cNsToDeadline)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
    if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
        return SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
    uint64_t cTicks = ASMMultU64ByU32DivByU32(SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage),
                                              cNsToDeadline,
                                              TMCLOCK_FREQ_VIRTUAL);
    if (cTicks > 4000)
        cTicks -= 4000; /* fudge to account for overhead */
    else
        cTicks >>= 1;
    return cTicks;
}


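/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * helper above scales a nanosecond deadline into host TSC ticks, clamps
 * anything a second or more away, and shaves off a small fudge so we tend to
 * wake up slightly early rather than late.  The standalone model below uses
 * a fixed, hypothetical 3 GHz host frequency and a plain 64-bit multiply
 * instead of the overflow-safe ASMMultU64ByU32DivByU32.
 */
#if 0 /* illustration only, never compiled */
#include <stdint.h>
#include <stdio.h>

static uint64_t exampleTicksToDeadline(uint64_t cNsToDeadline, uint64_t cHostHz)
{
    const uint64_t cNsPerSec = 1000000000ULL;       /* TMCLOCK_FREQ_VIRTUAL           */
    if (cNsToDeadline >= cNsPerSec)
        return cHostHz;                             /* clamp to one second's worth    */
    uint64_t cTicks = cNsToDeadline * cHostHz / cNsPerSec;
    if (cTicks > 4000)
        cTicks -= 4000;                             /* fudge to account for overhead  */
    else
        cTicks >>= 1;
    return cTicks;
}

int main(void)
{
    /* 250 us on a 3 GHz host: 750 000 ticks minus the 4000 tick fudge = 746 000. */
    printf("%llu\n", (unsigned long long)exampleTicksToDeadline(250000, 3000000000ULL));
    return 0;
}
#endif

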
/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVCpu           The current CPU.
 * @param   pfOffsettedTsc  Where to store whether an offsetted TSC can be
 *                          used.
 * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
 *                          not.
 * @param   poffRealTSC     The offset against the TSC of the current CPU.
 *
 * @thread  EMT(pVCpu).
 * @remarks Superset of TMCpuTickCanUseRealTSC().
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc,
                                                        uint64_t *poffRealTSC)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    uint64_t cTicksToDeadline;

    /*
     * We require:
     *     1. Use of a paravirtualized TSC is enabled by the guest.
     *    (OR)
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (    (*pfParavirtTsc = GIMIsParavirtTscEnabled(pVM)) == true
        || (   pVM->tm.s.fMaybeUseOffsettedHostTSC
            && RT_LIKELY(pVCpu->tm.s.fTSCTicking)
            && (   pVM->tm.s.fTSCUseRealTSC
                || (   !pVM->tm.s.fVirtualSyncCatchUp
                    && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                    && !pVM->tm.s.fVirtualWarpDrive))))
    {
        *pfOffsettedTsc = true;
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);

            uint64_t cNsToDeadline;
            uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
            uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
                            ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                            : u64NowVirtSync;
            u64Now -= pVCpu->tm.s.offTSCRawSrc;
            *poffRealTSC = u64Now - ASMReadTSC();
            cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
        }
        else
        {
            /* The source is the real TSC. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
            cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
        }
    }
    else
    {
#ifdef VBOX_WITH_STATISTICS
        tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
        *pfOffsettedTsc = false;
        *poffRealTSC = 0;
        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    }

    return cTicksToDeadline;
}


/**
 * Read the current CPU timestamp counter.
 *
 * @returns The CPU TSC.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   fCheckTimers    Whether to check for expired virtual sync timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.fTSCVirtualized)
        {
            if (pVM->tm.s.fTSCUseRealTSC)
                u64 = ASMReadTSC();
            else
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
            u64 -= pVCpu->tm.s.offTSCRawSrc;
        }
        else
            u64 = ASMReadTSC();

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64; /* @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}


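/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * last-seen bookkeeping above guarantees a monotonically increasing guest
 * TSC even when the underlying source momentarily reads lower (the
 * StatTSCUnderflow case).  Minimal model of that fallback:
 */
#if 0 /* illustration only, never compiled */
#include <stdint.h>
#include <assert.h>

static uint64_t exampleMonotonicTsc(uint64_t uRawValue, uint64_t *puLastSeen)
{
    if (uRawValue > *puLastSeen)
        *puLastSeen = uRawValue;    /* normal case: the source moved forward           */
    else
        *puLastSeen += 64;          /* underflow case: nudge just past the last value  */
    return *puLastSeen;
}

int main(void)
{
    uint64_t uLastSeen = 1000;
    assert(exampleMonotonicTsc( 900, &uLastSeen) == 1064);  /* source appeared to go back */
    assert(exampleMonotonicTsc(2000, &uLastSeen) == 2000);  /* source caught up again     */
    return 0;
}
#endif

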
/**
 * Read the current CPU timestamp counter.
 *
 * @returns The CPU TSC.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMDECL(uint64_t) TMCpuTickGet(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, true /* fCheckTimers */);
}


/**
 * Read the current CPU timestamp counter, don't check for expired timers.
 *
 * @returns The CPU TSC.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, false /* fCheckTimers */);
}


/**
 * Sets the current CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   u64Tick     The new timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);

    /*
     * This is easier to do when the TSC is paused since resume will
     * do all the calculations for us. Actually, we don't need to
     * call tmCpuTickPause here since we overwrite u64TSC anyway.
     */
    bool fTSCTicking = pVCpu->tm.s.fTSCTicking;
    pVCpu->tm.s.fTSCTicking = false;
    pVCpu->tm.s.u64TSC = u64Tick;
    pVCpu->tm.s.u64TSCLastSeen = u64Tick;
    if (fTSCTicking)
        tmCpuTickResume(pVM, pVCpu);
    /** @todo Try help synchronizing it better among the virtual CPUs? */

    return VINF_SUCCESS;
}

/**
 * Sets the last seen CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVCpu               Pointer to the VMCPU.
 * @param   u64LastSeenTick     The last seen timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick)
{
    VMCPU_ASSERT_EMT(pVCpu);

    LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
    if (pVCpu->tm.s.u64TSCLastSeen < u64LastSeenTick)
        pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
    return VINF_SUCCESS;
}

/**
 * Gets the last seen CPU timestamp counter of the guest.
 *
 * @returns the last seen TSC.
 * @param   pVCpu   Pointer to the VMCPU.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    return pVCpu->tm.s.u64TSCLastSeen;
}


/**
 * Get the timestamp frequency.
 *
 * @returns Number of ticks per second.
 * @param   pVM     The VM.
 */
VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
{
    if (pVM->tm.s.fTSCUseRealTSC)
    {
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
        if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
            return cTSCTicksPerSecond;
    }
    return pVM->tm.s.cTSCTicksPerSecond;
}
