VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp@54845

Last change on this file was r54845, checked in by vboxsync, 10 years ago:

VMM/GIM,TM: paranoia assertions.
/* $Id: TMAllCpu.cpp 54845 2015-03-19 10:55:06Z vboxsync $ */
/** @file
 * TM - Timeout Manager, CPU Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/vmm/tm.h>
#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP */
#include "TMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/gim.h>
#include <VBox/sup.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <VBox/log.h>


/**
 * Gets the raw CPU tick from the current virtual time.
 */
DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVM pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (fCheckTimers)
        u64 = TMVirtualSyncGet(pVM);
    else
        u64 = TMVirtualSyncGetNoCheck(pVM);
    if (u64 != TMCLOCK_FREQ_VIRTUAL) /* what's the use of this test, document! */
        u64 = ASMMultU64ByU32DivByU32(u64, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
    return u64;
}
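
/* A worked example of the scaling above, with illustrative numbers not taken
 * from this file: TMCLOCK_FREQ_VIRTUAL is the 1000000000 Hz (nanosecond)
 * virtual clock frequency, so for a guest TSC frequency of 2500000000
 * ticks/sec and a virtual sync reading of 1000000 ns:
 *     ASMMultU64ByU32DivByU32(1000000, 2500000000, 1000000000)
 *         = 1000000 * 2500000000 / 1000000000
 *         = 2500000 guest TSC ticks,
 * i.e. the 64x32/32 multiply-divide rescales nanoseconds of virtual time
 * into guest TSC ticks using a wide intermediate product so the multiply
 * cannot overflow. */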


#ifdef IN_RING3
/**
 * Used by tmR3CpuTickParavirtEnable and tmR3CpuTickParavirtDisable.
 */
uint64_t tmR3CpuTickGetRawVirtualNoCheck(PVM pVM)
{
    return tmCpuTickGetRawVirtual(pVM, false /*fCheckTimers*/);
}
#endif


/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @internal
 */
int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.fTSCTicking = true;

        /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
         *        unpaused before the virtual time and stopped after it.) */
        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
            pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
        else
            pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                     - pVCpu->tm.s.u64TSC;
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_TICKING;
}
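
/* Numeric illustration of the rebasing above (made-up values): if the guest
 * TSC was frozen at u64TSC = 7000 while paused and the raw source now reads
 * 9500, offTSCRawSrc becomes 2500.  Subsequent reads compute
 * source - offTSCRawSrc, so the guest TSC continues seamlessly from 7000
 * instead of jumping to the raw source value. */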


/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 1)
        {
            /* The first VCPU to resume. */
            uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

            STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

            /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going backwards. */
            if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
                pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
            else
                pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                         - pVM->tm.s.u64LastPausedTSC;

            /* Calculate the offset for the other VCPUs to use. */
            pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
        }
        else
        {
            /* All other VCPUs (if any). */
            pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
        }
    }
    return VINF_SUCCESS;
}
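
/* Illustration of the offTSCPause propagation above (invented numbers): if
 * the first VCPU to resume sees its offTSCRawSrc move from 1000 to 1800,
 * offTSCPause is recorded as 800.  Every later VCPU simply adds 800 to its
 * own offTSCRawSrc, so all VCPUs apply the same shift of the time source
 * and their relative TSC spacing across the pause is preserved. */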


/**
 * Pauses the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVCpu   Pointer to the VMCPU.
 * @internal
 */
int tmCpuTickPause(PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}


/**
 * Pauses the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @internal
 */
int tmCpuTickPauseLocked(PVM pVM, PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;

        uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 0)
        {
            /* When the last TSC stops, remember the value. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCPause);
            pVM->tm.s.u64LastPausedTSC = pVCpu->tm.s.u64TSC;
        }
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}
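
/* A hedged pairing sketch (hypothetical caller, not from this file): a
 * caller holding the relevant lock would bracket a VM-wide stop roughly as
 *
 *     tmCpuTickPauseLocked(pVM, pVCpu);    // last VCPU to stop records u64LastPausedTSC
 *     // ... VM is paused / state is saved ...
 *     tmCpuTickResumeLocked(pVM, pVCpu);   // first VCPU to resume rebases from u64LastPausedTSC
 *
 * The cTSCsTicking counter ties the halves together: the TSC remembered
 * when it reaches zero is the TSC resumed from when it leaves zero. */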


/**
 * Records why we refused to use offsetted TSC.
 *
 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset().
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   The current CPU.
 */
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
{
    /* Sample the reason for refusing. */
    if (pVM->tm.s.enmTSCMode != TMTSCMODE_DYNAMIC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
    else if (!pVCpu->tm.s.fTSCTicking)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
    else if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
    {
        if (pVM->tm.s.fVirtualSyncCatchUp)
        {
            if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
        }
        else if (!pVM->tm.s.fVirtualSyncTicking)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
        else if (pVM->tm.s.fVirtualWarpDrive)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
    }
}


/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true/false accordingly.
 * @param   pVM             Pointer to the cross context VM structure.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   poffRealTsc     The offset against the TSC of the current host CPU,
 *                          if the function returns true.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickGetDeadlineAndTscOffset().
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking);

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * In real TSC mode it's easy, we just need the delta & offTscRawSrc and
     * the CPU will add them to RDTSC and RDTSCP at runtime.
     *
     * In tmCpuTickGetInternal we do:
     *          SUPReadTsc() - pVCpu->tm.s.offTSCRawSrc;
     * Where SUPReadTsc() does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta;
     * Which means tmCpuTickGetInternal actually does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta - pVCpu->tm.s.offTSCRawSrc;
     * So, the offset to be ADDED to RDTSC[P] is:
     *          offRealTsc = -(pGipCpu->i64TscDelta + pVCpu->tm.s.offTSCRawSrc)
     */
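    /* Worked example of the identity above, with illustrative numbers only:
     * suppose ASMReadTSC() = 10000, pGipCpu->i64TscDelta = 50 and
     * offTSCRawSrc = 2000.  The guest is expected to read
     *     10000 - 50 - 2000 = 7950,
     * so the offset handed to the hardware must be
     *     offRealTsc = -(50 + 2000) = -2050,
     * because guest RDTSC then yields ASMReadTSC() + offRealTsc = 7950. */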
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's so weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        return true;
    }

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't).
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                        - pVCpu->tm.s.offTSCRawSrc;
        /** @todo When we start collecting statistics on how much time we spend executing
         *        guest code before exiting, we should check this against the next virtual sync
         *        timer timeout.  If it's lower than the avg. length, we should trap rdtsc to increase
         *        the chance that we'll get interrupted right after the timer expired. */
        if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
        {
            *poffRealTsc = u64Now - ASMReadTSC();
            return true;    /** @todo count this? */
        }
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    return false;
}
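
/* A minimal, hypothetical caller sketch; the function name below is invented
 * for illustration (the real consumers are the HM VT-x/AMD-V execution
 * loops): */
#if 0 /* illustration only */
static void exampleUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu)
{
    uint64_t offRealTsc;
    bool     fParavirtTsc;
    if (TMCpuTickCanUseRealTSC(pVM, pVCpu, &offRealTsc, &fParavirtTsc))
    {
        /* Let the guest execute RDTSC/RDTSCP directly; program the CPU to
           add offRealTsc to every TSC read the guest performs. */
    }
    else
    {
        /* Intercept RDTSC/RDTSCP and satisfy each read via TMCpuTickGet(). */
    }
}
#endif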


/**
 * Calculates the number of host CPU ticks till the next virtual sync deadline.
 *
 * @note    To save work, this function will not bother calculating the accurate
 *          tick count for deadlines that are more than a second ahead.
 *
 * @returns The number of host CPU ticks to the next deadline.  Max one second.
 * @param   pVCpu           The current CPU.
 * @param   cNsToDeadline   The number of nanoseconds to the next virtual
 *                          sync deadline.
 */
DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(PVMCPU pVCpu, uint64_t cNsToDeadline)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
#ifdef IN_RING3
    uint64_t uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
#else
    uint64_t uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
#endif
    if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
        return uCpuHz;
    uint64_t cTicks = ASMMultU64ByU32DivByU32(uCpuHz, cNsToDeadline, TMCLOCK_FREQ_VIRTUAL);
    if (cTicks > 4000)
        cTicks -= 4000; /* fudge to account for overhead */
    else
        cTicks >>= 1;
    return cTicks;
}
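
/* Worked example with an illustrative host frequency: for uCpuHz =
 * 3000000000 and cNsToDeadline = 250000 ns,
 *     3000000000 * 250000 / 1000000000 = 750000 ticks,
 * and the 4000-tick overhead fudge leaves 746000 for the caller.  Any
 * deadline of one second or more short-circuits to uCpuHz, which is the
 * "max one second" cap documented above. */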


/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVM             Pointer to the cross context VM structure.
 * @param   pVCpu           The current CPU.
 * @param   poffRealTsc     The offset against the TSC of the current host CPU,
 *                          if pfOffsettedTsc is set to true.
 * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickCanUseRealTSC().
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc,
                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking);

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * Same logic as in TMCpuTickCanUseRealTSC.
     */
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's so weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        *pfOffsettedTsc = true;
        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
    }

    /*
     * Same logic as in TMCpuTickCanUseRealTSC.
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t cNsToDeadline;
        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
        uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
                        ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                        : u64NowVirtSync;
        u64Now -= pVCpu->tm.s.offTSCRawSrc;
        *poffRealTsc = u64Now - ASMReadTSC();
        *pfOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
        return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    *pfOffsettedTsc = false;
    *poffRealTsc = 0;
    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
}


/**
 * Reads the current CPU timestamp counter.
 *
 * @returns The current TSC value.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   fCheckTimers    Whether to check for expired timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
            u64 = SUPReadTsc();
        else
            u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
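
/* Illustration of the underflow guard above (made-up values): if the guest
 * last saw TSC = 5000 but a rescaled or rebased read now yields 4990, the
 * code bumps u64TSCLastSeen to 5064 and returns that instead, trading a
 * tiny forward jump for a guest TSC that never moves backwards. */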


/**
 * Reads the current CPU timestamp counter.
 *
 * @returns The current TSC value.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMDECL(uint64_t) TMCpuTickGet(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, true /* fCheckTimers */);
}


/**
 * Reads the current CPU timestamp counter without checking for expired timers.
 *
 * @returns The current TSC value.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, false /* fCheckTimers */);
}


/**
 * Sets the current CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   u64Tick     The new timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);

    /*
     * This is easier to do when the TSC is paused since resume will
     * do all the calculations for us.  Actually, we don't need to
     * call tmCpuTickPause here since we overwrite u64TSC anyway.
     */
    bool fTSCTicking = pVCpu->tm.s.fTSCTicking;
    pVCpu->tm.s.fTSCTicking = false;
    pVCpu->tm.s.u64TSC = u64Tick;
    pVCpu->tm.s.u64TSCLastSeen = u64Tick;
    if (fTSCTicking)
        tmCpuTickResume(pVM, pVCpu);
    /** @todo Try to help synchronize it better among the virtual CPUs? */

    return VINF_SUCCESS;
}
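
/* Hypothetical usage sketch; the helper below is invented for illustration.
 * A guest write to the TSC, e.g. WRMSR to IA32_TSC, would typically be
 * funneled into this API on the owning EMT: */
#if 0 /* illustration only */
static int exampleEmulateWrmsrTsc(PVM pVM, PVMCPU pVCpu, uint64_t uValue)
{
    /* The pause/overwrite/resume dance inside TMCpuTickSet recomputes
       offTSCRawSrc, so the new value takes effect immediately. */
    return TMCpuTickSet(pVM, pVCpu, uValue);
}
#endif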

/**
 * Sets the last seen CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVCpu               Pointer to the VMCPU.
 * @param   u64LastSeenTick     The last seen timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick)
{
    VMCPU_ASSERT_EMT(pVCpu);

    LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
    if (pVCpu->tm.s.u64TSCLastSeen < u64LastSeenTick)
        pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
    return VINF_SUCCESS;
}

/**
 * Gets the last seen CPU timestamp counter of the guest.
 *
 * @returns the last seen TSC.
 * @param   pVCpu   Pointer to the VMCPU.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    return pVCpu->tm.s.u64TSCLastSeen;
}


/**
 * Gets the timestamp frequency.
 *
 * @returns Number of ticks per second.
 * @param   pVM     The VM.
 */
VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
{
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
        && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC)
    {
#ifdef IN_RING3
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
#elif defined(IN_RING0)
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, RTMpCpuIdToSetIndex(RTMpCpuId()));
#else
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, VMMGetCpu(pVM)->iHostCpuSet);
#endif
        if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
            return cTSCTicksPerSecond;
    }
    return pVM->tm.s.cTSCTicksPerSecond;
}


/**
 * Whether the TSC is ticking for the VCPU.
 *
 * @returns true if ticking, false otherwise.
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMM_INT_DECL(bool) TMCpuTickIsTicking(PVMCPU pVCpu)
{
    return pVCpu->tm.s.fTSCTicking;
}