source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp@72526

Last change on this file since 72526 was 72522, checked in by vboxsync, 7 years ago

NEM,TM: Work on TSC and NEM/win. bugref:9044 [=>office]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 20.2 KB
/* $Id: TMAllCpu.cpp 72522 2018-06-12 08:45:27Z vboxsync $ */
/** @file
 * TM - Timeout Manager, CPU Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/vmm/tm.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/nem.h>
#include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGip */
#include "TMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/sup.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <VBox/log.h>


/**
 * Gets the raw CPU tick from the current virtual time.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVM pVM, bool fCheckTimers)
{
    uint64_t u64;
    if (fCheckTimers)
        u64 = TMVirtualSyncGet(pVM);
    else
        u64 = TMVirtualSyncGetNoCheck(pVM);
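    /* ASMMultU64ByU32DivByU32 rescales the virtual-sync nanoseconds into guest TSC
       ticks as u64 * cTSCTicksPerSecond / TMCLOCK_FREQ_VIRTUAL, using a wide
       intermediate product so the multiplication cannot overflow 64 bits.
       Roughly: at a 2 GHz guest TSC, 1 000 000 ns of virtual time maps to
       2 000 000 ticks. */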
    return ASMMultU64ByU32DivByU32(u64, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
}


#ifdef IN_RING3
/**
 * Used by tmR3CpuTickParavirtEnable and tmR3CpuTickParavirtDisable.
 *
 * @param   pVM     The cross context VM structure.
 */
uint64_t tmR3CpuTickGetRawVirtualNoCheck(PVM pVM)
{
    return tmCpuTickGetRawVirtual(pVM, false /*fCheckTimers*/);
}
#endif


/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickResume(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.fTSCTicking = true;

        /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
         *        unpaused before the virtual time and stopped after it.) */
        switch (pVM->tm.s.enmTSCMode)
        {
            case TMTSCMODE_REAL_TSC_OFFSET:
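                /* offTSCRawSrc is what tmCpuTickGetInternal later subtracts from the
                   raw source, i.e. guest TSC = source TSC - offTSCRawSrc.  Seeding it
                   with (source now - last guest value) makes the guest TSC resume
                   exactly where it left off. */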
                pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
                break;
            case TMTSCMODE_VIRT_TSC_EMULATED:
            case TMTSCMODE_DYNAMIC:
                pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                         - pVCpu->tm.s.u64TSC;
                break;
            case TMTSCMODE_NATIVE_API:
                pVCpu->tm.s.offTSCRawSrc = 0; /** @todo ?? */
                break;
            default:
                AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
        }
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_TICKING;
}


/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
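        /* cTSCsTicking counts the VCPUs whose TSC is ticking: the first VCPU to
           resume rebases against u64LastPausedTSC below, while the remaining ones
           simply apply the same shift (offTSCPause) so they all stay in lockstep. */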
        if (c == 1)
        {
            /* The first VCPU to resume. */
            uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

            STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

            /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
            switch (pVM->tm.s.enmTSCMode)
            {
                case TMTSCMODE_REAL_TSC_OFFSET:
                    pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
                    break;
                case TMTSCMODE_VIRT_TSC_EMULATED:
                case TMTSCMODE_DYNAMIC:
                    pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                             - pVM->tm.s.u64LastPausedTSC;
                    break;
                case TMTSCMODE_NATIVE_API:
                    pVCpu->tm.s.offTSCRawSrc = 0; /** @todo ?? */
                    break;
                default:
                    AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
            }

            /* Calculate the offset for other VCPUs to use. */
            pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
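            /* I.e. offTSCPause is how much this VCPU's offset moved across the pause;
               applying the identical delta to the other VCPUs below preserves their
               relative TSC values. */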
        }
        else
        {
            /* All other VCPUs (if any). */
            pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Pauses the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickPause(PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}


/**
 * Pauses the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickPauseLocked(PVM pVM, PVMCPU pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;

        uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 0)
        {
            /* When the last TSC stops, remember the value. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCPause);
            pVM->tm.s.u64LastPausedTSC = pVCpu->tm.s.u64TSC;
        }
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}


#ifdef VBOX_WITH_STATISTICS
/**
 * Record why we refused to use offsetted TSC.
 *
 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset().
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
{
    /* Sample the reason for refusing. */
    if (pVM->tm.s.enmTSCMode != TMTSCMODE_DYNAMIC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
    else if (!pVCpu->tm.s.fTSCTicking)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
    else if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
    {
        if (pVM->tm.s.fVirtualSyncCatchUp)
        {
            if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
        }
        else if (!pVM->tm.s.fVirtualSyncTicking)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
        else if (pVM->tm.s.fVirtualWarpDrive)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
    }
}
#endif /* VBOX_WITH_STATISTICS */


/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true/false accordingly.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   poffRealTsc     The offset against the TSC of the current host CPU,
 *                          valid only when the function returns true.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickGetDeadlineAndTscOffset().
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * In real TSC mode it's easy, we just need the delta & offTscRawSrc and
     * the CPU will add them to RDTSC and RDTSCP at runtime.
     *
     * In tmCpuTickGetInternal we do:
     *          SUPReadTsc() - pVCpu->tm.s.offTSCRawSrc;
     * Where SUPReadTsc() does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta;
     * Which means tmCpuTickGetInternal actually does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta - pVCpu->tm.s.offTSCRawSrc;
     * So, the offset to be ADDED to RDTSC[P] is:
     *          offRealTsc = -(pGipCpu->i64TscDelta + pVCpu->tm.s.offTSCRawSrc)
     */
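    /* A rough worked example, assuming a zero GIP TSC delta: if the guest TSC was
       seeded 10000 ticks behind the host, offTSCRawSrc = 10000 and offRealTsc = -10000;
       the hardware adds that offset to RDTSC[P], so the guest reads host TSC - 10000,
       matching what tmCpuTickGetInternal would have returned. */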
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas! It's so weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        return true;
    }

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                        - pVCpu->tm.s.offTSCRawSrc;
        /** @todo When we start collecting statistics on how much time we spend executing
         * guest code before exiting, we should check this against the next virtual sync
         * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
         * the chance that we'll get interrupted right after the timer expired. */
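        /* Only hand out an offset when it cannot make the guest TSC appear to move
           backwards past a value the guest has already read; otherwise we return
           false below, presumably so the caller intercepts and emulates RDTSC. */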
        if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
        {
            *poffRealTsc = u64Now - ASMReadTSC();
            return true;    /** @todo count this? */
        }
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    return false;
}


/**
 * Calculates the number of host CPU ticks till the next virtual sync deadline.
 *
 * @note    To save work, this function will not bother calculating the accurate
 *          tick count for deadlines that are more than a second ahead.
 *
 * @returns The number of host CPU ticks to the next deadline.  Max one second.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   cNsToDeadline   The number of nanoseconds to the next virtual
 *                          sync deadline.
 */
DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(PVMCPU pVCpu, uint64_t cNsToDeadline)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
#ifdef IN_RING3
    RT_NOREF_PV(pVCpu);
    uint64_t uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
#else
    uint64_t uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
#endif
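    /* Deadlines a second or more ahead are clamped to one second's worth of host
       ticks (uCpuHz); below that, cTicks = uCpuHz * cNsToDeadline / 10^9, shaved
       down a little (or halved for tiny values) so we tend to wake up slightly
       early rather than late. */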
    if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
        return uCpuHz;
    uint64_t cTicks = ASMMultU64ByU32DivByU32(uCpuHz, cNsToDeadline, TMCLOCK_FREQ_VIRTUAL);
    if (cTicks > 4000)
        cTicks -= 4000; /* fudge to account for overhead */
    else
        cTicks >>= 1;
    return cTicks;
}


/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   poffRealTsc     The offset against the TSC of the current host CPU,
 *                          if pfOffsettedTsc is set to true.
 * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickCanUseRealTSC().
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc,
                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * Same logic as in TMCpuTickCanUseRealTSC.
     */
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas! It's so weird that we do the
         *        exact opposite of what the hardware implements. */
#ifdef IN_RING3
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
#else
        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
#endif
        *pfOffsettedTsc = true;
        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
    }

    /*
     * Same logic as in TMCpuTickCanUseRealTSC.
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t cNsToDeadline;
        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
        uint64_t u64Now = ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
        u64Now -= pVCpu->tm.s.offTSCRawSrc;
        *poffRealTsc = u64Now - ASMReadTSC();
        *pfOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
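        /* Unlike TMCpuTickCanUseRealTSC, this always hands back a deadline; whether
           the computed TSC offset may actually be used travels separately in
           *pfOffsettedTsc. */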
        return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    *pfOffsettedTsc = false;
    *poffRealTsc = 0;
    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
}


/**
 * Read the current CPU timestamp counter.
 *
 * @returns The current TSC value.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        switch (pVM->tm.s.enmTSCMode)
        {
            case TMTSCMODE_REAL_TSC_OFFSET:
                u64 = SUPReadTsc();
                break;
            case TMTSCMODE_VIRT_TSC_EMULATED:
            case TMTSCMODE_DYNAMIC:
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
                break;
#ifndef IN_RC
            case TMTSCMODE_NATIVE_API:
            {
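                /* In native-API mode (NEM, e.g. Hyper-V) the hypervisor owns the guest
                   TSC, so query it from there; on failure the assertion macro below
                   returns the raw host TSC as a last-resort fallback. */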
                u64 = 0;
                int rcNem = NEMHCQueryCpuTick(pVCpu, &u64, NULL);
                AssertLogRelRCReturn(rcNem, SUPReadTsc());
                break;
            }
#endif
            default:
                AssertFailedBreakStmt(u64 = SUPReadTsc());
        }
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    /** @todo @bugref{7243}: SVM TSC offset. */
    return u64;
}


/**
 * Read the current CPU timestamp counter.
 *
 * @returns The current TSC value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(uint64_t) TMCpuTickGet(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, true /* fCheckTimers */);
}


/**
 * Read the current CPU timestamp counter, don't check for expired timers.
 *
 * @returns The current TSC value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPU pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, false /* fCheckTimers */);
}


/**
 * Sets the current CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u64Tick     The new timestamp value.
 *
 * @thread  EMT whose TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSet(PVM pVM, PVMCPU pVCpu, uint64_t u64Tick)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);

    /*
     * This is easier to do when the TSC is paused since resume will
     * do all the calculations for us. Actually, we don't need to
     * call tmCpuTickPause here since we overwrite u64TSC anyway.
     */
    bool fTSCTicking = pVCpu->tm.s.fTSCTicking;
    pVCpu->tm.s.fTSCTicking = false;
    pVCpu->tm.s.u64TSC = u64Tick;
    pVCpu->tm.s.u64TSCLastSeen = u64Tick;
    if (fTSCTicking)
        tmCpuTickResume(pVM, pVCpu);
    /** @todo Try to help synchronize it better among the virtual CPUs? */

    return VINF_SUCCESS;
}

/**
 * Sets the last seen CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   u64LastSeenTick The last seen timestamp value.
 *
 * @thread  EMT whose TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPU pVCpu, uint64_t u64LastSeenTick)
{
    VMCPU_ASSERT_EMT(pVCpu);

    LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
    if (pVCpu->tm.s.u64TSCLastSeen < u64LastSeenTick)
        pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
    return VINF_SUCCESS;
}

/**
 * Gets the last seen CPU timestamp counter of the guest.
 *
 * @returns the last seen TSC.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    return pVCpu->tm.s.u64TSCLastSeen;
}


/**
 * Get the timestamp frequency.
 *
 * @returns Number of ticks per second.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
{
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
        && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC)
    {
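        /* Without an invariant host TSC the frequency may vary with power management,
           so ask the GIP for the current CPU's live Hz value; ~0 marks an unknown
           reading, in which case we fall back to the configured rate below. */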
#ifdef IN_RING3
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
#elif defined(IN_RING0)
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, RTMpCpuIdToSetIndex(RTMpCpuId()));
#else
        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, VMMGetCpu(pVM)->iHostCpuSet);
#endif
        if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
            return cTSCTicksPerSecond;
    }
    return pVM->tm.s.cTSCTicksPerSecond;
}


/**
 * Whether the TSC is ticking for the VCPU.
 *
 * @returns true if ticking, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) TMCpuTickIsTicking(PVMCPU pVCpu)
{
    return pVCpu->tm.s.fTSCTicking;
}