VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp@96945

Last change on this file since 96945 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 23.2 KB
/* $Id: TMAllCpu.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * TM - Timeout Manager, CPU Time, All Contexts.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_TM
#include <VBox/vmm/tm.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/nem.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h> /* for SUPGetCpuHzFromGIP; ASMReadTSC */
#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
# include <iprt/asm-arm.h>
#endif
#include "TMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/sup.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <VBox/log.h>



/**
 * Converts from virtual time to raw CPU ticks.
 *
 * Mainly to have the ASMMultU64ByU32DivByU32 overflow trickery in one place.
 *
 * @returns raw CPU ticks.
 * @param   pVM             The cross context VM structure.
 * @param   u64VirtualTime  The virtual time to convert.
 */
DECLINLINE(uint64_t) tmCpuTickCalcFromVirtual(PVMCC pVM, uint64_t u64VirtualTime)
{
    if (pVM->tm.s.cTSCTicksPerSecond <= UINT32_MAX)
        return ASMMultU64ByU32DivByU32(u64VirtualTime, (uint32_t)pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
    Assert(pVM->tm.s.cTSCTicksPerSecond <= ((uint64_t)UINT32_MAX << 2)); /* <= 15.99 GHz */
    return ASMMultU64ByU32DivByU32(u64VirtualTime, (uint32_t)(pVM->tm.s.cTSCTicksPerSecond >> 2), TMCLOCK_FREQ_VIRTUAL >> 2);
}
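
/* Worked example (illustrative numbers, not part of the original source): with a
 * guest TSC frequency of 2.5 GHz, which fits in 32 bits, 1 000 000 ns of virtual
 * time converts as ASMMultU64ByU32DivByU32(1000000, 2500000000, 1000000000),
 * giving 2 500 000 ticks.  A 5 GHz frequency exceeds UINT32_MAX, so the second
 * path shifts both the frequency and TMCLOCK_FREQ_VIRTUAL right by two; the
 * ratio, and thus the result, is unchanged while both operands fit in 32 bits
 * again. */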


/**
 * Gets the raw cpu tick from current virtual time.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVMCC pVM, bool fCheckTimers)
{
    if (fCheckTimers)
        return tmCpuTickCalcFromVirtual(pVM, TMVirtualSyncGet(pVM));
    return tmCpuTickCalcFromVirtual(pVM, TMVirtualSyncGetNoCheck(pVM));
}


#ifdef IN_RING3
/**
 * Used by tmR3CpuTickParavirtEnable and tmR3CpuTickParavirtDisable.
 *
 * @param   pVM     The cross context VM structure.
 */
uint64_t tmR3CpuTickGetRawVirtualNoCheck(PVM pVM)
{
    return tmCpuTickGetRawVirtual(pVM, false /*fCheckTimers*/);
}
#endif


/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickResume(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.fTSCTicking = true;

        /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're
         *        unpaused before the virtual time and stopped after it.) */
        switch (pVM->tm.s.enmTSCMode)
        {
            case TMTSCMODE_REAL_TSC_OFFSET:
                pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
                break;
            case TMTSCMODE_VIRT_TSC_EMULATED:
            case TMTSCMODE_DYNAMIC:
                pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                         - pVCpu->tm.s.u64TSC;
                break;
            case TMTSCMODE_NATIVE_API:
                pVCpu->tm.s.offTSCRawSrc = 0; /** @todo ?? */
                /* Looks like this is only used by weird modes and MSR TSC writes.  We cannot support either on NEM/win. */
                break;
            default:
                AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
        }
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_TICKING;
}


/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int tmCpuTickResumeLocked(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 1)
        {
            /* The first VCPU to resume. */
            uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

            STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

            /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
            switch (pVM->tm.s.enmTSCMode)
            {
                case TMTSCMODE_REAL_TSC_OFFSET:
                    pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
                    break;
                case TMTSCMODE_VIRT_TSC_EMULATED:
                case TMTSCMODE_DYNAMIC:
                    pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                             - pVM->tm.s.u64LastPausedTSC;
                    break;
                case TMTSCMODE_NATIVE_API:
                {
                    int rc = NEMHCResumeCpuTickOnAll(pVM, pVCpu, pVM->tm.s.u64LastPausedTSC);
                    AssertRCReturn(rc, rc);
                    pVCpu->tm.s.offTSCRawSrc = offTSCRawSrcOld = 0;
                    break;
                }
                default:
                    AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
            }

            /* Calculate the offset addendum for other VCPUs to use. */
            pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
        }
        else
        {
            /* All other VCPUs (if any). */
            pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
        }
    }
    return VINF_SUCCESS;
}
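
/* Bookkeeping sketch (illustrative numbers, not part of the original source): the
 * guest TSC is computed as "raw source value - offTSCRawSrc".  Suppose the VM was
 * paused with u64LastPausedTSC = 400 and the raw source reads 1500 on resume: the
 * first VCPU sets offTSCRawSrc = 1500 - 400 = 1100, so the guest continues from
 * 400 without jumping backwards.  offTSCPause records how much that first offset
 * grew, and every other VCPU adds the same delta so all guest TSCs stay in step. */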


/**
 * Pauses the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickPause(PVMCPUCC pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}


/**
 * Pauses the CPU timestamp counter ticking.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @internal
 */
int tmCpuTickPauseLocked(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (pVCpu->tm.s.fTSCTicking)
    {
        pVCpu->tm.s.u64TSC = TMCpuTickGetNoCheck(pVCpu);
        pVCpu->tm.s.fTSCTicking = false;

        uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 0)
        {
            /* When the last TSC stops, remember the value. */
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCPause);
            pVM->tm.s.u64LastPausedTSC = pVCpu->tm.s.u64TSC;
        }
        return VINF_SUCCESS;
    }
    AssertFailed();
    return VERR_TM_TSC_ALREADY_PAUSED;
}


#ifdef IN_RING0 /* Only used in ring-0 at present (AMD-V and VT-x). */

# ifdef VBOX_WITH_STATISTICS
/**
 * Record why we refused to use offsetted TSC.
 *
 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset().
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
{
    /* Sample the reason for refusing. */
    if (pVM->tm.s.enmTSCMode != TMTSCMODE_DYNAMIC)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
    else if (!pVCpu->tm.s.fTSCTicking)
        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
    else if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
    {
        if (pVM->tm.s.fVirtualSyncCatchUp)
        {
            if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
            else
                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
        }
        else if (!pVM->tm.s.fVirtualSyncTicking)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
        else if (pVM->tm.s.fVirtualWarpDrive)
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
    }
}
# endif /* VBOX_WITH_STATISTICS */

/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true/false accordingly.
 * @param   pVM             The cross context VM structure.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   poffRealTsc     The offset against the TSC of the current host CPU,
 *                          if the function returns true.
 * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickGetDeadlineAndTscOffset().
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *poffRealTsc, bool *pfParavirtTsc)
{
    Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * In real TSC mode it's easy, we just need the delta & offTscRawSrc and
     * the CPU will add them to RDTSC and RDTSCP at runtime.
     *
     * In tmCpuTickGetInternal we do:
     *          SUPReadTsc() - pVCpu->tm.s.offTSCRawSrc;
     * Where SUPReadTsc() does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta;
     * Which means tmCpuTickGetInternal actually does:
     *          ASMReadTSC() - pGipCpu->i64TscDelta - pVCpu->tm.s.offTSCRawSrc;
     * So, the offset to be ADDED to RDTSC[P] is:
     *          offRealTsc = -(pGipCpu->i64TscDelta + pVCpu->tm.s.offTSCRawSrc)
     */
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's soo weird that we do the
         *        exact opposite of what the hardware implements. */
# ifdef IN_RING3
        *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDelta(g_pSUPGlobalInfoPage);
# else
        *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
# endif
        return true;
    }

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t uTscNow;
        uint64_t u64Now = tmCpuTickCalcFromVirtual(pVM, TMVirtualSyncGetNoCheckWithTsc(pVM, &uTscNow))
                        - pVCpu->tm.s.offTSCRawSrc;
        /** @todo When we start collecting statistics on how much time we spend executing
         * guest code before exiting, we should check this against the next virtual sync
         * timer timeout.  If it's lower than the avg. length, we should trap rdtsc to increase
         * the chance that we'll get interrupted right after the timer expired. */
        if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
        {
# ifdef IN_RING3
            *poffRealTsc = u64Now - (uTscNow + (uint64_t)SUPGetTscDelta(g_pSUPGlobalInfoPage));
# else
            *poffRealTsc = u64Now - (uTscNow + (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet));
# endif
            return true;    /** @todo count this? */
        }
    }

# ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
# endif
    return false;
}
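
/* Hypothetical caller sketch (not part of this file): a hardware-assisted
 * execution loop would typically consume the result along these lines:
 *
 *     uint64_t offTsc;
 *     bool     fParavirtTsc;
 *     if (TMCpuTickCanUseRealTSC(pVM, pVCpu, &offTsc, &fParavirtTsc))
 *         ... program the VMCS/VMCB TSC offset with offTsc and let RDTSC run ...
 *     else
 *         ... intercept RDTSC/RDTSCP and emulate them via TMCpuTickGet() ...
 */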


/**
 * Calculates the number of host CPU ticks till the next virtual sync deadline.
 *
 * @note    To save work, this function will not bother calculating the accurate
 *          tick count for deadlines that are more than a second ahead.
 *
 * @returns The number of host cpu ticks to the next deadline.  Max one second.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   cNsToDeadline   The number of nano seconds to the next virtual
 *                          sync deadline.
 */
DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(PVMCPUCC pVCpu, uint64_t cNsToDeadline)
{
    AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
# ifdef IN_RING3
    RT_NOREF_PV(pVCpu);
    PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
    uint64_t uCpuHz = pGip ? SUPGetCpuHzFromGip(pGip) : pVCpu->pVMR3->tm.s.cTSCTicksPerSecondHost;
# else
    uint64_t uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
# endif
    if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
        return uCpuHz;
    AssertCompile(TMCLOCK_FREQ_VIRTUAL <= UINT32_MAX);
    uint64_t cTicks = ASMMultU64ByU32DivByU32(uCpuHz, (uint32_t)cNsToDeadline, TMCLOCK_FREQ_VIRTUAL);
    if (cTicks > 4000)
        cTicks -= 4000; /* fudge to account for overhead */
    else
        cTicks >>= 1;
    return cTicks;
}
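
/* Worked example (illustrative numbers, not part of the original source): with
 * uCpuHz = 3 000 000 000 and cNsToDeadline = 250 000 ns, the conversion gives
 * 3e9 * 250000 / 1e9 = 750 000 ticks, and the 4000-tick overhead fudge brings
 * that down to 746 000.  Deadlines of one second or more short-circuit to
 * uCpuHz, i.e. a full second's worth of ticks. */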


/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVM                 The cross context VM structure.
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @param   poffRealTsc         The offset against the TSC of the current host CPU,
 *                              if pfOffsettedTsc is set to true.
 * @param   pfOffsettedTsc      Where to return whether TSC offsetting can be used.
 * @param   pfParavirtTsc       Where to return whether paravirt TSC is enabled.
 * @param   puTscNow            Where to return the TSC value that the return
 *                              value is relative to.  This is delta adjusted.
 * @param   puDeadlineVersion   Where to return the deadline "version" number.
 *                              Use with TMVirtualSyncIsCurrentDeadlineVersion()
 *                              to check if the absolute deadline is still up to
 *                              date and the caller can skip calling this
 *                              function.
 *
 * @thread  EMT(pVCpu).
 * @see     TMCpuTickCanUseRealTSC().
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *poffRealTsc,
                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc,
                                                        uint64_t *puTscNow, uint64_t *puDeadlineVersion)
{
    Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));

    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

    /*
     * Same logic as in TMCpuTickCanUseRealTSC.
     */
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        /** @todo We should negate both deltas!  It's soo weird that we do the
         *        exact opposite of what the hardware implements. */
# ifdef IN_RING3
        *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDelta(g_pSUPGlobalInfoPage);
# else
        *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
# endif
        *pfOffsettedTsc = true;
        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM, puDeadlineVersion, puTscNow));
    }

    /*
     * Same logic as in TMCpuTickCanUseRealTSC.
     */
    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
        && !pVM->tm.s.fVirtualSyncCatchUp
        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
        && !pVM->tm.s.fVirtualWarpDrive)
    {
        /* The source is the timer synchronous virtual clock. */
        uint64_t cNsToDeadline;
        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline, puDeadlineVersion, puTscNow);
        uint64_t u64Now = tmCpuTickCalcFromVirtual(pVM, u64NowVirtSync);
        u64Now -= pVCpu->tm.s.offTSCRawSrc;

# ifdef IN_RING3
        *poffRealTsc = u64Now - (*puTscNow + (uint64_t)SUPGetTscDelta(g_pSUPGlobalInfoPage)); /* undoing delta */
# else
        *poffRealTsc = u64Now - (*puTscNow + (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet)); /* undoing delta */
# endif
        *pfOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
        return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
    }

# ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
# endif
    *pfOffsettedTsc = false;
    *poffRealTsc = 0;
    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM, puDeadlineVersion, puTscNow));
}
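
/* Hypothetical usage note (not part of this file): a caller may cache the
 * absolute deadline derived from the return value together with the value
 * returned in *puDeadlineVersion, and later ask
 * TMVirtualSyncIsCurrentDeadlineVersion() whether that version is still
 * current before paying for a recomputation. */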

#endif /* IN_RING0 - at the moment */

/**
 * Read the current CPU timestamp counter.
 *
 * @returns The current CPU TSC value.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPUCC pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
        switch (pVM->tm.s.enmTSCMode)
        {
            case TMTSCMODE_REAL_TSC_OFFSET:
                u64 = SUPReadTsc();
                break;
            case TMTSCMODE_VIRT_TSC_EMULATED:
            case TMTSCMODE_DYNAMIC:
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
                break;
            case TMTSCMODE_NATIVE_API:
            {
                u64 = 0;
                int rcNem = NEMHCQueryCpuTick(pVCpu, &u64, NULL);
                AssertLogRelRCReturn(rcNem, SUPReadTsc());
                break;
            }
            default:
                AssertFailedBreakStmt(u64 = SUPReadTsc());
        }
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64; /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
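
/* Monotonicity example (illustrative numbers, not part of the original source):
 * if the freshly computed tick value is 1000 but the guest last saw 1024, the
 * function bumps u64TSCLastSeen to 1088 and returns that instead, so a guest
 * reading the TSC in a tight loop never sees it step backwards. */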


/**
 * Read the current CPU timestamp counter.
 *
 * @returns The current CPU TSC value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(uint64_t) TMCpuTickGet(PVMCPUCC pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, true /* fCheckTimers */);
}


/**
 * Read the current CPU timestamp counter without checking for expired timers.
 *
 * @returns The current CPU TSC value.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetNoCheck(PVMCPUCC pVCpu)
{
    return tmCpuTickGetInternal(pVCpu, false /* fCheckTimers */);
}


/**
 * Sets the current CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   u64Tick     The new timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSet(PVMCC pVM, PVMCPUCC pVCpu, uint64_t u64Tick)
{
    VMCPU_ASSERT_EMT(pVCpu);
    STAM_COUNTER_INC(&pVM->tm.s.StatTSCSet);

    /*
     * This is easier to do when the TSC is paused since resume will
     * do all the calculations for us.  Actually, we don't need to
     * call tmCpuTickPause here since we overwrite u64TSC anyway.
     */
    bool fTSCTicking = pVCpu->tm.s.fTSCTicking;
    pVCpu->tm.s.fTSCTicking = false;
    pVCpu->tm.s.u64TSC = u64Tick;
    pVCpu->tm.s.u64TSCLastSeen = u64Tick;
    if (fTSCTicking)
        tmCpuTickResume(pVM, pVCpu);
    /** @todo Try help synchronizing it better among the virtual CPUs? */

    return VINF_SUCCESS;
}

/**
 * Sets the last seen CPU timestamp counter.
 *
 * @returns VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   u64LastSeenTick     The last seen timestamp value.
 *
 * @thread  EMT which TSC is to be set.
 */
VMM_INT_DECL(int) TMCpuTickSetLastSeen(PVMCPUCC pVCpu, uint64_t u64LastSeenTick)
{
    VMCPU_ASSERT_EMT(pVCpu);

    LogFlow(("TMCpuTickSetLastSeen %RX64\n", u64LastSeenTick));
    /** @todo deal with wraparound! */
    if (pVCpu->tm.s.u64TSCLastSeen < u64LastSeenTick)
        pVCpu->tm.s.u64TSCLastSeen = u64LastSeenTick;
    return VINF_SUCCESS;
}

/**
 * Gets the last seen CPU timestamp counter of the guest.
 *
 * @returns the last seen TSC.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetLastSeen(PVMCPUCC pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);

    return pVCpu->tm.s.u64TSCLastSeen;
}


/**
 * Get the timestamp frequency.
 *
 * @returns Number of ticks per second.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(uint64_t) TMCpuTicksPerSecond(PVMCC pVM)
{
    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
    {
        PSUPGLOBALINFOPAGE const pGip = g_pSUPGlobalInfoPage;
        if (pGip && pGip->u32Mode != SUPGIPMODE_INVARIANT_TSC)
        {
#ifdef IN_RING3
            uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGip(pGip);
#elif defined(IN_RING0)
            uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(pGip, (uint32_t)RTMpCpuIdToSetIndex(RTMpCpuId()));
#else
            uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(pGip, VMMGetCpu(pVM)->iHostCpuSet);
#endif
            if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
                return cTSCTicksPerSecond;
        }
    }
    return pVM->tm.s.cTSCTicksPerSecond;
}


/**
 * Whether the TSC is ticking for the VCPU.
 *
 * @returns true if ticking, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) TMCpuTickIsTicking(PVMCPUCC pVCpu)
{
    return pVCpu->tm.s.fTSCTicking;
}