VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/time/timesupref.h@ 78402

Last change on this file since 78402 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 16.9 KB
Line 
1/* $Id: timesupref.h 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * IPRT - Time using SUPLib, the C Code Template.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
/**
 * The C reference implementation of the assembly routines.
 *
 * Calculate NanoTS using the information in the global information page (GIP)
 * which the support library (SUPLib) exports.
 *
 * This function guarantees that the returned timestamp is later (in time) than
 * any previous calls in the same thread.
 *
 * The code is instantiated in several variants controlled by the template
 * macros TMPL_MODE (ASYNC / SYNC_INVAR_WITH_DELTA / other sync modes),
 * TMPL_GET_CPU_METHOD (how the current CPU is identified) and
 * TMPL_READ_FENCE, as well as by the context it is built for
 * (IN_RING3 / IN_RING0 / IN_RC).
 *
 * @remark The way the ever increasing time guarantee is currently implemented means
 * that if you call this function at a frequency higher than 1GHz you're in for
 * trouble. We currently assume that no idiot will do that for real life purposes.
 *
 * @returns Nanosecond timestamp.
 * @param   pData   Pointer to the data structure holding the statistics
 *                  counters (cExpired, c1nsSteps, cBadPrev, cUpdateRaces),
 *                  the previous-timestamp pointer (pu64Prev) and the
 *                  pfnBad/pfnBadCpuIndex/pfnRediscover callbacks.
 */
RTDECL(uint64_t) rtTimeNanoTSInternalRef(PRTTIMENANOTSDATA pData)
{
#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA && defined(IN_RING3)
    /* Remembers which GIP CPU entry we last asked the support driver to
       recalibrate the TSC delta for, so a driver that cannot get the job
       done doesn't send us into an endless retry loop (see further down). */
    PSUPGIPCPU pGipCpuAttemptedTscRecalibration = NULL;
#endif
    AssertCompile(RT_IS_POWER_OF_TWO(RTCPUSET_MAX_CPUS));

    for (;;)
    {
#ifndef IN_RING3 /* This simplifies and improves everything. */
        RTCCUINTREG const uFlags = ASMIntDisableFlags();
#endif

        /*
         * Check that the GIP is sane and that the premises for this worker function
         * hasn't changed (CPU onlined with bad delta or missing features).
         */
        PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
        if (   RT_LIKELY(pGip)
            && RT_LIKELY(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC)
#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
            && RT_LIKELY(pGip->enmUseTscDelta >= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO)
#else
            && RT_LIKELY(pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
#endif
#if defined(IN_RING3) && TMPL_GET_CPU_METHOD != 0 && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID
            && RT_LIKELY(pGip->fGetGipCpu & TMPL_GET_CPU_METHOD)
#endif
           )
        {
            /*
             * Resolve pGipCpu if needed.  If the instruction is serializing, we
             * read the transaction id first if possible.
             */
#if TMPL_MODE == TMPL_MODE_ASYNC || TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
# if defined(IN_RING0)
            uint32_t const  iCpuSet  = RTMpCurSetIndex();
            uint16_t const  iGipCpu  = iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
                                     ? pGip->aiCpuFromCpuSetIdx[iCpuSet] : UINT16_MAX;
# elif defined(IN_RC)
            uint32_t const  iCpuSet  = VMMGetCpu(&g_VM)->iHostCpuSet;
            uint16_t const  iGipCpu  = iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
                                     ? pGip->aiCpuFromCpuSetIdx[iCpuSet] : UINT16_MAX;
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_APIC_ID
            /* ASMGetApicId is serializing, so snapshot the transaction id before it. */
#  if TMPL_MODE != TMPL_MODE_ASYNC
            uint32_t const  u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#  endif
            uint8_t  const  idApic   = ASMGetApicId();
            uint16_t const  iGipCpu  = pGip->aiCpuFromApicId[idApic];
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS \
    || TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL
#  if TMPL_MODE != TMPL_MODE_ASYNC
            uint32_t const  u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#  endif
            /* Only the TSC_AUX value is used here; the TSC itself is (re-)read below. */
            uint32_t        uAux;
            ASMReadTscWithAux(&uAux);
#  if TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
            uint16_t const  iCpuSet  = uAux & (RTCPUSET_MAX_CPUS - 1);
#  else
            /* TSC_AUX encodes the CPU group in the high byte (CH) and the
               group-relative member number in the low byte (CL); translate
               that to a CPU set index via the GIP's per-group tables. */
            uint16_t        iCpuSet = 0;
            uint16_t        offGipCpuGroup = pGip->aoffCpuGroup[(uAux >> 8) & UINT8_MAX];
            if (offGipCpuGroup < pGip->cPages * PAGE_SIZE)
            {
                PSUPGIPCPUGROUP pGipCpuGroup = (PSUPGIPCPUGROUP)((uintptr_t)pGip + offGipCpuGroup);
                if (   (uAux & UINT8_MAX) < pGipCpuGroup->cMaxMembers
                    && pGipCpuGroup->aiCpuSetIdxs[uAux & UINT8_MAX] != -1)
                    iCpuSet = pGipCpuGroup->aiCpuSetIdxs[uAux & UINT8_MAX];
            }
#  endif
            uint16_t const  iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
            /* The CPU set index is encoded in the IDT limit; 256 entries of
               16 (64-bit) or 8 (32-bit) bytes is the baseline subtracted out. */
            uint16_t const  cbLim    = ASMGetIdtrLimit();
            uint16_t const  iCpuSet  = (cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8)) & (RTCPUSET_MAX_CPUS - 1);
            uint16_t const  iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
# else
#  error "What?"
# endif
            if (RT_LIKELY(iGipCpu < pGip->cCpus))
            {
                PSUPGIPCPU pGipCpu = &pGip->aCPUs[iGipCpu];
#else
            {
#endif
                /*
                 * Get the transaction ID if necessary and we haven't already
                 * read it before a serializing instruction above.  We can skip
                 * this for ASYNC_TSC mode in ring-0 and raw-mode context since
                 * we disable interrupts.
                 */
#if TMPL_MODE == TMPL_MODE_ASYNC && defined(IN_RING3)
                uint32_t const u32TransactionId = pGipCpu->u32TransactionId;
                ASMCompilerBarrier();
                TMPL_READ_FENCE();
#elif TMPL_MODE != TMPL_MODE_ASYNC \
   && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID \
   && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS \
   && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL
                uint32_t const u32TransactionId = pGip->aCPUs[0].u32TransactionId;
                ASMCompilerBarrier();
                TMPL_READ_FENCE();
#endif

                /*
                 * Gather all the data we need.  The mess at the end is to make
                 * sure all loads are done before we recheck the transaction ID
                 * without triggering serializing twice.
                 */
                uint32_t u32NanoTSFactor0     = pGip->u32UpdateIntervalNS;
#if TMPL_MODE == TMPL_MODE_ASYNC
                uint32_t u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
                uint64_t u64NanoTS            = pGipCpu->u64NanoTS;
                uint64_t u64TSC               = pGipCpu->u64TSC;
#else
                uint32_t u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
                uint64_t u64NanoTS            = pGip->aCPUs[0].u64NanoTS;
                uint64_t u64TSC               = pGip->aCPUs[0].u64TSC;
# if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
                int64_t  i64TscDelta          = pGipCpu->i64TSCDelta;
# endif
#endif
                uint64_t u64PrevNanoTS = ASMAtomicUoReadU64(pData->pu64Prev);
#if TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS \
 || TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL
                ASMCompilerBarrier();
                uint32_t uAux2;
                uint64_t u64Delta = ASMReadTscWithAux(&uAux2); /* serializing */
#else
                uint64_t u64Delta = ASMReadTSC();
                ASMCompilerBarrier();
# if TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID /* getting APIC will serialize */ \
  && (defined(IN_RING3) || TMPL_MODE != TMPL_MODE_ASYNC)
                TMPL_READ_FENCE(); /* Expensive (~30 ticks).  Would like convincing argumentation that let us remove it. */
# endif
#endif

                /*
                 * Check that we didn't change CPU while reading; compare
                 * whatever CPU identity the chosen method produced above.
                 */
#if defined(IN_RING3) && ( TMPL_MODE == TMPL_MODE_ASYNC || TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA )
# if TMPL_GET_CPU_METHOD == SUPGIPGETCPU_APIC_ID
                if (RT_LIKELY(ASMGetApicId() == idApic))
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS \
    || TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL
                if (RT_LIKELY(uAux2 == uAux))
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                if (RT_LIKELY(ASMGetIdtrLimit() == cbLim))
# endif
#endif
                {
                    /*
                     * Check the transaction ID (see above for R0/RC + ASYNC).
                     * Seqlock style: the ID must be unchanged and even, i.e.
                     * no GIP update raced us and none is in progress.
                     */
#if defined(IN_RING3) || TMPL_MODE != TMPL_MODE_ASYNC
# if TMPL_MODE == TMPL_MODE_ASYNC
                    if (RT_LIKELY(pGipCpu->u32TransactionId == u32TransactionId && !(u32TransactionId & 1) ))
# else
                    if (RT_LIKELY(pGip->aCPUs[0].u32TransactionId == u32TransactionId && !(u32TransactionId & 1) ))
# endif
#endif
                    {

                        /*
                         * Apply the TSC delta.  If the delta is invalid and the
                         * execution allows it, try trigger delta recalibration.
                         * (INT64_MAX is the "unknown delta" marker here.)
                         */
#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA && defined(IN_RING3)
                        if (RT_LIKELY(   i64TscDelta != INT64_MAX
                                      || pGipCpu == pGipCpuAttemptedTscRecalibration))
#endif
                        {
#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
# ifndef IN_RING3
                            if (RT_LIKELY(i64TscDelta != INT64_MAX))
# endif
                                u64Delta -= i64TscDelta;
#endif

                            /*
                             * Bingo! We've got a consistent set of data.
                             */
#ifndef IN_RING3
                            ASMSetFlags(uFlags);
#endif

                            /*
                             * Calc NanoTS delta: TSC ticks since the last GIP
                             * update, capped to one update interval.
                             */
                            u64Delta -= u64TSC;
                            if (RT_LIKELY(u64Delta <= u32UpdateIntervalTSC))
                            { /* MSVC branch hint, probably pointless. */ }
                            else
                            {
                                /*
                                 * We've expired the interval, cap it.  If we're here for the 2nd
                                 * time without any GIP update in-between, the checks against
                                 * *pu64Prev below will force 1ns stepping.
                                 */
                                ASMAtomicIncU32(&pData->cExpired);
                                u64Delta = u32UpdateIntervalTSC;
                            }
                            /* u64Delta <= u32UpdateIntervalTSC at this point, so the
                               32-bit truncation below is lossless: 32x32->64 multiply,
                               then 64/32 divide converts TSC ticks to nanoseconds. */
#if !defined(_MSC_VER) || !defined(RT_ARCH_X86) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
                            u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
                            u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
#else
                            __asm
                            {
                                mov     eax, dword ptr [u64Delta]
                                mul     dword ptr [u32NanoTSFactor0]
                                div     dword ptr [u32UpdateIntervalTSC]
                                mov     dword ptr [u64Delta], eax
                                xor     edx, edx
                                mov     dword ptr [u64Delta + 4], edx
                            }
#endif

                            /*
                             * Calculate the time and compare it with the previously returned value.
                             */
                            u64NanoTS += u64Delta;
                            uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
                            if (RT_LIKELY(   u64DeltaPrev > 0
                                          && u64DeltaPrev < UINT64_C(86000000000000) /* 24h */))
                            { /* Frequent - less than 24h since last call. */ }
                            else if (RT_LIKELY(   (int64_t)u64DeltaPrev <= 0
                                               && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 >= 0))
                            {
                                /* Occasional - u64NanoTS is in the recent 'past' relative the previous call:
                                   enforce monotonicity by stepping 1ns past the previous value. */
                                ASMAtomicIncU32(&pData->c1nsSteps);
                                u64NanoTS = u64PrevNanoTS + 1;
                            }
                            else if (!u64PrevNanoTS)
                                /* We're resuming (see TMVirtualResume). */;
                            else
                            {
                                /* Something has gone bust, if negative offset it's real bad. */
                                ASMAtomicIncU32(&pData->cBadPrev);
                                pData->pfnBad(pData, u64NanoTS, u64DeltaPrev, u64PrevNanoTS);
                            }

                            /*
                             * Attempt updating the previous value, provided we're still ahead of it.
                             *
                             * There is no point in recalculating u64NanoTS because we got preempted or if
                             * we raced somebody while the GIP was updated, since these are events
                             * that might occur at any point in the return path as well.
                             */
                            if (RT_LIKELY(ASMAtomicCmpXchgU64(pData->pu64Prev, u64NanoTS, u64PrevNanoTS)))
                                return u64NanoTS;

                            /* CAS lost a race; retry a bounded number of times, but only
                               while our value is still ahead of the published one. */
                            ASMAtomicIncU32(&pData->cUpdateRaces);
                            for (int cTries = 25; cTries > 0; cTries--)
                            {
                                u64PrevNanoTS = ASMAtomicReadU64(pData->pu64Prev);
                                if (u64PrevNanoTS >= u64NanoTS)
                                    break;
                                if (ASMAtomicCmpXchgU64(pData->pu64Prev, u64NanoTS, u64PrevNanoTS))
                                    break;
                                ASMNopPause();
                            }
                            return u64NanoTS;
                        }

#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA && defined(IN_RING3)
                        /*
                         * Call into the support driver to try make it recalculate the delta.  We
                         * remember which GIP CPU structure we're probably working on so we won't
                         * end up in a loop if the driver for some reason cannot get the job done.
                         */
                        else /* else is unnecessary, but helps checking the preprocessor spaghetti. */
                        {
                            pGipCpuAttemptedTscRecalibration = pGipCpu;
                            uint64_t u64TscTmp;
                            uint16_t idApicUpdate;
                            int rc = SUPR3ReadTsc(&u64TscTmp, &idApicUpdate);
                            if (RT_SUCCESS(rc) && idApicUpdate < RT_ELEMENTS(pGip->aiCpuFromApicId))
                            {
                                /* The driver told us which APIC ID the update ran on;
                                   record that CPU entry instead of our guess. */
                                uint32_t iUpdateGipCpu = pGip->aiCpuFromApicId[idApicUpdate];
                                if (iUpdateGipCpu < pGip->cCpus)
                                    pGipCpuAttemptedTscRecalibration = &pGip->aCPUs[iUpdateGipCpu];
                            }
                        }
#endif
                    }
                }

                /*
                 * No joy, must try again.
                 */
#ifdef _MSC_VER
# pragma warning(disable: 4702)
#endif
#ifndef IN_RING3
                ASMSetFlags(uFlags);
#endif
                ASMNopPause();
                continue;
            }

#if TMPL_MODE == TMPL_MODE_ASYNC || TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
            /*
             * We've got a bad CPU or APIC index of some kind.
             */
            else /* else is unnecessary, but helps checking the preprocessor spaghetti. */
            {
# ifndef IN_RING3
                ASMSetFlags(uFlags);
# endif
                /* UINT16_MAX-1 marks the argument the current method couldn't supply. */
# if defined(IN_RING0) || defined(IN_RC) || TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID
                return pData->pfnBadCpuIndex(pData, UINT16_MAX-1, iCpuSet, iGipCpu);
# else
                return pData->pfnBadCpuIndex(pData, idApic, UINT16_MAX-1, iGipCpu);
# endif
            }
#endif
        }

        /*
         * Something changed in the GIP config or it was unmapped, figure out
         * the right worker function to use now.
         */
#ifndef IN_RING3
        ASMSetFlags(uFlags);
#endif
        return pData->pfnRediscover(pData);
    }
}
370
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette