VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/time/timesupref.h@ 81071

Last change on this file since 81071 was 81071, checked in by vboxsync, 5 years ago

SUPDrv,IPRT,VMM: Support host APIC ID above 256 in GIP. (Only tested on 4 core intel.) bugref:9501

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 18.1 KB
/* $Id: timesupref.h 81071 2019-09-30 10:17:28Z vboxsync $ */
/** @file
 * IPRT - Time using SUPLib, the C Code Template.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


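/* Note: This is a code template rather than an ordinary header: the includer
   is expected to define TMPL_MODE, TMPL_GET_CPU_METHOD and TMPL_READ_FENCE
   (and presumably to remap rtTimeNanoTSInternalRef to the name of the variant
   being instantiated) before including this file. */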
/**
 * The C reference implementation of the assembly routines.
 *
 * Calculates the NanoTS using the information in the global information page
 * (GIP) which the support library (SUPLib) exports.
 *
 * This function guarantees that the returned timestamp is later (in time) than
 * any value returned by a previous call in the same thread.
 *
 * @remark  The way the ever-increasing time guarantee is currently implemented
 *          means that calling this function at a frequency higher than 1 GHz
 *          will get you into trouble. We currently assume nobody will do that
 *          for real-life purposes.
 *
 * @returns Nanosecond timestamp.
 * @param   pData       Pointer to the data structure.
 */
RTDECL(uint64_t) rtTimeNanoTSInternalRef(PRTTIMENANOTSDATA pData)
{
#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA && defined(IN_RING3)
    PSUPGIPCPU pGipCpuAttemptedTscRecalibration = NULL;
#endif
    AssertCompile(RT_IS_POWER_OF_TWO(RTCPUSET_MAX_CPUS));
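    /* Being a power of two is what lets the RDTSCP and IDTR paths below
       extract the CPU set index with a simple mask:
       uAux & (RTCPUSET_MAX_CPUS - 1). */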

    for (;;)
    {
#ifndef IN_RING3 /* This simplifies and improves everything. */
        RTCCUINTREG const uFlags = ASMIntDisableFlags();
#endif

        /*
         * Check that the GIP is sane and that the premises for this worker
         * function haven't changed (a CPU onlined with a bad delta, or missing
         * features).
         */
        PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
        if (   RT_LIKELY(pGip)
            && RT_LIKELY(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC)
#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
            && RT_LIKELY(pGip->enmUseTscDelta >= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO)
#else
            && RT_LIKELY(pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
#endif
#if defined(IN_RING3) && TMPL_GET_CPU_METHOD != 0
            && RT_LIKELY(pGip->fGetGipCpu & TMPL_GET_CPU_METHOD)
#endif
           )
        {
            /*
             * Resolve pGipCpu if needed. If the instruction is serializing, we
             * read the transaction id first if possible.
             */
#if TMPL_MODE == TMPL_MODE_ASYNC || TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
# if defined(IN_RING0)
            uint32_t const iCpuSet = RTMpCurSetIndex();
            uint16_t const iGipCpu = iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
                                   ? pGip->aiCpuFromCpuSetIdx[iCpuSet] : UINT16_MAX;
# elif defined(IN_RC)
            uint32_t const iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
            uint16_t const iGipCpu = iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)
                                   ? pGip->aiCpuFromCpuSetIdx[iCpuSet] : UINT16_MAX;
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_APIC_ID
#  if TMPL_MODE != TMPL_MODE_ASYNC
            uint32_t const u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#  endif
            uint8_t const idApic = ASMGetApicId();
            uint16_t const iGipCpu = pGip->aiCpuFromApicId[idApic];
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_APIC_ID_EXT_0B
#  if TMPL_MODE != TMPL_MODE_ASYNC
            uint32_t const u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#  endif
            uint32_t const idApic = ASMGetApicIdExt0B();
            uint16_t const iGipCpu = pGip->aiCpuFromApicId[idApic];
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_APIC_ID_EXT_8000001E
#  if TMPL_MODE != TMPL_MODE_ASYNC
            uint32_t const u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#  endif
            uint32_t const idApic = ASMGetApicIdExt8000001E();
            uint16_t const iGipCpu = pGip->aiCpuFromApicId[idApic];
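            /* Unlike the legacy 8-bit APIC ID above, the _EXT_0B and
               _EXT_8000001E variants return the full 32-bit x2APIC/extended
               APIC ID, which is what makes host APIC IDs above 256 work
               (the r81071 change noted in the commit message). */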
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS \
    || TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL
#  if TMPL_MODE != TMPL_MODE_ASYNC
            uint32_t const u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#  endif
            uint32_t uAux;
            ASMReadTscWithAux(&uAux);
#  if TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
            uint16_t const iCpuSet = uAux & (RTCPUSET_MAX_CPUS - 1);
#  else
            uint16_t iCpuSet = 0;
            uint16_t offGipCpuGroup = pGip->aoffCpuGroup[(uAux >> 8) & UINT8_MAX];
            if (offGipCpuGroup < pGip->cPages * PAGE_SIZE)
            {
                PSUPGIPCPUGROUP pGipCpuGroup = (PSUPGIPCPUGROUP)((uintptr_t)pGip + offGipCpuGroup);
                if (   (uAux & UINT8_MAX) < pGipCpuGroup->cMaxMembers
                    && pGipCpuGroup->aiCpuSetIdxs[uAux & UINT8_MAX] != -1)
                    iCpuSet = pGipCpuGroup->aiCpuSetIdxs[uAux & UINT8_MAX];
            }
#  endif
            uint16_t const iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
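            /* A full 256-gate IDT is 256 * 16 bytes in long mode (256 * 8 in
               32-bit mode); ring-0 presumably encodes the CPU set index into
               the IDTR limit on top of that base size, so subtracting the
               base size recovers the index. */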
            uint16_t const cbLim = ASMGetIdtrLimit();
            uint16_t const iCpuSet = (cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8)) & (RTCPUSET_MAX_CPUS - 1);
            uint16_t const iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
# else
#  error "What?"
# endif
            if (RT_LIKELY(iGipCpu < pGip->cCpus))
            {
                PSUPGIPCPU pGipCpu = &pGip->aCPUs[iGipCpu];
#else
            {
#endif
                /*
                 * Get the transaction ID if necessary and if we haven't already
                 * read it before a serializing instruction above. We can skip
                 * this for ASYNC_TSC mode in ring-0 and raw-mode context since
                 * we disable interrupts there.
                 */
#if TMPL_MODE == TMPL_MODE_ASYNC && defined(IN_RING3)
                uint32_t const u32TransactionId = pGipCpu->u32TransactionId;
                ASMCompilerBarrier();
                TMPL_READ_FENCE();
#elif TMPL_MODE != TMPL_MODE_ASYNC \
   && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID \
   && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID_EXT_0B \
   && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID_EXT_8000001E \
   && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS \
   && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL
                uint32_t const u32TransactionId = pGip->aCPUs[0].u32TransactionId;
                ASMCompilerBarrier();
                TMPL_READ_FENCE();
#endif
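                /* The transaction ID read above works like a seqlock sequence
                   count: the GIP updater makes it odd while an update is in
                   progress and even again afterwards, so the recheck further
                   down rejects both torn reads (value changed) and in-progress
                   updates (odd value). */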

                /*
                 * Gather all the data we need. The mess at the end is to make
                 * sure all loads are done before we recheck the transaction ID
                 * without triggering serialization twice.
                 */
                uint32_t u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
#if TMPL_MODE == TMPL_MODE_ASYNC
                uint32_t u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
                uint64_t u64NanoTS = pGipCpu->u64NanoTS;
                uint64_t u64TSC = pGipCpu->u64TSC;
#else
                uint32_t u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
                uint64_t u64NanoTS = pGip->aCPUs[0].u64NanoTS;
                uint64_t u64TSC = pGip->aCPUs[0].u64TSC;
# if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
                int64_t i64TscDelta = pGipCpu->i64TSCDelta;
# endif
#endif
                uint64_t u64PrevNanoTS = ASMAtomicUoReadU64(pData->pu64Prev);
#if TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS \
 || TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL
                ASMCompilerBarrier();
                uint32_t uAux2;
                uint64_t u64Delta = ASMReadTscWithAux(&uAux2); /* serializing */
#else
                uint64_t u64Delta = ASMReadTSC();
                ASMCompilerBarrier();
# if TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID /* getting the APIC ID serializes */ \
  && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID_EXT_0B \
  && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID_EXT_8000001E \
  && (defined(IN_RING3) || TMPL_MODE != TMPL_MODE_ASYNC)
                TMPL_READ_FENCE(); /* Expensive (~30 ticks). We'd welcome a convincing argument that would let us remove it. */
# endif
#endif

                /*
                 * Check that we didn't change CPU.
                 */
#if defined(IN_RING3) && (TMPL_MODE == TMPL_MODE_ASYNC || TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA)
# if TMPL_GET_CPU_METHOD == SUPGIPGETCPU_APIC_ID
                if (RT_LIKELY(ASMGetApicId() == idApic))
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_APIC_ID_EXT_0B
                if (RT_LIKELY(ASMGetApicIdExt0B() == idApic))
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_APIC_ID_EXT_8000001E
                if (RT_LIKELY(ASMGetApicIdExt8000001E() == idApic))
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS \
    || TMPL_GET_CPU_METHOD == SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL
                if (RT_LIKELY(uAux2 == uAux))
# elif TMPL_GET_CPU_METHOD == SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                if (RT_LIKELY(ASMGetIdtrLimit() == cbLim))
# endif
#endif
                {
                    /*
                     * Check the transaction ID (see above for R0/RC + ASYNC).
                     */
#if defined(IN_RING3) || TMPL_MODE != TMPL_MODE_ASYNC
# if TMPL_MODE == TMPL_MODE_ASYNC
                    if (RT_LIKELY(pGipCpu->u32TransactionId == u32TransactionId && !(u32TransactionId & 1)))
# else
                    if (RT_LIKELY(pGip->aCPUs[0].u32TransactionId == u32TransactionId && !(u32TransactionId & 1)))
# endif
#endif
                    {
                        /*
                         * Apply the TSC delta. If the delta is invalid and the
                         * execution context allows it, try to trigger delta
                         * recalibration.
                         */
#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA && defined(IN_RING3)
                        if (RT_LIKELY(   i64TscDelta != INT64_MAX
                                      || pGipCpu == pGipCpuAttemptedTscRecalibration))
#endif
                        {
#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
# ifndef IN_RING3
                            if (RT_LIKELY(i64TscDelta != INT64_MAX))
# endif
                                u64Delta -= i64TscDelta;
#endif

                            /*
                             * Bingo! We've got a consistent set of data.
                             */
#ifndef IN_RING3
                            ASMSetFlags(uFlags);
#endif

                            /*
                             * Calc the NanoTS delta.
                             */
                            u64Delta -= u64TSC;
                            if (RT_LIKELY(u64Delta <= u32UpdateIntervalTSC))
                            { /* MSVC branch hint, probably pointless. */ }
                            else
                            {
                                /*
                                 * We've expired the interval, cap it. If we're here for the 2nd
                                 * time without any GIP update in-between, the checks against
                                 * *pu64Prev below will force 1ns stepping.
                                 */
                                ASMAtomicIncU32(&pData->cExpired);
                                u64Delta = u32UpdateIntervalTSC;
                            }
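                            /* Convert the capped TSC delta to nanoseconds:
                                 u64Delta = u64Delta * u32NanoTSFactor0 / u32UpdateIntervalTSC
                               The cap above ensures the 32-bit quotient cannot
                               overflow, since u64Delta <= u32UpdateIntervalTSC. */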
#if !defined(_MSC_VER) || !defined(RT_ARCH_X86) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
                            u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
                            u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
#else
                            __asm
                            {
                                mov     eax, dword ptr [u64Delta]
                                mul     dword ptr [u32NanoTSFactor0]
                                div     dword ptr [u32UpdateIntervalTSC]
                                mov     dword ptr [u64Delta], eax
                                xor     edx, edx
                                mov     dword ptr [u64Delta + 4], edx
                            }
#endif

                            /*
                             * Calculate the time and compare it with the previously returned value.
                             */
                            u64NanoTS += u64Delta;
                            uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
                            if (RT_LIKELY(   u64DeltaPrev > 0
                                          && u64DeltaPrev < UINT64_C(86000000000000) /* 24h */))
                            { /* Frequent - less than 24h since the last call. */ }
                            else if (RT_LIKELY(   (int64_t)u64DeltaPrev <= 0
                                               && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 >= 0))
                            {
                                /* Occasional - u64NanoTS is in the recent 'past' relative to the previous call. */
                                ASMAtomicIncU32(&pData->c1nsSteps);
                                u64NanoTS = u64PrevNanoTS + 1;
                            }
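                            /* Example: when the GIP hasn't been updated since the
                               previous call and the interval was already capped
                               above, u64NanoTS can come out at or slightly before
                               *pu64Prev; stepping to u64PrevNanoTS + 1 is what
                               keeps the returned values strictly increasing. */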
                            else if (!u64PrevNanoTS)
                                /* We're resuming (see TMVirtualResume). */;
                            else
                            {
                                /* Something has gone bust; if the offset is negative, it's really bad. */
                                ASMAtomicIncU32(&pData->cBadPrev);
                                pData->pfnBad(pData, u64NanoTS, u64DeltaPrev, u64PrevNanoTS);
                            }

                            /*
                             * Attempt updating the previous value, provided we're still ahead of it.
                             *
                             * There is no point in recalculating u64NanoTS if we got preempted or
                             * raced somebody while the GIP was updated, since these are events that
                             * might occur at any point in the return path as well.
                             */
                            if (RT_LIKELY(ASMAtomicCmpXchgU64(pData->pu64Prev, u64NanoTS, u64PrevNanoTS)))
                                return u64NanoTS;

                            ASMAtomicIncU32(&pData->cUpdateRaces);
                            for (int cTries = 25; cTries > 0; cTries--)
                            {
                                u64PrevNanoTS = ASMAtomicReadU64(pData->pu64Prev);
                                if (u64PrevNanoTS >= u64NanoTS)
                                    break;
                                if (ASMAtomicCmpXchgU64(pData->pu64Prev, u64NanoTS, u64PrevNanoTS))
                                    break;
                                ASMNopPause();
                            }
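                            /* Whether we published u64NanoTS, lost to a later
                               value, or ran out of tries, *pu64Prev only ever
                               moves forward here, which is all the monotonicity
                               guarantee requires. */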
                            return u64NanoTS;
                        }

#if TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA && defined(IN_RING3)
                        /*
                         * Call into the support driver to try to make it recalculate the delta. We
                         * remember which GIP CPU structure we're probably working on so we won't
                         * end up in a loop if the driver for some reason cannot get the job done.
                         */
                        else /* The else is unnecessary, but it helps checking the preprocessor spaghetti. */
                        {
                            pGipCpuAttemptedTscRecalibration = pGipCpu;
                            uint64_t u64TscTmp;
                            uint16_t idApicUpdate;
                            int rc = SUPR3ReadTsc(&u64TscTmp, &idApicUpdate);
                            if (RT_SUCCESS(rc) && idApicUpdate < RT_ELEMENTS(pGip->aiCpuFromApicId))
                            {
                                uint32_t iUpdateGipCpu = pGip->aiCpuFromApicId[idApicUpdate];
                                if (iUpdateGipCpu < pGip->cCpus)
                                    pGipCpuAttemptedTscRecalibration = &pGip->aCPUs[iUpdateGipCpu];
                            }
                        }
#endif
                    }
                }

                /*
                 * No joy, we must try again.
                 */
#ifdef _MSC_VER
# pragma warning(disable: 4702)
#endif
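                /* 4702 is MSC's 'unreachable code' warning; presumably, in
                   instantiations where the checks above compile out, the block
                   always returns and the retry code below becomes unreachable. */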
#ifndef IN_RING3
                ASMSetFlags(uFlags);
#endif
                ASMNopPause();
                continue;
            }

#if TMPL_MODE == TMPL_MODE_ASYNC || TMPL_MODE == TMPL_MODE_SYNC_INVAR_WITH_DELTA
            /*
             * We've got a bad CPU or APIC index of some kind.
             */
            else /* The else is unnecessary, but it helps checking the preprocessor spaghetti. */
            {
# ifndef IN_RING3
                ASMSetFlags(uFlags);
# endif
# if defined(IN_RING0) \
  || defined(IN_RC) \
  || (   TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID \
      && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID_EXT_0B /*?*/ \
      && TMPL_GET_CPU_METHOD != SUPGIPGETCPU_APIC_ID_EXT_8000001E /*?*/)
                return pData->pfnBadCpuIndex(pData, UINT16_MAX-1, iCpuSet, iGipCpu);
# else
                return pData->pfnBadCpuIndex(pData, idApic, UINT16_MAX-1, iGipCpu);
# endif
            }
#endif
        }

        /*
         * Something changed in the GIP config or it was unmapped, figure out
         * the right worker function to use now.
         */
#ifndef IN_RING3
        ASMSetFlags(uFlags);
#endif
        return pData->pfnRediscover(pData);
    }
}