VirtualBox

source: vbox/trunk/src/VBox/Runtime/timesup.cpp@3393

Last change on this file was in revision 3393, checked in by vboxsync, 17 years ago:

Removed PCSUPGLOBALINFOPAGE and PCSUPGIPCPU to avoid const/volatile confusion.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 7.3 KB
/* $Id: timesup.cpp 3393 2007-07-03 15:36:47Z vboxsync $ */
/** @file
 * innotek Portable Runtime - Time using SUPLib.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP RTLOGGROUP_TIME
#include <iprt/time.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <VBox/sup.h>
#include "internal/time.h"


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifndef IN_GUEST
/** The previously returned nano TS.
 * This handles TSC drift on SMP systems and expired intervals.
 * The valid range is u64NanoTS to u64NanoTS + 1000000000 (i.e. 1 sec).
 */
static uint64_t volatile    s_u64PrevNanoTS = 0;
/** Number of times we've had to resort to 1ns walking. */
static uint32_t volatile    g_c1nsSteps = 0;
#endif


/**
 * Calculate NanoTS using the information in the global information page (GIP)
 * which the support library (SUPLib) exports.
 *
 * This function guarantees that the returned timestamp is later (in time) than
 * that of any previous call in the same thread.
 *
 * @returns Nanosecond timestamp.
 *
 * @remark  The way the ever increasing time guarantee is currently implemented means
 *          that calling this function at a frequency higher than 1GHz will get you into
 *          trouble. We currently assume nobody will do that in practice.
 */
DECLINLINE(uint64_t) rtTimeNanoTSInternal(void)
{
#ifndef IN_GUEST
    uint64_t    u64Delta;
    uint32_t    u32NanoTSFactor0;
    uint64_t    u64TSC;
    uint64_t    u64NanoTS;
    uint32_t    u32UpdateIntervalTSC;
    uint32_t    u32TransactionId;
    PSUPGLOBALINFOPAGE pGip;

    /*
     * Read the data.
     */
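    /* The snapshot below is taken lockless, seqlock style: the transaction id is
       re-checked after ASMReadTSC(), and if it has changed or is odd (an update
       was in progress) the loop simply starts over. */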
    for (;;)
    {
        pGip = g_pSUPGlobalInfoPage;
#ifdef IN_RING3
        if (!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC)
            return RTTimeSystemNanoTS();
#endif

        if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
        {
            u32TransactionId = pGip->aCPUs[0].u32TransactionId;
#ifdef __L4__
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
            u64NanoTS = pGip->aCPUs[0].u64NanoTS;
            u64TSC = pGip->aCPUs[0].u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            if (RT_UNLIKELY(    pGip->aCPUs[0].u32TransactionId != u32TransactionId
                            ||  (u32TransactionId & 1)))
                continue;
        }
        else
        {
            /* SUPGIPMODE_ASYNC_TSC */
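            /* In async TSC mode every CPU has its own GIP entry, indexed by APIC id.
               The APIC id is read again after ASMReadTSC() further down so that a
               thread rescheduled onto another CPU mid-read retries instead of mixing
               values from two different CPUs. */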
            PSUPGIPCPU pGipCpu;

            uint8_t u8ApicId = ASMGetApicId();
            if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
                pGipCpu = &pGip->aCPUs[u8ApicId];
            else
            {
                AssertMsgFailed(("%x\n", u8ApicId));
                pGipCpu = &pGip->aCPUs[0];
            }

            u32TransactionId = pGipCpu->u32TransactionId;
#ifdef __L4__
            Assert((u32TransactionId & 1) == 0);
#endif
            u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
            u64NanoTS = pGipCpu->u64NanoTS;
            u64TSC = pGipCpu->u64TSC;
            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
            u64Delta = ASMReadTSC();
            if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
                continue;
            if (RT_UNLIKELY(    pGipCpu->u32TransactionId != u32TransactionId
                            ||  (u32TransactionId & 1)))
                continue;
        }
        break;
    }

    /*
     * Calc NanoTS delta.
     */
    u64Delta -= u64TSC;
    if (u64Delta > u32UpdateIntervalTSC)
    {
        /*
         * The interval has expired, so cap the delta. The result will then
         * advance at 1ns per call until we get valid TSC deltas again
         * (s_u64PrevNanoTS takes care of this).
         */
        u64Delta = u32UpdateIntervalTSC;
    }
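    /* Convert the TSC delta to nanoseconds. The GIP provides the update interval
       length both in nanoseconds (u32NanoTSFactor0 = u32UpdateIntervalNS) and in TSC
       ticks (u32UpdateIntervalTSC), so the conversion is
       u64Delta = u64Delta * u32NanoTSFactor0 / u32UpdateIntervalTSC. */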
#if !defined(_MSC_VER) || defined(__AMD64__) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
    u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
    u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
#else
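    /* MSC fallback doing the same 32-bit multiply and divide by hand: MUL leaves the
       64-bit product in edx:eax, DIV divides edx:eax by u32UpdateIntervalTSC leaving
       the quotient in eax, which is stored back as a 64-bit value with a zero high dword. */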
    __asm
    {
        mov     eax, dword ptr [u64Delta]
        mul     dword ptr [u32NanoTSFactor0]
        div     dword ptr [u32UpdateIntervalTSC]
        mov     dword ptr [u64Delta], eax
        xor     edx, edx
        mov     dword ptr [u64Delta + 4], edx
    }
#endif

    /*
     * The most frequent case is that the previous delta is either too old
     * or lower than our timestamp (both relative to u64NanoTS).
     */
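    /* u64DeltaPrev is the previously returned timestamp expressed as an offset from
       the current GIP base (u64NanoTS). If it falls outside the 1s validity window
       described for s_u64PrevNanoTS, or if our own offset is the larger one (we're
       later), the new timestamp can be published with a single compare-exchange. */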
    uint64_t u64;
    uint64_t u64PrevNanoTS = ASMAtomicReadU64(&s_u64PrevNanoTS);
    uint64_t u64DeltaPrev = u64PrevNanoTS - u64NanoTS;
    if (    u64DeltaPrev > 1000000000 /* (invalid prev) */
        ||  (uint32_t)u64DeltaPrev < (uint32_t)u64Delta) /* (we're later) */
    {
        u64 = u64Delta + u64NanoTS;
        if (ASMAtomicCmpXchgU64(&s_u64PrevNanoTS, u64, u64PrevNanoTS))
            return u64;
    }
    else
    {
        /*
         * Our timestamp is lower than the last returned timestamp;
         * advance 1ns beyond that.
         */
        u64Delta = u64DeltaPrev + 1;
        u64 = u64Delta + u64NanoTS;
        ASMAtomicIncU32(&g_c1nsSteps);
    }

    /*
     * Attempt updating the previous value.
     *      u64 == timestamp, u64Delta == delta relative to u64NanoTS.
     */
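    /* The compare-exchange below is retried a bounded number of times. If another
       thread has meanwhile pushed s_u64PrevNanoTS beyond our delta there is nothing
       left to update and the loop stops early; after 100 failed attempts it gives up. */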
    for (int cTries = 100;;)
    {
        u64PrevNanoTS = ASMAtomicReadU64(&s_u64PrevNanoTS);
        u64DeltaPrev = u64PrevNanoTS - u64NanoTS;
        if (u64DeltaPrev > u64Delta)
            break;
        if (ASMAtomicCmpXchgU64(&s_u64PrevNanoTS, u64, u64PrevNanoTS))
            break;
        if (--cTries <= 0)
        {
            AssertBreakpoint(); /* (recursion) */
            break;
        }
    }

    return u64;
#else  /* IN_GUEST */
    return RTTimeSystemNanoTS();
#endif /* IN_GUEST */
}


/**
 * Gets the current nanosecond timestamp.
 *
 * @returns nanosecond timestamp.
 */
RTDECL(uint64_t) RTTimeNanoTS(void)
{
    return rtTimeNanoTSInternal();
}
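/* Usage sketch (hypothetical caller, not part of this file) - timing an interval:
 *
 *     uint64_t const u64Start   = RTTimeNanoTS();
 *     doSomeWork();                                  // placeholder for the code being timed
 *     uint64_t const cNsElapsed = RTTimeNanoTS() - u64Start;
 */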


/**
 * Gets the current millisecond timestamp.
 *
 * @returns millisecond timestamp.
 */
RTDECL(uint64_t) RTTimeMilliTS(void)
{
    return rtTimeNanoTSInternal() / 1000000;
}


#ifndef IN_GUEST
/**
 * Debugging aid for the time API.
 *
 * @returns the number of 1ns steps which have been applied by rtTimeNanoTSInternal().
 */
RTDECL(uint32_t) RTTime1nsSteps(void)
{
    return g_c1nsSteps;
}
#endif