Changeset 9607 in vbox for trunk/src/VBox
Timestamp: Jun 11, 2008 1:00:23 PM
File: 1 edited
Legend:
  (no prefix)  Unmodified
  +            Added
  -            Removed
  …            Elided lines
trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c (r9587 → r9607)
…
+/**
+ * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
+ *
+ * @param   idCpu       Ignored.
+ * @param   pvUser1     Where to put the TSC.
+ * @param   pvUser2     Ignored.
+ */
+static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    *(uint64_t *)pvUser1 = ASMReadTSC();
+}
+
+
+/**
+ * Determine if Async GIP mode is required because of TSC drift.
+ *
+ * When using the default/normal timer code it is essential that the time stamp counter
+ * (TSC) never runs backwards, that is, a read operation to the counter should return
+ * a bigger value than any previous read operation. This is guaranteed by the latest
+ * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
+ * case we have to choose the asynchronous timer mode.
+ *
+ * @param   pu64DiffCores   Where to store the determined difference between different cores.
+ * @return  false if the time stamp counters appear to be synchronous, true otherwise.
+ */
+bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *pu64DiffCores)
+{
+    static uint64_t s_aTsc[8][RTCPUSET_MAX_CPUS];
+    uint64_t        u64Diff, u64DiffMin, u64DiffMax, u64TscLast;
+    int             iSlot, iCpu, cCpus;
+    bool            fBackwards;
+    RTCPUSET        OnlineCpus;
+    int             rc;
+
+    *pu64DiffCores = 1;
+
+    RTMpGetOnlineSet(&OnlineCpus);
+    cCpus = RTCpuSetCount(&OnlineCpus);
+    if (cCpus < 2)
+        return false;
+    Assert(cCpus <= RT_ELEMENTS(s_aTsc[0]));
+
+    /*
+     * Collect data from the online CPUs.
+     */
+    for (iSlot = 0; iSlot < RT_ELEMENTS(s_aTsc); iSlot++)
+    {
+        RTCPUID iCpuSet = 0;
+        for (iCpu = 0; iCpu < cCpus; iCpu++)
+        {
+            while (!RTCpuSetIsMemberByIndex(&OnlineCpus, iCpuSet))
+            {
+                iCpuSet++; /* skip offline CPU */
+                dprintf2(("skipping %d\n", iCpuSet));
+            }
+            rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpuSet), supdrvDetermineAsyncTscWorker, &s_aTsc[iSlot][iCpu], NULL);
+            if (rc == VERR_NOT_SUPPORTED)
+                return false;
+            iCpuSet++;
+        }
+    }
+
+    /*
+     * Check that the TSC reads are strictly ascending.
+     */
+    /** @todo This doesn't work if a CPU is offline. Make these loops ignore
+     *        offline CPUs. */
+    fBackwards = false;
+    u64DiffMin = (uint64_t)~0;
+    u64TscLast = 0;
+    for (iSlot = 0; iSlot < RT_ELEMENTS(s_aTsc); iSlot++)
+    {
+        uint64_t u64Tsc0 = s_aTsc[iSlot][0];
+        u64DiffMax = 0;
+        if (u64Tsc0 <= u64TscLast)
+        {
+            dprintf2(("iSlot=%d u64Tsc0=%#x%#08x u64TscLast=%#x%#08x\n", iSlot,
+                      (long)(u64Tsc0 >> 32), (long)u64Tsc0, (long)(u64TscLast >> 32), (long)u64TscLast));
+            fBackwards = true;
+        }
+        u64TscLast = u64Tsc0;
+        for (iCpu = 1; iCpu < cCpus; iCpu++)
+        {
+            uint64_t u64TscN = s_aTsc[iSlot][iCpu];
+            if (u64TscN <= u64TscLast)
+            {
+                dprintf2(("iSlot=%d iCpu=%d u64TscN=%#x%#08x u64TscLast=%#x%#08x\n", iSlot, iCpu,
+                          (long)(u64TscN >> 32), (long)u64TscN, (long)(u64TscLast >> 32), (long)u64TscLast));
+                fBackwards = true;
+            }
+            u64TscLast = u64TscN;
+
+            u64Diff = u64TscN > u64Tsc0 ? u64TscN - u64Tsc0 : u64Tsc0 - u64TscN;
+            if (u64DiffMax < u64Diff)
+                u64DiffMax = u64Diff;
+        }
+        if (u64DiffMin > u64DiffMax)
+            u64DiffMin = u64DiffMax;
+    }
+    /* informational */
+    *pu64DiffCores = u64DiffMin;
+
+    return fBackwards;
+}
+
+
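The invariant the new helper tests is that TSC reads taken one after another, hopping across the online CPUs, must be strictly ascending. As a rough user-mode illustration of that same strictly-ascending test, here is a minimal sketch (not VBox code; it assumes an x86/x86-64 GCC or Clang toolchain, where __rdtsc() comes from <x86intrin.h>, and it only samples the current CPU, so it omits the RTMpOnSpecific cross-CPU hop that makes the kernel version meaningful):

    /* Strictly-ascending TSC check, single CPU only (sketch, not VBox code). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <x86intrin.h>

    static bool tscWentBackwards(unsigned cSamples)
    {
        uint64_t uPrev = __rdtsc();
        while (cSamples-- > 0)
        {
            uint64_t uNow = __rdtsc();
            if (uNow <= uPrev) /* same "strictly ascending" test as supdrvDetermineAsyncTsc */
                return true;
            uPrev = uNow;
        }
        return false;
    }

    int main(void)
    {
        /* On a single CPU this should virtually always print "ok"; skew only
           shows up when consecutive reads come from different CPUs, which is
           why the driver bounces the worker callback across the online set. */
        printf(tscWentBackwards(1000000) ? "TSC went backwards\n" : "ok\n");
        return 0;
    }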
 /**
  * Determine the GIP TSC mode.
  *
 …
 {
     /*
-     * The problem here is that AMD processors with power management features
-     * may easily end up with different TSCs because the CPUs or even cores
-     * on the same physical chip run at different frequencies to save power.
-     *
-     * It is rumoured that this will be corrected with Barcelona and it's
-     * expected that this will be indicated by the TscInvariant bit in
-     * cpuid(0x80000007). So, the "difficult" bit here is to correctly
-     * identify the older CPUs which don't do different frequency and
-     * can be relied upon to have somewhat uniform TSC between the cpus.
+     * On SMP we're faced with two problems:
+     *    (1) There might be a skew between the CPUs, so that cpu0
+     *        returns a TSC that is slightly different from cpu1.
+     *    (2) Power management (and other things) may cause the TSC
+     *        to run at a non-constant speed, and cause the speed
+     *        to be different on the cpus. This will result in (1).
+     *
+     * So, on SMP systems we'll have to select the ASYNC update method
+     * if there are symptoms of these problems.
      */
     if (RTMpGetCount() > 1)
     {
         uint32_t uEAX, uEBX, uECX, uEDX;
+        uint64_t u64DiffCoresIgnored;

         /* Permit the user and/or the OS specific bits to force async mode. */
 …
             return SUPGIPMODE_ASYNC_TSC;

-        /** @todo perform supdrvDetermineAsyncTsc here! */
-
+        /* Check for current differences between the cpus. */
+        if (supdrvDetermineAsyncTsc(&u64DiffCoresIgnored))
+            return SUPGIPMODE_ASYNC_TSC;
+
+        /*
+         * If the CPU supports power management and is an AMD one, we
+         * won't trust it unless the TscInvariant bit is set.
+         */
         /* Check for "AuthenticAMD" */
         ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
 …
             && uEDX == X86_CPUID_VENDOR_AMD_EDX)
         {
-            /** @todo This is probably wrong. TscInvariant doesn't seem to mean that RdTSC returns the
-             *        same value everywhere, but rather that the rate is supposed to be the same. */
             /* Check for APM support and that TscInvariant is cleared. */
             ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
 …

-/**
- * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
- *
- * @param   idCpu       Ignored.
- * @param   pvUser1     Where to put the TSC.
- * @param   pvUser2     Ignored.
- */
-static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
-{
-    *(uint64_t *)pvUser1 = ASMReadTSC();
-}
-
-
-/**
- * Determine if Async GIP mode is required because of TSC drift.
- …
- */
-bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *pu64DiffCores)
-{
- …
-    return fBackwards;
-}
-
-

(The removed block is the old copy of supdrvDetermineAsyncTscWorker and supdrvDetermineAsyncTsc shown added further up: the two functions were moved, not rewritten. The only textual change besides the move is the @todo, which previously read "this doesn't work if a CPU is offline for some reason.")

 #ifndef DEBUG /** @todo change #ifndef DEBUG -> #ifdef LOG_ENABLED */
 /**
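The TscInvariant bit that both the removed @todo and the new AMD branch revolve around is CPUID leaf 0x80000007, EDX bit 8, documented by AMD and Intel as the invariant-TSC flag. For reference, a hedged user-mode sketch of that probe, assuming a GCC/Clang toolchain (hasInvariantTsc is a hypothetical helper name; __get_cpuid() comes from <cpuid.h>; the driver itself uses ASMCpuId and also checks APM support first):

    /* Invariant-TSC probe: CPUID 0x80000007, EDX bit 8 (sketch, not VBox code). */
    #include <cpuid.h>
    #include <stdbool.h>

    static bool hasInvariantTsc(void)
    {
        unsigned uEAX, uEBX, uECX, uEDX;
        /* __get_cpuid() returns 0 when the requested leaf is unsupported. */
        if (!__get_cpuid(0x80000007, &uEAX, &uEBX, &uECX, &uEDX))
            return false;
        return (uEDX & (1u << 8)) != 0; /* TscInvariant */
    }

A CPU where this bit is clear may change its TSC rate across power-state transitions, which is exactly the condition under which the changeset falls back to SUPGIPMODE_ASYNC_TSC.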