Changeset 23726 in vbox
- Timestamp: Oct 13, 2009 1:53:33 PM (15 years ago)
- svn:sync-xref-src-repo-rev: 53446
- Location: trunk/src/VBox/HostDrivers/Support
- Files: 3 edited
trunk/src/VBox/HostDrivers/Support/SUPDrv.c
(r23725 → r23726)

   # include <iprt/path.h>
   #endif
-  /* VBox/x86.h not compatible with the Linux kernel sources */
-  #ifdef RT_OS_LINUX
-  # define X86_CPUID_VENDOR_AMD_EBX 0x68747541
-  # define X86_CPUID_VENDOR_AMD_ECX 0x444d4163
-  # define X86_CPUID_VENDOR_AMD_EDX 0x69746e65
-  #else
-  # include <VBox/x86.h>
-  #endif
   
   /*
…
    *   Defined Constants And Macros                                               *
    *******************************************************************************/
-  /* from x86.h - clashes with linux thus this duplication */
-  #undef  X86_CR0_PG
-  #define X86_CR0_PG                          RT_BIT(31)
-  #undef  X86_CR0_PE
-  #define X86_CR0_PE                          RT_BIT(0)
-  #undef  X86_CPUID_AMD_FEATURE_EDX_NX
-  #define X86_CPUID_AMD_FEATURE_EDX_NX        RT_BIT(20)
-  #undef  MSR_K6_EFER
-  #define MSR_K6_EFER                         0xc0000080
-  #undef  MSR_K6_EFER_NXE
-  #define MSR_K6_EFER_NXE                     RT_BIT(11)
-  #undef  MSR_K6_EFER_LMA
-  #define MSR_K6_EFER_LMA                     RT_BIT(10)
-  #undef  X86_CR4_PGE
-  #define X86_CR4_PGE                         RT_BIT(7)
-  #undef  X86_CR4_PAE
-  #define X86_CR4_PAE                         RT_BIT(5)
-  #undef  X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
-  #define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE RT_BIT(29)
-
-
   /** The frequency by which we recalculate the u32UpdateHz and
    * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
   #define GIP_UPDATEHZ_RECALC_FREQ            0x800
-
-  /**
-   * Validates a session pointer.
-   *
-   * @returns true/false accordingly.
-   * @param   pSession    The session.
-   */
-  #define SUP_IS_SESSION_VALID(pSession) \
-      (   VALID_PTR(pSession) \
-       && pSession->u32Cookie == BIRD_INV)
   
   /** @def VBOX_SVN_REV
…
   # define VBOX_SVN_REV 0
   #endif
+
   
   /*******************************************************************************
…
   static int  supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
   static int  supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq);
-  static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt);
   static int  supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
   static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
…
   
   
-  SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
-  {
-      /*
-       * Input validation.
-       */
-      AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
-      AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
-
-      return supR0QueryVTCaps(pfCaps);
-  }
-
-
   /**
    * Adds a memory object to the session.
…
   
   /**
-   * Gets the paging mode of the current CPU.
-   *
-   * @returns Paging mode, SUPPAGEINGMODE_INVALID on error.
-   */
-  SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
-  {
-      [... function body removed here; it is added verbatim to SUPDrvAgnostic.c, see below ...]
-  }
-
-
-  /**
-   * Enables or disabled hardware virtualization extensions using native OS APIs.
-   *
-   * @returns VBox status code.
-   * @retval  VINF_SUCCESS on success.
-   * @retval  VERR_NOT_SUPPORTED if not supported by the native OS.
-   *
-   * @param   fEnable     Whether to enable or disable.
-   */
-  SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
-  {
-  #ifdef RT_OS_DARWIN
-      return supdrvOSEnableVTx(fEnable);
-  #else
-      return VERR_NOT_SUPPORTED;
-  #endif
-  }
-
-
-  /**
    * Creates the GIP.
    *
…
   
   /**
-   * Determin the GIP TSC mode.
-   *
-   * @returns The most suitable TSC mode.
-   * @param   pDevExt     Pointer to the device instance data.
-   */
-  static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt)
-  {
-      [... function body removed here; it is added verbatim to SUPDrvAgnostic.c, see below ...]
-  }
-
-
-  /**
    * Invalidates the GIP data upon termination.
    *
trunk/src/VBox/HostDrivers/Support/SUPDrvAgnostic.c
(r23725 → r23726)

   
   
-  
   /**
-   * Internal worker for SUPR0QueryVTCaps.
-   *
-   * @returns See QUPR0QueryVTCaps.
-   * @param   pfCaps      See QUPR0QueryVTCaps
-   */
-  int VBOXCALL supR0QueryVTCaps(uint32_t *pfCaps)
+   * Gets the paging mode of the current CPU.
+   *
+   * @returns Paging mode, SUPPAGEINGMODE_INVALID on error.
+   */
+  SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
   {
+      SUPPAGINGMODE enmMode;
+
+      RTR0UINTREG cr0 = ASMGetCR0();
+      if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
+          enmMode = SUPPAGINGMODE_INVALID;
+      else
+      {
+          RTR0UINTREG cr4 = ASMGetCR4();
+          uint32_t fNXEPlusLMA = 0;
+          if (cr4 & X86_CR4_PAE)
+          {
+              uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
+              if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
+              {
+                  uint64_t efer = ASMRdMsr(MSR_K6_EFER);
+                  if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
+                      fNXEPlusLMA |= RT_BIT(0);
+                  if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
+                      fNXEPlusLMA |= RT_BIT(1);
+              }
+          }
+
+          switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
+          {
+              case 0:
+                  enmMode = SUPPAGINGMODE_32_BIT;
+                  break;
+
+              case X86_CR4_PGE:
+                  enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
+                  break;
+
+              case X86_CR4_PAE:
+                  enmMode = SUPPAGINGMODE_PAE;
+                  break;
+
+              case X86_CR4_PAE | RT_BIT(0):
+                  enmMode = SUPPAGINGMODE_PAE_NX;
+                  break;
+
+              case X86_CR4_PAE | X86_CR4_PGE:
+                  enmMode = SUPPAGINGMODE_PAE_GLOBAL;
+                  break;
+
+              case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
+                  enmMode = SUPPAGINGMODE_PAE_GLOBAL;
+                  break;
+
+              case RT_BIT(1) | X86_CR4_PAE:
+                  enmMode = SUPPAGINGMODE_AMD64;
+                  break;
+
+              case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
+                  enmMode = SUPPAGINGMODE_AMD64_NX;
+                  break;
+
+              case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
+                  enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
+                  break;
+
+              case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
+                  enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
+                  break;
+
+              default:
+                  AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
+                  enmMode = SUPPAGINGMODE_INVALID;
+                  break;
+          }
+      }
+      return enmMode;
+  }
+
+
+  /**
+   * Enables or disabled hardware virtualization extensions using native OS APIs.
+   *
+   * @returns VBox status code.
+   * @retval  VINF_SUCCESS on success.
+   * @retval  VERR_NOT_SUPPORTED if not supported by the native OS.
+   *
+   * @param   fEnable     Whether to enable or disable.
+   */
+  SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
+  {
+  #ifdef RT_OS_DARWIN
+      return supdrvOSEnableVTx(fEnable);
+  #else
+      return VERR_NOT_SUPPORTED;
+  #endif
+  }
+
+
+
+  SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
+  {
+      /*
+       * Input validation.
+       */
+      AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
+      AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
+
       *pfCaps = 0;
…
   }
   
+
+  /**
+   * Determin the GIP TSC mode.
+   *
+   * @returns The most suitable TSC mode.
+   * @param   pDevExt     Pointer to the device instance data.
+   */
+  SUPGIPMODE VBOXCALL supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt)
+  {
+      /*
+       * On SMP we're faced with two problems:
+       *     (1) There might be a skew between the CPU, so that cpu0
+       *         returns a TSC that is sligtly different from cpu1.
+       *     (2) Power management (and other things) may cause the TSC
+       *         to run at a non-constant speed, and cause the speed
+       *         to be different on the cpus. This will result in (1).
+       *
+       * So, on SMP systems we'll have to select the ASYNC update method
+       * if there are symphoms of these problems.
+       */
+      if (RTMpGetCount() > 1)
+      {
+          uint32_t uEAX, uEBX, uECX, uEDX;
+          uint64_t u64DiffCoresIgnored;
+
+          /* Permit the user and/or the OS specfic bits to force async mode. */
+          if (supdrvOSGetForcedAsyncTscMode(pDevExt))
+              return SUPGIPMODE_ASYNC_TSC;
+
+          /* Try check for current differences between the cpus. */
+          if (supdrvDetermineAsyncTsc(&u64DiffCoresIgnored))
+              return SUPGIPMODE_ASYNC_TSC;
+
+          /*
+           * If the CPU supports power management and is an AMD one we
+           * won't trust it unless it has the TscInvariant bit is set.
+           */
+          /* Check for "AuthenticAMD" */
+          ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
+          if (    uEAX >= 1
+              &&  uEBX == X86_CPUID_VENDOR_AMD_EBX
+              &&  uECX == X86_CPUID_VENDOR_AMD_ECX
+              &&  uEDX == X86_CPUID_VENDOR_AMD_EDX)
+          {
+              /* Check for APM support and that TscInvariant is cleared. */
+              ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
+              if (uEAX >= 0x80000007)
+              {
+                  ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
+                  if (    !(uEDX & RT_BIT(8))/* TscInvariant */
+                      &&  (uEDX & 0x3e))  /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
+                      return SUPGIPMODE_ASYNC_TSC;
+              }
+          }
+      }
+      return SUPGIPMODE_SYNC_TSC;
+  }
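For orientation, here is a minimal sketch of how ring-0 code might consume the three SUPR0 APIs that now live in SUPDrvAgnostic.c. The caller name, the use of SUPR0Printf for output, and the assumption that pSession is a regular SUPDrv session handed to a ring-0 entry point are illustrative only and not part of this changeset.

#include <VBox/sup.h>
#include <VBox/err.h>

/* Hypothetical ring-0 caller; pSession is assumed to be a valid session
   passed in by e.g. a VMMR0 entry point. */
static int exampleProbeHostFeatures(PSUPDRVSESSION pSession)
{
    /* SUPR0GetPagingMode needs no session; it only inspects CR0/CR4/EFER. */
    SUPPAGINGMODE enmMode = SUPR0GetPagingMode();
    if (enmMode == SUPPAGINGMODE_INVALID)
        return VERR_INTERNAL_ERROR;

    /* SUPR0QueryVTCaps validates the session before reporting VT-x/AMD-V capabilities. */
    uint32_t fCaps = 0;
    int rc = SUPR0QueryVTCaps(pSession, &fCaps);
    if (RT_SUCCESS(rc))
        SUPR0Printf("paging mode %d, VT caps %#x\n", enmMode, fCaps);

    /* SUPR0EnableVTx is only backed by native APIs on Darwin; elsewhere it
       returns VERR_NOT_SUPPORTED and the caller has to manage VT-x itself. */
    if (SUPR0EnableVTx(true /* fEnable */) == VERR_NOT_SUPPORTED)
        SUPR0Printf("VT-x must be managed by the caller on this host\n");

    return rc;
}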
trunk/src/VBox/HostDrivers/Support/SUPDrvInternal.h
(r23725 → r23726)

   
   
+  /**
+   * Validates a session pointer.
+   *
+   * @returns true/false accordingly.
+   * @param   pSession    The session.
+   */
+  #define SUP_IS_SESSION_VALID(pSession) \
+      (   VALID_PTR(pSession) \
+       && pSession->u32Cookie == BIRD_INV)
+
+
   /*******************************************************************************
   *   Structures and Typedefs                                                    *
…
   
   /* SUPDrvAgnostic.c */
-  int VBOXCALL supR0QueryVTCaps(uint32_t *pfCaps);
+  SUPGIPMODE VBOXCALL supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt);
   
   RT_C_DECLS_END
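With the supdrvGipDeterminTscMode declaration exported through SUPDrvInternal.h, the GIP setup code in SUPDrv.c can keep calling the now OS-agnostic probe without a local prototype. A heavily simplified, hypothetical excerpt; the helper name and the surrounding GIP initialization are assumptions, not code from this changeset:

#include "SUPDrvInternal.h"

/* Hypothetical, simplified GIP-init step: pick the TSC mode via the
   OS-agnostic probe and record it in the global info page. */
static void exampleGipPickTscMode(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip)
{
    SUPGIPMODE enmMode = supdrvGipDeterminTscMode(pDevExt);
    pGip->u32Mode = enmMode;    /* SUPGIPMODE_SYNC_TSC or SUPGIPMODE_ASYNC_TSC */
}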