Changeset 7476 in vbox
Timestamp:                   Mar 17, 2008 3:07:51 PM
svn:sync-xref-src-repo-rev:  28916
Location:                    trunk/src/VBox/VMM
Files:                       2 edited
Legend:
  +  added line
  -  removed line
     (no prefix) unmodified context line
  …  unchanged lines omitted
trunk/src/VBox/VMM/HWACCM.cpp (r7471 → r7476)

     int rc;
 
+    if (    !pVM->hwaccm.s.vmx.fSupported
+        &&  !pVM->hwaccm.s.svm.fSupported)
+    {
+        LogRel(("HWACCM: No VMX or SVM CPU extension found. Reason %Vrc\n", pVM->hwaccm.s.lLastError));
+        LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%VX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
+        return VINF_SUCCESS;
+    }
+
     /*
      * Note that we have a global setting for VT-x/AMD-V usage. VMX root mode changes the way the CPU operates. Our 64 bits switcher will trap
…
      *
      */
-
     /* If we enabled or disabled hwaccm mode, then it can't be changed until all the VMs are shutdown. */
     rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_ENABLE, (pVM->hwaccm.s.fAllowed) ? HWACCMSTATE_ENABLED : HWACCMSTATE_DISABLED, NULL);
…
         }
     }
-    else
-    if (pVM->hwaccm.s.fHWACCMR0Init)
-    {
-        LogRel(("HWACCM: No VMX or SVM CPU extension found. Reason %Vrc\n", pVM->hwaccm.s.lLastError));
-        LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%VX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
-    }
 
     return VINF_SUCCESS;
 }
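The two HWACCM.cpp hunks move the "no VT-x or AMD-V" logging from the tail of the ring-3 init routine to its top, so the routine now returns before asking ring-0 to change the global VT-x/AMD-V state when the ring-0 probe found nothing usable. A minimal sketch of the resulting control flow (condensed, with a hypothetical wrapper name; not the literal function from the tree):

    /* Sketch only: the real routine does considerably more after the guard. */
    static int hwaccmR3InitFinalizeSketch(PVM pVM)      /* hypothetical name */
    {
        if (    !pVM->hwaccm.s.vmx.fSupported
            &&  !pVM->hwaccm.s.svm.fSupported)
        {
            /* lLastError and feature_ctrl were filled in by the ring-0 probe (HWACCMR0.cpp below). */
            LogRel(("HWACCM: No VMX or SVM CPU extension found. Reason %Vrc\n", pVM->hwaccm.s.lLastError));
            LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%VX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
            return VINF_SUCCESS;    /* not an error: the VM simply runs without hardware assistance */
        }

        /* Only when at least one extension is usable do we ask ring-0 to switch the
         * global VT-x/AMD-V state (it cannot change again until all VMs shut down). */
        return SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_ENABLE,
                              pVM->hwaccm.s.fAllowed ? HWACCMSTATE_ENABLED : HWACCMSTATE_DISABLED, NULL);
    }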
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp (r7471 → r7476)

 static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
 static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);
 
 /*******************************************************************************
…
     memset(pvScatchPage, 0, PAGE_SIZE);
 
-    /* Assume success */
-    rc = VINF_SUCCESS;
-
 #ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */
 
…
     uint32_t u32FeaturesEDX;
     uint32_t u32VendorEBX, u32VendorECX, u32VendorEDX;
-
-    /* Make sure we don't get rescheduled to another cpu during this probe. */
-    RTCCUINTREG fFlags = ASMIntDisableFlags();
 
     ASMCpuId(0, &u32Dummy, &u32VendorEBX, &u32VendorECX, &u32VendorEDX);
…
         )
     {
-        HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
-        /*
-         * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
-         * Once the lock bit is set, this MSR can no longer be modified.
-         */
-        /** @todo need to check this for each cpu/core in the system!!!) */
-        if (!(HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
+        int     aRc[RTCPUSET_MAX_CPUS];
+        RTCPUID idCpu = 0;
+
+        HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+
+        /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
+        memset(aRc, 0, sizeof(aRc));
+        HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
+
+        /* Check the return code of all invocations. */
+        if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
+            HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
+
+        AssertMsg(VBOX_SUCCESS(HWACCMR0Globals.lLastError), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, HWACCMR0Globals.lLastError));
+
+        if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
         {
-            /* MSR is not yet locked; we can change it ourselves here */
-            HWACCMR0Globals.vmx.msr.feature_ctrl |= (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK);
-            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl);
-        }
-
-        if (    (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
-             == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
-        {
-            HWACCMR0Globals.vmx.fSupported = true;
-            HWACCMR0Globals.vmx.msr.vmx_basic_info = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
-            HWACCMR0Globals.vmx.msr.vmx_pin_ctls = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
-            HWACCMR0Globals.vmx.msr.vmx_proc_ctls = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
-            HWACCMR0Globals.vmx.msr.vmx_exit = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
-            HWACCMR0Globals.vmx.msr.vmx_entry = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
-            HWACCMR0Globals.vmx.msr.vmx_misc = ASMRdMsr(MSR_IA32_VMX_MISC);
-            HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
-            HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
-            HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
-            HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
-            HWACCMR0Globals.vmx.msr.vmx_vmcs_enum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
-
-            /*
-             * Check CR4.VMXE
-             */
-            HWACCMR0Globals.vmx.hostCR4 = ASMGetCR4();
-            if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
+            /* Reread in case we've changed it. */
+            HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+
+            if (    (HWACCMR0Globals.vmx.msr.feature_ctrl & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
+                 == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
             {
-                /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
-                 * try to execute the VMX instructions...
-                 */
-                ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
-            }
-
-            /* Set revision dword at the beginning of the structure. */
-            *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
+                HWACCMR0Globals.vmx.fSupported = true;
+                HWACCMR0Globals.vmx.msr.vmx_basic_info = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
+                HWACCMR0Globals.vmx.msr.vmx_pin_ctls = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
+                HWACCMR0Globals.vmx.msr.vmx_proc_ctls = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
+                HWACCMR0Globals.vmx.msr.vmx_exit = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
+                HWACCMR0Globals.vmx.msr.vmx_entry = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
+                HWACCMR0Globals.vmx.msr.vmx_misc = ASMRdMsr(MSR_IA32_VMX_MISC);
+                HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
+                HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
+                HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
+                HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
+                HWACCMR0Globals.vmx.msr.vmx_vmcs_enum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
+
+                /* Make sure we don't get rescheduled to another cpu during this probe. */
+                RTCCUINTREG fFlags = ASMIntDisableFlags();
+
+                /*
+                 * Check CR4.VMXE
+                 */
+                HWACCMR0Globals.vmx.hostCR4 = ASMGetCR4();
+                if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
+                {
+                    /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
+                     * try to execute the VMX instructions...
+                     */
+                    ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
+                }
+
+                /* Set revision dword at the beginning of the structure. */
+                *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
 
 #if HC_ARCH_BITS == 64
-            /* Enter VMX Root Mode */
-            rc = VMXEnable(pScatchPagePhys);
-            if (VBOX_FAILURE(rc))
-            {
-                /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
-                 * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
-                 * (b) turning off paging causes a #GP (unavoidable when switching from long to 32 bits mode)
-                 *
-                 * They should fix their code, but until they do we simply refuse to run.
-                 */
-                rc = VERR_VMX_IN_VMX_ROOT_MODE;
+                /* Enter VMX Root Mode */
+                rc = VMXEnable(pScatchPagePhys);
+                if (VBOX_FAILURE(rc))
+                {
+                    /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
+                     * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
+                     * (b) turning off paging causes a #GP (unavoidable when switching from long to 32 bits mode)
+                     *
+                     * They should fix their code, but until they do we simply refuse to run.
+                     */
+                    HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
+                    HWACCMR0Globals.vmx.fSupported = false;
+                }
+                else
+                    VMXDisable();
+#endif
+                /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
+                ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
+
+                ASMSetFlags(fFlags);
             }
             else
-                VMXDisable();
-#endif
-            /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
-            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
+            {
+                AssertFailed(); /* can't hit this case anymore */
+                HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
+            }
         }
-        else
-            HWACCMR0Globals.lLastError = VERR_VMX_ILLEGAL_FEATURE_CONTROL_MSR;
     }
     else
…
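The VT-x hunks above retire the old single-CPU check of MSR_IA32_FEATURE_CONTROL (note the removed @todo) in favour of probing every CPU: RTMpOnAll runs the new HWACCMR0InitCPU worker on each online CPU, each worker records its own status in a per-CPU slot, and hwaccmr0CheckCpuRcArray reduces the array to the first failure. A condensed sketch of that pattern, using the names introduced by this changeset (a fragment relying on the surrounding declarations in HWACCMR0Init; not the literal code):

    int     aRc[RTCPUSET_MAX_CPUS];     /* one status slot per possible CPU */
    RTCPUID idCpu = 0;

    memset(aRc, 0, sizeof(aRc));
    /* Runs HWACCMR0InitCPU on every online CPU; pvUser1 = CPUID vendor EBX, pvUser2 = the array. */
    HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);

    /* Each worker wrote aRc[idCpu]; pick out the first online CPU that failed. */
    if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
        HWACCMR0Globals.lLastError = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);

    if (VBOX_SUCCESS(HWACCMR0Globals.lLastError))
    {
        /* Every CPU has VMXON+LOCK set in the feature control MSR (either the BIOS
         * did it, or the worker set it while the MSR was still unlocked), so the
         * VMX capability MSRs can be read and VT-x reported as supported. */
    }
    else
    {
        /* idCpu names the first CPU whose BIOS left VT-x disabled or locked off. */
    }

The AMD-V hunks that follow apply the same pattern, only with the status kept in the local rc instead of lLastError.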
         )
     {
-        uint64_t val;
-
-        /* Check if SVM is disabled */
-        val = ASMRdMsr(MSR_K8_VM_CR);
-        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
+        int     aRc[RTCPUSET_MAX_CPUS];
+        RTCPUID idCpu = 0;
+
+        /* We need to check if AMD-V has been properly initialized on all CPUs. Some BIOSes might do a poor job. */
+        memset(aRc, 0, sizeof(aRc));
+        rc = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
+        AssertRC(rc);
+
+        /* Check the return code of all invocations. */
+        if (VBOX_SUCCESS(rc))
+            rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
+
+        AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0InitCPU failed for cpu %d with rc=%d\n", idCpu, rc));
+
+        if (VBOX_SUCCESS(rc))
         {
-            /* Turn on SVM in the EFER MSR. */
-            val = ASMRdMsr(MSR_K6_EFER);
-            if (!(val & MSR_K6_EFER_SVME))
-            {
-                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
-            }
-            /* Paranoia. */
-            val = ASMRdMsr(MSR_K6_EFER);
-            if (val & MSR_K6_EFER_SVME)
-            {
-                /* Query AMD features. */
-                ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &u32Dummy);
-
-                HWACCMR0Globals.svm.fSupported = true;
-            }
-            else
-            {
-                HWACCMR0Globals.lLastError = VERR_SVM_ILLEGAL_EFER_MSR;
-                AssertFailed();
-            }
+            /* Query AMD features. */
+            ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &u32Dummy);
+
+            HWACCMR0Globals.svm.fSupported = true;
         }
         else
-            HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
+            HWACCMR0Globals.lLastError = rc;
     }
     else
…
     else
         HWACCMR0Globals.lLastError = VERR_HWACCM_UNKNOWN_CPU;
-
-    ASMSetFlags(fFlags);
 }
 else
…
 
     RTR0MemObjFree(pScatchMemObj, false);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Checks the error code array filled in for each cpu in the system.
+ *
+ * @returns VBox status code.
+ * @param   paRc        Error code array
+ * @param   cErrorCodes Array size
+ * @param   pidCpu      Value of the first cpu that set an error (out)
+ */
+static int hwaccmr0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu)
+{
+    int rc = VINF_SUCCESS;
+
+    Assert(cErrorCodes == RTCPUSET_MAX_CPUS);
+
+    for (unsigned i=0;i<cErrorCodes;i++)
+    {
+        if (RTMpIsCpuOnline(i))
+        {
+            if (VBOX_FAILURE(paRc[i]))
+            {
+                rc = paRc[i];
+                *pidCpu = i;
+                break;
+            }
+        }
+    }
     return rc;
 }
…
 }
 
+
+/**
+ * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
+ * is to be called on the target cpus.
+ *
+ * @param   idCpu       The identifier for the CPU the function is called on.
+ * @param   pvUser1     The 1st user argument.
+ * @param   pvUser2     The 2nd user argument.
+ */
+static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    unsigned u32VendorEBX = (unsigned)pvUser1;
+    int     *paRc = (int *)pvUser2;
+    uint64_t val;
+
+    if (u32VendorEBX == X86_CPUID_VENDOR_INTEL_EBX)
+    {
+        val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+
+        /*
+         * Both the LOCK and VMXON bit must be set; otherwise VMXON will generate a #GP.
+         * Once the lock bit is set, this MSR can no longer be modified.
+         */
+        if (!(val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK)))
+        {
+            /* MSR is not yet locked; we can change it ourselves here */
+            ASMWrMsr(MSR_IA32_FEATURE_CONTROL, HWACCMR0Globals.vmx.msr.feature_ctrl | MSR_IA32_FEATURE_CONTROL_VMXON | MSR_IA32_FEATURE_CONTROL_LOCK);
+            val = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
+        }
+        if (    (val & (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
+             == (MSR_IA32_FEATURE_CONTROL_VMXON|MSR_IA32_FEATURE_CONTROL_LOCK))
+            paRc[idCpu] = VINF_SUCCESS;
+        else
+            paRc[idCpu] = VERR_VMX_MSR_LOCKED_OR_DISABLED;
+    }
+    else
+    if (u32VendorEBX == X86_CPUID_VENDOR_AMD_EBX)
+    {
+        /* Check if SVM is disabled */
+        val = ASMRdMsr(MSR_K8_VM_CR);
+        if (!(val & MSR_K8_VM_CR_SVM_DISABLE))
+        {
+            /* Turn on SVM in the EFER MSR. */
+            val = ASMRdMsr(MSR_K6_EFER);
+            if (!(val & MSR_K6_EFER_SVME))
+                ASMWrMsr(MSR_K6_EFER, val | MSR_K6_EFER_SVME);
+
+            /* Paranoia. */
+            val = ASMRdMsr(MSR_K6_EFER);
+            if (val & MSR_K6_EFER_SVME)
+                paRc[idCpu] = VINF_SUCCESS;
+            else
+                paRc[idCpu] = VERR_SVM_ILLEGAL_EFER_MSR;
+        }
+        else
+            paRc[idCpu] = HWACCMR0Globals.lLastError = VERR_SVM_DISABLED;
+    }
+    else
+        AssertFailed(); /* can't happen */
+    return;
+}
+
+
 /**
  * Sets up HWACCM on all cpus.
…
     if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
     {
-        int aRc[RTCPUSET_MAX_CPUS];
+        int     aRc[RTCPUSET_MAX_CPUS];
+        RTCPUID idCpu = 0;
+
         memset(aRc, 0, sizeof(aRc));
 
…
         /* First time, so initialize each cpu/core */
         int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);
+
+        /* Check the return code of all invocations. */
         if (VBOX_SUCCESS(rc))
-        {
-            for (unsigned i=0;i<RT_ELEMENTS(aRc);i++)
-            {
-                if (RTMpIsCpuOnline(i))
-                {
-                    AssertMsg(VBOX_SUCCESS(aRc[i]), ("HWACCMR0EnableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
-                    if (VBOX_FAILURE(aRc[i]))
-                    {
-                        rc = aRc[i];
-                        break;
-                    }
-                }
-            }
-        }
+            rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
+
+        AssertMsg(VBOX_SUCCESS(rc), ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
         return rc;
     }
…
         }
         else
-        {
-            Assert(pVM->hwaccm.s.svm.fSupported);
+        if (pVM->hwaccm.s.svm.fSupported)
+        {
             paRc[idCpu] = SVMR0EnableCpu(idCpu, pVM, pvPageCpu, pPageCpuPhys);
             if (VBOX_SUCCESS(paRc[idCpu]))
…
     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
 
-    /* Should never happen */
     if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
-    {
-        AssertFailed();
         return;
-    }
 
     pvPageCpu = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
…
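The remaining ring-0 hunks above reuse the same reduction helper when hardware virtualization is switched on for every CPU at VM startup: the open-coded loop over aRc[] in HWACCMR0EnableAllCpus is replaced by a call to hwaccmr0CheckCpuRcArray. A short usage sketch (illustrative only; it assumes, as the code itself does, that RTCPUID values can index an array of RTCPUSET_MAX_CPUS entries):

    int     aRc[RTCPUSET_MAX_CPUS];
    RTCPUID idCpu = 0;

    memset(aRc, 0, sizeof(aRc));
    int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);   /* each CPU fills aRc[idCpu] */
    if (VBOX_SUCCESS(rc))
        rc = hwaccmr0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
    if (VBOX_FAILURE(rc))
    {
        /* idCpu identifies the first online CPU that failed; rc is its status code. */
    }

The final hunk below simplifies the per-VM HWACCMR0Init so it copies the global capability data unconditionally instead of branching on which extension was detected.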
     LogComFlow(("HWACCMR0Init: %p\n", pVM));
 
-    pVM->hwaccm.s.vmx.fSupported = false;
-    pVM->hwaccm.s.svm.fSupported = false;
-
-    if (HWACCMR0Globals.vmx.fSupported)
-    {
-        pVM->hwaccm.s.vmx.fSupported = true;
-        pVM->hwaccm.s.vmx.hostCR4 = HWACCMR0Globals.vmx.hostCR4;
-        pVM->hwaccm.s.vmx.msr.feature_ctrl = HWACCMR0Globals.vmx.msr.feature_ctrl;
-        pVM->hwaccm.s.vmx.msr.vmx_basic_info = HWACCMR0Globals.vmx.msr.vmx_basic_info;
-        pVM->hwaccm.s.vmx.msr.vmx_pin_ctls = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
-        pVM->hwaccm.s.vmx.msr.vmx_proc_ctls = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
-        pVM->hwaccm.s.vmx.msr.vmx_exit = HWACCMR0Globals.vmx.msr.vmx_exit;
-        pVM->hwaccm.s.vmx.msr.vmx_entry = HWACCMR0Globals.vmx.msr.vmx_entry;
-        pVM->hwaccm.s.vmx.msr.vmx_misc = HWACCMR0Globals.vmx.msr.vmx_misc;
-        pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
-        pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
-        pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
-        pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
-        pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
-
-    }
-    else
-    if (HWACCMR0Globals.svm.fSupported)
-    {
-        pVM->hwaccm.s.svm.fSupported = true;
-        pVM->hwaccm.s.svm.u32Rev = HWACCMR0Globals.svm.u32Rev;
-        pVM->hwaccm.s.svm.u32MaxASID = HWACCMR0Globals.svm.u32MaxASID;
-    }
-
-    pVM->hwaccm.s.lLastError = HWACCMR0Globals.lLastError;
+    pVM->hwaccm.s.vmx.fSupported = HWACCMR0Globals.vmx.fSupported;
+    pVM->hwaccm.s.svm.fSupported = HWACCMR0Globals.svm.fSupported;
+
+    pVM->hwaccm.s.vmx.msr.feature_ctrl = HWACCMR0Globals.vmx.msr.feature_ctrl;
+    pVM->hwaccm.s.vmx.hostCR4 = HWACCMR0Globals.vmx.hostCR4;
+    pVM->hwaccm.s.vmx.msr.vmx_basic_info = HWACCMR0Globals.vmx.msr.vmx_basic_info;
+    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls = HWACCMR0Globals.vmx.msr.vmx_pin_ctls;
+    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls = HWACCMR0Globals.vmx.msr.vmx_proc_ctls;
+    pVM->hwaccm.s.vmx.msr.vmx_exit = HWACCMR0Globals.vmx.msr.vmx_exit;
+    pVM->hwaccm.s.vmx.msr.vmx_entry = HWACCMR0Globals.vmx.msr.vmx_entry;
+    pVM->hwaccm.s.vmx.msr.vmx_misc = HWACCMR0Globals.vmx.msr.vmx_misc;
+    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed0;
+    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr0_fixed1;
+    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed0;
+    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1 = HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1;
+    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum = HWACCMR0Globals.vmx.msr.vmx_vmcs_enum;
+    pVM->hwaccm.s.svm.u32Rev = HWACCMR0Globals.svm.u32Rev;
+    pVM->hwaccm.s.svm.u32MaxASID = HWACCMR0Globals.svm.u32MaxASID;
     pVM->hwaccm.s.cpuid.u32AMDFeatureECX = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
     pVM->hwaccm.s.cpuid.u32AMDFeatureEDX = HWACCMR0Globals.cpuid.u32AMDFeatureEDX;
+    pVM->hwaccm.s.lLastError = HWACCMR0Globals.lLastError;
     return VINF_SUCCESS;
 }