Changeset 14903 in vbox for trunk/src/VBox/VMM
Timestamp: Dec 2, 2008, 2:25:13 PM
File: 1 edited
Legend: lines prefixed with "-" were removed in r14903, lines prefixed with "+" were added, and unprefixed lines are unchanged context.
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
--- r14899
+++ r14903

@@ -50 +50 @@
 *   Internal Functions                                                        *
 *******************************************************************************/
-static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
-static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hwaccmR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
 static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
 static int hwaccmR0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);

@@ -83 +83 @@
     /** Set by the ring-0 driver to indicate VMX is supported by the CPU. */
     bool                        fSupported;
+    /** Whether we're using SUPR0EnableVTx or not. */
+    bool                        fUsingSUPR0EnableVTx;

     /** Host CR4 value (set by ring-0 VMX init) */

@@ -195 +197 @@
         HWACCMR0Globals.vmx.msr.feature_ctrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);

-        /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
-        memset(aRc, 0, sizeof(aRc));
-        HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
-
-        /* Check the return code of all invocations. */
-        if (RT_SUCCESS(HWACCMR0Globals.lLastError))
-            HWACCMR0Globals.lLastError = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
-
+        /*
+         * First try use native kernel API for controlling VT-x.
+         * (This is only supported by some Mac OS X kernels atm.)
+         */
+        HWACCMR0Globals.lLastError = rc = SUPR0EnableVTx(true /* fEnable */);
+        if (rc != VERR_NOT_SUPPORTED)
+        {
+            AssertMsg(rc == VINF_SUCCESS || rc == VERR_VMX_IN_VMX_ROOT_MODE || rc == VERR_VMX_NO_VMX, ("%Rrc\n", rc));
+            HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = true;
+            if (RT_SUCCESS(rc))
+            {
+                HWACCMR0Globals.vmx.fSupported = true;
+                rc = SUPR0EnableVTx(false /* fEnable */);
+                AssertRC(rc);
+            }
+        }
+        else
+        {
+            HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx = false;
+
+            /* We need to check if VT-x has been properly initialized on all CPUs. Some BIOSes do a lousy job. */
+            memset(aRc, 0, sizeof(aRc));
+            HWACCMR0Globals.lLastError = RTMpOnAll(HWACCMR0InitCPU, (void *)u32VendorEBX, aRc);
+
+            /* Check the return code of all invocations. */
+            if (RT_SUCCESS(HWACCMR0Globals.lLastError))
+                HWACCMR0Globals.lLastError = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
+        }
         if (RT_SUCCESS(HWACCMR0Globals.lLastError))
         {

@@ -236 +258 @@
         }

-        HWACCMR0Globals.vmx.hostCR4 = ASMGetCR4();
-
-        rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
-        if (RT_FAILURE(rc))
-            return rc;
-
-        pvScatchPage    = RTR0MemObjAddress(pScatchMemObj);
-        pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
-        memset(pvScatchPage, 0, PAGE_SIZE);
-
-        /* Set revision dword at the beginning of the structure. */
-        *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
-
-        /* Make sure we don't get rescheduled to another cpu during this probe. */
-        RTCCUINTREG fFlags = ASMIntDisableFlags();
-
-        /*
-         * Check CR4.VMXE
-         */
-        if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
-        {
-            /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
-             * try to execute the VMX instructions...
-             */
-            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
-        }
-
-        /* Enter VMX Root Mode */
-        rc = VMXEnable(pScatchPagePhys);
-        if (RT_FAILURE(rc))
-        {
-            /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
-             * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
-             * (b) turning off paging causes a #GP (unavoidable when switching from long to 32 bits mode or 32 bits to PAE)
-             *
-             * They should fix their code, but until they do we simply refuse to run.
-             */
-            HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
-        }
-        else
-        {
-            HWACCMR0Globals.vmx.fSupported = true;
-            VMXDisable();
-        }
-
-        /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
-        ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
-        ASMSetFlags(fFlags);
-
-        RTR0MemObjFree(pScatchMemObj, false);
-        if (RT_FAILURE(HWACCMR0Globals.lLastError))
-            return HWACCMR0Globals.lLastError;
-
+        if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
+        {
+            HWACCMR0Globals.vmx.hostCR4 = ASMGetCR4();
+
+            rc = RTR0MemObjAllocCont(&pScatchMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+            if (RT_FAILURE(rc))
+                return rc;
+
+            pvScatchPage    = RTR0MemObjAddress(pScatchMemObj);
+            pScatchPagePhys = RTR0MemObjGetPagePhysAddr(pScatchMemObj, 0);
+            memset(pvScatchPage, 0, PAGE_SIZE);
+
+            /* Set revision dword at the beginning of the structure. */
+            *(uint32_t *)pvScatchPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(HWACCMR0Globals.vmx.msr.vmx_basic_info);
+
+            /* Make sure we don't get rescheduled to another cpu during this probe. */
+            RTCCUINTREG fFlags = ASMIntDisableFlags();
+
+            /*
+             * Check CR4.VMXE
+             */
+            if (!(HWACCMR0Globals.vmx.hostCR4 & X86_CR4_VMXE))
+            {
+                /* In theory this bit could be cleared behind our back. Which would cause #UD faults when we
+                 * try to execute the VMX instructions...
+                 */
+                ASMSetCR4(HWACCMR0Globals.vmx.hostCR4 | X86_CR4_VMXE);
+            }
+
+            /* Enter VMX Root Mode */
+            rc = VMXEnable(pScatchPagePhys);
+            if (RT_FAILURE(rc))
+            {
+                /* KVM leaves the CPU in VMX root mode. Not only is this not allowed, it will crash the host when we enter raw mode, because
+                 * (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify this bit)
+                 * (b) turning off paging causes a #GP (unavoidable when switching from long to 32 bits mode or 32 bits to PAE)
+                 *
+                 * They should fix their code, but until they do we simply refuse to run.
+                 */
+                HWACCMR0Globals.lLastError = VERR_VMX_IN_VMX_ROOT_MODE;
+            }
+            else
+            {
+                HWACCMR0Globals.vmx.fSupported = true;
+                VMXDisable();
+            }
+
+            /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it wasn't so before (some software could incorrectly think it's in VMX mode) */
+            ASMSetCR4(HWACCMR0Globals.vmx.hostCR4);
+            ASMSetFlags(fFlags);
+
+            RTR0MemObjFree(pScatchMemObj, false);
+            if (RT_FAILURE(HWACCMR0Globals.lLastError))
+                return HWACCMR0Globals.lLastError;
+        }
     }
     else

@@ -422 +447 @@
 VMMR0DECL(int) HWACCMR0Term(void)
 {
-    int aRc[RTCPUSET_MAX_CPUS];
     int rc;
-
-    rc = RTPowerNotificationDeregister(hwaccmR0PowerCallback, 0);
-    Assert(RT_SUCCESS(rc));
-
-    memset(aRc, 0, sizeof(aRc));
-    rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
-    Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
-
-    /* Free the per-cpu pages used for VT-x and AMD-V */
-    for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
-    {
-        AssertMsgRC(aRc[i], ("HWACCMR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
-        if (HWACCMR0Globals.aCpuInfo[i].pMemObj != NIL_RTR0MEMOBJ)
-        {
-            RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
-            HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
-        }
-    }
+    if (    HWACCMR0Globals.vmx.fSupported
+        &&  HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
+    {
+        rc = SUPR0EnableVTx(false /* fEnable */);
+        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
+        {
+            HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = false;
+            Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
+        }
+    }
+    else
+    {
+        int aRc[RTCPUSET_MAX_CPUS];
+
+        rc = RTPowerNotificationDeregister(hwaccmR0PowerCallback, 0);
+        Assert(RT_SUCCESS(rc));
+
+        memset(aRc, 0, sizeof(aRc));
+        rc = RTMpOnAll(hwaccmR0DisableCPU, aRc, NULL);
+        Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
+
+        /* Free the per-cpu pages used for VT-x and AMD-V */
+        for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
+        {
+            AssertMsgRC(aRc[i], ("hwaccmR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
+            if (HWACCMR0Globals.aCpuInfo[i].pMemObj != NIL_RTR0MEMOBJ)
+            {
+                RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
+                HWACCMR0Globals.aCpuInfo[i].pMemObj = NIL_RTR0MEMOBJ;
+            }
+        }
+    }

@@ -531 +569 @@
     if (ASMAtomicCmpXchgU32((volatile uint32_t *)&HWACCMR0Globals.enmHwAccmState, enmNewHwAccmState, HWACCMSTATE_UNINITIALIZED))
     {
-        int aRc[RTCPUSET_MAX_CPUS];
-        RTCPUID idCpu = 0;
+        int rc;

         /* Don't setup hwaccm as that might not work (vt-x & 64 bits raw mode) */

@@ -538 +575 @@
             return VINF_SUCCESS;

-        memset(aRc, 0, sizeof(aRc));
-
-        /* Allocate one page per cpu for the global vt-x and amd-v pages */
-        for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
-        {
-            Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);
-
-            /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
-            if (RTMpIsCpuOnline(i))
-            {
-                int rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
-                AssertRC(rc);
-                if (RT_FAILURE(rc))
-                    return rc;
-
-                void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
-                Assert(pvR0);
-                ASMMemZeroPage(pvR0);
-
-#ifdef LOG_ENABLED
-                SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
-#endif
-            }
-        }
-        /* First time, so initialize each cpu/core */
-        int rc = RTMpOnAll(HWACCMR0EnableCPU, (void *)pVM, aRc);
-
-        /* Check the return code of all invocations. */
-        if (RT_SUCCESS(rc))
-            rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
-
-        AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
+        if (    HWACCMR0Globals.vmx.fSupported
+            &&  HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
+        {
+            rc = SUPR0EnableVTx(true /* fEnable */);
+            if (RT_SUCCESS(rc))
+            {
+                for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
+                {
+                    HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = true;
+                    Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
+                }
+            }
+            else
+                AssertMsgFailed(("HWACCMR0EnableAllCpus/SUPR0EnableVTx: rc=%Rrc\n", rc));
+        }
+        else
+        {
+            int aRc[RTCPUSET_MAX_CPUS];
+            RTCPUID idCpu = 0;
+
+            memset(aRc, 0, sizeof(aRc));
+
+            /* Allocate one page per cpu for the global vt-x and amd-v pages */
+            for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
+            {
+                Assert(!HWACCMR0Globals.aCpuInfo[i].pMemObj);
+
+                /** @todo this is rather dangerous if cpus can be taken offline; we don't care for now */
+                if (RTMpIsCpuOnline(i))
+                {
+                    rc = RTR0MemObjAllocCont(&HWACCMR0Globals.aCpuInfo[i].pMemObj, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+                    AssertRC(rc);
+                    if (RT_FAILURE(rc))
+                        return rc;
+
+                    void *pvR0 = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[i].pMemObj);
+                    Assert(pvR0);
+                    ASMMemZeroPage(pvR0);
+
+#ifdef LOG_ENABLED
+                    SUPR0Printf("address %x phys %x\n", pvR0, (uint32_t)RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[i].pMemObj, 0));
+#endif
+                }
+            }
+            /* First time, so initialize each cpu/core */
+            rc = RTMpOnAll(hwaccmR0EnableCPU, (void *)pVM, aRc);
+
+            /* Check the return code of all invocations. */
+            if (RT_SUCCESS(rc))
+                rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
+            AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
+        }
+
         return rc;
     }

@@ -588 +646 @@
  * @param   pvUser2     The 2nd user argument.
  */
-static DECLCALLBACK(void) HWACCMR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
 {
     PVM             pVM = (PVM)pvUser1;     /* can be NULL! */

@@ -596 +654 @@
     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];

+    Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

@@ -636 +695 @@
  * @param   pvUser2     The 2nd user argument.
  */
-static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+static DECLCALLBACK(void) hwaccmR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
 {
     void    *pvPageCpu;

@@ -643 +702 @@
     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];

+    Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

@@ -675 +735 @@
 {
     NOREF(pvUser);
+    Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);

 #ifdef LOG_ENABLED

@@ -696 +757 @@
     {
         /* Turn off VT-x or AMD-V on all CPUs. */
-        rc = RTMpOnAll(HWACCMR0DisableCPU, aRc, NULL);
+        rc = RTMpOnAll(hwaccmR0DisableCPU, aRc, NULL);
         Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
     }

@@ -713 +774 @@

         /* Turn VT-x or AMD-V back on on all CPUs. */
-        rc = RTMpOnAll(HWACCMR0EnableCPU, NULL, aRc);
+        rc = RTMpOnAll(hwaccmR0EnableCPU, NULL, aRc);
         Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
     }
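
The substantive change in this revision is the new two-stage VT-x detection in HWACCMR0Init(). The sketch below condenses that control flow; it is not VirtualBox source. The two static flags and vmxProbeAllCpus() are hypothetical stand-ins for HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx, HWACCMR0Globals.vmx.fSupported and the RTMpOnAll()-based probe, while SUPR0EnableVTx() and the status codes are the real ones the diff calls (declared in VBox/sup.h and VBox/err.h in the VirtualBox ring-0 environment).

    /* Condensed sketch of the detection flow introduced by r14903 (assumed
     * names marked; assumes the VirtualBox ring-0 headers are available). */
    static bool g_fUsingSUPR0EnableVTx = false; /* hypothetical stand-in for ...fUsingSUPR0EnableVTx */
    static bool g_fVmxSupported        = false; /* hypothetical stand-in for ...fSupported */

    static int vmxDetectSketch(void)
    {
        /* Ask the host kernel to arbitrate VT-x first. VERR_NOT_SUPPORTED
           means the kernel (everything except some Mac OS X kernels at the
           time) offers no such API. */
        int rc = SUPR0EnableVTx(true /* fEnable */);
        if (rc != VERR_NOT_SUPPORTED)
        {
            /* The kernel API is authoritative; latch the fact that every
               later enable/disable must go through it as well. */
            g_fUsingSUPR0EnableVTx = true;
            if (RT_SUCCESS(rc))
            {
                g_fVmxSupported = true;
                rc = SUPR0EnableVTx(false /* fEnable */); /* only probing, so turn it off again */
            }
            return rc;
        }

        /* No kernel support: fall back to probing VT-x on every CPU, since
           some BIOSes initialize it on only part of the package. */
        g_fUsingSUPR0EnableVTx = false;
        return vmxProbeAllCpus(); /* hypothetical; RTMpOnAll(HWACCMR0InitCPU, ...) in the real code */
    }

Latching fUsingSUPR0EnableVTx matters because HWACCMR0EnableAllCpus() and HWACCMR0Term() must take the matching branch later: mixing SUPR0EnableVTx() with manual per-CPU VMXON would fight the host kernel for ownership of VMX root mode.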
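The fallback probe itself is unchanged in substance, only re-indented under the !fUsingSUPR0EnableVTx guard, but the pattern is worth seeing in isolation. Here is a minimal sketch reduced to a single CPU; vmxProbeOneCpu() is a hypothetical name, while the ASM* helpers (iprt/asm.h), X86_CR4_VMXE (iprt/x86.h), VMXEnable()/VMXDisable() and the status codes are the ones the diff actually uses.

    /* Sketch of the per-CPU VMX root mode probe kept by this changeset. */
    static int vmxProbeOneCpu(RTHCPHYS HCPhysScratchPage)
    {
        /* Disabling interrupts pins the probe to the current CPU. */
        RTCCUINTREG const fFlags  = ASMIntDisableFlags();
        RTCCUINTREG const uOldCr4 = ASMGetCR4();
        int               rc      = VINF_SUCCESS;

        /* VMXON raises #UD unless CR4.VMXE is set; set it temporarily. */
        if (!(uOldCr4 & X86_CR4_VMXE))
            ASMSetCR4(uOldCr4 | X86_CR4_VMXE);

        /* HCPhysScratchPage must address a zeroed page whose first dword is
           the VMCS revision ID from MSR_IA32_VMX_BASIC_INFO. If VMXON fails,
           another VMM (KVM, for instance) already owns VMX root mode, and
           refusing to run beats crashing the host later in raw mode. */
        if (RT_SUCCESS(VMXEnable(HCPhysScratchPage)))
            VMXDisable(); /* we were only probing */
        else
            rc = VERR_VMX_IN_VMX_ROOT_MODE;

        /* Restore CR4 exactly: a stale VMXE bit would make other software
           believe the CPU is already in VMX operation. */
        ASMSetCR4(uOldCr4);
        ASMSetFlags(fFlags);
        return rc;
    }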