Changeset 101380 in vbox
- Timestamp: Oct 6, 2023 9:19:02 AM
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r100357)
+++ trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r101380)

 
 
+/**
+ * Verify if VMX is really usable by entering and exiting VMX root mode.
+ *
+ * @returns VBox status code.
+ * @param   uVmxBasicMsr    The host's IA32_VMX_BASIC_MSR value.
+ */
+static int hmR0InitIntelVerifyVmxUsability(uint64_t uVmxBasicMsr)
+{
+    /* Allocate a temporary VMXON region. */
+    RTR0MEMOBJ hScatchMemObj;
+    int rc = RTR0MemObjAllocCont(&hScatchMemObj, HOST_PAGE_SIZE, NIL_RTHCPHYS /* PhysHighest */, false /* fExecutable */);
+    if (RT_FAILURE(rc))
+    {
+        LogRelFunc(("RTR0MemObjAllocCont(,HOST_PAGE_SIZE,false) -> %Rrc\n", rc));
+        return rc;
+    }
+    void          *pvScatchPage      = RTR0MemObjAddress(hScatchMemObj);
+    RTHCPHYS const HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScatchMemObj, 0);
+    RT_BZERO(pvScatchPage, HOST_PAGE_SIZE);
+
+    /* Set revision dword at the beginning of the VMXON structure. */
+    *(uint32_t *)pvScatchPage = RT_BF_GET(uVmxBasicMsr, VMX_BF_BASIC_VMCS_ID);
+
+    /* Make sure we don't get rescheduled to another CPU during this probe. */
+    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
+
+    /* Enable CR4.VMXE if it isn't already set. */
+    RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
+
+    /*
+     * The only way of checking if we're in VMX root mode is to try and enter it.
+     * There is no instruction or control bit that tells us if we're in VMX root mode.
+     * Therefore, try and enter and exit VMX root mode.
+     */
+    rc = VMXEnable(HCPhysScratchPage);
+    if (RT_SUCCESS(rc))
+        VMXDisable();
+    else
+    {
+        /*
+         * KVM leaves the CPU in VMX root mode. Not only is this not allowed,
+         * it will crash the host when we enter raw mode, because:
+         *
+         *   (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify
+         *       this bit), and
+         *   (b) turning off paging causes a #GP (unavoidable when switching
+         *       from long to 32 bits mode or 32 bits to PAE).
+         *
+         * They should fix their code, but until they do we simply refuse to run.
+         */
+        rc = VERR_VMX_IN_VMX_ROOT_MODE;
+    }
+
+    /* Restore CR4.VMXE if it wasn't set prior to us setting it above. */
+    if (!(uOldCr4 & X86_CR4_VMXE))
+        SUPR0ChangeCR4(0 /* fOrMask */, ~(uint64_t)X86_CR4_VMXE);
+
+    /* Restore interrupts. */
+    ASMSetFlags(fEFlags);
+
+    RTR0MemObjFree(hScatchMemObj, false);
+
+    return rc;
+}
+
 
 /**
…
         if (!g_fHmVmxUsingSUPR0EnableVTx)
         {
-            /* Allocate a temporary VMXON region. */
-            RTR0MEMOBJ hScatchMemObj;
-            rc = RTR0MemObjAllocCont(&hScatchMemObj, HOST_PAGE_SIZE, NIL_RTHCPHYS /*PhysHighest*/, false /* fExecutable */);
-            if (RT_FAILURE(rc))
-            {
-                LogRel(("hmR0InitIntel: RTR0MemObjAllocCont(,HOST_PAGE_SIZE,false) -> %Rrc\n", rc));
-                return rc;
-            }
-            void          *pvScatchPage      = RTR0MemObjAddress(hScatchMemObj);
-            RTHCPHYS const HCPhysScratchPage = RTR0MemObjGetPagePhysAddr(hScatchMemObj, 0);
-            RT_BZERO(pvScatchPage, HOST_PAGE_SIZE);
-
-            /* Set revision dword at the beginning of the VMXON structure. */
-            *(uint32_t *)pvScatchPage = RT_BF_GET(uVmxBasicMsr, VMX_BF_BASIC_VMCS_ID);
-
-            /* Make sure we don't get rescheduled to another CPU during this probe. */
-            RTCCUINTREG const fEFlags = ASMIntDisableFlags();
-
-            /* Enable CR4.VMXE if it isn't already set. */
-            RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
-
-            /*
-             * The only way of checking if we're in VMX root mode or not is to try and enter it.
-             * There is no instruction or control bit that tells us if we're in VMX root mode.
-             * Therefore, try and enter VMX root mode here.
-             */
-            rc = VMXEnable(HCPhysScratchPage);
+            rc = hmR0InitIntelVerifyVmxUsability(uVmxBasicMsr);
             if (RT_SUCCESS(rc))
-            {
                 g_fHmVmxSupported = true;
-                VMXDisable();
-            }
             else
             {
-                /*
-                 * KVM leaves the CPU in VMX root mode. Not only is this not allowed,
-                 * it will crash the host when we enter raw mode, because:
-                 *
-                 *   (a) clearing X86_CR4_VMXE in CR4 causes a #GP (we no longer modify
-                 *       this bit), and
-                 *   (b) turning off paging causes a #GP (unavoidable when switching
-                 *       from long to 32 bits mode or 32 bits to PAE).
-                 *
-                 * They should fix their code, but until they do we simply refuse to run.
-                 */
-                g_rcHmInit = VERR_VMX_IN_VMX_ROOT_MODE;
+                g_rcHmInit = rc;
                 Assert(g_fHmVmxSupported == false);
             }
-
-            /* Restore CR4.VMXE if it wasn't set prior to us setting it above. */
-            if (!(uOldCr4 & X86_CR4_VMXE))
-                SUPR0ChangeCR4(0 /* fOrMask */, ~(uint64_t)X86_CR4_VMXE);
-
-            /* Restore interrupts. */
-            ASMSetFlags(fEFlags);
-
-            RTR0MemObjFree(hScatchMemObj, false);
         }
 
…
             && g_fHmGlobalInit)
         {
-            /* First time, so initialize each cpu/core. */
-            HMR0FIRSTRC FirstRc;
-            hmR0FirstRcInit(&FirstRc);
-            rc = RTMpOnAll(hmR0EnableCpuCallback, (void *)pVM, &FirstRc);
-            if (RT_SUCCESS(rc))
-                rc = hmR0FirstRcGetStatus(&FirstRc);
+            /*
+             * It's possible we end up here with VMX (and perhaps SVM) not supported, see @bugref{9918}.
+             * In that case, our HMR0 function table contains the dummy placeholder functions which pretend
+             * success. However, we must not pretend success any longer (like we did during HMR0Init called
+             * during VMMR0 module init) as the HM init error code (g_rcHmInit) should be propagated to
+             * ring-3 especially since we now have a VM instance.
+             */
+            if (   !g_fHmVmxSupported
+                && !g_fHmSvmSupported)
+            {
+                Assert(g_HmR0Ops.pfnEnableCpu == hmR0DummyEnableCpu);
+                Assert(RT_FAILURE(g_rcHmInit));
+                rc = g_rcHmInit;
+            }
+            else
+            {
+                /* First time, so initialize each cpu/core. */
+                HMR0FIRSTRC FirstRc;
+                hmR0FirstRcInit(&FirstRc);
+                Assert(g_HmR0Ops.pfnEnableCpu != hmR0DummyEnableCpu);
+                rc = RTMpOnAll(hmR0EnableCpuCallback, (void *)pVM, &FirstRc);
+                if (RT_SUCCESS(rc))
+                    rc = hmR0FirstRcGetStatus(&FirstRc);
+            }
         }
 
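For orientation, the probe that the new hmR0InitIntelVerifyVmxUsability() helper factors out is driven from hmR0InitIntel() with the raw IA32_VMX_BASIC value. A minimal caller sketch follows; it assumes the MSR is read with ASMRdMsr(MSR_IA32_VMX_BASIC) as done elsewhere in HMR0.cpp and only illustrates the post-r101380 call pattern, it is not code from this changeset.

    /* Illustrative sketch, not part of the changeset: how the refactored probe is
       meant to be driven. The ASMRdMsr()/MSR_IA32_VMX_BASIC read and the globals
       are assumptions based on the surrounding HMR0.cpp code. */
    uint64_t const uVmxBasicMsr = ASMRdMsr(MSR_IA32_VMX_BASIC); /* Host VMX capability MSR. */
    int rc = hmR0InitIntelVerifyVmxUsability(uVmxBasicMsr);     /* VMXON/VMXOFF round trip. */
    if (RT_SUCCESS(rc))
        g_fHmVmxSupported = true;
    else
    {
        /* Typically VERR_VMX_IN_VMX_ROOT_MODE when another hypervisor (e.g. KVM)
           already owns VMX root mode; keep the status so the later enable-all-CPUs
           path can propagate it to ring-3 instead of pretending success. */
        g_rcHmInit = rc;
    }

Factoring the probe into its own function keeps the interrupt-disable and CR4 save/restore bracketing in one place, so the caller only has to record the resulting status.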