Changeset 87561 in vbox
- Timestamp:
- Feb 3, 2021 11:43:51 AM (4 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r87538 → r87561)

@@ -2118,5 +2118,5 @@
     pVCpu->hmr0.s.svm.fSyncVTpr = false;

-    if (!pVM->hm.s.fTPRPatchingActive)
+    if (!pVM->hm.s.fTprPatchingActive)
     {
         /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
@@ -4143,5 +4143,5 @@
     Assert(!pSvmTransient->fIsNestedGuest);
     PCSVMVMCB pVmcb = pVCpu->hmr0.s.svm.pVmcb;
-    if (pVM->hm.s.fTPRPatchingActive)
+    if (pVM->hm.s.fTprPatchingActive)
         pSvmTransient->u8GuestTpr = pVmcb->guest.u64LSTAR;
     else
@@ -4435,5 +4435,5 @@
     Assert(!pSvmTransient->fIsNestedGuest);
     /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
-    if (   pVM->hm.s.fTPRPatchingActive
+    if (   pVM->hm.s.fTprPatchingActive
         && (pVmcb->guest.u64LSTAR & 0xff) != pSvmTransient->u8GuestTpr)
     {
@@ -6358,5 +6358,5 @@
     bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu);
     if (   idMsr == MSR_K8_LSTAR
-        && pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
+        && pVCpu->CTX_SUFF(pVM)->hm.s.fTprPatchingActive)
     {
         unsigned cbInstr;
trunk/src/VBox/VMM/VMMR3/HM.cpp
trunk/src/VBox/VMM/VMMR3/HM.cpp (r87559 → r87561)

@@ -2050,5 +2050,5 @@
     pVM->hm.s.cPatches = 0;
     pVM->hm.s.PatchTree = 0;
-    pVM->hm.s.fTPRPatchingActive = false;
+    pVM->hm.s.fTprPatchingActive = false;
     ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
 }
@@ -2114,5 +2114,5 @@
     pVM->hm.s.PatchTree = 0;
     pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
-    pVM->hm.s.fTPRPatchingActive = false;
+    pVM->hm.s.fTprPatchingActive = false;
     return VINF_SUCCESS;
 }
@@ -2188,5 +2188,5 @@
     pVM->hm.s.pFreeGuestPatchMem = 0;
     pVM->hm.s.cbGuestPatchMem = 0;
-    pVM->hm.s.fTPRPatchingActive = false;
+    pVM->hm.s.fTprPatchingActive = false;
     return VINF_SUCCESS;
 }
@@ -2575,5 +2575,5 @@

     pVM->hm.s.cPatches++;
-    pVM->hm.s.fTPRPatchingActive = true;
+    pVM->hm.s.fTprPatchingActive = true;
     STAM_COUNTER_INC(&pVM->hm.s.StatTprPatchSuccess);
     return VINF_SUCCESS;
@@ -3239,6 +3239,6 @@

     if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
-        pVM->hm.s.fTPRPatchingActive = true;
-    Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
+        pVM->hm.s.fTprPatchingActive = true;
+    Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTprPatchingActive == false);

     SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
trunk/src/VBox/VMM/include/HMInternal.h
r87559 r87561 430 430 typedef struct HM 431 431 { 432 /** Set when the debug facility has breakpoints/events enabled that requires 433 * us to use the debug execution loop in ring-0. */ 434 bool fUseDebugLoop; 435 /** Set when TPR patching is allowed. */ 436 bool fTprPatchingAllowed; 437 /** Set when TPR patching is active. */ 438 bool fTprPatchingActive; 439 /** Alignment padding. */ 440 bool afAlignment1[5]; 441 442 /** @todo r=bird: for better cache locality for SVM, it would be good to split 443 * out the non-esssential data (i.e config and for-ring3 bits). */ 444 struct 445 { 446 /** Set by the ring-0 side of HM to indicate VMX is supported by the CPU. */ 447 bool fSupported; 448 /** Set when we've enabled VMX. */ 449 bool fEnabled; 450 /** The shift mask employed by the VMX-Preemption timer (set by ring-0). */ 451 uint8_t cPreemptTimerShift; 452 bool afAlignment1[5]; 453 454 /** Pause-loop exiting (PLE) gap in ticks. */ 455 uint32_t cPleGapTicks; 456 /** Pause-loop exiting (PLE) window in ticks. */ 457 uint32_t cPleWindowTicks; 458 459 /** Virtual address of the TSS page used for real mode emulation. */ 460 R3PTRTYPE(PVBOXTSS) pRealModeTSS; 461 /** Virtual address of the identity page table used for real mode and protected 462 * mode without paging emulation in EPT mode. */ 463 R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable; 464 465 /** @name Configuration (gets copied if problematic) 466 * @{ */ 467 /** Set if Last Branch Record (LBR) is enabled. */ 468 bool fLbrCfg; 469 /** Set if VT-x VPID is allowed. */ 470 bool fAllowVpid; 471 /** Set if unrestricted guest execution is in use (real and protected mode 472 * without paging). */ 473 bool fUnrestrictedGuestCfg; 474 /** Set if the preemption timer should be used if available. Ring-0 475 * quietly clears this if the hardware doesn't support the preemption timer. */ 476 bool fUsePreemptTimerCfg; 477 /** @} */ 478 479 /** @name For ring-3 consumption 480 * @{ */ 481 /** Set if VPID is supported (ring-3 copy). 
*/ 482 bool fVpidForRing3; 483 /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX 484 * init, for logging). */ 485 bool fSupportsVmcsEferForRing3; 486 /** Whether to use VMCS shadowing. */ 487 bool fUseVmcsShadowingForRing3; 488 bool fAlignment2; 489 490 /** Host CR4 value (set by ring-0 VMX init, for logging). */ 491 uint64_t u64HostCr4ForRing3; 492 /** Host SMM monitor control (set by ring-0 VMX init, for logging). */ 493 uint64_t u64HostSmmMonitorCtlForRing3; 494 /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */ 495 uint64_t u64HostMsrEferForRing3; 496 497 /** The first valid host LBR branch-from-IP stack range. */ 498 uint32_t idLbrFromIpMsrFirstForRing3; 499 /** The last valid host LBR branch-from-IP stack range. */ 500 uint32_t idLbrFromIpMsrLastForRing3; 501 502 /** The first valid host LBR branch-to-IP stack range. */ 503 uint32_t idLbrToIpMsrFirstForRing3; 504 /** The last valid host LBR branch-to-IP stack range. */ 505 uint32_t idLbrToIpMsrLastForRing3; 506 507 /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). */ 508 RTHCPHYS HCPhysVmxEnableError; 509 /** VMX MSR values (only for ring-3 consumption). */ 510 VMXMSRS MsrsForRing3; 511 512 /** Tagged-TLB flush type (only for ring-3 consumption). */ 513 VMXTLBFLUSHTYPE enmTlbFlushTypeForRing3; 514 /** Flush type to use for INVEPT (only for ring-3 consumption). */ 515 VMXTLBFLUSHEPT enmTlbFlushEptForRing3; 516 /** Flush type to use for INVVPID (only for ring-3 consumption). */ 517 VMXTLBFLUSHVPID enmTlbFlushVpidForRing3; 518 /** @} */ 519 } vmx; 520 521 struct 522 { 523 /** Set by the ring-0 side of HM to indicate SVM is supported by the CPU. */ 524 bool fSupported; 525 /** Set when we've enabled SVM. */ 526 bool fEnabled; 527 /** Set when the hack to ignore VERR_SVM_IN_USE is active. 528 * @todo Safe? */ 529 bool fIgnoreInUseError; 530 /** Whether to use virtualized VMSAVE/VMLOAD feature. 
*/ 531 bool fVirtVmsaveVmload; 532 /** Whether to use virtual GIF feature. */ 533 bool fVGif; 534 /** Whether to use LBR virtualization feature. */ 535 bool fLbrVirt; 536 bool afAlignment1[2]; 537 538 /** Pause filter counter. */ 539 uint16_t cPauseFilter; 540 /** Pause filter treshold in ticks. */ 541 uint16_t cPauseFilterThresholdTicks; 542 uint32_t u32Alignment2; 543 544 /** @name For ring-3 consumption 545 * @{ */ 546 /** SVM revision. */ 547 uint32_t u32Rev; 548 /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */ 549 uint32_t fFeaturesForRing3; 550 /** HWCR MSR (for diagnostics). */ 551 uint64_t u64MsrHwcr; 552 /** @} */ 553 } svm; 554 555 /** AVL tree with all patches (active or disabled) sorted by guest instruction address. 556 * @todo For @bugref{9217} this AVL tree must be eliminated and instead 557 * sort aPatches by address and do a safe binary search on it. */ 558 AVLOU32TREE PatchTree; 559 uint32_t cPatches; 560 HMTPRPATCH aPatches[64]; 561 562 /** Guest allocated memory for patching purposes. */ 563 RTGCPTR pGuestPatchMem; 564 /** Current free pointer inside the patch block. */ 565 RTGCPTR pFreeGuestPatchMem; 566 /** Size of the guest patch memory block. */ 567 uint32_t cbGuestPatchMem; 568 569 /** Last recorded error code during HM ring-0 init. */ 570 int32_t rcInit; 571 572 /** Maximum ASID allowed. 573 * This is mainly for the release log. */ 574 uint32_t uMaxAsidForLog; 575 /** World switcher flags (HM_WSF_XXX) for the release log. */ 576 uint32_t fWorldSwitcherForLog; 577 578 /** @name Configuration not used (much) after VM setup 579 * @{ */ 580 /** The maximum number of resumes loops allowed in ring-0 (safety precaution). 581 * This number is set much higher when RTThreadPreemptIsPending is reliable. */ 582 uint32_t cMaxResumeLoopsCfg; 432 583 /** Set if nested paging is enabled. 433 584 * Config value that is copied to HMR0PERVM::fNestedPaging on setup. 
*/ 434 585 bool fNestedPagingCfg; 435 /** Set when we've finalized the VMX / SVM initialization in ring-3436 * (hmR3InitFinalizeR0Intel / hmR3InitFinalizeR0Amd). */437 bool fInitialized;438 586 /** Set if large pages are enabled (requires nested paging). 439 587 * Config only, passed on the PGM where it really belongs. … … 445 593 /** Set when we initialize VT-x or AMD-V once for all CPUs. */ 446 594 bool fGlobalInit; 447 /** Set when TPR patching is allowed. */448 bool fTprPatchingAllowed;449 /** Set when TPR patching is active. */450 bool fTPRPatchingActive;451 /** Set when the debug facility has breakpoints/events enabled that requires452 * us to use the debug execution loop in ring-0. */453 bool fUseDebugLoop;454 595 /** Set if hardware APIC virtualization is enabled. 455 596 * @todo Not really used by HM, move to APIC where it's actually used. */ … … 458 599 * @todo Not really used by HM, move to APIC where it's actually used. */ 459 600 bool fPostedIntrs; 601 /** @} */ 460 602 461 603 /** @name Processed into HMR0PERVCPU::fWorldSwitcher by ring-0 on VM init. … … 478 620 /** @} */ 479 621 480 /** Alignment padding. */ 481 bool afPaddingMinus1[3]; 482 483 /** The maximum number of resumes loops allowed in ring-0 (safety precaution). 484 * This number is set much higher when RTThreadPreemptIsPending is reliable. */ 485 uint32_t cMaxResumeLoopsCfg; 486 487 struct 488 { 489 /** Set by the ring-0 side of HM to indicate VMX is supported by the CPU. */ 490 bool fSupported; 491 /** Set when we've enabled VMX. */ 492 bool fEnabled; 493 /** The shift mask employed by the VMX-Preemption timer (set by ring-0). */ 494 uint8_t cPreemptTimerShift; 495 bool afAlignment1[5]; 496 497 /** Pause-loop exiting (PLE) gap in ticks. */ 498 uint32_t cPleGapTicks; 499 /** Pause-loop exiting (PLE) window in ticks. */ 500 uint32_t cPleWindowTicks; 501 502 /** Virtual address of the TSS page used for real mode emulation. 
*/ 503 R3PTRTYPE(PVBOXTSS) pRealModeTSS; 504 /** Virtual address of the identity page table used for real mode and protected 505 * mode without paging emulation in EPT mode. */ 506 R3PTRTYPE(PX86PD) pNonPagingModeEPTPageTable; 507 508 /** @name Configuration (gets copied if problematic) 509 * @{ */ 510 /** Set if Last Branch Record (LBR) is enabled. */ 511 bool fLbrCfg; 512 /** Set if VT-x VPID is allowed. */ 513 bool fAllowVpid; 514 /** Set if unrestricted guest execution is in use (real and protected mode 515 * without paging). */ 516 bool fUnrestrictedGuestCfg; 517 /** Set if the preemption timer should be used if available. Ring-0 518 * quietly clears this if the hardware doesn't support the preemption timer. */ 519 bool fUsePreemptTimerCfg; 520 /** @} */ 521 522 /** @name For ring-3 consumption 523 * @{ */ 524 /** Set if VPID is supported (ring-3 copy). */ 525 bool fVpidForRing3; 526 /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX 527 * init, for logging). */ 528 bool fSupportsVmcsEferForRing3; 529 /** Whether to use VMCS shadowing. */ 530 bool fUseVmcsShadowingForRing3; 531 bool fAlignment2; 532 533 /** Host CR4 value (set by ring-0 VMX init, for logging). */ 534 uint64_t u64HostCr4ForRing3; 535 /** Host SMM monitor control (set by ring-0 VMX init, for logging). */ 536 uint64_t u64HostSmmMonitorCtlForRing3; 537 /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */ 538 uint64_t u64HostMsrEferForRing3; 539 540 /** The first valid host LBR branch-from-IP stack range. */ 541 uint32_t idLbrFromIpMsrFirstForRing3; 542 /** The last valid host LBR branch-from-IP stack range. */ 543 uint32_t idLbrFromIpMsrLastForRing3; 544 545 /** The first valid host LBR branch-to-IP stack range. */ 546 uint32_t idLbrToIpMsrFirstForRing3; 547 /** The last valid host LBR branch-to-IP stack range. */ 548 uint32_t idLbrToIpMsrLastForRing3; 549 550 /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). 
*/ 551 RTHCPHYS HCPhysVmxEnableError; 552 /** VMX MSR values (only for ring-3 consumption). */ 553 VMXMSRS MsrsForRing3; 554 555 /** Tagged-TLB flush type (only for ring-3 consumption). */ 556 VMXTLBFLUSHTYPE enmTlbFlushTypeForRing3; 557 /** Flush type to use for INVEPT (only for ring-3 consumption). */ 558 VMXTLBFLUSHEPT enmTlbFlushEptForRing3; 559 /** Flush type to use for INVVPID (only for ring-3 consumption). */ 560 VMXTLBFLUSHVPID enmTlbFlushVpidForRing3; 561 /** @} */ 562 } vmx; 563 564 struct 565 { 566 /** Set by the ring-0 side of HM to indicate SVM is supported by the CPU. */ 567 bool fSupported; 568 /** Set when we've enabled SVM. */ 569 bool fEnabled; 570 /** Set when the hack to ignore VERR_SVM_IN_USE is active. 571 * @todo Safe? */ 572 bool fIgnoreInUseError; 573 /** Whether to use virtualized VMSAVE/VMLOAD feature. */ 574 bool fVirtVmsaveVmload; 575 /** Whether to use virtual GIF feature. */ 576 bool fVGif; 577 /** Whether to use LBR virtualization feature. */ 578 bool fLbrVirt; 579 bool afAlignment1[2]; 580 581 /** Pause filter counter. */ 582 uint16_t cPauseFilter; 583 /** Pause filter treshold in ticks. */ 584 uint16_t cPauseFilterThresholdTicks; 585 uint32_t u32Alignment2; 586 587 /** @name For ring-3 consumption 588 * @{ */ 589 /** SVM revision. */ 590 uint32_t u32Rev; 591 /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */ 592 uint32_t fFeaturesForRing3; 593 /** HWCR MSR (for diagnostics). */ 594 uint64_t u64MsrHwcr; 595 /** @} */ 596 } svm; 597 598 /** AVL tree with all patches (active or disabled) sorted by guest instruction address. 599 * @todo For @bugref{9217} this AVL tree must be eliminated and instead 600 * sort aPatches by address and do a safe binary search on it. */ 601 AVLOU32TREE PatchTree; 602 uint32_t cPatches; 603 HMTPRPATCH aPatches[64]; 604 605 /** Guest allocated memory for patching purposes. */ 606 RTGCPTR pGuestPatchMem; 607 /** Current free pointer inside the patch block. 
*/ 608 RTGCPTR pFreeGuestPatchMem; 609 /** Size of the guest patch memory block. */ 610 uint32_t cbGuestPatchMem; 611 612 /** Last recorded error code during HM ring-0 init. */ 613 int32_t rcInit; 614 /** Maximum ASID allowed. 615 * This is mainly for the release log. */ 616 uint32_t uMaxAsidForLog; 617 /** World switcher flags (HM_WSF_XXX) for the release log. */ 618 uint32_t fWorldSwitcherForLog; 622 /** Set when we've finalized the VMX / SVM initialization in ring-3 623 * (hmR3InitFinalizeR0Intel / hmR3InitFinalizeR0Amd). */ 624 bool fInitialized; 625 626 bool afAlignment2[6]; 619 627 620 628 STAMCOUNTER StatTprPatchSuccess;
Note:
See TracChangeset
for help on using the changeset viewer.