Changeset 87359 in vbox for trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
- Timestamp:
- Jan 21, 2021 7:56:26 PM (4 years ago)
- File:
- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (1 edited)
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r87332 r87359 705 705 706 706 /** 707 * Sets pfnVMRun to the best suited variant. 708 * 709 * This must be called whenever anything changes relative to the SVMR0VMRun 710 * variant selection: 711 * - pVCpu->hm.s.fLoadSaveGuestXcr0 712 * - CPUMCTX_WSF_IBPB_ENTRY in pVCpu->cpum.GstCtx.fWorldSwitcher 713 * - CPUMCTX_WSF_IBPB_EXIT in pVCpu->cpum.GstCtx.fWorldSwitcher 714 * - CPUMIsGuestFPUStateActive() (windows only) 715 * - CPUMCTX.fXStateMask (windows only) 716 * 717 * We currently ASSUME that neither CPUMCTX_WSF_IBPB_ENTRY nor 718 * CPUMCTX_WSF_IBPB_EXIT cannot be changed at runtime. 719 */ 720 static void hmR0SvmUpdateRunFunction(PVMCPUCC pVCpu) 721 { 722 static const PFNHMSVMVMRUN s_apfnHmR0SvmVmRunFunctions[] = 723 { 724 hmR0SvmVmRun_SansXcr0_SansIbpbEntry_SansIbpbExit, 725 hmR0SvmVmRun_WithXcr0_SansIbpbEntry_SansIbpbExit, 726 hmR0SvmVmRun_SansXcr0_WithIbpbEntry_SansIbpbExit, 727 hmR0SvmVmRun_WithXcr0_WithIbpbEntry_SansIbpbExit, 728 hmR0SvmVmRun_SansXcr0_SansIbpbEntry_WithIbpbExit, 729 hmR0SvmVmRun_WithXcr0_SansIbpbEntry_WithIbpbExit, 730 hmR0SvmVmRun_SansXcr0_WithIbpbEntry_WithIbpbExit, 731 hmR0SvmVmRun_WithXcr0_WithIbpbEntry_WithIbpbExit, 732 }; 733 uintptr_t const idx = (pVCpu->hm.s.fLoadSaveGuestXcr0 ? 1 : 0) 734 | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_ENTRY ? 2 : 0) 735 | (pVCpu->cpum.GstCtx.fWorldSwitcher & CPUMCTX_WSF_IBPB_EXIT ? 4 : 0); 736 PFNHMSVMVMRUN const pfnVMRun = s_apfnHmR0SvmVmRunFunctions[idx]; 737 if (pVCpu->hm.s.svm.pfnVMRun != pfnVMRun) 738 pVCpu->hm.s.svm.pfnVMRun = pfnVMRun; 739 } 740 741 742 /** 743 * Selector FNHMSVMVMRUN implementation. 744 */ 745 static DECLCALLBACK(int) hmR0SvmVMRunSelector(PVMCC pVM, PVMCPUCC pVCpu, RTHCPHYS HCPhysVMCB) 746 { 747 hmR0SvmUpdateRunFunction(pVCpu); 748 return pVCpu->hm.s.svm.pfnVMRun(pVM, pVCpu, HCPhysVMCB); 749 } 750 751 752 /** 707 753 * Does per-VM AMD-V initialization. 708 754 * … … 745 791 * We now use a single handler for both 32-bit and 64-bit guests, see @bugref{6208#c73}. 
746 792 */ 747 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;793 pVCpu->hm.s.svm.pfnVMRun = hmR0SvmVMRunSelector; 748 794 749 795 /* … … 1622 1668 1623 1669 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */ 1624 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 1670 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 1671 if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0) 1672 { 1673 pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0; 1674 hmR0SvmUpdateRunFunction(pVCpu); 1675 } 1625 1676 1626 1677 /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */ … … 6513 6564 { 6514 6565 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6515 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 6516 Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0, 6517 pCtx->cr4)); 6566 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 6567 Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%RTbool (cr4=%#RX64)\n", pCtx->aXcr[0], fLoadSaveGuestXcr0, pCtx->cr4)); 6568 if (fLoadSaveGuestXcr0 != pVCpu->hm.s.fLoadSaveGuestXcr0) 6569 { 6570 pVCpu->hm.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0; 6571 hmR0SvmUpdateRunFunction(pVCpu); 6572 } 6518 6573 } 6519 6574 else if (rcStrict == VINF_IEM_RAISED_XCPT)
Note:
See TracChangeset
for help on using the changeset viewer.