Changeset 87469 in vbox
- Timestamp: Jan 28, 2021 5:01:06 PM (4 years ago)
- svn:sync-xref-src-repo-rev: 142489
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
```diff
--- trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87462)
+++ trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r87469)
@@ -25,5 +25,5 @@
 %include "VBox/vmm/hm_vmx.mac"
 %include "VBox/vmm/cpum.mac"
-%include "VBox/vmm/vm.mac"
+%include "VBox/vmm/gvm.mac"
 %include "iprt/x86.mac"
 %include "HMInternal.mac"
@@ -903,8 +903,8 @@
         ; before writing here.
         lea     rcx, [NAME(RT_CONCAT(hmR0VmxStartVmHostRIP,%1)) wrt rip]
-        cmp     rcx, [rsi + VMCPU.hm + HMCPU.u + HMCPUVMX.uHostRIP]
+        cmp     rcx, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.u + HMR0CPUVMX.uHostRIP]
         jne     .write_host_rip
 .wrote_host_rip:
-        cmp     rsp, [rsi + VMCPU.hm + HMCPU.u + HMCPUVMX.uHostRSP]
+        cmp     rsp, [rsi + GVMCPU.hmr0 + HMR0PERVCPU.u + HMR0CPUVMX.uHostRSP]
         jne     .write_host_rsp
 .wrote_host_rsp:
@@ -967,5 +967,5 @@
 ALIGNCODE(8)
 .write_host_rip:
-        mov     [rsi + VMCPU.hm + HMCPU.u + HMCPUVMX.uHostRIP], rcx
+        mov     [rsi + GVMCPU.hmr0 + HMR0PERVCPU.u + HMR0CPUVMX.uHostRIP], rcx
         mov     eax, VMX_VMCS_HOST_RIP  ;; @todo It is only strictly necessary to write VMX_VMCS_HOST_RIP when
         vmwrite rax, rcx                ;;       the VMXVMCSINFO::pfnStartVM function changes (eventually
@@ -977,5 +977,5 @@
 ALIGNCODE(8)
 .write_host_rsp:
-        mov     [rsi + VMCPU.hm + HMCPU.u + HMCPUVMX.uHostRSP], rsp
+        mov     [rsi + GVMCPU.hmr0 + HMR0PERVCPU.u + HMR0CPUVMX.uHostRSP], rsp
        mov     eax, VMX_VMCS_HOST_RSP
         vmwrite rax, rsp
```
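The hunks above only rename the structure path the assembly uses to reach the cached uHostRIP/uHostRSP values, which now live in the ring-0-only HMR0PERVCPU data reached via GVMCPU; the logic is unchanged: the current host RIP/RSP are compared against the cached copies, and the VMWRITEs to the VMCS host-state fields are only issued when they differ. A minimal C++ restatement of that caching pattern is sketched below; the struct and VmWrite() helper are illustrative stand-ins rather than VirtualBox APIs, and only the two VMCS field encodings are the architectural ones.

```cpp
#include <cstdint>
#include <cstdio>

// VMCS field encodings for the host RIP/RSP fields (per the Intel SDM).
constexpr uint32_t VMX_VMCS_HOST_RSP = 0x6c14;
constexpr uint32_t VMX_VMCS_HOST_RIP = 0x6c16;

// Hypothetical stand-in for the privileged VMWRITE instruction; the real code
// issues vmwrite directly from HMR0A.asm.
static void VmWrite(uint32_t uFieldEnc, uint64_t uValue)
{
    std::printf("vmwrite %#x <- %#llx\n", uFieldEnc, (unsigned long long)uValue);
}

// Per-VCPU cache of the last values written to the VMCS host-state area.
struct HostStateCache
{
    uint64_t uHostRIP = 0;
    uint64_t uHostRSP = 0;
};

// Skip the comparatively expensive VMWRITEs on the VM-entry hot path whenever
// the host RIP/RSP to be used on the next VM-exit are unchanged.
static void UpdateHostRipRsp(HostStateCache &cache, uint64_t uRip, uint64_t uRsp)
{
    if (cache.uHostRIP != uRip)
    {
        cache.uHostRIP = uRip;
        VmWrite(VMX_VMCS_HOST_RIP, uRip);
    }
    if (cache.uHostRSP != uRsp)
    {
        cache.uHostRSP = uRsp;
        VmWrite(VMX_VMCS_HOST_RSP, uRsp);
    }
}
```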
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
```diff
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r87466)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r87469)
@@ -1399,5 +1399,5 @@
      * if its content differs, we would have to update the host MSRs anyway.
      */
-    pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
+    pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
 }
 else
@@ -2323,5 +2323,5 @@
     {
         /* Someone else can do the work. */
-        pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
+        pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
     }
 }
@@ -2471,15 +2471,15 @@
      * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in hmR0VmxSetupVmcsProcCtls().
      */
-    if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
+    if (!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
     {
-        Assert(!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST));   /* Guest MSRs better not be loaded now. */
+        Assert(!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)); /* Guest MSRs better not be loaded now. */
         if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
         {
-            pVCpu->hm.s.vmx.u64HostMsrLStar        = ASMRdMsr(MSR_K8_LSTAR);
-            pVCpu->hm.s.vmx.u64HostMsrStar         = ASMRdMsr(MSR_K6_STAR);
-            pVCpu->hm.s.vmx.u64HostMsrSfMask       = ASMRdMsr(MSR_K8_SF_MASK);
-            pVCpu->hm.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+            pVCpu->hmr0.s.vmx.u64HostMsrLStar        = ASMRdMsr(MSR_K8_LSTAR);
+            pVCpu->hmr0.s.vmx.u64HostMsrStar         = ASMRdMsr(MSR_K6_STAR);
+            pVCpu->hmr0.s.vmx.u64HostMsrSfMask       = ASMRdMsr(MSR_K8_SF_MASK);
+            pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
         }
-        pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
+        pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
     }
 }
@@ -2528,5 +2528,5 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));

-    Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
+    Assert(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
     if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
     {
@@ -2542,9 +2542,9 @@
      */
     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    if (   !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
-        && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostMsrKernelGsBase
-        && pCtx->msrLSTAR        == pVCpu->hm.s.vmx.u64HostMsrLStar
-        && pCtx->msrSTAR         == pVCpu->hm.s.vmx.u64HostMsrStar
-        && pCtx->msrSFMASK       == pVCpu->hm.s.vmx.u64HostMsrSfMask)
+    if (   !(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
+        && pCtx->msrKERNELGSBASE == pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase
+        && pCtx->msrLSTAR        == pVCpu->hmr0.s.vmx.u64HostMsrLStar
+        && pCtx->msrSTAR         == pVCpu->hmr0.s.vmx.u64HostMsrStar
+        && pCtx->msrSFMASK       == pVCpu->hmr0.s.vmx.u64HostMsrSfMask)
     {
 #ifdef VBOX_STRICT
@@ -2563,5 +2563,5 @@
         }
     }
-    pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
+    pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
 }

@@ -2582,16 +2582,16 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));

-    if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
+    if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
     {
-        Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
+        Assert(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
         if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
         {
-            ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hm.s.vmx.u64HostMsrLStar);
-            ASMWrMsr(MSR_K6_STAR,           pVCpu->hm.s.vmx.u64HostMsrStar);
-            ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hm.s.vmx.u64HostMsrSfMask);
-            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostMsrKernelGsBase);
+            ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hmr0.s.vmx.u64HostMsrLStar);
+            ASMWrMsr(MSR_K6_STAR,           pVCpu->hmr0.s.vmx.u64HostMsrStar);
+            ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hmr0.s.vmx.u64HostMsrSfMask);
+            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase);
         }
     }
-    pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
+    pVCpu->hmr0.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
 }

@@ -4605,10 +4605,10 @@
      * asserting. Was observed booting Solaris 10u10 32-bit guest.
      */
-    if (pVCpu->hm.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
+    if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
     {
-        Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags,
+        Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hmr0.s.vmx.fRestoreHostFlags,
                   pVCpu->idCpu));
-        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
-        pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+        VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
+        pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
     }

@@ -4622,39 +4622,39 @@
     if (uHostCr4 & X86_CR4_FSGSBASE)
     {
-        hmR0VmxExportHostSegmentRegsAsmHlp(&pVCpu->hm.s.vmx.RestoreHost, true /*fHaveFsGsBase*/);
+        hmR0VmxExportHostSegmentRegsAsmHlp(&pVCpu->hmr0.s.vmx.RestoreHost, true /*fHaveFsGsBase*/);
         fRestoreHostFlags = VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE;
     }
     else
     {
-        hmR0VmxExportHostSegmentRegsAsmHlp(&pVCpu->hm.s.vmx.RestoreHost, false /*fHaveFsGsBase*/);
+        hmR0VmxExportHostSegmentRegsAsmHlp(&pVCpu->hmr0.s.vmx.RestoreHost, false /*fHaveFsGsBase*/);
         fRestoreHostFlags = 0;
     }
-    RTSEL uSelES = pVCpu->hm.s.vmx.RestoreHost.uHostSelES;
-    RTSEL uSelDS = pVCpu->hm.s.vmx.RestoreHost.uHostSelDS;
-    RTSEL uSelFS = pVCpu->hm.s.vmx.RestoreHost.uHostSelFS;
-    RTSEL uSelGS = pVCpu->hm.s.vmx.RestoreHost.uHostSelGS;
+    RTSEL uSelES = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelES;
+    RTSEL uSelDS = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelDS;
+    RTSEL uSelFS = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelFS;
+    RTSEL uSelGS = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelGS;
 #else
-    pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = ASMGetTR();
-    pVCpu->hm.s.vmx.RestoreHost.uHostSelSS = ASMGetSS();
-    pVCpu->hm.s.vmx.RestoreHost.uHostSelCS = ASMGetCS();
-    ASMGetGDTR((PRTGDTR)&pVCpu->hm.s.vmx.RestoreHost.HostGdtr);
-    ASMGetIDTR((PRTIDTR)&pVCpu->hm.s.vmx.RestoreHost.HostIdtr);
+    pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR = ASMGetTR();
+    pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS = ASMGetSS();
+    pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS = ASMGetCS();
+    ASMGetGDTR((PRTGDTR)&pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr);
+    ASMGetIDTR((PRTIDTR)&pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr);
     if (uHostCr4 & X86_CR4_FSGSBASE)
     {
-        pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = ASMGetFSBase();
-        pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = ASMGetGSBase();
+        pVCpu->hmr0.s.vmx.RestoreHost.uHostFSBase = ASMGetFSBase();
+        pVCpu->hmr0.s.vmx.RestoreHost.uHostGSBase = ASMGetGSBase();
         fRestoreHostFlags = VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE;
     }
     else
     {
-        pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = ASMRdMsr(MSR_K8_FS_BASE);
-        pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = ASMRdMsr(MSR_K8_GS_BASE);
+        pVCpu->hmr0.s.vmx.RestoreHost.uHostFSBase = ASMRdMsr(MSR_K8_FS_BASE);
+        pVCpu->hmr0.s.vmx.RestoreHost.uHostGSBase = ASMRdMsr(MSR_K8_GS_BASE);
         fRestoreHostFlags = 0;
     }
     RTSEL uSelES, uSelDS, uSelFS, uSelGS;
-    pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS = ASMGetDS();
-    pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES = ASMGetES();
-    pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS = ASMGetFS();
-    pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS = ASMGetGS();
+    pVCpu->hmr0.s.vmx.RestoreHost.uHostSelDS = uSelDS = ASMGetDS();
+    pVCpu->hmr0.s.vmx.RestoreHost.uHostSelES = uSelES = ASMGetES();
+    pVCpu->hmr0.s.vmx.RestoreHost.uHostSelFS = uSelFS = ASMGetFS();
+    pVCpu->hmr0.s.vmx.RestoreHost.uHostSelGS = uSelGS = ASMGetGS();
 #endif

@@ -4672,5 +4672,5 @@
 #define VMXLOCAL_ADJUST_HOST_SEG(a_Seg, a_uVmcsVar) \
     do { \
-        (a_uVmcsVar) = pVCpu->hm.s.vmx.RestoreHost.uHostSel##a_Seg; \
+        (a_uVmcsVar) = pVCpu->hmr0.s.vmx.RestoreHost.uHostSel##a_Seg; \
         if ((a_uVmcsVar) & X86_SEL_RPL) \
         { \
@@ -4689,5 +4689,5 @@
 #define VMXLOCAL_ADJUST_HOST_SEG(a_Seg, a_uVmcsVar) \
     do { \
-        (a_uVmcsVar) = pVCpu->hm.s.vmx.RestoreHost.uHostSel##a_Seg; \
+        (a_uVmcsVar) = pVCpu->hmr0.s.vmx.RestoreHost.uHostSel##a_Seg; \
         if ((a_uVmcsVar) & (X86_SEL_RPL | X86_SEL_LDT)) \
         { \
@@ -4712,7 +4712,7 @@

     /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
-    Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelTR & X86_SEL_RPL)); Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelTR & X86_SEL_LDT)); Assert(pVCpu->hm.s.vmx.RestoreHost.uHostSelTR);
-    Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelCS & X86_SEL_RPL)); Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelCS & X86_SEL_LDT)); Assert(pVCpu->hm.s.vmx.RestoreHost.uHostSelCS);
-    Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelSS & X86_SEL_RPL)); Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelSS & X86_SEL_LDT));
+    Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR & X86_SEL_RPL)); Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR & X86_SEL_LDT)); Assert(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR);
+    Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS & X86_SEL_RPL)); Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS & X86_SEL_LDT)); Assert(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS);
+    Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS & X86_SEL_RPL)); Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS & X86_SEL_LDT));
     Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
     Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
@@ -4724,5 +4724,5 @@
      * them to the maximum limit (0xffff) on every VM-exit.
      */
-    if (pVCpu->hm.s.vmx.RestoreHost.HostGdtr.cb != 0xffff)
+    if (pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.cb != 0xffff)
         fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;

@@ -4737,7 +4737,7 @@
      */
 #if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
-    if (pVCpu->hm.s.vmx.RestoreHost.HostIdtr.cb < 0x0fff)
+    if (pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr.cb < 0x0fff)
 #else
-    if (pVCpu->hm.s.vmx.RestoreHost.HostIdtr.cb != 0xffff)
+    if (pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr.cb != 0xffff)
 #endif
         fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
@@ -4748,10 +4748,10 @@
      * RPL should be too in most cases.
      */
-    RTSEL const uSelTR = pVCpu->hm.s.vmx.RestoreHost.uHostSelTR;
-    AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= pVCpu->hm.s.vmx.RestoreHost.HostGdtr.cb,
-                    ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, pVCpu->hm.s.vmx.RestoreHost.HostGdtr.cb),
+    RTSEL const uSelTR = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR;
+    AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.cb,
+                    ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.cb),
                     VERR_VMX_INVALID_HOST_STATE);

-    PCX86DESCHC pDesc = (PCX86DESCHC)(pVCpu->hm.s.vmx.RestoreHost.HostGdtr.uAddr + (uSelTR & X86_SEL_MASK));
+    PCX86DESCHC pDesc = (PCX86DESCHC)(pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.uAddr + (uSelTR & X86_SEL_MASK));
     uintptr_t const uTRBase = X86DESC64_BASE(pDesc);

@@ -4780,11 +4780,11 @@
             /* The GDT is read-only but the writable GDT is available. */
             fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_NEED_WRITABLE;
-            pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.cb = pVCpu->hm.s.vmx.RestoreHost.HostGdtr.cb;
-            int rc = SUPR0GetCurrentGdtRw(&pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.uAddr);
+            pVCpu->hmr0.s.vmx.RestoreHost.HostGdtrRw.cb = pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.cb;
+            int rc = SUPR0GetCurrentGdtRw(&pVCpu->hmr0.s.vmx.RestoreHost.HostGdtrRw.uAddr);
             AssertRCReturn(rc, rc);
         }
     }

-    pVCpu->hm.s.vmx.fRestoreHostFlags = fRestoreHostFlags;
+    pVCpu->hmr0.s.vmx.fRestoreHostFlags = fRestoreHostFlags;

     /*
@@ -4792,16 +4792,16 @@
      */
     int rc;
-    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_CS_SEL,  pVCpu->hm.s.vmx.RestoreHost.uHostSelCS);      AssertRC(rc);
-    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_SS_SEL,  pVCpu->hm.s.vmx.RestoreHost.uHostSelSS);      AssertRC(rc);
+    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_CS_SEL,  pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS);    AssertRC(rc);
+    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_SS_SEL,  pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS);    AssertRC(rc);
     rc = VMXWriteVmcs16(VMX_VMCS16_HOST_DS_SEL,  uSelDS);                                      AssertRC(rc);
     rc = VMXWriteVmcs16(VMX_VMCS16_HOST_ES_SEL,  uSelES);                                      AssertRC(rc);
     rc = VMXWriteVmcs16(VMX_VMCS16_HOST_FS_SEL,  uSelFS);                                      AssertRC(rc);
     rc = VMXWriteVmcs16(VMX_VMCS16_HOST_GS_SEL,  uSelGS);                                      AssertRC(rc);
-    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_TR_SEL,  pVCpu->hm.s.vmx.RestoreHost.uHostSelTR);      AssertRC(rc);
-    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GDTR_BASE, pVCpu->hm.s.vmx.RestoreHost.HostGdtr.uAddr);  AssertRC(rc);
-    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_IDTR_BASE, pVCpu->hm.s.vmx.RestoreHost.HostIdtr.uAddr);  AssertRC(rc);
+    rc = VMXWriteVmcs16(VMX_VMCS16_HOST_TR_SEL,  pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR);    AssertRC(rc);
+    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GDTR_BASE, pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.uAddr); AssertRC(rc);
+    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_IDTR_BASE, pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr.uAddr); AssertRC(rc);
     rc = VMXWriteVmcsNw(VMX_VMCS_HOST_TR_BASE,   uTRBase);                                     AssertRC(rc);
-    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_FS_BASE,   pVCpu->hm.s.vmx.RestoreHost.uHostFSBase);     AssertRC(rc);
-    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GS_BASE,   pVCpu->hm.s.vmx.RestoreHost.uHostGSBase);     AssertRC(rc);
+    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_FS_BASE,   pVCpu->hmr0.s.vmx.RestoreHost.uHostFSBase);   AssertRC(rc);
+    rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GS_BASE,   pVCpu->hmr0.s.vmx.RestoreHost.uHostGSBase);   AssertRC(rc);

     return VINF_SUCCESS;
@@ -7704,5 +7704,5 @@
     {
         if (   pVM->hm.s.fAllow64BitGuests
-            && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
+            && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
             pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
     }
@@ -7711,5 +7711,5 @@
     {
         if (   pVM->hm.s.fAllow64BitGuests
-            && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
+            && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
         {
             pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
@@ -8306,13 +8306,13 @@

     /* Restore host-state bits that VT-x only restores partially. */
-    if (pVCpu->hm.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
+    if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
     {
-        Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
-        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
+        Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hmr0.s.vmx.fRestoreHostFlags, idCpu));
+        VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
     }
-    pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+    pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;

     /* Restore the lazy host MSRs as we're leaving VT-x context. */
-    if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
+    if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
     {
         /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
@@ -8323,11 +8323,11 @@
         }
         hmR0VmxLazyRestoreHostMsrs(pVCpu);
-        Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
+        Assert(!pVCpu->hmr0.s.vmx.fLazyMsrs);
     }
     else
-        pVCpu->hm.s.vmx.fLazyMsrs = 0;
+        pVCpu->hmr0.s.vmx.fLazyMsrs = 0;

     /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
-    pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
+    pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;

     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
@@ -8596,14 +8596,14 @@

     /* Restore host-state bits that VT-x only restores partially. */
-    if (pVCpu->hm.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
-        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
-    pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
+    if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
+        VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
+    pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;

     /* Restore the lazy host MSRs as we're leaving VT-x context. */
-    if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
+    if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
         hmR0VmxLazyRestoreHostMsrs(pVCpu);

     /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
-    pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
+    pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
     VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);

@@ -10973,9 +10973,9 @@
      * Update the host MSRs values in the VM-exit MSR-load area.
      */
-    if (!pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs)
+    if (!pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs)
     {
         if (pVmcsInfo->cExitMsrLoad > 0)
             hmR0VmxUpdateAutoLoadHostMsrs(pVCpu, pVmcsInfo);
-        pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = true;
+        pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = true;
     }

@@ -11032,5 +11032,5 @@

 #ifdef VBOX_STRICT
-    Assert(pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs);
+    Assert(pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs);
     hmR0VmxCheckAutoLoadStoreMsrs(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
     hmR0VmxCheckHostEferMsr(pVCpu, pVmcsInfo);
@@ -11090,5 +11090,5 @@
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

-    pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED;   /* Some host state messed up by VMX needs restoring. */
+    pVCpu->hmr0.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Some host state messed up by VMX needs restoring. */
     pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;        /* Use VMRESUME instead of VMLAUNCH in the next run. */
 #ifdef VBOX_STRICT
```
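Most of the HMVMXR0.cpp hunks are the same mechanical rename from pVCpu->hm.s.vmx to pVCpu->hmr0.s.vmx, but the code they touch also illustrates the lazy syscall-MSR handling this state drives: the host values of LSTAR, STAR, SF_MASK and KERNEL_GS_BASE are saved once per VT-x session (VMX_LAZY_MSRS_SAVED_HOST), the guest values are only written if something actually needs to change (VMX_LAZY_MSRS_LOADED_GUEST), and the host values are written back when leaving VT-x context. The sketch below restates that state machine for a single MSR; the flag values and accessor functions are invented for illustration and are not the VirtualBox definitions.

```cpp
#include <cassert>
#include <cstdint>

// Illustrative state bits mirroring VMX_LAZY_MSRS_SAVED_HOST/LOADED_GUEST from
// the diff; the numeric values are made up for this sketch.
constexpr uint32_t LAZY_MSRS_SAVED_HOST   = 0x1;
constexpr uint32_t LAZY_MSRS_LOADED_GUEST = 0x2;

// Hypothetical MSR accessors standing in for ASMRdMsr()/ASMWrMsr(); backed by a
// plain variable so the sketch is self-contained.
static uint64_t g_uLStarShadow = 0xffff800000000000ull;
static uint64_t ReadLStarMsr()               { return g_uLStarShadow; }
static void     WriteLStarMsr(uint64_t uVal) { g_uLStarShadow = uVal; }

struct LazyMsrState
{
    uint32_t fLazyMsrs    = 0;  // which of the bits above are currently set
    uint64_t u64HostLStar = 0;  // host value saved before loading the guest's
};

// Save the host value once, before the first guest load in this VT-x session.
static void SaveHostMsrs(LazyMsrState &s)
{
    if (!(s.fLazyMsrs & LAZY_MSRS_SAVED_HOST))
    {
        s.u64HostLStar = ReadLStarMsr();
        s.fLazyMsrs |= LAZY_MSRS_SAVED_HOST;
    }
}

// Load the guest value, skipping the WRMSR when nothing has been loaded yet and
// the guest value already equals the saved host value.
static void LoadGuestMsrs(LazyMsrState &s, uint64_t uGuestLStar)
{
    assert(s.fLazyMsrs & LAZY_MSRS_SAVED_HOST);
    if (   (s.fLazyMsrs & LAZY_MSRS_LOADED_GUEST)
        || uGuestLStar != s.u64HostLStar)
        WriteLStarMsr(uGuestLStar);
    s.fLazyMsrs |= LAZY_MSRS_LOADED_GUEST;
}

// Put the host value back when leaving VT-x context and clear both flags.
static void RestoreHostMsrs(LazyMsrState &s)
{
    if (s.fLazyMsrs & LAZY_MSRS_LOADED_GUEST)
        WriteLStarMsr(s.u64HostLStar);
    s.fLazyMsrs &= ~(LAZY_MSRS_LOADED_GUEST | LAZY_MSRS_SAVED_HOST);
}
```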
trunk/src/VBox/VMM/include/HMInternal.h
```diff
--- trunk/src/VBox/VMM/include/HMInternal.h (r87466)
+++ trunk/src/VBox/VMM/include/HMInternal.h (r87469)
@@ -1016,33 +1016,4 @@
     /** Cached guest APIC-base MSR for identifying when to map the APIC-access page. */
     uint64_t                    u64GstMsrApicBase;
-    /** @} */
-
-    /** @name Host information.
-     * @{ */
-    /** Host LSTAR MSR to restore lazily while leaving VT-x. */
-    uint64_t                    u64HostMsrLStar;
-    /** Host STAR MSR to restore lazily while leaving VT-x. */
-    uint64_t                    u64HostMsrStar;
-    /** Host SF_MASK MSR to restore lazily while leaving VT-x. */
-    uint64_t                    u64HostMsrSfMask;
-    /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */
-    uint64_t                    u64HostMsrKernelGsBase;
-    /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */
-    uint32_t                    fLazyMsrs;
-    /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */
-    bool                        fUpdatedHostAutoMsrs;
-    /** Alignment. */
-    uint8_t                     au8Alignment0[3];
-    /** Which host-state bits to restore before being preempted, see
-     * VMX_RESTORE_HOST_XXX. */
-    uint32_t                    fRestoreHostFlags;
-    /** Alignment. */
-    uint32_t                    u32Alignment0;
-    /** Current VMX_VMCS_HOST_RIP value (only used in HMR0A.asm). */
-    uint64_t                    uHostRIP;
-    /** Current VMX_VMCS_HOST_RSP value (only used in HMR0A.asm). */
-    uint64_t                    uHostRSP;
-    /** The host-state restoration structure. */
-    VMXRESTOREHOST              RestoreHost;
     /** @} */

@@ -1280,5 +1251,4 @@
 AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfo,       8);
 AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.VmcsInfoNstGst, 8);
-AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx.RestoreHost,    8);
 AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) svm, 8);
 AssertCompileMemberAlignment(HMCPU, Event, 8);
@@ -1297,4 +1267,33 @@
     /** Ring-0 pointer to the hardware-assisted VMX execution function. */
     PFNHMVMXSTARTVM             pfnStartVm;
+
+    /** @name Host information.
+     * @{ */
+    /** Host LSTAR MSR to restore lazily while leaving VT-x. */
+    uint64_t                    u64HostMsrLStar;
+    /** Host STAR MSR to restore lazily while leaving VT-x. */
+    uint64_t                    u64HostMsrStar;
+    /** Host SF_MASK MSR to restore lazily while leaving VT-x. */
+    uint64_t                    u64HostMsrSfMask;
+    /** Host KernelGS-Base MSR to restore lazily while leaving VT-x. */
+    uint64_t                    u64HostMsrKernelGsBase;
+    /** The mask of lazy MSRs swap/restore state, see VMX_LAZY_MSRS_XXX. */
+    uint32_t                    fLazyMsrs;
+    /** Whether the host MSR values are up-to-date in the auto-load/store MSR area. */
+    bool                        fUpdatedHostAutoMsrs;
+    /** Alignment. */
+    uint8_t                     au8Alignment0[3];
+    /** Which host-state bits to restore before being preempted, see
+     * VMX_RESTORE_HOST_XXX. */
+    uint32_t                    fRestoreHostFlags;
+    /** Alignment. */
+    uint32_t                    u32Alignment0;
+    /** Current VMX_VMCS_HOST_RIP value (only used in HMR0A.asm). */
+    uint64_t                    uHostRIP;
+    /** Current VMX_VMCS_HOST_RSP value (only used in HMR0A.asm). */
+    uint64_t                    uHostRSP;
+    /** The host-state restoration structure. */
+    VMXRESTOREHOST              RestoreHost;
+    /** @} */
 } vmx;

@@ -1309,4 +1308,5 @@
 /** Pointer to HM ring-0 VMCPU instance data. */
 typedef HMR0PERVCPU *PHMR0PERVCPU;
+AssertCompileMemberAlignment(HMR0PERVCPU, HM_UNION_NM(u.) vmx.RestoreHost, 8);


```
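The header change is the core of the changeset: the lazily saved host MSRs, the VMX_LAZY_MSRS_XXX and VMX_RESTORE_HOST_XXX state, the cached uHostRIP/uHostRSP values and the VMXRESTOREHOST structure move out of the HMCPU VMX data (part of the per-VCPU state shared with ring-3) into HMR0CPUVMX inside HMR0PERVCPU, which exists only in the ring-0 GVMCPU view. A rough sketch of the resulting split, using only the fields visible in this diff and none of the real declarations, might look like this:

```cpp
#include <cstdint>

// Illustrative only: simplified shapes of the shared vs. ring-0-only per-VCPU
// HM data after this changeset.  Everything else in the real structures is
// omitted, and the type names are invented for the sketch.

// Visible to both ring-3 and ring-0 (lives in the shared per-VCPU allocation).
struct SharedHmVmxPerVcpu
{
    uint64_t u64GstMsrApicBase;     // guest-facing state stays here
};

// Ring-0 only (reached through the GVMCPU view, never exposed to ring-3).
struct Ring0HmVmxPerVcpu
{
    uint64_t u64HostMsrLStar;       // lazily saved host MSRs
    uint64_t u64HostMsrStar;
    uint64_t u64HostMsrSfMask;
    uint64_t u64HostMsrKernelGsBase;
    uint32_t fLazyMsrs;             // VMX_LAZY_MSRS_XXX state
    uint32_t fRestoreHostFlags;     // VMX_RESTORE_HOST_XXX state
    uint64_t uHostRIP;              // cached VMCS host RIP/RSP (see HMR0A.asm)
    uint64_t uHostRSP;
};
```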
trunk/src/VBox/VMM/include/HMInternal.mac
```diff
--- trunk/src/VBox/VMM/include/HMInternal.mac (r87466)
+++ trunk/src/VBox/VMM/include/HMInternal.mac (r87469)
@@ -122,17 +122,4 @@
     .u64GstMsrApicBase      resq    1

-    .u64HostMsrLStar        resq    1
-    .u64HostMsrStar         resq    1
-    .u64HostMsrSfMask       resq    1
-    .u64HostMsrKernelGsBase resq    1
-    .fLazyMsrs              resd    1
-    .fUpdatedHostAutoMsrs   resb    1
-    alignb 4
-    .fRestoreHostFlags      resd    1
-    alignb 8
-    .uHostRIP               resq    1
-    .uHostRSP               resq    1
-    .RestoreHost            resb    VMXRESTOREHOST_size
-
     .LastError.idCurrentCpu resd    1
     .LastError.idEnteredCpu resd    1
@@ -217,4 +204,17 @@
 struc HMR0CPUVMX
     .pfnStartVm             RTR0PTR_RES 1
+
+    .u64HostMsrLStar        resq    1
+    .u64HostMsrStar         resq    1
+    .u64HostMsrSfMask       resq    1
+    .u64HostMsrKernelGsBase resq    1
+    .fLazyMsrs              resd    1
+    .fUpdatedHostAutoMsrs   resb    1
+    alignb 4
+    .fRestoreHostFlags      resd    1
+    alignb 8
+    .uHostRIP               resq    1
+    .uHostRSP               resq    1
+    .RestoreHost            resb    VMXRESTOREHOST_size
 endstruc

```
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
```diff
--- trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp (r87466)
+++ trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp (r87469)
@@ -378,5 +378,5 @@
     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfo, 8);
     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfoNstGst, 8);
-    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.RestoreHost, 8);
+    CHECK_MEMBER_ALIGNMENT(HMR0PERVCPU, vmx.RestoreHost, 8);
     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.LastError, 8);
     CHECK_MEMBER_ALIGNMENT(HMCPU, svm, 8);
```
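Because HMInternal.mac mirrors the C structure layout for the assembly code in HMR0A.asm, the testcase keeps a compile-time guard on the alignment of the moved RestoreHost member, now checked against HMR0PERVCPU instead of HMCPU (matching the AssertCompileMemberAlignment added in HMInternal.h). The general idea can be illustrated with plain static_assert/offsetof checks on a toy structure; the struct below is invented for the example and is not the real layout.

```cpp
#include <cstddef>
#include <cstdint>

// Toy stand-in for a ring-0 per-VCPU structure whose layout an assembly mirror
// depends on; field names echo the diff, the layout itself is illustrative.
struct RestoreHostState
{
    uint64_t uHostFSBase;
    uint64_t uHostGSBase;
};

struct R0PerVcpu
{
    uint32_t         fRestoreHostFlags;
    uint32_t         u32Alignment0;   // explicit padding keeps the next member 8-byte aligned
    uint64_t         uHostRIP;
    uint64_t         uHostRSP;
    RestoreHostState RestoreHost;
};

// Compile-time layout checks in the spirit of AssertCompileMemberAlignment()
// and tstVMStructSize's CHECK_MEMBER_ALIGNMENT(): if these offsets drift, the
// build fails instead of the assembly mirror silently reading the wrong bytes.
static_assert(offsetof(R0PerVcpu, uHostRIP) == 8,
              "uHostRIP offset is assumed by the assembly mirror");
static_assert(offsetof(R0PerVcpu, RestoreHost) % 8 == 0,
              "RestoreHost must stay 8-byte aligned for the assembly mirror");
```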