Changeset 91297 in vbox
- Timestamp:
- Sep 17, 2021 11:51:23 AM (3 years ago)
- svn:sync-xref-src-repo-rev:
- 146957
- Location:
- trunk
- Files:
- 11 edited
Legend:
- Unmodified
- Added
- Removed
trunk/include/VBox/vmm/cpum.h
r91287 r91297 2083 2083 { 2084 2084 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2085 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2086 Assert(pVmcs); 2087 return RT_BOOL(pVmcs->u32PinCtls & uPinCtls); 2085 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & uPinCtls); 2088 2086 } 2089 2087 … … 2102 2100 { 2103 2101 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2104 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2105 Assert(pVmcs); 2106 return RT_BOOL(pVmcs->u32ProcCtls & uProcCtls); 2102 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls & uProcCtls); 2107 2103 } 2108 2104 … … 2122 2118 { 2123 2119 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2124 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2125 Assert(pVmcs); 2126 return RT_BOOL(pVmcs->u32ProcCtls2 & uProcCtls2); 2120 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls2 & uProcCtls2); 2127 2121 } 2128 2122 … … 2142 2136 { 2143 2137 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2144 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2145 Assert(pVmcs); 2146 return RT_BOOL(pVmcs->u64ProcCtls3.u & uProcCtls3); 2138 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u64ProcCtls3.u & uProcCtls3); 2147 2139 } 2148 2140 … … 2161 2153 { 2162 2154 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2163 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2164 Assert(pVmcs); 2165 return RT_BOOL(pVmcs->u32ExitCtls & uExitCtls); 2155 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ExitCtls & uExitCtls); 2166 2156 } 2167 2157 … … 2180 2170 { 2181 2171 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2182 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2183 Assert(pVmcs); 2184 return RT_BOOL(pVmcs->u32EntryCtls & uEntryCtls); 2172 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32EntryCtls & uEntryCtls); 2185 2173 } 2186 2174 … … 2229 2217 2230 2218 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2231 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);2232 Assert(pVmcs);2233 2219 2234 2220 /* NMIs have a dedicated VM-execution control for causing VM-exits. 
*/ 2235 2221 if (uVector == X86_XCPT_NMI) 2236 return RT_BOOL(p Vmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);2222 return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_NMI_EXIT); 2237 2223 2238 2224 /* Page-faults are subject to masking using its error code. */ 2239 uint32_t fXcptBitmap = p Vmcs->u32XcptBitmap;2225 uint32_t fXcptBitmap = pCtx->hwvirt.vmx.Vmcs.u32XcptBitmap; 2240 2226 if (uVector == X86_XCPT_PF) 2241 2227 { 2242 uint32_t const fXcptPFMask = p Vmcs->u32XcptPFMask;2243 uint32_t const fXcptPFMatch = p Vmcs->u32XcptPFMatch;2228 uint32_t const fXcptPFMask = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMask; 2229 uint32_t const fXcptPFMatch = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMatch; 2244 2230 if ((uErrCode & fXcptPFMask) != fXcptPFMatch) 2245 2231 fXcptBitmap ^= RT_BIT(X86_XCPT_PF); … … 2283 2269 pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF); 2284 2270 pCtx->eflags.u32 |= X86_EFL_ZF; 2285 pCtx->hwvirt.vmx. CTX_SUFF(pVmcs)->u32RoVmInstrError = enmInsErr;2271 pCtx->hwvirt.vmx.Vmcs.u32RoVmInstrError = enmInsErr; 2286 2272 } 2287 2273 … … 2310 2296 { 2311 2297 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2312 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2313 Assert(pVmcs); 2314 return pVmcs->u64AddrApicAccess.u; 2298 return pCtx->hwvirt.vmx.Vmcs.u64AddrApicAccess.u; 2315 2299 } 2316 2300 … … 2332 2316 */ 2333 2317 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2334 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);2335 Assert(pVmcs);2336 2318 uint64_t const uGstCr0 = pCtx->cr0; 2337 uint64_t const fReadShadow = p Vmcs->u64Cr0ReadShadow.u;2319 uint64_t const fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u; 2338 2320 return (fReadShadow & fGstHostMask) | (uGstCr0 & ~fGstHostMask); 2339 2321 } … … 2356 2338 */ 2357 2339 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2358 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);2359 Assert(pVmcs);2360 2340 uint64_t const uGstCr4 = pCtx->cr4; 2361 uint64_t const 
fReadShadow = p Vmcs->u64Cr4ReadShadow.u;2341 uint64_t const fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u; 2362 2342 return (fReadShadow & fGstHostMask) | (uGstCr4 & ~fGstHostMask); 2363 2343 } … … 2379 2359 */ 2380 2360 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2381 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2382 Assert(pVmcs); 2383 2384 uint32_t const fGstHostMask = (uint32_t)pVmcs->u64Cr0Mask.u; 2385 uint32_t const fReadShadow = (uint32_t)pVmcs->u64Cr0ReadShadow.u; 2361 2362 uint32_t const fGstHostMask = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u; 2363 uint32_t const fReadShadow = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u; 2386 2364 2387 2365 /* … … 2427 2405 */ 2428 2406 Assert(CPUMIsGuestInVmxNonRootMode(pCtx)); 2429 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);2430 Assert(pVmcs);2431 2407 Assert(iCrReg == 0 || iCrReg == 4); 2432 2408 … … 2435 2411 if (iCrReg == 0) 2436 2412 { 2437 fGstHostMask = p Vmcs->u64Cr0Mask.u;2438 fReadShadow = p Vmcs->u64Cr0ReadShadow.u;2413 fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u; 2414 fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u; 2439 2415 } 2440 2416 else 2441 2417 { 2442 fGstHostMask = p Vmcs->u64Cr4Mask.u;2443 fReadShadow = p Vmcs->u64Cr4ReadShadow.u;2418 fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr4Mask.u; 2419 fReadShadow = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u; 2444 2420 } 2445 2421 -
trunk/include/VBox/vmm/cpumctx.h
r91292 r91297 539 539 struct 540 540 { 541 /** 0x4000 - The current VMCS. */ 542 VMXVVMCS Vmcs; 543 541 544 /** 0x300 - Guest physical address of the VMXON region. */ 542 545 RTGCPHYS GCPhysVmxon; … … 563 566 * mode before execution of IRET. */ 564 567 bool fNmiUnblockingIret; 565 /** 0x330 - The current VMCS - R0 ptr. */566 R0PTRTYPE(PVMXVVMCS) pVmcsR0;567 /** 0x338 - The curent VMCS - R3 ptr. */568 R3PTRTYPE(PVMXVVMCS) pVmcsR3;569 568 /** 0X340 - The shadow VMCS - R0 ptr. */ 570 569 R0PTRTYPE(PVMXVVMCS) pShadowVmcsR0; … … 846 845 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abMsrBitmap, X86_PAGE_SIZE); 847 846 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abIoBitmap, X86_PAGE_SIZE); 848 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx. pVmcsR0, 8);847 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.Vmcs, X86_PAGE_SIZE); 849 848 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0, 8); 850 849 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0, 8); -
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r91287 r91297 2200 2200 if (CPUMIsGuestInVmxNonRootMode(pCtx)) 2201 2201 { 2202 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);2203 Assert(pVmcs);2204 2202 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING)) 2205 return uTscValue + p Vmcs->u64TscOffset.u;2203 return uTscValue + pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u; 2206 2204 return uTscValue; 2207 2205 } … … 2234 2232 { 2235 2233 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING)) 2236 { 2237 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2238 Assert(pVmcs); 2239 return uTscValue - pVmcs->u64TscOffset.u; 2240 } 2234 return uTscValue - pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u; 2241 2235 return uTscValue; 2242 2236 } … … 2833 2827 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally". 2834 2828 */ 2835 PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest; 2836 PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs); 2829 PCCPUMCTX const pCtx = &pVCpu->cpum.s.Guest; 2837 2830 if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT)) 2838 2831 { 2839 uint32_t const uCr3TargetCount = p Vmcs->u32Cr3TargetCount;2832 uint32_t const uCr3TargetCount = pCtx->hwvirt.vmx.Vmcs.u32Cr3TargetCount; 2840 2833 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT); 2841 2834 … … 2846 2839 /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */ 2847 2840 AssertCompile(VMX_V_CR3_TARGET_COUNT == 4); 2848 if ( uNewCr3 != p Vmcs->u64Cr3Target0.u2849 && uNewCr3 != p Vmcs->u64Cr3Target1.u2850 && uNewCr3 != p Vmcs->u64Cr3Target2.u2851 && uNewCr3 != p Vmcs->u64Cr3Target3.u)2841 if ( uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target0.u 2842 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target1.u 2843 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target2.u 2844 && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target3.u) 2852 2845 return true; 2853 2846 } -
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
r91044 r91297 845 845 } while (0) 846 846 847 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;848 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);847 PCCPUMCTX const pCtx = &pVCpu->cpum.GstCtx; 848 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 849 849 if (!pVmcs) 850 850 { -
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r91287 r91297 5584 5584 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu)) 5585 5585 { 5586 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);5587 Assert(pVmcs);5588 5586 switch (iCrReg) 5589 5587 { 5590 5588 /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */ 5591 case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pV mcs->u64Cr0Mask.u); break;5592 case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pV mcs->u64Cr4Mask.u); break;5589 case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); break; 5590 case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u); break; 5593 5591 5594 5592 case 3: … … 5629 5627 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0; 5630 5628 else 5631 { 5632 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 5633 Assert(pVmcs); 5634 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcs->u64Cr0Mask.u); 5635 } 5629 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); 5636 5630 uint64_t const u64GuestCr0 = u64MaskedCr0; 5637 5631 #else … … 5682 5676 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0; 5683 5677 else 5684 { 5685 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 5686 Assert(pVmcs); 5687 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcs->u64Cr0Mask.u); 5688 } 5678 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); 5689 5679 uint64_t const u64GuestCr0 = u64MaskedCr0; 5690 5680 #else -
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
r91044 r91297 683 683 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPUCC pVCpu, uint64_t u64ExitQual) 684 684 { 685 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 686 pVmcs->u64RoExitQual.u = u64ExitQual; 685 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoExitQual.u = u64ExitQual; 687 686 } 688 687 … … 696 695 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPUCC pVCpu, uint32_t uExitIntInfo) 697 696 { 698 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 699 pVmcs->u32RoExitIntInfo = uExitIntInfo; 697 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitIntInfo = uExitIntInfo; 700 698 } 701 699 … … 709 707 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPUCC pVCpu, uint32_t uErrCode) 710 708 { 711 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 712 pVmcs->u32RoExitIntErrCode = uErrCode; 709 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitIntErrCode = uErrCode; 713 710 } 714 711 … … 722 719 DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPUCC pVCpu, uint32_t uIdtVectorInfo) 723 720 { 724 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 725 pVmcs->u32RoIdtVectoringInfo = uIdtVectorInfo; 721 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringInfo = uIdtVectorInfo; 726 722 } 727 723 … … 735 731 DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPUCC pVCpu, uint32_t uErrCode) 736 732 { 737 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 738 pVmcs->u32RoIdtVectoringErrCode = uErrCode; 733 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringErrCode = uErrCode; 739 734 } 740 735 … … 748 743 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPUCC pVCpu, uint64_t uGuestLinearAddr) 749 744 { 750 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 751 pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr; 745 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoGuestLinearAddr.u = uGuestLinearAddr; 752 746 } 753 747 … … 761 755 DECL_FORCE_INLINE(void) 
iemVmxVmcsSetExitGuestPhysAddr(PVMCPUCC pVCpu, uint64_t uGuestPhysAddr) 762 756 { 763 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 764 pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr; 757 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoGuestPhysAddr.u = uGuestPhysAddr; 765 758 } 766 759 … … 777 770 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPUCC pVCpu, uint32_t cbInstr) 778 771 { 779 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 780 pVmcs->u32RoExitInstrLen = cbInstr; 772 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitInstrLen = cbInstr; 781 773 } 782 774 … … 790 782 DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPUCC pVCpu, uint32_t uExitInstrInfo) 791 783 { 792 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 793 pVmcs->u32RoExitInstrInfo = uExitInstrInfo; 784 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitInstrInfo = uExitInstrInfo; 794 785 } 795 786 … … 803 794 DECL_FORCE_INLINE(void) iemVmxVmcsSetGuestPendingDbgXcpts(PVMCPUCC pVCpu, uint64_t uGuestPendingDbgXcpts) 804 795 { 805 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);806 796 Assert(!(uGuestPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK)); 807 pV mcs->u64GuestPendingDbgXcpts.u = uGuestPendingDbgXcpts;797 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestPendingDbgXcpts.u = uGuestPendingDbgXcpts; 808 798 } 809 799 … … 870 860 DECL_FORCE_INLINE(int) iemVmxWriteCurrentVmcsToGstMem(PVMCPUCC pVCpu) 871 861 { 872 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));873 862 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu)); 874 863 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu), 875 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));864 &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs)); 876 865 return rc; 877 866 } … … 886 875 DECL_FORCE_INLINE(int) iemVmxReadCurrentVmcsFromGstMem(PVMCPUCC pVCpu) 887 876 { 888 
Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));889 877 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu)); 890 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),891 IEM_VMX_GET_CURRENT_VMCS(pVCpu), sizeof( VMXVVMCS));878 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs, 879 IEM_VMX_GET_CURRENT_VMCS(pVCpu), sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs)); 892 880 return rc; 893 881 } … … 1167 1155 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs". 1168 1156 */ 1169 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1157 PVMXVVMCS pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 1170 1158 1171 1159 /* Save control registers. */ … … 1310 1298 IEM_STATIC uint32_t iemVmxCalcPreemptTimer(PVMCPUCC pVCpu) 1311 1299 { 1312 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1313 Assert(pVmcs);1314 1315 1300 /* 1316 1301 * Assume the following: … … 1340 1325 */ 1341 1326 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT); 1342 uint32_t const uVmcsPreemptVal = pV mcs->u32PreemptTimer;1327 uint32_t const uVmcsPreemptVal = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PreemptTimer; 1343 1328 if (uVmcsPreemptVal > 0) 1344 1329 { … … 1366 1351 */ 1367 1352 /* CS, SS, ES, DS, FS, GS. */ 1368 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1353 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 1369 1354 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++) 1370 1355 { … … 1469 1454 * See Intel spec. 27.3.4 "Saving Non-Register State". 
1470 1455 */ 1471 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1456 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 1472 1457 1473 1458 /* … … 1559 1544 IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPUCC pVCpu, uint32_t uExitReason) 1560 1545 { 1561 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1562 Assert(pVmcs);1563 1564 1546 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu); 1565 1547 iemVmxVmexitSaveGuestSegRegs(pVCpu); 1566 1548 1567 pV mcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;1568 pV mcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;1569 pV mcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */1549 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRip.u = pVCpu->cpum.GstCtx.rip; 1550 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp; 1551 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */ 1570 1552 1571 1553 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason); … … 1586 1568 * See Intel spec. 27.4 "Saving MSRs". 1587 1569 */ 1588 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1589 const char * const pszFailure = "VMX-abort";1570 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 1571 const char * const pszFailure = "VMX-abort"; 1590 1572 1591 1573 /* … … 1720 1702 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs". 1721 1703 */ 1722 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1723 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);1704 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 1705 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE); 1724 1706 1725 1707 /* CR0. */ … … 1809 1791 * e.g. segment limit high bits stored in segment attributes (in bits 11:8). 
1810 1792 */ 1811 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1812 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);1793 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 1794 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE); 1813 1795 1814 1796 /* CS, SS, ES, DS, FS, GS. */ … … 1916 1898 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries". 1917 1899 */ 1918 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1919 const char * const pszFailure= "VMX-abort";1920 bool const fHostInLongMode= RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);1900 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 1901 const char * const pszFailure = "VMX-abort"; 1902 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE); 1921 1903 1922 1904 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE) … … 1963 1945 * See Intel spec. 27.6 "Loading MSRs". 1964 1946 */ 1965 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);1966 const char * const pszFailure = "VMX-abort";1947 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 1948 const char * const pszFailure = "VMX-abort"; 1967 1949 1968 1950 /* … … 2048 2030 * See Intel spec. 27.5 "Loading Host State". 2049 2031 */ 2050 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);2051 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);2032 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 2033 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE); 2052 2034 2053 2035 /* We cannot return from a long-mode guest to a host that is not in long mode. 
*/ … … 2474 2456 return VERR_IEM_IPE_7; 2475 2457 # else 2476 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 2477 Assert(pVmcs); 2458 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 2478 2459 2479 2460 /* … … 2890 2871 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation". 2891 2872 */ 2892 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 2893 Assert(pVmcs); 2894 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u; 2873 uint32_t const fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u; 2895 2874 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS); 2896 2875 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (uNewMsw & ~fGstHostLmswMask); … … 2914 2893 IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) 2915 2894 { 2916 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 2917 Assert(pVmcs); 2918 2919 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u; 2920 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u; 2895 uint32_t const fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u; 2896 uint32_t const fReadShadow = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u; 2921 2897 2922 2898 /* … … 2996 2972 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation". 
2997 2973 */ 2998 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);2999 Assert(pVmcs);3000 2974 uint64_t uGuestCrX; 3001 2975 uint64_t fGstHostMask; … … 3004 2978 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); 3005 2979 uGuestCrX = pVCpu->cpum.GstCtx.cr0; 3006 fGstHostMask = pV mcs->u64Cr0Mask.u;2980 fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u; 3007 2981 } 3008 2982 else … … 3010 2984 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4); 3011 2985 uGuestCrX = pVCpu->cpum.GstCtx.cr4; 3012 fGstHostMask = pV mcs->u64Cr4Mask.u;2986 fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u; 3013 2987 } 3014 2988 … … 3028 3002 IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) 3029 3003 { 3030 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3031 Assert(pVmcs);3032 3004 Assert(iGReg < X86_GREG_COUNT); 3033 3005 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3); … … 3037 3009 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally". 3038 3010 */ 3039 if (pV mcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)3011 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT) 3040 3012 { 3041 3013 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n")); … … 3103 3075 IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) 3104 3076 { 3105 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3106 Assert(pVmcs);3107 3077 Assert(iGReg < X86_GREG_COUNT); 3108 3078 … … 3111 3081 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally". 
3112 3082 */ 3113 if (pV mcs->u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)3083 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT) 3114 3084 { 3115 3085 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n")); … … 3140 3110 IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) 3141 3111 { 3142 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3143 Assert(pVmcs);3144 3112 Assert(iGReg < X86_GREG_COUNT); 3145 3113 … … 3148 3116 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally". 3149 3117 */ 3150 if (pV mcs->u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)3118 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT) 3151 3119 { 3152 3120 Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n")); … … 3186 3154 Assert(iGReg < X86_GREG_COUNT); 3187 3155 3188 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 3189 Assert(pVmcs); 3190 3191 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT) 3156 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT) 3192 3157 { 3193 3158 uint32_t const uDirection = uInstrId == VMXINSTRID_MOV_TO_DRX ? 
VMX_EXIT_QUAL_DRX_DIRECTION_WRITE … … 3350 3315 IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPUCC pVCpu, uint8_t cbInstr) 3351 3316 { 3352 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 3353 Assert(pVmcs); 3317 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 3354 3318 3355 3319 /* … … 3491 3455 IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) 3492 3456 { 3493 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3494 Assert(pVmcs);3495 3457 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER)); 3496 Assert(pV mcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);3458 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER); 3497 3459 3498 3460 /* Import the hardware virtualization state (for nested-guest VM-entry TSC-tick). */ … … 3500 3462 3501 3463 /* Save the VMX-preemption timer value (of 0) back in to the VMCS if the CPU supports this feature. */ 3502 if (pV mcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)3503 pV mcs->u32PreemptTimer = 0;3464 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER) 3465 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PreemptTimer = 0; 3504 3466 3505 3467 /* Cause the VMX-preemption timer VM-exit. The Exit qualification MBZ. */ … … 3521 3483 IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending) 3522 3484 { 3523 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3524 Assert(pVmcs);3525 3485 Assert(!fIntPending || uVector == 0); 3526 3486 3527 3487 /* The VM-exit is subject to "External interrupt exiting" being set. */ 3528 if (pV mcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)3488 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT) 3529 3489 { 3530 3490 if (fIntPending) … … 3536 3496 * See Intel spec 25.2 "Other Causes Of VM Exits". 
3537 3497 */ 3538 if (!(pV mcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))3498 if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)) 3539 3499 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */); 3540 3500 … … 3557 3517 */ 3558 3518 uint32_t uExitIntInfo; 3559 if (pV mcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)3519 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT) 3560 3520 { 3561 3521 bool const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret; … … 3589 3549 IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) 3590 3550 { 3591 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 3592 Assert(pVmcs); 3593 3594 uint32_t const fXcptBitmap = pVmcs->u32XcptBitmap; 3551 uint32_t const fXcptBitmap = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap; 3595 3552 if (fXcptBitmap & RT_BIT(X86_XCPT_DF)) 3596 3553 { … … 3653 3610 uint8_t cbInstr) 3654 3611 { 3655 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3656 Assert(pVmcs);3657 3658 3612 /* 3659 3613 * If the event is being injected as part of VM-entry, it is -not- subject to event … … 3673 3627 if ( uVector == X86_XCPT_NMI 3674 3628 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT) 3675 && (pV mcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))3629 && (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)) 3676 3630 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true; 3677 3631 else … … 3690 3644 if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT) 3691 3645 { 3692 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pV mcs->u32RoIdtVectoringInfo));3646 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringInfo)); 3693 3647 return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */); 3694 3648 } … … 3703 3657 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) 3704 3658 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR))) 3705 {3706 3659 
fIntercept = CPUMIsGuestVmxXcptInterceptSet(&pVCpu->cpum.GstCtx, uVector, uErrCode); 3707 }3708 3660 else 3709 3661 { … … 3887 3839 IEM_STATIC uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) 3888 3840 { 3889 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3890 Assert(pVmcs);3891 3841 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t)); 3892 3842 3893 uint32_t uReg;3894 RTGCPHYS const GCPhysVirtApic = pV mcs->u64AddrVirtApic.u;3843 uint32_t uReg = 0; 3844 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u; 3895 3845 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg)); 3896 if (RT_SUCCESS(rc)) 3897 { /* likely */ } 3898 else 3899 { 3900 AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg, 3901 GCPhysVirtApic)); 3902 uReg = 0; 3903 } 3846 AssertMsgStmt(RT_SUCCESS(rc), 3847 ("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n", 3848 sizeof(uReg), offReg, GCPhysVirtApic, rc), 3849 uReg = 0); 3904 3850 return uReg; 3905 3851 } … … 3915 3861 IEM_STATIC uint64_t iemVmxVirtApicReadRaw64(PVMCPUCC pVCpu, uint16_t offReg) 3916 3862 { 3917 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3918 Assert(pVmcs);3919 3863 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t)); 3920 3864 3921 uint64_t uReg;3922 RTGCPHYS const GCPhysVirtApic = pV mcs->u64AddrVirtApic.u;3865 uint64_t uReg = 0; 3866 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u; 3923 3867 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg)); 3924 if (RT_SUCCESS(rc)) 3925 { /* likely */ } 3926 else 3927 { 3928 AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg, 3929 GCPhysVirtApic)); 3930 uReg = 0; 3931 } 3868 AssertMsgStmt(RT_SUCCESS(rc), 
3869 ("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n", 3870 sizeof(uReg), offReg, GCPhysVirtApic, rc), 3871 uReg = 0); 3932 3872 return uReg; 3933 3873 } … … 3943 3883 IEM_STATIC void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) 3944 3884 { 3945 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3946 Assert(pVmcs);3947 3885 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t)); 3948 3886 3949 RTGCPHYS const GCPhysVirtApic = pV mcs->u64AddrVirtApic.u;3887 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u; 3950 3888 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg)); 3951 if (RT_SUCCESS(rc)) 3952 { /* likely */ } 3953 else 3954 { 3955 AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg, 3956 GCPhysVirtApic)); 3957 } 3889 AssertMsgRC(rc, ("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n", 3890 sizeof(uReg), offReg, GCPhysVirtApic, rc)); 3958 3891 } 3959 3892 … … 3968 3901 IEM_STATIC void iemVmxVirtApicWriteRaw64(PVMCPUCC pVCpu, uint16_t offReg, uint64_t uReg) 3969 3902 { 3970 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3971 Assert(pVmcs);3972 3903 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t)); 3973 3904 3974 RTGCPHYS const GCPhysVirtApic = pV mcs->u64AddrVirtApic.u;3905 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u; 3975 3906 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg)); 3976 if (RT_SUCCESS(rc)) 3977 { /* likely */ } 3978 else 3979 { 3980 AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg, 3981 GCPhysVirtApic)); 3982 } 3907 AssertMsgRC(rc, ("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp: 
%Rrc\n", 3908 sizeof(uReg), offReg, GCPhysVirtApic, rc)); 3983 3909 } 3984 3910 … … 3995 3921 IEM_STATIC void iemVmxVirtApicSetVectorInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t uVector) 3996 3922 { 3997 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);3998 Assert(pVmcs);3999 4000 3923 /* Determine the vector offset within the chunk. */ 4001 3924 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1; … … 4003 3926 /* Read the chunk at the offset. */ 4004 3927 uint32_t uReg; 4005 RTGCPHYS const GCPhysVirtApic = pV mcs->u64AddrVirtApic.u;3928 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u; 4006 3929 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg)); 4007 3930 if (RT_SUCCESS(rc)) … … 4013 3936 /* Write the chunk. */ 4014 3937 rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg)); 4015 if (RT_SUCCESS(rc)) 4016 { /* likely */ } 4017 else 4018 { 4019 AssertMsgFailed(("Failed to set vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n", 4020 uVector, offReg, GCPhysVirtApic)); 4021 } 3938 AssertMsgRC(rc, ("Failed to set vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n", 3939 uVector, offReg, GCPhysVirtApic, rc)); 4022 3940 } 4023 3941 else 4024 { 4025 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n", 4026 uVector, offReg, GCPhysVirtApic)); 4027 } 3942 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n", 3943 uVector, offReg, GCPhysVirtApic, rc)); 4028 3944 } 4029 3945 … … 4040 3956 IEM_STATIC void iemVmxVirtApicClearVectorInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t uVector) 4041 3957 { 4042 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);4043 Assert(pVmcs);4044 4045 3958 /* Determine the vector offset within 
the chunk. */ 4046 3959 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1; … … 4048 3961 /* Read the chunk at the offset. */ 4049 3962 uint32_t uReg; 4050 RTGCPHYS const GCPhysVirtApic = pV mcs->u64AddrVirtApic.u;3963 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u; 4051 3964 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg)); 4052 3965 if (RT_SUCCESS(rc)) … … 4058 3971 /* Write the chunk. */ 4059 3972 rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg)); 4060 if (RT_SUCCESS(rc)) 4061 { /* likely */ } 4062 else 4063 { 4064 AssertMsgFailed(("Failed to clear vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n", 4065 uVector, offReg, GCPhysVirtApic)); 4066 } 3973 AssertMsgRC(rc, ("Failed to clear vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n", 3974 uVector, offReg, GCPhysVirtApic, rc)); 4067 3975 } 4068 3976 else 4069 { 4070 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n", 4071 uVector, offReg, GCPhysVirtApic)); 4072 } 3977 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n", 3978 uVector, offReg, GCPhysVirtApic, rc)); 4073 3979 } 4074 3980 … … 4089 3995 IEM_STATIC bool iemVmxVirtApicIsMemAccessIntercepted(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess) 4090 3996 { 4091 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4092 Assert(pVmcs); 3997 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 4093 3998 Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE); 4094 3999 … … 4256 4161 IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess) 4257 4162 { 4258 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4259 
Assert(pVmcs); 4260 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); 4163 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); 4261 4164 Assert(pGCPhysAccess); 4262 4165 4263 4166 RTGCPHYS const GCPhysAccess = *pGCPhysAccess & ~(RTGCPHYS)PAGE_OFFSET_MASK; 4264 RTGCPHYS const GCPhysApic = pV mcs->u64AddrApicAccess.u;4167 RTGCPHYS const GCPhysApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrApicAccess.u; 4265 4168 Assert(!(GCPhysApic & PAGE_OFFSET_MASK)); 4266 4169 … … 4301 4204 uint32_t fAccess) 4302 4205 { 4303 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4304 Assert(pVmcs); 4305 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); NOREF(pVmcs); 4206 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); 4306 4207 Assert(pvData); 4307 4208 Assert( (fAccess & IEM_ACCESS_TYPE_READ) … … 4377 4278 IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value) 4378 4279 { 4379 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4380 Assert(pVmcs); 4381 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE); 4280 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE); 4382 4281 Assert(pu64Value); 4383 4282 4384 if (pV mcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)4283 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT) 4385 4284 { 4386 4285 if ( idMsr >= MSR_IA32_X2APIC_START … … 4422 4321 IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value) 4423 4322 { 4424 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);4425 Assert(pVmcs);4426 4427 4323 /* 4428 4324 * Check if the access is to be virtualized. 
… … 4430 4326 */ 4431 4327 if ( idMsr == MSR_IA32_X2APIC_TPR 4432 || ( (pV mcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)4328 || ( (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY) 4433 4329 && ( idMsr == MSR_IA32_X2APIC_EOI 4434 4330 || idMsr == MSR_IA32_X2APIC_SELF_IPI))) … … 4490 4386 Assert(offReg < XAPIC_OFF_END + 4); 4491 4387 Assert(pidxHighestBit); 4492 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));4493 4388 4494 4389 /* … … 4522 4417 IEM_STATIC void iemVmxEvalPendingVirtIntrs(PVMCPUCC pVCpu) 4523 4418 { 4524 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4525 Assert(pVmcs); 4526 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY); 4527 4528 if (!(pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)) 4529 { 4530 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus); 4419 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY); 4420 4421 if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)) 4422 { 4423 uint8_t const uRvi = RT_LO_U8(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u16GuestIntStatus); 4531 4424 uint8_t const uPpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_PPR); 4532 4425 … … 4550 4443 IEM_STATIC void iemVmxPprVirtualization(PVMCPUCC pVCpu) 4551 4444 { 4552 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4553 Assert(pVmcs); 4554 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); 4555 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY); 4445 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); 4446 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY); 4556 4447 4557 4448 /* … … 4562 4453 */ 4563 4454 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR); 4564 uint32_t const uSvi = RT_HI_U8(pV mcs->u16GuestIntStatus);4455 uint32_t const uSvi = 
RT_HI_U8(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u16GuestIntStatus); 4565 4456 4566 4457 uint32_t uPpr; … … 4583 4474 IEM_STATIC VBOXSTRICTRC iemVmxTprVirtualization(PVMCPUCC pVCpu) 4584 4475 { 4585 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4586 Assert(pVmcs); 4587 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); 4476 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); 4588 4477 4589 4478 /* … … 4593 4482 * See Intel spec. 29.1.2 "TPR Virtualization". 4594 4483 */ 4595 if (!(pV mcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))4596 { 4597 uint32_t const uTprThreshold = pV mcs->u32TprThreshold;4484 if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)) 4485 { 4486 uint32_t const uTprThreshold = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32TprThreshold; 4598 4487 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR); 4599 4488 … … 4628 4517 IEM_STATIC bool iemVmxIsEoiInterceptSet(PCVMCPU pVCpu, uint8_t uVector) 4629 4518 { 4630 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4631 Assert(pVmcs); 4519 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 4632 4520 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY); 4633 4521 … … 4650 4538 IEM_STATIC VBOXSTRICTRC iemVmxEoiVirtualization(PVMCPUCC pVCpu) 4651 4539 { 4652 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4653 Assert(pVmcs); 4540 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 4654 4541 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY); 4655 4542 … … 4694 4581 IEM_STATIC VBOXSTRICTRC iemVmxSelfIpiVirtualization(PVMCPUCC pVCpu) 4695 4582 { 4696 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4697 Assert(pVmcs); 4583 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 4698 4584 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW); 4699 4585 … … 4724 4610 IEM_STATIC VBOXSTRICTRC 
iemVmxApicWriteEmulation(PVMCPUCC pVCpu) 4725 4611 { 4726 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 4727 Assert(pVmcs); 4612 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 4728 4613 4729 4614 /* Import the virtual-APIC write offset (part of the hardware-virtualization state). */ … … 4819 4704 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs". 4820 4705 */ 4821 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);4822 const char * const pszFailure= "VM-exit";4823 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);4706 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 4707 const char * const pszFailure = "VM-exit"; 4708 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST); 4824 4709 4825 4710 /* CR0 reserved bits. */ … … 4966 4851 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". 4967 4852 */ 4968 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);4969 const char * const pszFailure= "VM-exit";4970 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);4971 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);4972 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);4853 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 4854 const char * const pszFailure = "VM-exit"; 4855 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM); 4856 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST); 4857 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST); 4973 4858 4974 4859 /* Selectors. */ … … 5354 5239 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers". 
5355 5240 */ 5356 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);5241 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 5357 5242 const char *const pszFailure = "VM-exit"; 5358 5243 … … 5400 5285 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS". 5401 5286 */ 5402 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);5287 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 5403 5288 const char *const pszFailure = "VM-exit"; 5404 5289 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST); … … 5474 5359 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State". 5475 5360 */ 5476 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);5361 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 5477 5362 const char *const pszFailure = "VM-exit"; 5478 5363 … … 5732 5617 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries". 5733 5618 */ 5734 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);5619 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 5735 5620 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST); 5736 5621 … … 5800 5685 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs". 5801 5686 */ 5802 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);5687 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 5803 5688 const char * const pszFailure = "VMFail"; 5804 5689 … … 6002 5887 IEM_STATIC int iemVmxVmentryCheckCtls(PVMCPUCC pVCpu, const char *pszInstr) 6003 5888 { 6004 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);5889 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 6005 5890 const char * const pszFailure = "VMFail"; 6006 5891 … … 6387 6272 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs". 
6388 6273 */ 6389 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6274 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 6390 6275 6391 6276 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0); … … 6464 6349 */ 6465 6350 /* CS, SS, ES, DS, FS, GS. */ 6466 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6351 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 6467 6352 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++) 6468 6353 { … … 6566 6451 * See Intel spec. 26.4 "Loading MSRs". 6567 6452 */ 6568 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6453 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 6569 6454 const char *const pszFailure = "VM-exit"; 6570 6455 … … 6659 6544 * See Intel spec. 26.6 "Special Features of VM Entry" 6660 6545 */ 6661 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6546 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 6662 6547 6663 6548 /* … … 6713 6598 { 6714 6599 const char *const pszFailure = "VM-exit"; 6715 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6600 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 6716 6601 6717 6602 /* … … 6912 6797 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS". 6913 6798 */ 6914 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6799 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 6915 6800 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u; 6916 6801 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u; … … 6948 6833 * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry". 
6949 6834 */ 6950 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6835 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 6951 6836 Assert(pVmcs); 6952 6837 … … 7018 6903 IEM_STATIC void iemVmxVmentrySetupMtf(PVMCPUCC pVCpu, const char *pszInstr) 7019 6904 { 7020 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6905 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7021 6906 Assert(pVmcs); 7022 6907 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG) … … 7039 6924 IEM_STATIC void iemVmxVmentrySetupNmiWindow(PVMCPUCC pVCpu, const char *pszInstr) 7040 6925 { 7041 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6926 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7042 6927 Assert(pVmcs); 7043 6928 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT) … … 7061 6946 IEM_STATIC void iemVmxVmentrySetupIntWindow(PVMCPUCC pVCpu, const char *pszInstr) 7062 6947 { 7063 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6948 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7064 6949 Assert(pVmcs); 7065 6950 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT) … … 7082 6967 IEM_STATIC void iemVmxVmentrySetupPreemptTimer(PVMCPUCC pVCpu, const char *pszInstr) 7083 6968 { 7084 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);6969 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7085 6970 Assert(pVmcs); 7086 6971 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER) … … 7184 7069 IEM_STATIC void iemVmxVmentryInjectEvent(PVMCPUCC pVCpu, const char *pszInstr) 7185 7070 { 7186 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);7071 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7187 7072 7188 7073 /* … … 7195 7080 * See Intel spec. 26.5 "Event Injection". 
7196 7081 */ 7197 uint32_t const uEntryIntInfo = pV Cpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u32EntryIntInfo;7082 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo; 7198 7083 bool const fEntryIntInfoValid = VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo); 7199 7084 … … 7260 7145 * the same reasons. 7261 7146 */ 7262 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);7147 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7263 7148 Assert(pVmcs); 7264 7149 … … 7346 7231 7347 7232 /* Current VMCS is not a shadow VMCS. */ 7348 if (!pVCpu->cpum.GstCtx.hwvirt.vmx. CTX_SUFF(pVmcs)->u32VmcsRevId.n.fIsShadowVmcs)7233 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32VmcsRevId.n.fIsShadowVmcs) 7349 7234 { /* likely */ } 7350 7235 else … … 7374 7259 { 7375 7260 /* VMLAUNCH with non-clear VMCS. */ 7376 if (pVCpu->cpum.GstCtx.hwvirt.vmx. CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR)7261 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR) 7377 7262 { /* likely */ } 7378 7263 else … … 7388 7273 { 7389 7274 /* VMRESUME with non-launched VMCS. */ 7390 if (pVCpu->cpum.GstCtx.hwvirt.vmx. CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_LAUNCH_STATE_LAUNCHED)7275 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState == VMX_V_VMCS_LAUNCH_STATE_LAUNCHED) 7391 7276 { /* likely */ } 7392 7277 else … … 7410 7295 * See Intel spec. 24.11.4 "Software Access to Related Structures". 7411 7296 */ 7412 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);7297 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7413 7298 Assert(pVmcs); 7414 7299 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu)); … … 7564 7449 7565 7450 /* Consult the MSR bitmap if the feature is supported. 
*/ 7566 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);7451 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7567 7452 Assert(pVmcs); 7568 7453 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) … … 7706 7591 */ 7707 7592 PCVMXVVMCS pVmcs = !IEM_VMX_IS_NON_ROOT_MODE(pVCpu) 7708 ? pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)7593 ? &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs 7709 7594 : pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs); 7710 7595 Assert(pVmcs); … … 7976 7861 bool const fInVmxNonRootMode = IEM_VMX_IS_NON_ROOT_MODE(pVCpu); 7977 7862 PVMXVVMCS pVmcs = !fInVmxNonRootMode 7978 ? pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)7863 ? &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs 7979 7864 : pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs); 7980 7865 Assert(pVmcs); … … 8104 7989 && IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs) 8105 7990 { 8106 pVCpu->cpum.GstCtx.hwvirt.vmx. CTX_SUFF(pVmcs)->fVmcsState = fVmcsLaunchStateClear;7991 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState = fVmcsLaunchStateClear; 8107 7992 iemVmxWriteCurrentVmcsToGstMem(pVCpu); 8108 7993 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu); -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r91271 r91297 1186 1186 { /* likely */ } 1187 1187 else 1188 { 1189 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 1190 uXcptMask &= ~pVmcsNstGst->u32XcptBitmap; 1191 } 1188 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap; 1192 1189 #endif 1193 1190 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS … … 5444 5441 static int hmR0VmxCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo) 5445 5442 { 5446 PVMCC pVM= pVCpu->CTX_SUFF(pVM);5447 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);5443 PVMCC const pVM = pVCpu->CTX_SUFF(pVM); 5444 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 5448 5445 5449 5446 /* … … 5514 5511 { 5515 5512 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 5516 PVMCC pVM= pVCpu->CTX_SUFF(pVM);5517 PVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);5513 PVMCC const pVM = pVCpu->CTX_SUFF(pVM); 5514 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 5518 5515 5519 5516 int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo); … … 7903 7900 * re-construct CR0. See @bugref{9180#c95} for details. 7904 7901 */ 7905 PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;7906 P CVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);7902 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo; 7903 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7907 7904 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask) 7908 7905 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u) … … 7937 7934 * re-construct CR4. See @bugref{9180#c95} for details. 
7938 7935 */ 7939 PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;7940 P CVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);7936 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo; 7937 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 7941 7938 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask) 7942 7939 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u) … … 10422 10419 * up the nested-guest VMCS is not sufficient. 10423 10420 */ 10424 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);10421 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 10425 10422 if (pVmcsNstGst->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) 10426 10423 { … … 10456 10453 static int hmR0VmxMergeVmcsNested(PVMCPUCC pVCpu) 10457 10454 { 10458 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 10459 PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo; 10460 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 10461 Assert(pVmcsNstGst); 10455 PVMCC const pVM = pVCpu->CTX_SUFF(pVM); 10456 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo; 10457 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 10462 10458 10463 10459 /* … … 17175 17171 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS: 17176 17172 { 17177 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs); 17178 Assert(pVmcsNstGst); 17179 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u; 17180 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u; 17173 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs; 17174 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u; 17175 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u; 17181 17176 if ( (uGstHostMask & X86_CR0_TS) 17182 17177 && (uReadShadow & X86_CR0_TS)) -
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r91295 r91297 1062 1062 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest; 1063 1063 1064 if (pCtx->hwvirt.vmx.pVmcsR3)1065 {1066 SUPR3ContFree(pCtx->hwvirt.vmx.pVmcsR3, VMX_V_VMCS_PAGES);1067 pCtx->hwvirt.vmx.pVmcsR3 = NULL;1068 }1069 1064 if (pCtx->hwvirt.vmx.pShadowVmcsR3) 1070 1065 { … … 1139 1134 pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_VMX; 1140 1135 1141 /* 1142 * Allocate the nested-guest current VMCS. 1143 */ 1144 pCtx->hwvirt.vmx.pVmcsR3 = (PVMXVVMCS)SUPR3ContAlloc(VMX_V_VMCS_PAGES, &pCtx->hwvirt.vmx.pVmcsR0, NULL); 1145 if (pCtx->hwvirt.vmx.pVmcsR3) 1146 { /* likely */ } 1147 else 1148 { 1149 LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMCS\n", pVCpu->idCpu, VMX_V_VMCS_PAGES)); 1150 break; 1151 } 1136 AssertCompile(sizeof(pCtx->hwvirt.vmx.Vmcs) == VMX_V_VMCS_PAGES * X86_PAGE_SIZE); 1152 1137 1153 1138 /* … … 1278 1263 * Zero out all allocated pages (should compress well for saved-state). 1279 1264 */ 1280 memset(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs), 0, VMX_V_VMCS_SIZE);1265 RT_ZERO(pCtx->hwvirt.vmx.Vmcs); 1281 1266 memset(pCtx->hwvirt.vmx.CTX_SUFF(pShadowVmcs), 0, VMX_V_SHADOW_VMCS_SIZE); 1282 1267 memset(pCtx->hwvirt.vmx.CTX_SUFF(pvVirtApicPage), 0, VMX_V_VIRT_APIC_SIZE); … … 1307 1292 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest; 1308 1293 Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX); 1309 Assert(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs));1310 1294 Assert(pCtx->hwvirt.vmx.CTX_SUFF(pShadowVmcs)); 1311 1295 1312 memset(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs), 0, VMX_V_VMCS_SIZE);1296 RT_ZERO(pCtx->hwvirt.vmx.Vmcs); 1313 1297 memset(pCtx->hwvirt.vmx.CTX_SUFF(pShadowVmcs), 0, VMX_V_SHADOW_VMCS_SIZE); 1314 1298 pCtx->hwvirt.vmx.GCPhysVmxon = NIL_RTGCPHYS; … … 2586 2570 if (pVM->cpum.s.GuestFeatures.fVmx) 2587 2571 { 2588 Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs));2589 2572 SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysVmxon); 2590 2573 SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysVmcs); … … 2594 2577 SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInterceptEvents); 2595 2578 
SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fNmiUnblockingIret); 2596 SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);2579 SSMR3PutStructEx(pSSM, &pGstCtx->hwvirt.vmx.Vmcs, sizeof(pGstCtx->hwvirt.vmx.Vmcs), 0, g_aVmxHwvirtVmcs, NULL); 2597 2580 SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL); 2598 2581 SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); … … 2875 2858 if (pVM->cpum.s.GuestFeatures.fVmx) 2876 2859 { 2877 Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs));2878 2860 SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmxon); 2879 2861 SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmcs); … … 2883 2865 SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInterceptEvents); 2884 2866 SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fNmiUnblockingIret); 2885 SSMR3GetStructEx(pSSM, pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL); 2867 SSMR3GetStructEx(pSSM, &pGstCtx->hwvirt.vmx.Vmcs, sizeof(pGstCtx->hwvirt.vmx.Vmcs), 2868 0, g_aVmxHwvirtVmcs, NULL); 2886 2869 SSMR3GetStructEx(pSSM, pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL); 2887 2870 SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE); … … 4104 4087 pHlp->pfnPrintf(pHlp, " fVirtNmiBlocking = %RTbool\n", pCtx->hwvirt.vmx.fVirtNmiBlocking); 4105 4088 pHlp->pfnPrintf(pHlp, " VMCS cache:\n"); 4106 cpumR3InfoVmxVmcs(pVCpu, pHlp, pCtx->hwvirt.vmx.pVmcsR3, " " /* pszPrefix */);4089 cpumR3InfoVmxVmcs(pVCpu, pHlp, &pCtx->hwvirt.vmx.Vmcs, " " /* pszPrefix */); 4107 4090 } 4108 4091 else -
trunk/src/VBox/VMM/include/CPUMInternal.mac
r91291 r91297 243 243 .Guest.hwvirt.svm.fInterceptEvents resb 1 244 244 245 .Guest.hwvirt.vmx.Vmcs EQU .Guest.hwvirt.svm.Vmcb 246 245 247 ;.unnamed_padding.1 resb 0 246 248 alignb 8 -
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r91291 r91297 148 148 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInterceptEvents); 149 149 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fNmiUnblockingIret); 150 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR0);151 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR3);152 150 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR0); 153 151 GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR3); -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r91291 r91297 252 252 CHECK_MEMBER_ALIGNMENT(VMCPU, cpum.s.Guest.hwvirt.svm.abMsrBitmap, 4096); 253 253 CHECK_MEMBER_ALIGNMENT(VMCPU, cpum.s.Guest.hwvirt.svm.abIoBitmap, 4096); 254 CHECK_MEMBER_ALIGNMENT(VMCPU, cpum.s.Guest.hwvirt.vmx.Vmcs, 4096); 254 255 255 256 PVM pVM = NULL; NOREF(pVM);
Note:
See TracChangeset
for help on using the changeset viewer.