VirtualBox

Changeset 91297 in vbox


Ignore:
Timestamp:
Sep 17, 2021 11:51:23 AM (3 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
146957
Message:

VMM/CPUM,++: Moved the nested VT-X VMCS allocation into CPUMCTX. bugref:10093

Location:
trunk
Files:
11 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/cpum.h

    r91287 r91297  
    20832083{
    20842084    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2085     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2086     Assert(pVmcs);
    2087     return RT_BOOL(pVmcs->u32PinCtls & uPinCtls);
     2085    return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & uPinCtls);
    20882086}
    20892087
     
    21022100{
    21032101    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2104     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2105     Assert(pVmcs);
    2106     return RT_BOOL(pVmcs->u32ProcCtls & uProcCtls);
     2102    return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls & uProcCtls);
    21072103}
    21082104
     
    21222118{
    21232119    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2124     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2125     Assert(pVmcs);
    2126     return RT_BOOL(pVmcs->u32ProcCtls2 & uProcCtls2);
     2120    return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls2 & uProcCtls2);
    21272121}
    21282122
     
    21422136{
    21432137    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2144     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2145     Assert(pVmcs);
    2146     return RT_BOOL(pVmcs->u64ProcCtls3.u & uProcCtls3);
     2138    return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u64ProcCtls3.u & uProcCtls3);
    21472139}
    21482140
     
    21612153{
    21622154    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2163     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2164     Assert(pVmcs);
    2165     return RT_BOOL(pVmcs->u32ExitCtls & uExitCtls);
     2155    return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ExitCtls & uExitCtls);
    21662156}
    21672157
     
    21802170{
    21812171    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2182     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2183     Assert(pVmcs);
    2184     return RT_BOOL(pVmcs->u32EntryCtls & uEntryCtls);
     2172    return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32EntryCtls & uEntryCtls);
    21852173}
    21862174
     
    22292217
    22302218    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2231     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2232     Assert(pVmcs);
    22332219
    22342220    /* NMIs have a dedicated VM-execution control for causing VM-exits. */
    22352221    if (uVector == X86_XCPT_NMI)
    2236         return RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
     2222        return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
    22372223
    22382224    /* Page-faults are subject to masking using its error code. */
    2239     uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
     2225    uint32_t fXcptBitmap = pCtx->hwvirt.vmx.Vmcs.u32XcptBitmap;
    22402226    if (uVector == X86_XCPT_PF)
    22412227    {
    2242         uint32_t const fXcptPFMask  = pVmcs->u32XcptPFMask;
    2243         uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
     2228        uint32_t const fXcptPFMask  = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMask;
     2229        uint32_t const fXcptPFMatch = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMatch;
    22442230        if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
    22452231            fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
     
    22832269    pCtx->eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
    22842270    pCtx->eflags.u32 |= X86_EFL_ZF;
    2285     pCtx->hwvirt.vmx.CTX_SUFF(pVmcs)->u32RoVmInstrError = enmInsErr;
     2271    pCtx->hwvirt.vmx.Vmcs.u32RoVmInstrError = enmInsErr;
    22862272}
    22872273
     
    23102296{
    23112297    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2312     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2313     Assert(pVmcs);
    2314     return pVmcs->u64AddrApicAccess.u;
     2298    return pCtx->hwvirt.vmx.Vmcs.u64AddrApicAccess.u;
    23152299}
    23162300
     
    23322316     */
    23332317    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2334     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2335     Assert(pVmcs);
    23362318    uint64_t const uGstCr0      = pCtx->cr0;
    2337     uint64_t const fReadShadow  = pVmcs->u64Cr0ReadShadow.u;
     2319    uint64_t const fReadShadow  = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
    23382320    return (fReadShadow & fGstHostMask) | (uGstCr0 & ~fGstHostMask);
    23392321}
     
    23562338     */
    23572339    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2358     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2359     Assert(pVmcs);
    23602340    uint64_t const uGstCr4      = pCtx->cr4;
    2361     uint64_t const fReadShadow  = pVmcs->u64Cr4ReadShadow.u;
     2341    uint64_t const fReadShadow  = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
    23622342    return (fReadShadow & fGstHostMask) | (uGstCr4 & ~fGstHostMask);
    23632343}
     
    23792359     */
    23802360    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2381     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2382     Assert(pVmcs);
    2383 
    2384     uint32_t const fGstHostMask = (uint32_t)pVmcs->u64Cr0Mask.u;
    2385     uint32_t const fReadShadow  = (uint32_t)pVmcs->u64Cr0ReadShadow.u;
     2361
     2362    uint32_t const fGstHostMask = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
     2363    uint32_t const fReadShadow  = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
    23862364
    23872365    /*
     
    24272405     */
    24282406    Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2429     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2430     Assert(pVmcs);
    24312407    Assert(iCrReg == 0 || iCrReg == 4);
    24322408
     
    24352411    if (iCrReg == 0)
    24362412    {
    2437         fGstHostMask = pVmcs->u64Cr0Mask.u;
    2438         fReadShadow  = pVmcs->u64Cr0ReadShadow.u;
     2413        fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
     2414        fReadShadow  = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
    24392415    }
    24402416    else
    24412417    {
    2442         fGstHostMask = pVmcs->u64Cr4Mask.u;
    2443         fReadShadow  = pVmcs->u64Cr4ReadShadow.u;
     2418        fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr4Mask.u;
     2419        fReadShadow  = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
    24442420    }
    24452421
  • trunk/include/VBox/vmm/cpumctx.h

    r91292 r91297  
    539539            struct
    540540            {
     541                /** 0x4000 - The current VMCS. */
     542                VMXVVMCS                Vmcs;
     543
    541544                /** 0x300 - Guest physical address of the VMXON region. */
    542545                RTGCPHYS                GCPhysVmxon;
     
    563566                 *  mode before execution of IRET. */
    564567                bool                    fNmiUnblockingIret;
    565                 /** 0x330 - The current VMCS - R0 ptr. */
    566                 R0PTRTYPE(PVMXVVMCS)    pVmcsR0;
    567                 /** 0x338 - The current VMCS - R3 ptr. */
    568                 R3PTRTYPE(PVMXVVMCS)    pVmcsR3;
    569568                /** 0X340 - The shadow VMCS - R0 ptr. */
    570569                R0PTRTYPE(PVMXVVMCS)    pShadowVmcsR0;
     
    846845AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abMsrBitmap,           X86_PAGE_SIZE);
    847846AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abIoBitmap,            X86_PAGE_SIZE);
    848 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pVmcsR0,               8);
     847AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.Vmcs,                  X86_PAGE_SIZE);
    849848AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pShadowVmcsR0,         8);
    850849AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) vmx.pvVmreadBitmapR0,      8);
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r91287 r91297  
    22002200    if (CPUMIsGuestInVmxNonRootMode(pCtx))
    22012201    {
    2202         PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2203         Assert(pVmcs);
    22042202        if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
    2205             return uTscValue + pVmcs->u64TscOffset.u;
     2203            return uTscValue + pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
    22062204        return uTscValue;
    22072205    }
     
    22342232    {
    22352233        if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_USE_TSC_OFFSETTING))
    2236         {
    2237             PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
    2238             Assert(pVmcs);
    2239             return uTscValue - pVmcs->u64TscOffset.u;
    2240         }
     2234            return uTscValue - pCtx->hwvirt.vmx.Vmcs.u64TscOffset.u;
    22412235        return uTscValue;
    22422236    }
     
    28332827     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
    28342828     */
    2835     PCCPUMCTX  pCtx  = &pVCpu->cpum.s.Guest;
    2836     PCVMXVVMCS pVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
     2829    PCCPUMCTX const pCtx = &pVCpu->cpum.s.Guest;
    28372830    if (CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_CR3_LOAD_EXIT))
    28382831    {
    2839         uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
     2832        uint32_t const uCr3TargetCount = pCtx->hwvirt.vmx.Vmcs.u32Cr3TargetCount;
    28402833        Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
    28412834
     
    28462839        /* If the CR3 being written doesn't match any of the target values, cause a VM-exit. */
    28472840        AssertCompile(VMX_V_CR3_TARGET_COUNT == 4);
    2848         if (   uNewCr3 != pVmcs->u64Cr3Target0.u
    2849             && uNewCr3 != pVmcs->u64Cr3Target1.u
    2850             && uNewCr3 != pVmcs->u64Cr3Target2.u
    2851             && uNewCr3 != pVmcs->u64Cr3Target3.u)
     2841        if (   uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target0.u
     2842            && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target1.u
     2843            && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target2.u
     2844            && uNewCr3 != pCtx->hwvirt.vmx.Vmcs.u64Cr3Target3.u)
    28522845            return true;
    28532846    }
  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r91044 r91297  
    845845    } while (0)
    846846
    847     PCCPUMCTX  pCtx  = &pVCpu->cpum.GstCtx;
    848     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     847    PCCPUMCTX  const pCtx  = &pVCpu->cpum.GstCtx;
     848    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    849849    if (!pVmcs)
    850850    {
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r91287 r91297  
    55845584    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    55855585    {
    5586         PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    5587         Assert(pVmcs);
    55885586        switch (iCrReg)
    55895587        {
    55905588            /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
    5591             case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcs->u64Cr0Mask.u); break;
    5592             case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVmcs->u64Cr4Mask.u); break;
     5589            case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); break;
     5590            case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u); break;
    55935591
    55945592            case 3:
     
    56295627        u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
    56305628    else
    5631     {
    5632         PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    5633         Assert(pVmcs);
    5634         u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcs->u64Cr0Mask.u);
    5635     }
     5629        u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
    56365630    uint64_t const u64GuestCr0 = u64MaskedCr0;
    56375631#else
     
    56825676        u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
    56835677    else
    5684     {
    5685         PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    5686         Assert(pVmcs);
    5687         u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcs->u64Cr0Mask.u);
    5688     }
     5678        u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
    56895679    uint64_t const u64GuestCr0 = u64MaskedCr0;
    56905680#else
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r91044 r91297  
    683683DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPUCC pVCpu, uint64_t u64ExitQual)
    684684{
    685     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    686     pVmcs->u64RoExitQual.u = u64ExitQual;
     685    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoExitQual.u = u64ExitQual;
    687686}
    688687
     
    696695DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPUCC pVCpu, uint32_t uExitIntInfo)
    697696{
    698     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    699     pVmcs->u32RoExitIntInfo = uExitIntInfo;
     697    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitIntInfo = uExitIntInfo;
    700698}
    701699
     
    709707DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPUCC pVCpu, uint32_t uErrCode)
    710708{
    711     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    712     pVmcs->u32RoExitIntErrCode = uErrCode;
     709    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitIntErrCode = uErrCode;
    713710}
    714711
     
    722719DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPUCC pVCpu, uint32_t uIdtVectorInfo)
    723720{
    724     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    725     pVmcs->u32RoIdtVectoringInfo = uIdtVectorInfo;
     721    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringInfo = uIdtVectorInfo;
    726722}
    727723
     
    735731DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPUCC pVCpu, uint32_t uErrCode)
    736732{
    737     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    738     pVmcs->u32RoIdtVectoringErrCode = uErrCode;
     733    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringErrCode = uErrCode;
    739734}
    740735
     
    748743DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPUCC pVCpu, uint64_t uGuestLinearAddr)
    749744{
    750     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    751     pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
     745    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoGuestLinearAddr.u = uGuestLinearAddr;
    752746}
    753747
     
    761755DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPUCC pVCpu, uint64_t uGuestPhysAddr)
    762756{
    763     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    764     pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
     757    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoGuestPhysAddr.u = uGuestPhysAddr;
    765758}
    766759
     
    777770DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPUCC pVCpu, uint32_t cbInstr)
    778771{
    779     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    780     pVmcs->u32RoExitInstrLen = cbInstr;
     772    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitInstrLen = cbInstr;
    781773}
    782774
     
    790782DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPUCC pVCpu, uint32_t uExitInstrInfo)
    791783{
    792     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    793     pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
     784    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitInstrInfo = uExitInstrInfo;
    794785}
    795786
     
    803794DECL_FORCE_INLINE(void) iemVmxVmcsSetGuestPendingDbgXcpts(PVMCPUCC pVCpu, uint64_t uGuestPendingDbgXcpts)
    804795{
    805     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    806796    Assert(!(uGuestPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK));
    807     pVmcs->u64GuestPendingDbgXcpts.u = uGuestPendingDbgXcpts;
     797    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestPendingDbgXcpts.u = uGuestPendingDbgXcpts;
    808798}
    809799
     
    870860DECL_FORCE_INLINE(int) iemVmxWriteCurrentVmcsToGstMem(PVMCPUCC pVCpu)
    871861{
    872     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
    873862    Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
    874863    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
    875                                       pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
     864                                      &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs));
    876865    return rc;
    877866}
     
    886875DECL_FORCE_INLINE(int) iemVmxReadCurrentVmcsFromGstMem(PVMCPUCC pVCpu)
    887876{
    888     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
    889877    Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
    890     int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
    891                                      IEM_VMX_GET_CURRENT_VMCS(pVCpu), sizeof(VMXVVMCS));
     878    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs,
     879                                     IEM_VMX_GET_CURRENT_VMCS(pVCpu), sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs));
    892880    return rc;
    893881}
     
    11671155     * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
    11681156     */
    1169     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1157    PVMXVVMCS pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    11701158
    11711159    /* Save control registers. */
     
    13101298IEM_STATIC uint32_t iemVmxCalcPreemptTimer(PVMCPUCC pVCpu)
    13111299{
    1312     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1313     Assert(pVmcs);
    1314 
    13151300    /*
    13161301     * Assume the following:
     
    13401325     */
    13411326    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
    1342     uint32_t const uVmcsPreemptVal = pVmcs->u32PreemptTimer;
     1327    uint32_t const uVmcsPreemptVal = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PreemptTimer;
    13431328    if (uVmcsPreemptVal > 0)
    13441329    {
     
    13661351     */
    13671352    /* CS, SS, ES, DS, FS, GS. */
    1368     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1353    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    13691354    for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
    13701355    {
     
    14691454     * See Intel spec. 27.3.4 "Saving Non-Register State".
    14701455     */
    1471     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1456    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    14721457
    14731458    /*
     
    15591544IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPUCC pVCpu, uint32_t uExitReason)
    15601545{
    1561     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1562     Assert(pVmcs);
    1563 
    15641546    iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
    15651547    iemVmxVmexitSaveGuestSegRegs(pVCpu);
    15661548
    1567     pVmcs->u64GuestRip.u    = pVCpu->cpum.GstCtx.rip;
    1568     pVmcs->u64GuestRsp.u    = pVCpu->cpum.GstCtx.rsp;
    1569     pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u;  /** @todo NSTVMX: Check RFLAGS.RF handling. */
     1549    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRip.u    = pVCpu->cpum.GstCtx.rip;
     1550    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRsp.u    = pVCpu->cpum.GstCtx.rsp;
     1551    pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u;  /** @todo NSTVMX: Check RFLAGS.RF handling. */
    15701552
    15711553    iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
     
    15861568     * See Intel spec. 27.4 "Saving MSRs".
    15871569     */
    1588     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1589     const char *const pszFailure = "VMX-abort";
     1570    PVMXVVMCS const    pVmcs      = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     1571    const char * const pszFailure = "VMX-abort";
    15901572
    15911573    /*
     
    17201702     * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
    17211703     */
    1722     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1723     bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     1704    PCVMXVVMCS const pVmcs           = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     1705    bool const       fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    17241706
    17251707    /* CR0. */
     
    18091791     * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
    18101792     */
    1811     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1812     bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     1793    PCVMXVVMCS const pVmcs           = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     1794    bool const       fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    18131795
    18141796    /* CS, SS, ES, DS, FS, GS. */
     
    19161898     * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
    19171899     */
    1918     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1919     const char *const pszFailure = "VMX-abort";
    1920     bool const fHostInLongMode  = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     1900    PCVMXVVMCS const    pVmcs           = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     1901    const char * const  pszFailure      = "VMX-abort";
     1902    bool const          fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    19211903
    19221904    if (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
     
    19631945     * See Intel spec. 27.6 "Loading MSRs".
    19641946     */
    1965     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1966     const char *const pszFailure = "VMX-abort";
     1947    PCVMXVVMCS const   pVmcs      = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     1948    const char * const pszFailure = "VMX-abort";
    19671949
    19681950    /*
     
    20482030     * See Intel spec. 27.5 "Loading Host State".
    20492031     */
    2050     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    2051     bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     2032    PCVMXVVMCS const pVmcs           = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     2033    bool const       fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    20522034
    20532035    /* We cannot return from a long-mode guest to a host that is not in long mode. */
     
    24742456    return VERR_IEM_IPE_7;
    24752457# else
    2476     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    2477     Assert(pVmcs);
     2458    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    24782459
    24792460    /*
     
    28902871     * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
    28912872     */
    2892     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    2893     Assert(pVmcs);
    2894     uint32_t const fGstHostMask     = pVmcs->u64Cr0Mask.u;
     2873    uint32_t const fGstHostMask     = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u;
    28952874    uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    28962875    *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (uNewMsw & ~fGstHostLmswMask);
     
    29142893IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr)
    29152894{
    2916     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    2917     Assert(pVmcs);
    2918 
    2919     uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
    2920     uint32_t const fReadShadow  = pVmcs->u64Cr0ReadShadow.u;
     2895    uint32_t const fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u;
     2896    uint32_t const fReadShadow  = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
    29212897
    29222898    /*
     
    29962972     * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
    29972973     */
    2998     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    2999     Assert(pVmcs);
    30002974    uint64_t uGuestCrX;
    30012975    uint64_t fGstHostMask;
     
    30042978        IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
    30052979        uGuestCrX    = pVCpu->cpum.GstCtx.cr0;
    3006         fGstHostMask = pVmcs->u64Cr0Mask.u;
     2980        fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u;
    30072981    }
    30082982    else
     
    30102984        IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
    30112985        uGuestCrX    = pVCpu->cpum.GstCtx.cr4;
    3012         fGstHostMask = pVmcs->u64Cr4Mask.u;
     2986        fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u;
    30132987    }
    30142988
     
    30283002IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr)
    30293003{
    3030     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3031     Assert(pVmcs);
    30323004    Assert(iGReg < X86_GREG_COUNT);
    30333005    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
     
    30373009     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
    30383010     */
    3039     if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
     3011    if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
    30403012    {
    30413013        Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
     
    31033075IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr)
    31043076{
    3105     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3106     Assert(pVmcs);
    31073077    Assert(iGReg < X86_GREG_COUNT);
    31083078
     
    31113081     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
    31123082     */
    3113     if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
     3083    if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
    31143084    {
    31153085        Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
     
    31403110IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr)
    31413111{
    3142     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3143     Assert(pVmcs);
    31443112    Assert(iGReg < X86_GREG_COUNT);
    31453113
     
    31483116     * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
    31493117     */
    3150     if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
     3118    if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
    31513119    {
    31523120        Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
     
    31863154    Assert(iGReg < X86_GREG_COUNT);
    31873155
    3188     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3189     Assert(pVmcs);
    3190 
    3191     if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
     3156    if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
    31923157    {
    31933158        uint32_t const uDirection = uInstrId == VMXINSTRID_MOV_TO_DRX ? VMX_EXIT_QUAL_DRX_DIRECTION_WRITE
     
    33503315IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPUCC pVCpu, uint8_t cbInstr)
    33513316{
    3352     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3353     Assert(pVmcs);
     3317    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    33543318
    33553319    /*
     
    34913455IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
    34923456{
    3493     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3494     Assert(pVmcs);
    34953457    Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
    3496     Assert(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
     3458    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
    34973459
    34983460    /* Import the hardware virtualization state (for nested-guest VM-entry TSC-tick). */
     
    35003462
    35013463    /* Save the VMX-preemption timer value (of 0) back in to the VMCS if the CPU supports this feature. */
    3502     if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
    3503         pVmcs->u32PreemptTimer = 0;
     3464    if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
     3465        pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PreemptTimer = 0;
    35043466
    35053467    /* Cause the VMX-preemption timer VM-exit. The Exit qualification MBZ. */
     
    35213483IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
    35223484{
    3523     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3524     Assert(pVmcs);
    35253485    Assert(!fIntPending || uVector == 0);
    35263486
    35273487    /* The VM-exit is subject to "External interrupt exiting" being set. */
    3528     if (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
     3488    if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
    35293489    {
    35303490        if (fIntPending)
     
    35363496             * See Intel spec 25.2 "Other Causes Of VM Exits".
    35373497             */
    3538             if (!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
     3498            if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
    35393499                return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */);
    35403500
     
    35573517         */
    35583518        uint32_t uExitIntInfo;
    3559         if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
     3519        if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
    35603520        {
    35613521            bool const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
     
    35893549IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu)
    35903550{
    3591     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3592     Assert(pVmcs);
    3593 
    3594     uint32_t const fXcptBitmap = pVmcs->u32XcptBitmap;
     3551    uint32_t const fXcptBitmap = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
    35953552    if (fXcptBitmap & RT_BIT(X86_XCPT_DF))
    35963553    {
     
    36533610                                          uint8_t cbInstr)
    36543611{
    3655     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3656     Assert(pVmcs);
    3657 
    36583612    /*
    36593613     * If the event is being injected as part of VM-entry, it is -not- subject to event
     
    36733627        if (   uVector == X86_XCPT_NMI
    36743628            && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
    3675             && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
     3629            && (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
    36763630            pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true;
    36773631        else
     
    36903644    if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT)
    36913645    {
    3692         Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVmcs->u32RoIdtVectoringInfo));
     3646        Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringInfo));
    36933647        return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */);
    36943648    }
     
    37033657    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
    37043658        ||  (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
    3705     {
    37063659        fIntercept = CPUMIsGuestVmxXcptInterceptSet(&pVCpu->cpum.GstCtx, uVector, uErrCode);
    3707     }
    37083660    else
    37093661    {
     
    38873839IEM_STATIC uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg)
    38883840{
    3889     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3890     Assert(pVmcs);
    38913841    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
    38923842
    3893     uint32_t uReg;
    3894     RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     3843    uint32_t       uReg           = 0;
     3844    RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
    38953845    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
    3896     if (RT_SUCCESS(rc))
    3897     { /* likely */ }
    3898     else
    3899     {
    3900         AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
    3901                          GCPhysVirtApic));
    3902         uReg = 0;
    3903     }
     3846    AssertMsgStmt(RT_SUCCESS(rc),
     3847                  ("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
     3848                   sizeof(uReg), offReg, GCPhysVirtApic, rc),
     3849                  uReg = 0);
    39043850    return uReg;
    39053851}
     
    39153861IEM_STATIC uint64_t iemVmxVirtApicReadRaw64(PVMCPUCC pVCpu, uint16_t offReg)
    39163862{
    3917     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3918     Assert(pVmcs);
    39193863    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
    39203864
    3921     uint64_t uReg;
    3922     RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     3865    uint64_t       uReg           = 0;
     3866    RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
    39233867    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
    3924     if (RT_SUCCESS(rc))
    3925     { /* likely */ }
    3926     else
    3927     {
    3928         AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
    3929                          GCPhysVirtApic));
    3930         uReg = 0;
    3931     }
     3868    AssertMsgStmt(RT_SUCCESS(rc),
     3869                  ("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
     3870                   sizeof(uReg), offReg, GCPhysVirtApic, rc),
     3871                  uReg = 0);
    39323872    return uReg;
    39333873}
     
    39433883IEM_STATIC void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg)
    39443884{
    3945     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3946     Assert(pVmcs);
    39473885    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
    39483886
    3949     RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     3887    RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
    39503888    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
    3951     if (RT_SUCCESS(rc))
    3952     { /* likely */ }
    3953     else
    3954     {
    3955         AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
    3956                          GCPhysVirtApic));
    3957     }
     3889    AssertMsgRC(rc, ("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
     3890                     sizeof(uReg), offReg, GCPhysVirtApic, rc));
    39583891}
    39593892
     
    39683901IEM_STATIC void iemVmxVirtApicWriteRaw64(PVMCPUCC pVCpu, uint16_t offReg, uint64_t uReg)
    39693902{
    3970     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3971     Assert(pVmcs);
    39723903    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
    39733904
    3974     RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     3905    RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
    39753906    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
    3976     if (RT_SUCCESS(rc))
    3977     { /* likely */ }
    3978     else
    3979     {
    3980         AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
    3981                          GCPhysVirtApic));
    3982     }
     3907    AssertMsgRC(rc, ("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
     3908                     sizeof(uReg), offReg, GCPhysVirtApic, rc));
    39833909}
    39843910
     
    39953921IEM_STATIC void iemVmxVirtApicSetVectorInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t uVector)
    39963922{
    3997     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    3998     Assert(pVmcs);
    3999 
    40003923    /* Determine the vector offset within the chunk. */
    40013924    uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
     
    40033926    /* Read the chunk at the offset. */
    40043927    uint32_t uReg;
    4005     RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     3928    RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
    40063929    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
    40073930    if (RT_SUCCESS(rc))
     
    40133936        /* Write the chunk. */
    40143937        rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
    4015         if (RT_SUCCESS(rc))
    4016         { /* likely */ }
    4017         else
    4018         {
    4019             AssertMsgFailed(("Failed to set vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
    4020                              uVector, offReg, GCPhysVirtApic));
    4021         }
     3938        AssertMsgRC(rc, ("Failed to set vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
     3939                         uVector, offReg, GCPhysVirtApic, rc));
    40223940    }
    40233941    else
    4024     {
    4025         AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
    4026                          uVector, offReg, GCPhysVirtApic));
    4027     }
     3942        AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
     3943                         uVector, offReg, GCPhysVirtApic, rc));
    40283944}
    40293945
     
    40403956IEM_STATIC void iemVmxVirtApicClearVectorInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t uVector)
    40413957{
    4042     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4043     Assert(pVmcs);
    4044 
    40453958    /* Determine the vector offset within the chunk. */
    40463959    uint16_t const offVector      = (uVector & UINT32_C(0xe0)) >> 1;
     
    40483961    /* Read the chunk at the offset. */
    40493962    uint32_t uReg;
    4050     RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
     3963    RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
    40513964    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
    40523965    if (RT_SUCCESS(rc))
     
    40583971        /* Write the chunk. */
    40593972        rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
    4060         if (RT_SUCCESS(rc))
    4061         { /* likely */ }
    4062         else
    4063         {
    4064             AssertMsgFailed(("Failed to clear vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
    4065                              uVector, offReg, GCPhysVirtApic));
    4066         }
     3973        AssertMsgRC(rc, ("Failed to clear vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
     3974                         uVector, offReg, GCPhysVirtApic, rc));
    40673975    }
    40683976    else
    4069     {
    4070         AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
    4071                          uVector, offReg, GCPhysVirtApic));
    4072     }
     3977        AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
     3978                         uVector, offReg, GCPhysVirtApic, rc));
    40733979}
    40743980
     
    40893995IEM_STATIC bool iemVmxVirtApicIsMemAccessIntercepted(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess)
    40903996{
    4091     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4092     Assert(pVmcs);
     3997    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    40933998    Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
    40943999
     
    42564161IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess)
    42574162{
    4258     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4259     Assert(pVmcs);
    4260     Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
     4163    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
    42614164    Assert(pGCPhysAccess);
    42624165
    42634166    RTGCPHYS const GCPhysAccess = *pGCPhysAccess & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    4264     RTGCPHYS const GCPhysApic   = pVmcs->u64AddrApicAccess.u;
     4167    RTGCPHYS const GCPhysApic   = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrApicAccess.u;
    42654168    Assert(!(GCPhysApic & PAGE_OFFSET_MASK));
    42664169
     
    43014204                                                uint32_t fAccess)
    43024205{
    4303     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4304     Assert(pVmcs);
    4305     Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); NOREF(pVmcs);
     4206    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
    43064207    Assert(pvData);
    43074208    Assert(   (fAccess & IEM_ACCESS_TYPE_READ)
     
    43774278IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value)
    43784279{
    4379     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4380     Assert(pVmcs);
    4381     Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
     4280    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
    43824281    Assert(pu64Value);
    43834282
    4384     if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
     4283    if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
    43854284    {
    43864285        if (   idMsr >= MSR_IA32_X2APIC_START
     
    44224321IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value)
    44234322{
    4424     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4425     Assert(pVmcs);
    4426 
    44274323    /*
    44284324     * Check if the access is to be virtualized.
     
    44304326     */
    44314327    if (   idMsr == MSR_IA32_X2APIC_TPR
    4432         || (   (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
     4328        || (   (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
    44334329            && (   idMsr == MSR_IA32_X2APIC_EOI
    44344330                || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
     
    44904386    Assert(offReg < XAPIC_OFF_END + 4);
    44914387    Assert(pidxHighestBit);
    4492     Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
    44934388
    44944389    /*
     
    45224417IEM_STATIC void iemVmxEvalPendingVirtIntrs(PVMCPUCC pVCpu)
    45234418{
    4524     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4525     Assert(pVmcs);
    4526     Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
    4527 
    4528     if (!(pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
    4529     {
    4530         uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
     4419    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
     4420
     4421    if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
     4422    {
     4423        uint8_t const uRvi = RT_LO_U8(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u16GuestIntStatus);
    45314424        uint8_t const uPpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_PPR);
    45324425
     
    45504443IEM_STATIC void iemVmxPprVirtualization(PVMCPUCC pVCpu)
    45514444{
    4552     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4553     Assert(pVmcs);
    4554     Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
    4555     Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
     4445    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
     4446    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
    45564447
    45574448    /*
     
    45624453     */
    45634454    uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
    4564     uint32_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
     4455    uint32_t const uSvi = RT_HI_U8(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u16GuestIntStatus);
    45654456
    45664457    uint32_t uPpr;
     
    45834474IEM_STATIC VBOXSTRICTRC iemVmxTprVirtualization(PVMCPUCC pVCpu)
    45844475{
    4585     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4586     Assert(pVmcs);
    4587     Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
     4476    Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
    45884477
    45894478    /*
     
    45934482     * See Intel spec. 29.1.2 "TPR Virtualization".
    45944483     */
    4595     if (!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
    4596     {
    4597         uint32_t const uTprThreshold = pVmcs->u32TprThreshold;
     4484    if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
     4485    {
     4486        uint32_t const uTprThreshold = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32TprThreshold;
    45984487        uint32_t const uTpr          = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
    45994488
     
    46284517IEM_STATIC bool iemVmxIsEoiInterceptSet(PCVMCPU pVCpu, uint8_t uVector)
    46294518{
    4630     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4631     Assert(pVmcs);
     4519    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    46324520    Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
    46334521
     
    46504538IEM_STATIC VBOXSTRICTRC iemVmxEoiVirtualization(PVMCPUCC pVCpu)
    46514539{
    4652     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4653     Assert(pVmcs);
     4540    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    46544541    Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
    46554542
     
    46944581IEM_STATIC VBOXSTRICTRC iemVmxSelfIpiVirtualization(PVMCPUCC pVCpu)
    46954582{
    4696     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4697     Assert(pVmcs);
     4583    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    46984584    Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
    46994585
     
    47244610IEM_STATIC VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu)
    47254611{
    4726     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4727     Assert(pVmcs);
     4612    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    47284613
    47294614    /* Import the virtual-APIC write offset (part of the hardware-virtualization state). */
     
    48194704     * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
    48204705     */
    4821     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4822     const char *const pszFailure  = "VM-exit";
    4823     bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
     4706    PCVMXVVMCS const   pVmcs              = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     4707    const char * const pszFailure         = "VM-exit";
     4708    bool const         fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
    48244709
    48254710    /* CR0 reserved bits. */
     
    49664851     * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
    49674852     */
    4968     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4969     const char *const pszFailure  = "VM-exit";
    4970     bool const fGstInV86Mode      = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
    4971     bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
    4972     bool const fGstInLongMode     = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     4853    PCVMXVVMCS const   pVmcs              = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     4854    const char * const pszFailure         = "VM-exit";
     4855    bool const         fGstInV86Mode      = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
     4856    bool const         fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
     4857    bool const         fGstInLongMode     = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
    49734858
    49744859    /* Selectors. */
     
    53545239     * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
    53555240     */
    5356     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5241    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    53575242    const char *const pszFailure = "VM-exit";
    53585243
     
    54005285     * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
    54015286     */
    5402     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5287    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    54035288    const char *const pszFailure = "VM-exit";
    54045289    bool const fGstInLongMode    = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     
    54745359     * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
    54755360     */
    5476     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5361    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    54775362    const char *const pszFailure = "VM-exit";
    54785363
     
    57325617     * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
    57335618     */
    5734     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5619    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    57355620    bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
    57365621
     
    58005685     * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
    58015686     */
    5802     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5687    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    58035688    const char * const pszFailure = "VMFail";
    58045689
     
    60025887IEM_STATIC int iemVmxVmentryCheckCtls(PVMCPUCC pVCpu, const char *pszInstr)
    60035888{
    6004     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5889    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    60055890    const char * const pszFailure = "VMFail";
    60065891
     
    63876272     * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
    63886273     */
    6389     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6274    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    63906275
    63916276    IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
     
    64646349     */
    64656350    /* CS, SS, ES, DS, FS, GS. */
    6466     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6351    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    64676352    for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
    64686353    {
     
    65666451     * See Intel spec. 26.4 "Loading MSRs".
    65676452     */
    6568     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6453    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    65696454    const char *const pszFailure = "VM-exit";
    65706455
     
    66596544     * See Intel spec. 26.6 "Special Features of VM Entry"
    66606545     */
    6661     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6546    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    66626547
    66636548    /*
     
    67136598{
    67146599    const char *const pszFailure  = "VM-exit";
    6715     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6600    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    67166601
    67176602    /*
     
    69126797     * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
    69136798     */
    6914     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6799    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    69156800    pVCpu->cpum.GstCtx.rsp      = pVmcs->u64GuestRsp.u;
    69166801    pVCpu->cpum.GstCtx.rip      = pVmcs->u64GuestRip.u;
     
    69486833     * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry".
    69496834     */
    6950     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6835    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    69516836    Assert(pVmcs);
    69526837
     
    70186903IEM_STATIC void iemVmxVmentrySetupMtf(PVMCPUCC pVCpu, const char *pszInstr)
    70196904{
    7020     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6905    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    70216906    Assert(pVmcs);
    70226907    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
     
    70396924IEM_STATIC void iemVmxVmentrySetupNmiWindow(PVMCPUCC pVCpu, const char *pszInstr)
    70406925{
    7041     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6926    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    70426927    Assert(pVmcs);
    70436928    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
     
    70616946IEM_STATIC void iemVmxVmentrySetupIntWindow(PVMCPUCC pVCpu, const char *pszInstr)
    70626947{
    7063     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6948    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    70646949    Assert(pVmcs);
    70656950    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
     
    70826967IEM_STATIC void iemVmxVmentrySetupPreemptTimer(PVMCPUCC pVCpu, const char *pszInstr)
    70836968{
    7084     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     6969    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    70856970    Assert(pVmcs);
    70866971    if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
     
    71847069IEM_STATIC void iemVmxVmentryInjectEvent(PVMCPUCC pVCpu, const char *pszInstr)
    71857070{
    7186     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     7071    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    71877072
    71887073    /*
     
    71957080     * See Intel spec. 26.5 "Event Injection".
    71967081     */
    7197     uint32_t const uEntryIntInfo      = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u32EntryIntInfo;
     7082    uint32_t const uEntryIntInfo      = pVmcs->u32EntryIntInfo;
    71987083    bool const     fEntryIntInfoValid = VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo);
    71997084
     
    72607145     * the same reasons.
    72617146     */
    7262     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     7147    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    72637148    Assert(pVmcs);
    72647149
     
    73467231
    73477232    /* Current VMCS is not a shadow VMCS. */
    7348     if (!pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
     7233    if (!pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32VmcsRevId.n.fIsShadowVmcs)
    73497234    { /* likely */ }
    73507235    else
     
    73747259    {
    73757260        /* VMLAUNCH with non-clear VMCS. */
    7376         if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR)
     7261        if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR)
    73777262        { /* likely */ }
    73787263        else
     
    73887273    {
    73897274        /* VMRESUME with non-launched VMCS. */
    7390         if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_LAUNCH_STATE_LAUNCHED)
     7275        if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState == VMX_V_VMCS_LAUNCH_STATE_LAUNCHED)
    73917276        { /* likely */ }
    73927277        else
     
    74107295     * See Intel spec. 24.11.4 "Software Access to Related Structures".
    74117296     */
    7412     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     7297    PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    74137298    Assert(pVmcs);
    74147299    Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
     
    75647449
    75657450    /* Consult the MSR bitmap if the feature is supported. */
    7566     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     7451    PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    75677452    Assert(pVmcs);
    75687453    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     
    77067591     */
    77077592    PCVMXVVMCS pVmcs = !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
    7708                      ? pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)
     7593                     ? &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs
    77097594                     : pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
    77107595    Assert(pVmcs);
     
    79767861    bool const fInVmxNonRootMode = IEM_VMX_IS_NON_ROOT_MODE(pVCpu);
    79777862    PVMXVVMCS pVmcs = !fInVmxNonRootMode
    7978                     ? pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)
     7863                    ? &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs
    79797864                    : pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
    79807865    Assert(pVmcs);
     
    81047989        && IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
    81057990    {
    8106         pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsLaunchStateClear;
     7991        pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState = fVmcsLaunchStateClear;
    81077992        iemVmxWriteCurrentVmcsToGstMem(pVCpu);
    81087993        IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r91271 r91297  
    11861186        { /* likely */ }
    11871187        else
    1188         {
    1189             PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1190             uXcptMask &= ~pVmcsNstGst->u32XcptBitmap;
    1191         }
     1188            uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
    11921189#endif
    11931190#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     
    54445441static int hmR0VmxCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
    54455442{
    5446     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    5447     PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5443    PVMCC      const pVM        = pVCpu->CTX_SUFF(pVM);
     5444    PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    54485445
    54495446    /*
     
    55145511{
    55155512    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    5516     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    5517     PVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5513    PVMCC const     pVM        = pVCpu->CTX_SUFF(pVM);
     5514    PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    55185515
    55195516    int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
     
    79037900                         * re-construct CR0. See @bugref{9180#c95} for details.
    79047901                         */
    7905                         PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
    7906                         PCVMXVVMCS    pVmcsNstGst  = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     7902                        PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
     7903                        PVMXVVMCS const     pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    79077904                        u64Cr0 = (u64Cr0                     & ~pVmcsInfo->u64Cr0Mask)
    79087905                               | (pVmcsNstGst->u64GuestCr0.u &  pVmcsNstGst->u64Cr0Mask.u)
     
    79377934                         * re-construct CR4. See @bugref{9180#c95} for details.
    79387935                         */
    7939                         PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
    7940                         PCVMXVVMCS    pVmcsNstGst  = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     7936                        PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
     7937                        PVMXVVMCS const     pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    79417938                        u64Cr4 = (u64Cr4                     & ~pVmcsInfo->u64Cr4Mask)
    79427939                               | (pVmcsNstGst->u64GuestCr4.u &  pVmcsNstGst->u64Cr4Mask.u)
     
    1042210419     *       up the nested-guest VMCS is not sufficient.
    1042310420     */
    10424     PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     10421    PCVMXVVMCS const pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    1042510422    if (pVmcsNstGst->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
    1042610423    {
     
    1045610453static int hmR0VmxMergeVmcsNested(PVMCPUCC pVCpu)
    1045710454{
    10458     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    10459     PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
    10460     PCVMXVVMCS    pVmcsNstGst  = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    10461     Assert(pVmcsNstGst);
     10455    PVMCC const         pVM          = pVCpu->CTX_SUFF(pVM);
     10456    PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
     10457    PCVMXVVMCS const    pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
    1046210458
    1046310459    /*
     
    1717517171        case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
    1717617172        {
    17177             PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    17178             Assert(pVmcsNstGst);
    17179             uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
    17180             uint64_t const uReadShadow  = pVmcsNstGst->u64Cr0ReadShadow.u;
     17173            PCVMXVVMCS const pVmcsNstGst  = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
     17174            uint64_t const   uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
     17175            uint64_t const   uReadShadow  = pVmcsNstGst->u64Cr0ReadShadow.u;
    1718117176            if (   (uGstHostMask & X86_CR0_TS)
    1718217177                && (uReadShadow  & X86_CR0_TS))
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r91295 r91297  
    10621062        PCPUMCTX pCtx  = &pVCpu->cpum.s.Guest;
    10631063
    1064         if (pCtx->hwvirt.vmx.pVmcsR3)
    1065         {
    1066             SUPR3ContFree(pCtx->hwvirt.vmx.pVmcsR3, VMX_V_VMCS_PAGES);
    1067             pCtx->hwvirt.vmx.pVmcsR3 = NULL;
    1068         }
    10691064        if (pCtx->hwvirt.vmx.pShadowVmcsR3)
    10701065        {
     
    11391134        pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_VMX;
    11401135
    1141         /*
    1142          * Allocate the nested-guest current VMCS.
    1143          */
    1144         pCtx->hwvirt.vmx.pVmcsR3 = (PVMXVVMCS)SUPR3ContAlloc(VMX_V_VMCS_PAGES, &pCtx->hwvirt.vmx.pVmcsR0, NULL);
    1145         if (pCtx->hwvirt.vmx.pVmcsR3)
    1146         { /* likely */ }
    1147         else
    1148         {
    1149             LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMCS\n", pVCpu->idCpu, VMX_V_VMCS_PAGES));
    1150             break;
    1151         }
     1136        AssertCompile(sizeof(pCtx->hwvirt.vmx.Vmcs) == VMX_V_VMCS_PAGES * X86_PAGE_SIZE);
    11521137
    11531138        /*
     
    12781263         * Zero out all allocated pages (should compress well for saved-state).
    12791264         */
    1280         memset(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs),               0, VMX_V_VMCS_SIZE);
     1265        RT_ZERO(pCtx->hwvirt.vmx.Vmcs);
    12811266        memset(pCtx->hwvirt.vmx.CTX_SUFF(pShadowVmcs),         0, VMX_V_SHADOW_VMCS_SIZE);
    12821267        memset(pCtx->hwvirt.vmx.CTX_SUFF(pvVirtApicPage),      0, VMX_V_VIRT_APIC_SIZE);
     
    13071292    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
    13081293    Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
    1309     Assert(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
    13101294    Assert(pCtx->hwvirt.vmx.CTX_SUFF(pShadowVmcs));
    13111295
    1312     memset(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs),       0, VMX_V_VMCS_SIZE);
     1296    RT_ZERO(pCtx->hwvirt.vmx.Vmcs);
    13131297    memset(pCtx->hwvirt.vmx.CTX_SUFF(pShadowVmcs), 0, VMX_V_SHADOW_VMCS_SIZE);
    13141298    pCtx->hwvirt.vmx.GCPhysVmxon       = NIL_RTGCPHYS;
     
    25862570        if (pVM->cpum.s.GuestFeatures.fVmx)
    25872571        {
    2588             Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
    25892572            SSMR3PutGCPhys(pSSM,   pGstCtx->hwvirt.vmx.GCPhysVmxon);
    25902573            SSMR3PutGCPhys(pSSM,   pGstCtx->hwvirt.vmx.GCPhysVmcs);
     
    25942577            SSMR3PutBool(pSSM,     pGstCtx->hwvirt.vmx.fInterceptEvents);
    25952578            SSMR3PutBool(pSSM,     pGstCtx->hwvirt.vmx.fNmiUnblockingIret);
    2596             SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
     2579            SSMR3PutStructEx(pSSM, &pGstCtx->hwvirt.vmx.Vmcs, sizeof(pGstCtx->hwvirt.vmx.Vmcs), 0, g_aVmxHwvirtVmcs, NULL);
    25972580            SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
    25982581            SSMR3PutMem(pSSM,      pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     
    28752858                    if (pVM->cpum.s.GuestFeatures.fVmx)
    28762859                    {
    2877                         Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
    28782860                        SSMR3GetGCPhys(pSSM,   &pGstCtx->hwvirt.vmx.GCPhysVmxon);
    28792861                        SSMR3GetGCPhys(pSSM,   &pGstCtx->hwvirt.vmx.GCPhysVmcs);
     
    28832865                        SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.vmx.fInterceptEvents);
    28842866                        SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.vmx.fNmiUnblockingIret);
    2885                         SSMR3GetStructEx(pSSM,  pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
     2867                        SSMR3GetStructEx(pSSM, &pGstCtx->hwvirt.vmx.Vmcs, sizeof(pGstCtx->hwvirt.vmx.Vmcs),
     2868                                         0, g_aVmxHwvirtVmcs, NULL);
    28862869                        SSMR3GetStructEx(pSSM,  pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
    28872870                        SSMR3GetMem(pSSM,       pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
     
    41044087        pHlp->pfnPrintf(pHlp, "  fVirtNmiBlocking           = %RTbool\n",   pCtx->hwvirt.vmx.fVirtNmiBlocking);
    41054088        pHlp->pfnPrintf(pHlp, "  VMCS cache:\n");
    4106         cpumR3InfoVmxVmcs(pVCpu, pHlp, pCtx->hwvirt.vmx.pVmcsR3, "  " /* pszPrefix */);
     4089        cpumR3InfoVmxVmcs(pVCpu, pHlp, &pCtx->hwvirt.vmx.Vmcs, "  " /* pszPrefix */);
    41074090    }
    41084091    else
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r91291 r91297  
    243243    .Guest.hwvirt.svm.fInterceptEvents       resb         1
    244244
     245    .Guest.hwvirt.vmx.Vmcs EQU .Guest.hwvirt.svm.Vmcb
     246
    245247    ;.unnamed_padding.1 resb 0
    246248    alignb 8
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r91291 r91297  
    148148    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInterceptEvents);
    149149    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fNmiUnblockingIret);
    150     GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR0);
    151     GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR3);
    152150    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR0);
    153151    GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR3);
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r91291 r91297  
    252252    CHECK_MEMBER_ALIGNMENT(VMCPU, cpum.s.Guest.hwvirt.svm.abMsrBitmap, 4096);
    253253    CHECK_MEMBER_ALIGNMENT(VMCPU, cpum.s.Guest.hwvirt.svm.abIoBitmap, 4096);
     254    CHECK_MEMBER_ALIGNMENT(VMCPU, cpum.s.Guest.hwvirt.vmx.Vmcs, 4096);
    254255
    255256    PVM pVM = NULL; NOREF(pVM);
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette