
Changeset 72643 in vbox for trunk


Timestamp: Jun 21, 2018 4:02:03 PM
Author: vboxsync
Message: VMM: Make SVM R0 code use CPUMCTX_EXTRN_xxx flags and cleanups. bugref:9193

Location: trunk
Files: 24 edited

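Note: the following is a minimal sketch (not part of this changeset) of the CPUMCTX_EXTRN_xxx import-on-demand idiom the commit message refers to, written against the HMSVM_CPUMCTX_ASSERT and HMSVM_CPUMCTX_IMPORT_STATE macros added to HMSVMR0.cpp below. The handler name and body are illustrative only; it assumes the usual HMSVMR0.cpp includes and types.

    /* Hypothetical #VMEXIT handler showing the CPUMCTX_EXTRN_xxx flow. */
    static int hmR0SvmExitExample(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        /* State this handler relies on must already have been fetched from the
           VMCB, i.e. the corresponding fExtrn bits must be clear. */
        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);

        /* Anything else is imported on demand just before it is used. */
        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SREG_MASK);

        /* ... handle the exit using pCtx->rip, pCtx->cr0, pCtx->cs, etc. ... */
        return VINF_SUCCESS;
    }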
  • trunk/include/VBox/vmm/cpum.h

    r72522 r72643  
    13761376 * @param   pCtx    Current CPU context.
    13771377 */
    1378 DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCPUMCTX pCtx)
     1378DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx)
    13791379{
    13801380    return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
     
    14521452    if (!pVmcb)
    14531453        return false;
    1454     if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    1455         return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fIntercept);
    1456     return HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, fIntercept);
     1454    if (HMHasGuestSvmVmcbCached(pVCpu))
     1455        return HMIsGuestSvmCtrlInterceptSet(pVCpu, fIntercept);
     1456    return RT_BOOL(pVmcb->ctrl.u64InterceptCtrl & fIntercept);
    14571457}
    14581458
     
    14711471    if (!pVmcb)
    14721472        return false;
    1473     if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    1474         return RT_BOOL(pVmcb->ctrl.u16InterceptRdCRx & (UINT16_C(1) << uCr));
    1475     return HMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr);
     1473    if (HMHasGuestSvmVmcbCached(pVCpu))
     1474        return HMIsGuestSvmReadCRxInterceptSet(pVCpu, uCr);
     1475    return RT_BOOL(pVmcb->ctrl.u16InterceptRdCRx & (UINT16_C(1) << uCr));
    14761476}
    14771477
     
    14901490    if (!pVmcb)
    14911491        return false;
    1492     if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    1493         return RT_BOOL(pVmcb->ctrl.u16InterceptWrCRx & (UINT16_C(1) << uCr));
    1494     return HMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr);
     1492    if (HMHasGuestSvmVmcbCached(pVCpu))
     1493        return HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr);
     1494    return RT_BOOL(pVmcb->ctrl.u16InterceptWrCRx & (UINT16_C(1) << uCr));
    14951495}
    14961496
     
    15091509    if (!pVmcb)
    15101510        return false;
    1511     if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    1512         return RT_BOOL(pVmcb->ctrl.u16InterceptRdDRx & (UINT16_C(1) << uDr));
    1513     return HMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr);
     1511    if (HMHasGuestSvmVmcbCached(pVCpu))
     1512        return HMIsGuestSvmReadDRxInterceptSet(pVCpu, uDr);
     1513    return RT_BOOL(pVmcb->ctrl.u16InterceptRdDRx & (UINT16_C(1) << uDr));
    15141514}
    15151515
     
    15281528    if (!pVmcb)
    15291529        return false;
    1530     if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    1531         return RT_BOOL(pVmcb->ctrl.u16InterceptWrDRx & (UINT16_C(1) << uDr));
    1532     return HMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr);
     1530    if (HMHasGuestSvmVmcbCached(pVCpu))
     1531        return HMIsGuestSvmWriteDRxInterceptSet(pVCpu, uDr);
     1532    return RT_BOOL(pVmcb->ctrl.u16InterceptWrDRx & (UINT16_C(1) << uDr));
    15331533}
    15341534
     
    15471547    if (!pVmcb)
    15481548        return false;
    1549     if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    1550         return RT_BOOL(pVmcb->ctrl.u32InterceptXcpt & (UINT32_C(1) << uVector));
    1551     return HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector);
     1549    if (HMHasGuestSvmVmcbCached(pVCpu))
     1550        return HMIsGuestSvmXcptInterceptSet(pVCpu, uVector);
     1551    return RT_BOOL(pVmcb->ctrl.u32InterceptXcpt & (UINT32_C(1) << uVector));
    15521552}
    15531553
     
    15641564{
    15651565    PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    1566     Assert(pVmcb);
    1567     if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    1568         return pVmcb->ctrl.IntCtrl.n.u1VIntrMasking;
    1569     return HMIsGuestSvmVirtIntrMasking(pVCpu, pCtx);
     1566    if (!pVmcb)
     1567        return false;
     1568    if (HMHasGuestSvmVmcbCached(pVCpu))
     1569        return HMIsGuestSvmVirtIntrMasking(pVCpu);
     1570    return pVmcb->ctrl.IntCtrl.n.u1VIntrMasking;
    15701571}
    15711572
     
    15821583{
    15831584    PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    1584     Assert(pVmcb);
    1585     if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    1586         return pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging;
    1587     return HMIsGuestSvmNestedPagingEnabled(pVCpu, pCtx);
     1585    if (!pVmcb)
     1586        return false;
     1587    if (HMHasGuestSvmVmcbCached(pVCpu))
     1588        return HMIsGuestSvmNestedPagingEnabled(pVCpu);
     1589    return pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging;
    15881590}
    15891591
     
    16001602{
    16011603    PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    1602     Assert(pVmcb);
    1603     if (!pCtx->hwvirt.svm.fHMCachedVmcb)
    1604         return pVmcb->ctrl.u16PauseFilterCount;
    1605     return HMGetGuestSvmPauseFilterCount(pVCpu, pCtx);
     1604    if (!pVmcb)
     1605        return false;
     1606    if (HMHasGuestSvmVmcbCached(pVCpu))
     1607        return HMGetGuestSvmPauseFilterCount(pVCpu);
     1608    return pVmcb->ctrl.u16PauseFilterCount;
    16061609}
    16071610
  • trunk/include/VBox/vmm/cpum.mac

    r71833 r72643  
    270270    .hwvirt.svm.cPauseFilterThreshold  resw          1
    271271    .hwvirt.svm.fInterceptEvents       resb          1
    272     .hwvirt.svm.fHMCachedVmcb          resb          1
    273272    alignb 8
    274273    .hwvirt.svm.pvMsrBitmapR0          RTR0PTR_RES   1
  • trunk/include/VBox/vmm/cpumctx.h

    r72510 r72643  
    511511                /** 0x3c4 - Whether the injected event is subject to event intercepts. */
    512512                bool                fInterceptEvents;
    513                 /** 0x3c5 - Whether parts of the VMCB are cached (and potentially modified) by HM. */
    514                 bool                fHMCachedVmcb;
    515                 /** 0x3c6 - Padding. */
    516                 bool                afPadding[2];
     513                /** 0x3c5 - Padding. */
     514                bool                afPadding[3];
    517515                /** 0x3c8 - MSR permission bitmap - R0 ptr. */
    518516                R0PTRTYPE(void *)   pvMsrBitmapR0;
     
    764762
    765763/** @name CPUMCTX_EXTRN_XXX
    766  * Used to parts of the CPUM state that is externalized and needs fetching
     764 * Used for parts of the CPUM state that is externalized and needs fetching
    767765 * before use.
    768766 *
     
    893891                                                 | CPUMCTX_EXTRN_SYSENTER_MSRS | CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)
    894892
     893/** Hardware-virtualization (SVM or VMX) state is kept externally. */
     894#define CPUMCTX_EXTRN_HWVIRT                    UINT64_C(0x0000020000000000)
     895
    895896/** Mask of bits the keepers can use for state tracking. */
    896897#define CPUMCTX_EXTRN_KEEPER_STATE_MASK         UINT64_C(0xffff000000000000)
     
    905906#define CPUMCTX_EXTRN_NEM_WIN_MASK              UINT64_C(0x0007000000000000)
    906907
     908/** HM/SVM: Inhibit maskable interrupts (VMCPU_FF_INHIBIT_INTERRUPTS). */
     909#define CPUMCTX_EXTRN_HM_SVM_INT_SHADOW         UINT64_C(0x0001000000000000)
     910/** HM/SVM: Nested-guest interrupt pending (VMCPU_FF_INTERRUPT_NESTED_GUEST). */
     911#define CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ        UINT64_C(0x0002000000000000)
     912/** HM/SVM: Mask. */
     913#define CPUMCTX_EXTRN_HM_SVM_MASK               UINT64_C(0x0003000000000000)
     914
     915/** HM/VMX: Guest-interruptibility state (VMCPU_FF_INHIBIT_INTERRUPTS,
     916 *  VMCPU_FF_BLOCK_NMIS). */
     917#define CPUMCTX_EXTRN_HM_VMX_INT_STATE          UINT64_C(0x0001000000000000)
     918/** HM/VMX: Mask. */
     919#define CPUMCTX_EXTRN_HM_VMX_MASK               UINT64_C(0x0001000000000000)
     920
    907921/** All CPUM state bits, not including keeper specific ones. */
    908 #define CPUMCTX_EXTRN_ALL                       UINT64_C(0x000001fffffffffc)
     922#define CPUMCTX_EXTRN_ALL                       UINT64_C(0x000003fffffffffc)
    909923/** @} */
    910924
  • trunk/include/VBox/vmm/hm.h

    r72599 r72643  
    216216VMMR0_INT_DECL(void)            HMR0NotifyCpumModifiedHostCr0(PVMCPU VCpu);
    217217VMMR0_INT_DECL(bool)            HMR0SuspendPending(void);
     218VMMR0_INT_DECL(int)             HMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt);
     219VMMR0_INT_DECL(int)             HMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat);
    218220
    219221# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
  • trunk/include/VBox/vmm/hm_svm.h

    r72065 r72643  
    10671067    /** Cache of the LBR virtualization bit. */
    10681068    bool                fLbrVirt;
     1069    /** Whether the VMCB is cached by HM.  */
     1070    bool                fCacheValid;
    10691071    /** Alignment. */
    1070     bool                afPadding0[5];
     1072    bool                afPadding0[4];
    10711073} SVMNESTEDVMCBCACHE;
    10721074#pragma pack()
     
    10761078typedef const SVMNESTEDVMCBCACHE *PCSVMNESTEDVMCBCACHE;
    10771079AssertCompileSizeAlignment(SVMNESTEDVMCBCACHE, 8);
    1078 
    1079 #ifdef IN_RING0
    1080 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
    1081 #endif /* IN_RING0 */
    10821080
    10831081/**
     
    11401138 * Don't add any more functions here unless there is no other option.
    11411139 */
    1142 VMM_INT_DECL(bool)     HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept);
    1143 VMM_INT_DECL(bool)     HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr);
    1144 VMM_INT_DECL(bool)     HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr);
    1145 VMM_INT_DECL(bool)     HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr);
    1146 VMM_INT_DECL(bool)     HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr);
    1147 VMM_INT_DECL(bool)     HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector);
    1148 VMM_INT_DECL(bool)     HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx);
    1149 VMM_INT_DECL(bool)     HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
    1150 VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx);
     1140VMM_INT_DECL(bool)     HMHasGuestSvmVmcbCached(PVMCPU pVCpu);
     1141VMM_INT_DECL(bool)     HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, uint64_t fIntercept);
     1142VMM_INT_DECL(bool)     HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr);
     1143VMM_INT_DECL(bool)     HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr);
     1144VMM_INT_DECL(bool)     HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr);
     1145VMM_INT_DECL(bool)     HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr);
     1146VMM_INT_DECL(bool)     HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, uint8_t uVector);
     1147VMM_INT_DECL(bool)     HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu);
     1148VMM_INT_DECL(bool)     HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu);
     1149VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu);
     1150
    11511151/** @} */
    11521152
  • trunk/include/VBox/vmm/hm_vmx.h

    r69107 r72643  
    25352535}
    25362536
    2537 #ifdef IN_RING0
    2538 VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
    2539 VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys);
    2540 #endif /* IN_RING0 */
    2541 
    25422537/** @} */
    25432538
  • trunk/include/VBox/vmm/iem.h

    r72592 r72643  
    218218                                                    | CPUMCTX_EXTRN_DR7 /* for memory breakpoints */ )
    219219
     220#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     221/** The CPUMCTX_EXTRN_XXX mask needed when calling IEMExecSvmVmexit().
     222 * IEM will ASSUME the caller has ensured these are already present. */
     223# define IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK   (  CPUMCTX_EXTRN_RSP \
     224                                              | CPUMCTX_EXTRN_RAX \
     225                                              | CPUMCTX_EXTRN_RIP \
     226                                              | CPUMCTX_EXTRN_RFLAGS \
     227                                              | CPUMCTX_EXTRN_CS \
     228                                              | CPUMCTX_EXTRN_SS \
     229                                              | CPUMCTX_EXTRN_DS \
     230                                              | CPUMCTX_EXTRN_ES \
     231                                              | CPUMCTX_EXTRN_GDTR \
     232                                              | CPUMCTX_EXTRN_IDTR \
     233                                              | CPUMCTX_EXTRN_CR_MASK \
     234                                              | CPUMCTX_EXTRN_EFER \
     235                                              | CPUMCTX_EXTRN_DR6 \
     236                                              | CPUMCTX_EXTRN_DR7 \
     237                                              | CPUMCTX_EXTRN_OTHER_MSRS \
     238                                              | CPUMCTX_EXTRN_HWVIRT)
     239#endif
    220240
    221241VMMDECL(VBOXSTRICTRC)       IEMExecOne(PVMCPU pVCpu);
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r72488 r72643  
    24502450     *         RPL = CPL.  Weird.
    24512451     */
     2452    Assert(!(pVCpu->cpum.s.Guest.fExtrn & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS)));
    24522453    uint32_t uCpl;
    24532454    if (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PE)
     
    27662767    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    27672768    {
    2768         if (!pCtx->hwvirt.svm.fHMCachedVmcb)
     2769        if (!HMHasGuestSvmVmcbCached(pVCpu))
    27692770        {
    27702771            PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     
    28072808            }
    28082809
    2809             case CPUMCTX_EXTRN_KEEPER_HM: /** @todo make HM use CPUMCTX_EXTRN_XXX. */
     2810            case CPUMCTX_EXTRN_KEEPER_HM:
     2811            {
     2812#ifdef IN_RING0
     2813                int rc = HMR0ImportStateOnDemand(pVCpu, &pVCpu->cpum.s.Guest, fExtrnImport);
     2814                Assert(rc == VINF_SUCCESS || RT_FAILURE_NP(rc));
     2815                return rc;
     2816#else
     2817                return VINF_SUCCESS;
     2818#endif
     2819            }
    28102820            default:
    28112821                AssertLogRelMsgFailedReturn(("%#RX64 vs %#RX64\n", pVCpu->cpum.s.Guest.fExtrn, fExtrnImport), VERR_CPUM_IPE_2);
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r72462 r72643  
    8282    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
    8383#ifdef IN_RING0
    84     PVM pVM = pVCpu->CTX_SUFF(pVM);
    85     if (pVM->hm.s.vmx.fSupported)
    86         return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);
    87 
    88     Assert(pVM->hm.s.svm.fSupported);
    89     return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
    90 
     84    return HMR0InvalidatePage(pVCpu, GCVirt);
    9185#else
    9286    hmQueueInvlPage(pVCpu, GCVirt);
     
    288282        return VINF_SUCCESS;
    289283
    290 #ifdef IN_RING0
    291     if (pVM->hm.s.vmx.fSupported)
    292     {
    293         VMCPUID idThisCpu = VMMGetCpuId(pVM);
    294 
    295         for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    296         {
    297             PVMCPU pVCpu = &pVM->aCpus[idCpu];
    298 
    299             if (idThisCpu == idCpu)
    300             {
    301                 /** @todo r=ramshankar: Intel does not support flushing by guest physical
    302                  *        address either. See comment in VMXR0InvalidatePhysPage(). Fix this. */
    303                 VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
    304             }
    305             else
    306             {
    307                 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    308                 hmPokeCpuForTlbFlush(pVCpu, true /*fAccountFlushStat*/);
    309             }
    310         }
    311         return VINF_SUCCESS;
    312     }
    313 
    314     /* AMD-V doesn't support invalidation with guest physical addresses; see
    315        comment in SVMR0InvalidatePhysPage. */
    316     Assert(pVM->hm.s.svm.fSupported);
    317 #else
    318     NOREF(GCPhys);
    319 #endif
    320 
    321     HMFlushTLBOnAllVCpus(pVM);
    322     return VINF_SUCCESS;
     284    /*
     285     * AMD-V: Doesn't support invalidation with guest physical addresses.
     286     *
     287     * VT-x: Doesn't support invalidation with guest physical addresses.
     288     * INVVPID instruction takes only a linear address while invept only flushes by EPT
     289     * not individual addresses.
     290     *
     291     * We update the force flag and flush before the next VM-entry, see @bugref{6568}.
     292     */
     293    RT_NOREF(GCPhys);
     294    /** @todo Remove or figure out to way to update the Phys STAT counter.  */
     295    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */
     296    return HMFlushTLBOnAllVCpus(pVM);
    323297}
    324298
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r72462 r72643  
    130130VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx)
    131131{
    132     if (pCtx->hwvirt.svm.fHMCachedVmcb)
    133     {
    134         PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
    135         PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    136 
     132    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     133    if (pVmcbNstGstCache->fCacheValid)
     134    {
    137135        /*
    138136         * Restore fields as our own code might look at the VMCB controls as part
     
    141139         * by a physical CPU on #VMEXIT.
    142140         */
     141        PSVMVMCBCTRL pVmcbNstGstCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
    143142        pVmcbNstGstCtrl->u16InterceptRdCRx                 = pVmcbNstGstCache->u16InterceptRdCRx;
    144143        pVmcbNstGstCtrl->u16InterceptWrCRx                 = pVmcbNstGstCache->u16InterceptWrCRx;
     
    153152        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVmcbNstGstCache->fNestedPaging;
    154153        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt               = pVmcbNstGstCache->fLbrVirt;
    155         pCtx->hwvirt.svm.fHMCachedVmcb = false;
     154        pVmcbNstGstCache->fCacheValid = false;
    156155    }
    157156
     
    166165     * change here.
    167166     */
     167    /** @todo Only signal state needed for VM-exit (e.g. skip
     168     *        LDTR, TR etc., see IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK.
     169     *        Do this while extending HM_CHANGED_xxx flags. See
     170     *        todo in hmR0SvmHandleExitNested(). */
    168171    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    169172}
     
    209212{
    210213    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    211     Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
    212     Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
    213     NOREF(pCtx);
    214     PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     214    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
     215    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     216    Assert(pVmcbNstGstCache->fCacheValid);
    215217    return uTicks + pVmcbNstGstCache->u64TSCOffset;
    216218}
     
    401403
    402404/**
     405 * Returns whether HM has cached the nested-guest VMCB.
     406 *
     407 * If the VMCB is cached by HM, it means HM may have potentially modified the
     408 * VMCB for execution using hardware-assisted SVM.
     409 *
     410 * @returns true if HM has cached the nested-guest VMCB, false otherwise.
     411 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
     412 */
     413VMM_INT_DECL(bool) HMHasGuestSvmVmcbCached(PVMCPU pVCpu)
     414{
     415    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     416    return pVmcbNstGstCache->fCacheValid;
     417}
     418
     419
     420/**
    403421 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
    404422 * active.
     
    406424 * @returns @c true if in intercept is set, @c false otherwise.
    407425 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    408  * @param   pCtx        Pointer to the context.
    409426 * @param   fIntercept  The SVM control/instruction intercept, see
    410427 *                      SVM_CTRL_INTERCEPT_*.
    411428 */
    412 VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept)
    413 {
    414     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     429VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, uint64_t fIntercept)
     430{
     431    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    415432    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    416433    return RT_BOOL(pVmcbNstGstCache->u64InterceptCtrl & fIntercept);
     
    423440 * @returns @c true if in intercept is set, @c false otherwise.
    424441 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    425  * @param   pCtx    Pointer to the context.
    426442 * @param   uCr     The CR register number (0 to 15).
    427443 */
    428 VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
     444VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr)
    429445{
    430446    Assert(uCr < 16);
    431     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     447    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    432448    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    433449    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdCRx & (1 << uCr));
     
    440456 * @returns @c true if in intercept is set, @c false otherwise.
    441457 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    442  * @param   pCtx    Pointer to the context.
    443458 * @param   uCr     The CR register number (0 to 15).
    444459 */
    445 VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
     460VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr)
    446461{
    447462    Assert(uCr < 16);
    448     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     463    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    449464    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    450465    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrCRx & (1 << uCr));
     
    457472 * @returns @c true if in intercept is set, @c false otherwise.
    458473 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    459  * @param   pCtx    Pointer to the context.
    460474 * @param   uDr     The DR register number (0 to 15).
    461475 */
    462 VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
     476VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr)
    463477{
    464478    Assert(uDr < 16);
    465     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     479    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    466480    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    467481    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdDRx & (1 << uDr));
     
    474488 * @returns @c true if in intercept is set, @c false otherwise.
    475489 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    476  * @param   pCtx    Pointer to the context.
    477490 * @param   uDr     The DR register number (0 to 15).
    478491 */
    479 VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
     492VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr)
    480493{
    481494    Assert(uDr < 16);
    482     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     495    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    483496    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    484497    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrDRx & (1 << uDr));
     
    491504 * @returns true if in intercept is active, false otherwise.
    492505 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    493  * @param   pCtx        Pointer to the context.
    494506 * @param   uVector     The exception / interrupt vector.
    495507 */
    496 VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
     508VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, uint8_t uVector)
    497509{
    498510    Assert(uVector < 32);
    499     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     511    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    500512    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    501513    return RT_BOOL(pVmcbNstGstCache->u32InterceptXcpt & (1 << uVector));
     
    508520 * @returns true if virtual-interrupts are masked, @c false otherwise.
    509521 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    510  * @param   pCtx    Pointer to the context.
    511  */
    512 VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx)
    513 {
    514     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     522 */
     523VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu)
     524{
     525    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    515526    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    516527    return pVmcbNstGstCache->fVIntrMasking;
     
    523534 * @returns true if nested-paging is enabled, @c false otherwise.
    524535 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    525  * @param   pCtx    Pointer to the context.
    526  */
    527 VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
    528 {
    529     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     536 */
     537VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu)
     538{
     539    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    530540    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    531541    return pVmcbNstGstCache->fNestedPaging;
     
    538548 * @returns The pause-filter count.
    539549 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    540  * @param   pCtx    Pointer to the context.
    541  */
    542 VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx)
    543 {
    544     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
     550 */
     551VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu)
     552{
     553    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    545554    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    546555    return pVmcbNstGstCache->u16PauseFilterCount;
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r72642 r72643  
    1516615166VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
    1516715167{
    15168     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
     15168    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
    1516915169    VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    1517015170    if (pVCpu->iem.s.cActiveMappings)
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h

    r72518 r72643  
    143143        || uExitCode == SVM_EXIT_INVALID)
    144144    {
    145         LogFlow(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pVCpu->cpum.GstCtx.cs.Sel,
    146                  pVCpu->cpum.GstCtx.rip, uExitCode, uExitInfo1, uExitInfo2));
     145        LogFlow(("iemSvmVmexit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n",
     146                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uExitCode, uExitInfo1, uExitInfo2));
    147147
    148148        /*
     
    824824             *        below. */
    825825            LogFlow(("iemSvmVmrun: Injecting event: %04x:%08RX64 vec=%#x type=%d uErr=%u cr2=%#RX64 cr3=%#RX64 efer=%#RX64\n",
    826                      pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uVector, enmType, uErrorCode, pVCpu->cpum.GstCtx.cr2, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.msrEFER));
     826                     pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uVector, enmType, uErrorCode, pVCpu->cpum.GstCtx.cr2,
     827                     pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.msrEFER));
    827828
    828829            /*
     
    840841        else
    841842            LogFlow(("iemSvmVmrun: Entering nested-guest: %04x:%08RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64 efer=%#RX64 efl=%#x\n",
    842                      pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER, pVCpu->cpum.GstCtx.rflags.u64));
     843                     pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3,
     844                     pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER, pVCpu->cpum.GstCtx.rflags.u64));
    843845
    844846        LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict)));
     
    12641266    {
    12651267        LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
     1268        IEM_CTX_IMPORT_RET(pVCpu,   CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR
     1269                                  | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS);
     1270
    12661271        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
    12671272        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
     
    14001405    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter)
    14011406    {
     1407        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
     1408
    14021409        /* TSC based pause-filter thresholding. */
    14031410        if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r72208 r72643  
    9696    DECLR0CALLBACKMEMBER(int,  pfnInitVM, (PVM pVM));
    9797    DECLR0CALLBACKMEMBER(int,  pfnTermVM, (PVM pVM));
    98     DECLR0CALLBACKMEMBER(int,  pfnSetupVM ,(PVM pVM));
     98    DECLR0CALLBACKMEMBER(int,  pfnSetupVM, (PVM pVM));
    9999    /** @} */
    100100
     
    16441644
    16451645/**
     1646 * Invalidates a guest page from the host TLB.
     1647 *
     1648 * @param   pVCpu       The cross context virtual CPU structure.
     1649 * @param   GCVirt      Page to invalidate.
     1650 */
     1651VMMR0_INT_DECL(int) HMR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
     1652{
     1653    PVM pVM = pVCpu->CTX_SUFF(pVM);
     1654    if (pVM->hm.s.vmx.fSupported)
     1655        return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);
     1656    return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
     1657}
     1658
     1659
     1660/**
    16461661 * Returns the cpu structure for the current cpu.
    16471662 * Keep in mind that there is no guarantee it will stay the same (long jumps to ring 3!!!).
     
    16791694    return;
    16801695}
     1696
     1697
     1698/**
     1699 * Interface for importing state on demand (used by IEM).
     1700 *
     1701 * @returns VBox status code.
     1702 * @param   pVCpu       The cross context CPU structure.
     1703 * @param   pCtx        The target CPU context.
     1704 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
     1705 */
     1706VMMR0_INT_DECL(int) HMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
     1707{
     1708    /** @todo Intel. */
     1709#if 0
     1710    if (pVCpu->CTX_SUFF(pVM).hm.s.vmx.fSupported)
     1711        return VMXR0ImportStateOnDemand(pVCpu, pCtx, fWhat);
     1712#endif
     1713    return SVMR0ImportStateOnDemand(pVCpu, pCtx, fWhat);
     1714}
     1715
    16811716
    16821717#ifdef VBOX_WITH_RAW_MODE
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r72639 r72643  
    8080#define HMSVM_EXIT_DECL                 static int
    8181
     82/**
     83 * Subset of the guest-CPU state that is kept by SVM R0 code while executing the
     84 * guest using hardware-assisted SVM.
     85 *
     86 * This excludes state like TSC AUX, GPRs (other than RSP, RAX) which are always
     87 * are swapped and restored across the world-switch and also registers like
     88 * EFER, PAT MSR etc. which cannot be modified by the guest without causing a
     89 * \#VMEXIT.
     90 */
     91#define HMSVM_CPUMCTX_EXTRN_ALL         (  CPUMCTX_EXTRN_RIP            \
     92                                         | CPUMCTX_EXTRN_RFLAGS         \
     93                                         | CPUMCTX_EXTRN_RAX            \
     94                                         | CPUMCTX_EXTRN_RSP            \
     95                                         | CPUMCTX_EXTRN_SREG_MASK      \
     96                                         | CPUMCTX_EXTRN_CR0            \
     97                                         | CPUMCTX_EXTRN_CR2            \
     98                                         | CPUMCTX_EXTRN_CR3            \
     99                                         | CPUMCTX_EXTRN_TABLE_MASK     \
     100                                         | CPUMCTX_EXTRN_DR6            \
     101                                         | CPUMCTX_EXTRN_DR7            \
     102                                         | CPUMCTX_EXTRN_KERNEL_GS_BASE \
     103                                         | CPUMCTX_EXTRN_SYSCALL_MSRS   \
     104                                         | CPUMCTX_EXTRN_SYSENTER_MSRS  \
     105                                         | CPUMCTX_EXTRN_HWVIRT         \
     106                                         | CPUMCTX_EXTRN_HM_SVM_MASK)
     107
     108/** Macro for importing guest state from the VMCB back into CPUMCTX.  */
     109#define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_pCtx, a_fWhat) \
     110    do { \
     111        hmR0SvmImportGuestState((a_pVCpu), (a_pCtx), (a_fWhat)); \
     112    } while (0)
     113
     114/** Assert that the required state bits are fetched. */
     115#define HMSVM_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz)          AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
     116                                                                      ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
     117                                                                      (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
     118
    82119/** Macro for checking and returning from the using function for
    83120 * \#VMEXIT intercepts that maybe caused during delivering of another
     
    92129        else if (     rc == VINF_EM_RESET \
    93130                 &&   CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \
     131        { \
     132            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); \
    94133            return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_SHUTDOWN, 0, 0)); \
     134        } \
    95135        else \
    96136            return rc; \
     
    108148#endif
    109149
    110 /**
    111  * Updates interrupt shadow for the current RIP.
    112  */
     150/** Macro which updates interrupt shadow for the current RIP.  */
    113151#define HMSVM_UPDATE_INTR_SHADOW(pVCpu, pCtx) \
    114152    do { \
     
    154192/** Validate segment descriptor granularity bit. */
    155193#ifdef VBOX_STRICT
    156 # define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
    157     AssertMsg(   !pMixedCtx->reg.Attr.n.u1Present \
    158               || (   pMixedCtx->reg.Attr.n.u1Granularity \
    159                   ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
    160                   :  pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
    161               ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \
    162               pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
     194# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg) \
     195    AssertMsg(   !(a_pCtx)->reg.Attr.n.u1Present \
     196              || (   (a_pCtx)->reg.Attr.n.u1Granularity \
     197                  ? ((a_pCtx)->reg.u32Limit & 0xfff) == 0xfff \
     198                  :  (a_pCtx)->reg.u32Limit <= UINT32_C(0xfffff)), \
     199              ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", (a_pCtx)->reg.u32Limit, \
     200              (a_pCtx)->reg.Attr.u, (a_pCtx)->reg.u64Base))
    163201#else
    164 # define HMSVM_ASSERT_SEG_GRANULARITY(reg)              do { } while (0)
     202# define HMSVM_ASSERT_SEG_GRANULARITY(a_pCtx, reg)      do { } while (0)
    165203#endif
    166204
     
    320358 * @returns VBox status code.
    321359 * @param   pVCpu           The cross context virtual CPU structure.
    322  * @param   pMixedCtx       Pointer to the guest-CPU context.
     360 * @param   pCtx            Pointer to the guest-CPU context.
    323361 * @param   pSvmTransient   Pointer to the SVM-transient structure.
    324362 */
     
    329367*   Internal Functions                                                                                                           *
    330368*********************************************************************************************************************************/
    331 static void hmR0SvmSetMsrPermission(PCPUMCTX pCtx, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
     369static void hmR0SvmSetMsrPermission(PCCPUMCTX pCtx, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,
    332370                                    SVMMSREXITWRITE enmWrite);
    333371static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
    334 static void hmR0SvmLeave(PVMCPU pVCpu);
     372static void hmR0SvmLeave(PVMCPU pVCpu, bool fImportState);
     373static void hmR0SvmImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat);
     374
    335375
    336376/** @name \#VMEXIT handlers.
     
    385425/** @} */
    386426
    387 static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
     427static int hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
    388428#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    389429static int hmR0SvmHandleExitNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
     
    402442
    403443#ifdef VBOX_STRICT
    404 # define HMSVM_LOG_CS           RT_BIT_32(0)
    405 # define HMSVM_LOG_SS           RT_BIT_32(1)
    406 # define HMSVM_LOG_FS           RT_BIT_32(2)
    407 # define HMSVM_LOG_GS           RT_BIT_32(3)
    408 # define HMSVM_LOG_LBR          RT_BIT_32(4)
    409 # define HMSVM_LOG_ALL          (  HMSVM_LOG_CS \
     444# define HMSVM_LOG_RBP_RSP      RT_BIT_32(0)
     445# define HMSVM_LOG_CR_REGS      RT_BIT_32(1)
     446# define HMSVM_LOG_CS           RT_BIT_32(2)
     447# define HMSVM_LOG_SS           RT_BIT_32(3)
     448# define HMSVM_LOG_FS           RT_BIT_32(4)
     449# define HMSVM_LOG_GS           RT_BIT_32(5)
     450# define HMSVM_LOG_LBR          RT_BIT_32(6)
     451# define HMSVM_LOG_ALL          (  HMSVM_LOG_RBP_RSP \
     452                                 | HMSVM_LOG_CR_REGS \
     453                                 | HMSVM_LOG_CS \
    410454                                 | HMSVM_LOG_SS \
    411455                                 | HMSVM_LOG_FS \
     
    421465 * @param   pszPrefix   Log prefix.
    422466 * @param   fFlags      Log flags, see HMSVM_LOG_XXX.
    423  * @param   uVerbose    The verbosity level, currently unused.
    424  */
    425 static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, PCPUMCTX pCtx, const char *pszPrefix, uint32_t fFlags,
     467 * @param   uVerboses    The verbosity level, currently unused.
     468 */
     469static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, PCCPUMCTX pCtx, const char *pszPrefix, uint32_t fFlags,
    426470                             uint8_t uVerbose)
    427471{
    428472    RT_NOREF2(pVCpu, uVerbose);
    429473
    430     Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip,
    431           pCtx->rflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4));
    432     Log4(("%s: rsp=%#RX64 rbp=%#RX64 rdi=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp, pCtx->rdi));
     474    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
     475    Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX64\n", pszPrefix, pCtx->cs.Sel, pCtx->rip, pCtx->rflags.u));
     476
     477    if (fFlags & HMSVM_LOG_RBP_RSP)
     478    {
     479        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RBP);
     480        Log4(("%s: rsp=%#RX64 rbp=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp));
     481    }
     482
     483    if (fFlags & HMSVM_LOG_CR_REGS)
     484    {
     485        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4);
     486        Log4(("%s: cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", pszPrefix, pCtx->cr0, pCtx->cr3, pCtx->cr4));
     487    }
     488
    433489    if (fFlags & HMSVM_LOG_CS)
    434490    {
     491        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
    435492        Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base,
    436493              pCtx->cs.u32Limit, pCtx->cs.Attr.u));
     
    438495    if (fFlags & HMSVM_LOG_SS)
    439496    {
     497        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
    440498        Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base,
    441499              pCtx->ss.u32Limit, pCtx->ss.Attr.u));
     
    443501    if (fFlags & HMSVM_LOG_FS)
    444502    {
     503        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
    445504        Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base,
    446505              pCtx->fs.u32Limit, pCtx->fs.Attr.u));
     
    448507    if (fFlags & HMSVM_LOG_GS)
    449508    {
     509        HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
    450510        Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base,
    451511              pCtx->gs.u32Limit, pCtx->gs.Attr.u));
     
    753813 * @param   pCtx        Pointer to the guest-CPU context.
    754814 */
    755 DECLINLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPU pVCpu, PCPUMCTX pCtx)
     815DECLINLINE(bool) hmR0SvmSupportsVmcbCleanBits(PVMCPU pVCpu, PCCPUMCTX pCtx)
    756816{
    757817    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    827887 *          caller needs to take care of this.
    828888 */
    829 static void hmR0SvmSetMsrPermission(PCPUMCTX pCtx, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
     889static void hmR0SvmSetMsrPermission(PCCPUMCTX pCtx, uint8_t *pbMsrBitmap, uint32_t idMsr, SVMMSREXITREAD enmRead,
    830890                                    SVMMSREXITWRITE enmWrite)
    831891{
     
    10931153 * @returns Pointer to the nested-guest VMCB cache.
    10941154 * @param   pVCpu           The cross context virtual CPU structure.
    1095  * @param   pCtx            Pointer to the guest-CPU context.
    1096  */
    1097 DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPU pVCpu, PCPUMCTX pCtx)
     1155 */
     1156DECLINLINE(PSVMNESTEDVMCBCACHE) hmR0SvmGetNestedVmcbCache(PVMCPU pVCpu)
    10981157{
    10991158#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    1100     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx);
     1159    Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
    11011160    return &pVCpu->hm.s.svm.NstGstVmcbCache;
    11021161#else
    1103     RT_NOREF2(pVCpu, pCtx);
     1162    RT_NOREF(pVCpu);
    11041163    return NULL;
    11051164#endif
     
    11541213 * @param   pHostCpu    Pointer to the HM host-CPU info.
    11551214 */
    1156 static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu)
     1215static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu)
    11571216{
    11581217#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
     
    13941453 *          are not intercepting it.
    13951454 */
    1396 DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, uint8_t uXcpt)
     1455DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb, uint8_t uXcpt)
    13971456{
    13981457    Assert(uXcpt != X86_XCPT_DB);
     
    14061465        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    14071466        {
    1408             PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
     1467            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
    14091468            fRemove = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(uXcpt));
    14101469        }
     
    14531512 *          are not intercepting it.
    14541513 */
    1455 DECLINLINE(bool) hmR0SvmClearCtrlIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
     1514DECLINLINE(bool) hmR0SvmClearCtrlIntercept(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb, uint64_t fCtrlIntercept)
    14561515{
    14571516    if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept)
     
    14621521        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    14631522        {
    1464             PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
     1523            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
    14651524            fRemove = !(pVmcbNstGstCache->u64InterceptCtrl & fCtrlIntercept);
    14661525        }
     
    14931552 * @remarks No-long-jump zone!!!
    14941553 */
    1495 static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     1554static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)
    14961555{
    14971556    /* The guest FPU is now always pre-loaded before executing guest code, see @bugref{7243#c101}. */
     
    15421601        {
    15431602            /* If the nested-hypervisor intercepts CR0 reads/writes, we need to continue intercepting them. */
    1544             PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
     1603            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
    15451604            pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx       & ~RT_BIT(0))
    15461605                                          | (pVmcbNstGstCache->u16InterceptRdCRx &  RT_BIT(0));
     
    15751634 * @remarks No-long-jump zone!!!
    15761635 */
    1577 static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     1636static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)
    15781637{
    15791638    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    16711730            {
    16721731                /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */
    1673                 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pCtx);
     1732                PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu);
    16741733                pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx       & ~RT_BIT(4))
    16751734                                              | (pVmcbNstGstCache->u16InterceptRdCRx &  RT_BIT(4));
     
    17041763 * @remarks No-long-jump zone!!!
    17051764 */
    1706 static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     1765static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)
    17071766{
    17081767    /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
     
    17641823 * @remarks No-long-jump zone!!!
    17651824 */
    1766 static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     1825static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)
    17671826{
    17681827    /* Guest Sysenter MSRs. */
    1769     pVmcb->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
    1770     pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
    1771     pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
     1828    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
     1829    {
     1830        pVmcb->guest.u64SysEnterCS  = pCtx->SysEnter.cs;
     1831        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
     1832    }
     1833    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
     1834    {
     1835        pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip;
     1836        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
     1837    }
     1838    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
     1839    {
     1840        pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
     1841        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
     1842    }
    17721843
    17731844    /*
     
    17871858    {
    17881859        /* Load these always as the guest may modify FS/GS base using MSRs in 64-bit mode which we don't intercept. */
    1789         pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
    1790         pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
    1791         pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
     1860        //pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
     1861        //pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
     1862        //pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
    17921863    }
    17931864    else
     
    18011872    }
    18021873
    1803     /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might
    1804      *        be writable in 32-bit mode. Clarify with AMD spec. */
     1874    /** @todo HM_CHANGED_GUEST_SYSCALL_MSRS,
     1875     *        HM_CHANGED_GUEST_KERNEL_GS_BASE */
    18051876    pVmcb->guest.u64STAR         = pCtx->msrSTAR;
    18061877    pVmcb->guest.u64LSTAR        = pCtx->msrLSTAR;
     
    18371908 * @remarks Requires EFLAGS to be up-to-date in the VMCB!
    18381909 */
    1839 static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     1910static void hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)
    18401911{
    18411912    bool fInterceptMovDRx = false;
     
    20052076 * @param   pCtx          Pointer to the guest-CPU or nested-guest-CPU context.
    20062077 */
    2007 static void hmR0SvmLoadGuestHwvirtStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
     2078static void hmR0SvmLoadGuestHwvirtStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCCPUMCTX pCtx)
    20082079{
    20092080    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_HWVIRT))
     
    20212092        uint16_t const uGuestPauseFilterCount     = pVM->hm.s.svm.cPauseFilter;
    20222093        uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
    2023         if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
     2094        if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
    20242095        {
    20252096            pVmcbNstGstCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
     
    20462117 * @param   pCtx        Pointer to the guest-CPU context.
    20472118 */
    2048 static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     2119static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)
    20492120{
    20502121    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE))
     
    20662137        pVCpu->hm.s.svm.fSyncVTpr = false;
    20672138
    2068         /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
    2069         if (pVM->hm.s.fTPRPatchingActive)
    2070         {
    2071             pCtx->msrLSTAR = u8Tpr;
     2139        if (!pVM->hm.s.fTPRPatchingActive)
     2140        {
     2141            /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
     2142            pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
     2143
     2144            /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
     2145            if (fPendingIntr)
     2146                pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
     2147            else
     2148            {
     2149                pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
     2150                pVCpu->hm.s.svm.fSyncVTpr = true;
     2151            }
     2152
     2153            pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);
     2154        }
     2155        else
     2156        {
     2157            /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
     2158            pVmcb->guest.u64LSTAR = u8Tpr;
    20722159            uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    20732160
     
    20822169            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
    20832170        }
    2084         else
    2085         {
    2086             /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
    2087             pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
    2088 
    2089             /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
    2090             if (fPendingIntr)
    2091                 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
    2092             else
    2093             {
    2094                 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
    2095                 pVCpu->hm.s.svm.fSyncVTpr = true;
    2096             }
    2097 
    2098             pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);
    2099         }
    21002171    }
    21012172
     
    21132184 * @param   pCtx        Pointer to the guest-CPU context.
    21142185 */
    2115 static void hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     2186static void hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)
    21162187{
    21172188    /* If we modify intercepts from here, please check & adjust hmR0SvmLoadGuestXcptInterceptsNested()
     
    21502221 * @param   pCtx            Pointer to the nested-guest-CPU context.
    21512222 */
    2152 static void hmR0SvmMergeVmcbCtrlsNested(PVMCPU pVCpu, PCPUMCTX pCtx)
     2223static void hmR0SvmMergeVmcbCtrlsNested(PVMCPU pVCpu, PCCPUMCTX pCtx)
    21532224{
    21542225    PVM          pVM             = pVCpu->CTX_SUFF(pVM);
     
    23002371            if (!pVCpu->hm.s.fLeaveDone)
    23012372            {
    2302                 hmR0SvmLeave(pVCpu);
     2373                hmR0SvmLeave(pVCpu, false /* fImportState */);
    23032374                pVCpu->hm.s.fLeaveDone = true;
    23042375            }
     
    23772448 * @remarks No-long-jump zone!!!
    23782449 */
    2379 static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     2450static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx)
    23802451{
    23812452    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     
    24052476#endif
    24062477
     2478    /* hmR0SvmLoadGuestApicState() must be called -after- hmR0SvmLoadGuestMsrs() as we
     2479       may overwrite LSTAR MSR in the VMCB in the case of TPR patching. */
    24072480    rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
    24082481    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     
    24512524 * @param   pCtx            Pointer to the nested-guest-CPU context.
    24522525 */
    2453 static void hmR0SvmMergeMsrpmNested(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCPUMCTX pCtx)
     2526static void hmR0SvmMergeMsrpmNested(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu, PCCPUMCTX pCtx)
    24542527{
    24552528    uint64_t const *pu64GstMsrpm    = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap;
     
    24732546 * @sa      HMSvmNstGstVmExitNotify.
    24742547 */
    2475 static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
     2548static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu, PCCPUMCTX pCtx)
    24762549{
    24772550    /*
     
    24822555     * cache and restore it, see AMD spec. 15.25.4 "Nested Paging and VMRUN/#VMEXIT".
    24832556     */
    2484     bool const fWasCached = pCtx->hwvirt.svm.fHMCachedVmcb;
     2557    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     2558    bool const fWasCached = pVmcbNstGstCache->fCacheValid;
    24852559    if (!fWasCached)
    24862560    {
    2487         PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    2488         PCSVMVMCBCTRL       pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
    2489         PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    2490 
     2561        PCSVMVMCB      pVmcbNstGst    = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     2562        PCSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    24912563        pVmcbNstGstCache->u16InterceptRdCRx       = pVmcbNstGstCtrl->u16InterceptRdCRx;
    24922564        pVmcbNstGstCache->u16InterceptWrCRx       = pVmcbNstGstCtrl->u16InterceptWrCRx;
     
    25012573        pVmcbNstGstCache->fNestedPaging           = pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging;
    25022574        pVmcbNstGstCache->fLbrVirt                = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
    2503         pCtx->hwvirt.svm.fHMCachedVmcb            = true;
     2575        pVmcbNstGstCache->fCacheValid             = true;
    25042576        Log4(("hmR0SvmCacheVmcbNested: Cached VMCB fields\n"));
    25052577    }
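With this change the "VMCB fields are cached" indicator moves out of the guest context (the old pCtx->hwvirt.svm.fHMCachedVmcb) into HM's per-VCPU nested-VMCB cache as fCacheValid, so the guest-CPU context no longer carries HM-internal bookkeeping. The cache itself works as before: the control fields HM is about to modify are captured once before running the nested-guest and restored on #VMEXIT (see HMSvmNstGstVmExitNotify). A minimal compilable sketch of that pattern, with simplified stand-in types:

    /* Stand-in types for the VMCB control area and HM's per-VCPU nested-VMCB cache. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint16_t u16InterceptRdCRx, u16InterceptWrCRx; uint64_t u64TSCOffset; } VMCBCTRLSKETCH;
    typedef struct { VMCBCTRLSKETCH Ctrl; bool fCacheValid; } NSTGSTVMCBCACHESKETCH;

    /* Capture the nested-guest's original control fields once, before HM merges in its own intercepts. */
    static void cacheVmcbNested(NSTGSTVMCBCACHESKETCH *pCache, const VMCBCTRLSKETCH *pNstGstCtrl)
    {
        if (!pCache->fCacheValid)
        {
            pCache->Ctrl        = *pNstGstCtrl;
            pCache->fCacheValid = true;
        }
    }

    /* Restore the guest's view of the VMCB on #VMEXIT and invalidate the cache. */
    static void restoreVmcbNested(NSTGSTVMCBCACHESKETCH *pCache, VMCBCTRLSKETCH *pNstGstCtrl)
    {
        if (pCache->fCacheValid)
        {
            *pNstGstCtrl        = pCache->Ctrl;
            pCache->fCacheValid = false;
        }
    }

    int main(void)
    {
        VMCBCTRLSKETCH        NstGstCtrl = { 0x000f, 0x0100, 42 };
        NSTGSTVMCBCACHESKETCH Cache      = { { 0, 0, 0 }, false };
        cacheVmcbNested(&Cache, &NstGstCtrl);
        NstGstCtrl.u16InterceptWrCRx |= 0x00ff;     /* HM merges in its own intercepts...              */
        restoreVmcbNested(&Cache, &NstGstCtrl);     /* ...and the guest's values come back on #VMEXIT. */
        printf("WrCRx=%#x cached=%d\n", NstGstCtrl.u16InterceptWrCRx, Cache.fCacheValid);
        return 0;
    }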
     
    25192591 * @param   pCtx            Pointer to the nested-guest-CPU context.
    25202592 */
    2521 static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
     2593static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu, PCCPUMCTX pCtx)
    25222594{
    25232595    PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     
    25912663 * @remarks No-long-jump zone!!!
    25922664 */
    2593 static int hmR0SvmLoadGuestStateNested(PVMCPU pVCpu, PCPUMCTX pCtx)
     2665static int hmR0SvmLoadGuestStateNested(PVMCPU pVCpu, PCCPUMCTX pCtx)
    25942666{
    25952667    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
     
    26642736 * @remarks No-long-jump zone!!!
    26652737 */
    2666 static void hmR0SvmLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     2738static void hmR0SvmLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)
    26672739{
    26682740    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    26992771
    27002772/**
     2773 * Worker for SVMR0ImportStateOnDemand.
     2774 *
     2775 * @param   pVCpu   The cross context virtual CPU structure.
     2776 * @param   pCtx    Pointer to the guest-CPU or nested-guest-CPU context.
     2777 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
     2778 */
     2779static void hmR0SvmImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
     2780{
     2781    PCSVMVMCB          pVmcb      = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     2782    PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
     2783    PCSVMVMCBCTRL      pVmcbCtrl  = &pVmcb->ctrl;
     2784
     2785    Log4(("hmR0SvmImportGuestState: fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));
     2786    if (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL)
     2787    {
     2788        fWhat &= pCtx->fExtrn;
     2789
     2790#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     2791        if (fWhat & CPUMCTX_EXTRN_HWVIRT)
     2792        {
     2793            if (   !CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
     2794                && pVmcbCtrl->IntCtrl.n.u1VGifEnable)
     2795            {
     2796                /* We don't yet support passing the VGIF feature to the guest. */
     2797                Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fVGif);
     2798                pCtx->hwvirt.fGif = pVmcbCtrl->IntCtrl.n.u1VGif;
     2799            }
     2800            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HWVIRT);
     2801        }
     2802
     2803        if (fWhat & CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ)
     2804        {
     2805            if (  !pVmcbCtrl->IntCtrl.n.u1VIrqPending
     2806                && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
     2807                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
     2808            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);
     2809        }
     2810#else
     2811        ASMAtomicUoAndU64(&pCtx->fExtrn, ~(CPUMCTX_EXTRN_HWVIRT | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ));
     2812#endif
     2813
     2814        if (fWhat & CPUMCTX_EXTRN_HM_SVM_INT_SHADOW)
     2815        {
     2816            if (pVmcbCtrl->IntShadow.n.u1IntShadow)
     2817                EMSetInhibitInterruptsPC(pVCpu, pVmcbGuest->u64RIP);
     2818            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     2819                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     2820            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_SVM_INT_SHADOW);
     2821        }
     2822
     2823        if (fWhat & CPUMCTX_EXTRN_RIP)
     2824        {
     2825            pCtx->rip = pVmcbGuest->u64RIP;
     2826            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RIP);
     2827        }
     2828
     2829        if (fWhat & CPUMCTX_EXTRN_RFLAGS)
     2830        {
     2831            pCtx->eflags.u32 = pVmcbGuest->u64RFlags;
     2832            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RFLAGS);
     2833        }
     2834
     2835        if (fWhat & CPUMCTX_EXTRN_RSP)
     2836        {
     2837            pCtx->rsp = pVmcbGuest->u64RSP;
     2838            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RSP);
     2839        }
     2840
     2841        if (fWhat & CPUMCTX_EXTRN_RAX)
     2842        {
     2843            pCtx->rax = pVmcbGuest->u64RAX;
     2844            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RAX);
     2845        }
     2846
     2847        if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
     2848        {
     2849            if (fWhat & CPUMCTX_EXTRN_CS)
     2850            {
     2851                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, CS, cs);
     2852                /*
     2853                 * Correct the hidden CS granularity bit. Haven't seen it being wrong in any other
     2854                 * register (yet).
     2855                 */
     2856                /** @todo SELM might need to be fixed as it too should not care about the
     2857                 *        granularity bit. See @bugref{6785}. */
     2858                if (   !pCtx->cs.Attr.n.u1Granularity
     2859                    &&  pCtx->cs.Attr.n.u1Present
     2860                    &&  pCtx->cs.u32Limit > UINT32_C(0xfffff))
     2861                {
     2862                    Assert((pCtx->cs.u32Limit & 0xfff) == 0xfff);
     2863                    pCtx->cs.Attr.n.u1Granularity = 1;
     2864                }
     2865                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, cs);
     2866                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CS);
     2867            }
     2868            if (fWhat & CPUMCTX_EXTRN_SS)
     2869            {
     2870                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, SS, ss);
     2871                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ss);
     2872                /*
     2873                 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and use that,
     2874                 * so it's possible that the SS DPL isn't updated by AMD-V when the CPL changes during guest
     2875                 * execution. Observed on some AMD Fusion CPUs with 64-bit guests.
     2876                 * See AMD spec. 15.5.1 "Basic operation".
     2877                 */
     2878                Assert(!(pVmcbGuest->u8CPL & ~0x3));
     2879                uint8_t const uCpl = pVmcbGuest->u8CPL;
     2880                if (pCtx->ss.Attr.n.u2Dpl != uCpl)
     2881                    pCtx->ss.Attr.n.u2Dpl = uCpl & 0x3;
     2882                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SS);
     2883            }
     2884            if (fWhat & CPUMCTX_EXTRN_DS)
     2885            {
     2886                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, DS, ds);
     2887                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ds);
     2888                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DS);
     2889            }
     2890            if (fWhat & CPUMCTX_EXTRN_ES)
     2891            {
     2892                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, ES, es);
     2893                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, es);
     2894                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_ES);
     2895            }
     2896            if (fWhat & CPUMCTX_EXTRN_FS)
     2897            {
     2898                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, FS, fs);
     2899                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, fs);
     2900                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_FS);
     2901            }
     2902            if (fWhat & CPUMCTX_EXTRN_GS)
     2903            {
     2904                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, GS, gs);
     2905                HMSVM_ASSERT_SEG_GRANULARITY(pCtx, gs);
     2906                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GS);
     2907            }
     2908        }
     2909
     2910        if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
     2911        {
     2912            if (fWhat & CPUMCTX_EXTRN_TR)
     2913            {
     2914                /*
     2915                 * Fixup TR attributes so it's compatible with Intel. Important when saved-states
     2916                 * are used between Intel and AMD, see @bugref{6208#c39}.
     2917                 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode.
     2918                 */
     2919                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, TR, tr);
     2920                if (pCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
     2921                {
     2922                    if (   pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
     2923                        || CPUMIsGuestInLongModeEx(pCtx))
     2924                        pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
     2925                    else if (pCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
     2926                        pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
     2927                }
     2928                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_TR);
     2929            }
     2930
     2931            if (fWhat & CPUMCTX_EXTRN_LDTR)
     2932            {
     2933                HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, LDTR, ldtr);
     2934                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_LDTR);
     2935            }
     2936
     2937            if (fWhat & CPUMCTX_EXTRN_GDTR)
     2938            {
     2939                pCtx->gdtr.cbGdt = pVmcbGuest->GDTR.u32Limit;
     2940                pCtx->gdtr.pGdt  = pVmcbGuest->GDTR.u64Base;
     2941                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GDTR);
     2942            }
     2943
     2944            if (fWhat & CPUMCTX_EXTRN_IDTR)
     2945            {
     2946                pCtx->idtr.cbIdt = pVmcbGuest->IDTR.u32Limit;
     2947                pCtx->idtr.pIdt  = pVmcbGuest->IDTR.u64Base;
     2948                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_IDTR);
     2949            }
     2950        }
     2951
     2952        if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
     2953        {
     2954            pCtx->msrSTAR   = pVmcbGuest->u64STAR;
     2955            pCtx->msrLSTAR  = pVmcbGuest->u64LSTAR;
     2956            pCtx->msrCSTAR  = pVmcbGuest->u64CSTAR;
     2957            pCtx->msrSFMASK = pVmcbGuest->u64SFMASK;
     2958            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSCALL_MSRS);
     2959        }
     2960
     2961        if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
     2962        {
     2963            pCtx->SysEnter.cs  = pVmcbGuest->u64SysEnterCS;
     2964            pCtx->SysEnter.eip = pVmcbGuest->u64SysEnterEIP;
     2965            pCtx->SysEnter.esp = pVmcbGuest->u64SysEnterESP;
     2966            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSENTER_MSRS);
     2967        }
     2968
     2969        if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
     2970        {
     2971            pCtx->msrKERNELGSBASE = pVmcbGuest->u64KernelGSBase;
     2972            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KERNEL_GS_BASE);
     2973        }
     2974
     2975        if (fWhat & CPUMCTX_EXTRN_DR_MASK)
     2976        {
     2977            if (fWhat & CPUMCTX_EXTRN_DR6)
     2978            {
     2979                if (!pVCpu->hm.s.fUsingHyperDR7)
     2980                    pCtx->dr[6] = pVmcbGuest->u64DR6;
     2981                else
     2982                    CPUMSetHyperDR6(pVCpu, pVmcbGuest->u64DR6);
     2983                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR6);
     2984            }
     2985
     2986            if (fWhat & CPUMCTX_EXTRN_DR7)
     2987            {
     2988                if (!pVCpu->hm.s.fUsingHyperDR7)
     2989                    pCtx->dr[7] = pVmcbGuest->u64DR7;
     2990                else
     2991                    Assert(pVmcbGuest->u64DR7 == CPUMGetHyperDR7(pVCpu));
     2992                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR7);
     2993            }
     2994        }
     2995
     2996        if (fWhat & CPUMCTX_EXTRN_CR_MASK)
     2997        {
     2998            if (fWhat & CPUMCTX_EXTRN_CR0)
     2999            {
     3000                /* We intercept changes to all CR0 bits except maybe TS & MP bits. */
     3001                uint64_t const uCr0 = (pCtx->cr0          & ~(X86_CR0_TS | X86_CR0_MP))
     3002                                    | (pVmcbGuest->u64CR0 &  (X86_CR0_TS | X86_CR0_MP));
     3003                CPUMSetGuestCR0(pVCpu, uCr0);
     3004                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR0);
     3005            }
     3006
     3007            if (fWhat & CPUMCTX_EXTRN_CR2)
     3008            {
     3009                pCtx->cr2 = pVmcbGuest->u64CR2;
     3010                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR2);
     3011            }
     3012
     3013            if (fWhat & CPUMCTX_EXTRN_CR3)
     3014            {
     3015                if (   pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
     3016                    && pCtx->cr3 != pVmcbGuest->u64CR3)
     3017                {
     3018                    CPUMSetGuestCR3(pVCpu, pVmcbGuest->u64CR3);
     3019                    if (VMMRZCallRing3IsEnabled(pVCpu))
     3020                    {
     3021                        Log4(("hmR0SvmImportGuestState: Calling PGMUpdateCR3\n"));
     3022                        PGMUpdateCR3(pVCpu, pVmcbGuest->u64CR3);
     3023                    }
     3024                    else
     3025                    {
     3026                        Log4(("hmR0SvmImportGuestState: Setting VMCPU_FF_HM_UPDATE_CR3\n"));
     3027                        VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
     3028                    }
     3029                }
     3030                ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR3);
     3031            }
     3032
     3033            /* Changes to CR4 are always intercepted. */
     3034        }
     3035
     3036        /* If everything has been imported, clear the HM keeper bit. */
     3037        if (!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL))
     3038        {
     3039            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KEEPER_HM);
     3040            Assert(!pCtx->fExtrn);
     3041        }
     3042    }
     3043    else
     3044        Assert(!pCtx->fExtrn);
     3045
     3046    /*
     3047     * Honor any pending CR3 updates.
     3048     *
     3049     * Consider this scenario: #VMEXIT -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp
     3050     * -> hmR0SvmCallRing3Callback() -> VMMRZCallRing3Disable() -> hmR0SvmImportGuestState()
     3051     * -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp -> continue with #VMEXIT
     3052     * handling -> hmR0SvmImportGuestState() and here we are.
     3053     *
     3054     * The reason for such complicated handling is that VM-exits which call into PGM expect CR3 to be
     3055     * up-to-date and thus any CR3-saves -before- the VM-exit (longjmp) would've postponed the CR3
     3056     * update via the force-flag and cleared CR3 from fExtrn. Any SVM R0 VM-exit handler that requests
     3057     * CR3 to be saved will end up here and we call PGMUpdateCR3().
     3058     *
     3059     * Since the longjmp exit path can't check these CR3 force-flags and call code that takes a lock again,
     3060     * and doesn't process force-flags the way regular exits to ring-3 do, we cover for it here.
     3061     */
     3062    if (   VMMRZCallRing3IsEnabled(pVCpu)
     3063        && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     3064    {
     3065        Assert(pCtx->cr3 == pVmcbGuest->u64CR3);
     3066        PGMUpdateCR3(pVCpu, pCtx->cr3);
     3067    }
     3068}
     3069
     3070
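hmR0SvmImportGuestState() above is the core of the CPUMCTX_EXTRN_xxx conversion: pCtx->fExtrn tracks which pieces of guest state still live only in the VMCB, and the importer copies back just the requested bits that are still marked external, clearing each bit as it goes; once nothing covered by HMSVM_CPUMCTX_EXTRN_ALL remains external, the CPUMCTX_EXTRN_KEEPER_HM bit is dropped too. CR3 is the special case: PGMUpdateCR3() may only be called while ring-3 calls are enabled, otherwise VMCPU_FF_HM_UPDATE_CR3 is set and honoured later, which is what the long comment at the end of the function is about. Callers such as SVMR0ImportStateOnDemand() or the HMSVM_CPUMCTX_IMPORT_STATE() sites later in this change simply pass the mask of state they need. The sketch below shows only the bookkeeping pattern; the SKETCH_EXTRN_* values and the two structs are simplified stand-ins, not the real CPUMCTX_EXTRN_xxx constants or VirtualBox types:

    /* Minimal, compilable sketch of the fExtrn bookkeeping behind hmR0SvmImportGuestState(). */
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_EXTRN_RIP        UINT64_C(0x0001)
    #define SKETCH_EXTRN_RFLAGS     UINT64_C(0x0002)
    #define SKETCH_EXTRN_RSP        UINT64_C(0x0004)
    #define SKETCH_EXTRN_RAX        UINT64_C(0x0008)
    #define SKETCH_EXTRN_KEEPER_HM  UINT64_C(0x8000)
    #define SKETCH_EXTRN_ALL        (SKETCH_EXTRN_RIP | SKETCH_EXTRN_RFLAGS | SKETCH_EXTRN_RSP | SKETCH_EXTRN_RAX)

    typedef struct { uint64_t u64RIP, u64RFlags, u64RSP, u64RAX; } VMCBGUESTSKETCH;   /* VMCB guest-state area. */
    typedef struct { uint64_t rip, rflags, rsp, rax, fExtrn; }     GUESTCTXSKETCH;    /* Guest-CPU context.     */

    /* Copy only the requested fields that are still marked external, clearing each
       fExtrn bit as it is imported; drop the keeper bit once nothing is left. */
    static void importGuestState(GUESTCTXSKETCH *pCtx, const VMCBGUESTSKETCH *pVmcbGuest, uint64_t fWhat)
    {
        fWhat &= pCtx->fExtrn;
        if (fWhat & SKETCH_EXTRN_RIP)    { pCtx->rip    = pVmcbGuest->u64RIP;    pCtx->fExtrn &= ~SKETCH_EXTRN_RIP; }
        if (fWhat & SKETCH_EXTRN_RFLAGS) { pCtx->rflags = pVmcbGuest->u64RFlags; pCtx->fExtrn &= ~SKETCH_EXTRN_RFLAGS; }
        if (fWhat & SKETCH_EXTRN_RSP)    { pCtx->rsp    = pVmcbGuest->u64RSP;    pCtx->fExtrn &= ~SKETCH_EXTRN_RSP; }
        if (fWhat & SKETCH_EXTRN_RAX)    { pCtx->rax    = pVmcbGuest->u64RAX;    pCtx->fExtrn &= ~SKETCH_EXTRN_RAX; }
        if (!(pCtx->fExtrn & SKETCH_EXTRN_ALL))
            pCtx->fExtrn &= ~SKETCH_EXTRN_KEEPER_HM;
    }

    int main(void)
    {
        VMCBGUESTSKETCH Vmcb = { 0x1000, 0x202, 0x7ff0, 42 };
        GUESTCTXSKETCH  Ctx  = { 0, 0, 0, 0, SKETCH_EXTRN_ALL | SKETCH_EXTRN_KEEPER_HM };  /* As marked before VMRUN. */
        importGuestState(&Ctx, &Vmcb, SKETCH_EXTRN_RIP | SKETCH_EXTRN_RFLAGS);             /* Partial, on-demand import. */
        importGuestState(&Ctx, &Vmcb, SKETCH_EXTRN_ALL);                                   /* Import the rest (e.g. leave path). */
        printf("rip=%#llx fExtrn=%#llx\n", (unsigned long long)Ctx.rip, (unsigned long long)Ctx.fExtrn);
        return 0;
    }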
     3071/**
    27013072 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
    27023073 * context.
     
    27063077 *
    27073078 * @returns VBox status code.
    2708  * @param   pVCpu           The cross context virtual CPU structure.
    2709  * @param   pMixedCtx       Pointer to the guest-CPU or nested-guest-CPU
    2710  *                          context. The data may be out-of-sync. Make sure to
    2711  *                          update the required fields before using them.
    2712  * @param   pVmcb           Pointer to the VM control block.
    2713  */
    2714 static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PCSVMVMCB pVmcb)
     3079 * @param   pVCpu   The cross context virtual CPU structure.
     3080 * @param   pCtx    Pointer to the guest-CPU or nested-guest-CPU context. The
     3081 *                  data may be out-of-sync. Make sure to update the required
     3082 *                  fields before using them.
     3083 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
     3084 */
     3085VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
     3086{
     3087    hmR0SvmImportGuestState(pVCpu, pCtx, fWhat);
     3088    return VINF_SUCCESS;
     3089}
     3090
     3091
     3092/**
     3093 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
     3094 * context.
     3095 *
     3096 * Currently there is no residual state left in the CPU that is not updated in the
     3097 * VMCB.
     3098 *
     3099 * @returns VBox status code.
     3100 * @param   pVCpu   The cross context virtual CPU structure.
     3101 * @param   pCtx    Pointer to the guest-CPU or nested-guest-CPU context. The
     3102 *                  data may be out-of-sync. Make sure to update the required
     3103 *                  fields before using them.
     3104 * @param   pVmcb   Pointer to the VM control block.
     3105 */
     3106static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)
    27153107{
    27163108    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    27173109
    2718     pMixedCtx->rip        = pVmcb->guest.u64RIP;
    2719     pMixedCtx->rsp        = pVmcb->guest.u64RSP;
    2720     pMixedCtx->eflags.u32 = pVmcb->guest.u64RFlags;
    2721     pMixedCtx->rax        = pVmcb->guest.u64RAX;
    2722 
    2723     PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
    2724 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    2725     if (!CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx))
    2726     {
    2727         if (pVmcbCtrl->IntCtrl.n.u1VGifEnable)
    2728         {
    2729             /*
    2730              * Guest Virtual GIF (Global Interrupt Flag).
    2731              * We don't yet support passing VGIF feature to the guest.
    2732              */
    2733             Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fVGif);
    2734             pMixedCtx->hwvirt.fGif = pVmcbCtrl->IntCtrl.n.u1VGif;
    2735         }
    2736     }
    2737     else
    2738     {
    2739         /*
    2740          * Nested-guest interrupt pending.
    2741          * Sync nested-guest's V_IRQ and its force-flag.
    2742          */
    2743         if (  !pVmcbCtrl->IntCtrl.n.u1VIrqPending
    2744             && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
    2745             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
    2746     }
    2747 #endif
    2748 
    27493110    /*
    2750      * Guest interrupt shadow.
     3111     * Always import the following:
     3112     *
     3113     *   - RIP, RFLAGS, int. shadow, GIF: we need them when we evaluate
     3114     *     injecting events before re-entering guest execution.
     3115     *
     3116     *   - GPRs: Only RAX and RSP are in the VMCB. All the other GPRs are swapped
     3117     *     by the assembly switcher code. Always import these two just to simplify
     3118     *     assumptions about GPRs.
     3119     *
     3120     *   - SREG: We load them all together so we have to save all of them.
     3121     *
     3122     *   - KERNEL_GS_BASE, SYSCALL MSRs: We don't have a HM_CHANGED_GUEST flag
     3123     *     for them yet.
    27513124     */
    2752     if (pVmcbCtrl->IntShadow.n.u1IntShadow)
    2753         EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
    2754     else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    2755         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    2756 
    2757     /*
    2758      * Guest control registers: CR0, CR2, CR3 (handled at the end).
    2759      * Accesses to other control registers are always intercepted.
    2760      */
    2761     pMixedCtx->cr2 = pVmcb->guest.u64CR2;
    2762 
    2763     /* If we're not intercepting changes to CR0 TS & MP bits, sync those bits here. */
    2764     if (!(pVmcbCtrl->u16InterceptWrCRx & RT_BIT(0)))
    2765     {
    2766         pMixedCtx->cr0 = (pMixedCtx->cr0      & ~(X86_CR0_TS | X86_CR0_MP))
    2767                        | (pVmcb->guest.u64CR0 &  (X86_CR0_TS | X86_CR0_MP));
    2768     }
    2769 
    2770     /*
    2771      * Guest MSRs.
    2772      */
    2773     pMixedCtx->msrSTAR         = pVmcb->guest.u64STAR;            /* legacy syscall eip, cs & ss */
    2774     pMixedCtx->msrLSTAR        = pVmcb->guest.u64LSTAR;           /* 64-bit mode syscall rip */
    2775     pMixedCtx->msrCSTAR        = pVmcb->guest.u64CSTAR;           /* compatibility mode syscall rip */
    2776     pMixedCtx->msrSFMASK       = pVmcb->guest.u64SFMASK;          /* syscall flag mask */
    2777     pMixedCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase;    /* swapgs exchange value */
    2778     pMixedCtx->SysEnter.cs     = pVmcb->guest.u64SysEnterCS;
    2779     pMixedCtx->SysEnter.eip    = pVmcb->guest.u64SysEnterEIP;
    2780     pMixedCtx->SysEnter.esp    = pVmcb->guest.u64SysEnterESP;
    2781 
    2782     /*
    2783      * Guest segment registers (includes FS, GS base MSRs for 64-bit guests).
    2784      */
    2785     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, CS, cs);
    2786     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, SS, ss);
    2787     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, DS, ds);
    2788     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, ES, es);
    2789     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, FS, fs);
    2790     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, GS, gs);
    2791 
    2792     /*
    2793      * Correct the hidden CS granularity bit. Haven't seen it being wrong in any other
    2794      * register (yet).
    2795      */
    2796     /** @todo SELM might need to be fixed as it too should not care about the
    2797      *        granularity bit. See @bugref{6785}. */
    2798     if (   !pMixedCtx->cs.Attr.n.u1Granularity
    2799         && pMixedCtx->cs.Attr.n.u1Present
    2800         && pMixedCtx->cs.u32Limit > UINT32_C(0xfffff))
    2801     {
    2802         Assert((pMixedCtx->cs.u32Limit & 0xfff) == 0xfff);
    2803         pMixedCtx->cs.Attr.n.u1Granularity = 1;
    2804     }
    2805 
    2806     HMSVM_ASSERT_SEG_GRANULARITY(cs);
    2807     HMSVM_ASSERT_SEG_GRANULARITY(ss);
    2808     HMSVM_ASSERT_SEG_GRANULARITY(ds);
    2809     HMSVM_ASSERT_SEG_GRANULARITY(es);
    2810     HMSVM_ASSERT_SEG_GRANULARITY(fs);
    2811     HMSVM_ASSERT_SEG_GRANULARITY(gs);
    2812 
    2813     /*
    2814      * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and uses that
    2815      * and thus it's possible that when the CPL changes during guest execution that the SS DPL
    2816      * isn't updated by AMD-V. Observed on some AMD Fusion CPUs with 64-bit guests.
    2817      * See AMD spec. 15.5.1 "Basic operation".
    2818      */
    2819     Assert(!(pVmcb->guest.u8CPL & ~0x3));
    2820     uint8_t const uCpl = pVmcb->guest.u8CPL;
    2821     if (pMixedCtx->ss.Attr.n.u2Dpl != uCpl)
    2822     {
    2823         Log4(("hmR0SvmSaveGuestState: CPL differs. SS.DPL=%u, CPL=%u, overwriting SS.DPL!\n", pMixedCtx->ss.Attr.n.u2Dpl, uCpl));
    2824         pMixedCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3;
    2825     }
    2826 
    2827     /*
    2828      * Guest TR.
    2829      * Fixup TR attributes so it's compatible with Intel. Important when saved-states are used
    2830      * between Intel and AMD. See @bugref{6208#c39}.
    2831      * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode.
    2832      */
    2833     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, TR, tr);
    2834     if (pMixedCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
    2835     {
    2836         if (   pMixedCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL
    2837             || CPUMIsGuestInLongModeEx(pMixedCtx))
    2838             pMixedCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
    2839         else if (pMixedCtx->tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
    2840             pMixedCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
    2841     }
    2842 
    2843     /*
    2844      * Guest Descriptor-Table registers (GDTR, IDTR, LDTR).
    2845      */
    2846     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, LDTR, ldtr);
    2847     pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
    2848     pMixedCtx->gdtr.pGdt  = pVmcb->guest.GDTR.u64Base;
    2849 
    2850     pMixedCtx->idtr.cbIdt = pVmcb->guest.IDTR.u32Limit;
    2851     pMixedCtx->idtr.pIdt  = pVmcb->guest.IDTR.u64Base;
    2852 
    2853     /*
    2854      * Guest Debug registers.
    2855      */
    2856     if (!pVCpu->hm.s.fUsingHyperDR7)
    2857     {
    2858         pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
    2859         pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
    2860     }
    2861     else
    2862     {
    2863         Assert(pVmcb->guest.u64DR7 == CPUMGetHyperDR7(pVCpu));
    2864         CPUMSetHyperDR6(pVCpu, pVmcb->guest.u64DR6);
    2865     }
    2866 
    2867     /*
    2868      * With Nested Paging, CR3 changes are not intercepted. Therefore, sync. it now.
    2869      * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
    2870      */
    2871     if (   pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
    2872         && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
    2873     {
    2874         CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
    2875         PGMUpdateCR3(pVCpu,    pVmcb->guest.u64CR3);
    2876     }
    2877 
    2878 #ifdef VBOX_STRICT
    2879     if (CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx))
    2880         hmR0SvmLogState(pVCpu, pVmcb, pMixedCtx, "hmR0SvmSaveGuestStateNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */);
     3125    /** @todo Extend HM_CHANGED_GUEST_xxx so that we avoid saving segment
     3126     *        registers, kernel GS base and other MSRs each time. */
     3127    hmR0SvmImportGuestState(pVCpu, pCtx,   CPUMCTX_EXTRN_RIP
     3128                                         | CPUMCTX_EXTRN_SYSCALL_MSRS
     3129                                         | CPUMCTX_EXTRN_KERNEL_GS_BASE
     3130                                         | CPUMCTX_EXTRN_RFLAGS
     3131                                         | CPUMCTX_EXTRN_RAX
     3132                                         | CPUMCTX_EXTRN_SREG_MASK
     3133                                         | CPUMCTX_EXTRN_RSP
     3134                                         | CPUMCTX_EXTRN_HWVIRT
     3135                                         | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW
     3136                                         | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);
     3137
     3138#ifdef DEBUG_ramshankar
     3139    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
     3140    {
     3141        hmR0SvmImportGuestState(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
     3142        hmR0SvmLogState(pVCpu, pVmcb, pCtx, "hmR0SvmSaveGuestStateNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */);
     3143    }
     3144#else
     3145    RT_NOREF(pVmcb);
    28813146#endif
    28823147}
     
    28873152 * (longjmp, preemption, voluntary exits to ring-3) from AMD-V.
    28883153 *
    2889  * @param   pVCpu       The cross context virtual CPU structure.
     3154 * @param   pVCpu           The cross context virtual CPU structure.
     3155 * @param   fImportState    Whether to import the guest state from the VMCB back
     3156 *                          to the guest-CPU context.
    28903157 *
    28913158 * @remarks No-long-jmp zone!!!
    28923159 */
    2893 static void hmR0SvmLeave(PVMCPU pVCpu)
     3160static void hmR0SvmLeave(PVMCPU pVCpu, bool fImportState)
    28943161{
    28953162    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    29023169     */
    29033170
     3171    /* Save the guest state if necessary. */
     3172    if (fImportState)
     3173        hmR0SvmImportGuestState(pVCpu, &pVCpu->cpum.GstCtx, HMSVM_CPUMCTX_EXTRN_ALL);
     3174
    29043175    /* Restore host FPU state if necessary and resync on next R0 reentry. */
    2905     if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu))
    2906         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); /** @todo r=ramshankar: This shouldn't be necessary, it's set in HMR0EnterCpu. */
     3176    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
    29073177
    29083178    /*
     
    29173187    }
    29183188#endif
    2919     if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
    2920         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);/** @todo r=ramshankar: This shouldn't be necessary, it's set in HMR0EnterCpu. */
     3189    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
    29213190
    29223191    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     
    29363205 * Leaves the AMD-V session.
    29373206 *
     3207 * Only used while returning to ring-3, either due to a longjmp or an ordinary
     3208 * exit to ring-3.
     3209 *
    29383210 * @returns VBox status code.
    29393211 * @param   pVCpu       The cross context virtual CPU structure.
     
    29493221    if (!pVCpu->hm.s.fLeaveDone)
    29503222    {
    2951         hmR0SvmLeave(pVCpu);
     3223        hmR0SvmLeave(pVCpu, true /* fImportState */);
    29523224        pVCpu->hm.s.fLeaveDone = true;
    29533225    }
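hmR0SvmLeave() now takes an fImportState parameter: the thread-context-hook (preemption) path in the earlier hunk passes false and defers the import, while hmR0SvmLeaveSession() here passes true so the complete guest context is pulled out of the VMCB before ring-3 code may look at it. A rough compilable sketch of that split, reusing the simplified fExtrn bookkeeping from the earlier sketch; the helper names are hypothetical, not the real VirtualBox API:

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_EXTRN_ALL  UINT64_C(0x00ff)   /* Stand-in for HMSVM_CPUMCTX_EXTRN_ALL. */

    /* Pull everything still marked external out of the (stand-in) VMCB. */
    static void importAllGuestState(uint64_t *pfExtrn)
    {
        *pfExtrn &= ~SKETCH_EXTRN_ALL;
    }

    /* Common leave work: optionally import the guest state first so ring-3 can
       inspect a complete context; host FPU/debug state restore would follow. */
    static void svmLeaveSketch(uint64_t *pfExtrn, bool fImportState)
    {
        if (fImportState)                 /* Ring-3/longjmp path: ring-3 may read any register.          */
            importAllGuestState(pfExtrn);
        /* else: preemption path, the import is deferred until somebody asks for the state. */
    }

    int main(void)
    {
        uint64_t fExtrn = SKETCH_EXTRN_ALL;
        svmLeaveSketch(&fExtrn, true /* fImportState */);
        return (int)fExtrn;               /* 0: everything imported. */
    }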
     
    31293401 * @remarks No-long-jump zone!!!
    31303402 */
    3131 static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
     3403static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
    31323404{
    31333405    /*
     
    32493521
    32503522    /* Update CR2 of the guest. */
     3523    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR2);
    32513524    if (pCtx->cr2 != uFaultAddress)
    32523525    {
     
    33003573 * @param   pVCpu       The cross context virtual CPU structure.
    33013574 * @param   pVmcb       Pointer to the guest VM control block.
    3302  * @param   pCtx        Pointer to the guest-CPU context.
    33033575 * @param   pEvent      Pointer to the event.
    33043576 *
     
    33063578 * @remarks Requires CR0!
    33073579 */
    3308 DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
    3309 {
    3310     NOREF(pVCpu); NOREF(pCtx);
    3311 
     3580DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMEVENT pEvent)
     3581{
    33123582    Assert(!pVmcb->ctrl.EventInject.n.u1Valid);
    33133583    pVmcb->ctrl.EventInject.u = pEvent->u;
     
    34423712 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
    34433713 */
    3444 DECLINLINE(bool) hmR0SvmIsIntrShadowActive(PVMCPU pVCpu, PCPUMCTX pCtx)
     3714DECLINLINE(bool) hmR0SvmIsIntrShadowActive(PVMCPU pVCpu, PCCPUMCTX pCtx)
    34453715{
    34463716    /*
     
    35503820{
    35513821    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
     3822    HMSVM_CPUMCTX_ASSERT(pVCpu,   CPUMCTX_EXTRN_HWVIRT
     3823                                | CPUMCTX_EXTRN_RFLAGS
     3824                                | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW
     3825                                | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);
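The HMSVM_CPUMCTX_ASSERT() added here documents, and in strict builds enforces, which pieces of guest state the interrupt-evaluation code relies on having been imported already. In terms of the simplified fExtrn bookkeeping sketched earlier it amounts to roughly the following check (stand-in code, not the real macro):

    #include <assert.h>
    #include <stdint.h>

    /* Fails if any required piece of state is still marked external, i.e. lives only in the VMCB. */
    static void assertCtxImported(uint64_t fExtrn, uint64_t fNeeded)
    {
        assert(!(fExtrn & fNeeded));
        (void)fExtrn; (void)fNeeded;   /* Keep NDEBUG builds warning-free. */
    }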
    35523826
    35533827    Assert(!pVCpu->hm.s.Event.fPending);
     
    35603834    bool const fBlockNmi   = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
    35613835
    3562     Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool Intr. pending=%RTbool NMI pending=%RTbool\n",
     3836    Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntrPending=%RTbool fNmiPending=%RTbool\n",
    35633837              fVirtualGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
    35643838              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
     
    35803854            {
    35813855                Log4(("Intercepting NMI -> #VMEXIT\n"));
     3856                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
    35823857                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
    35833858            }
     
    36203895            {
    36213896                Log4(("Intercepting INTR -> #VMEXIT\n"));
     3897                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
    36223898                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
    36233899            }
     
    36663942{
    36673943    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     3944    HMSVM_CPUMCTX_ASSERT(pVCpu,   CPUMCTX_EXTRN_HWVIRT
     3945                                | CPUMCTX_EXTRN_RFLAGS
     3946                                | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW);
     3947
    36683948    Assert(!pVCpu->hm.s.Event.fPending);
    36693949    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     
    37644044 *          prematurely.
    37654045 */
    3766 static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
     4046static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMVMCB pVmcb)
    37674047{
    37684048    Assert(!TRPMHasTrap(pVCpu));
     
    38254105         */
    38264106        Log4(("Injecting pending HM event\n"));
    3827         hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
     4107        hmR0SvmInjectEventVmcb(pVCpu, pVmcb, &Event);
    38284108        pVCpu->hm.s.Event.fPending = false;
    38294109
     
    38694149    HMSVM_ASSERT_PREEMPT_SAFE();
    38704150    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     4151    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
     4152
    38714153    PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    3872 
    38734154    if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
    38744155    {
     
    40274308{
    40284309    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    4029 
    4030     /* On AMD-V we don't need to update CR3, PAE PDPES lazily. See hmR0SvmSaveGuestState(). */
     4310    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
    40314311    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    4032     Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
     4312
     4313    /* Could happen as a result of longjump. */
     4314    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     4315    {
     4316        Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CR3));
     4317        PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
     4318    }
    40334319
    40344320    /* Update pending interrupts into the APIC's IRR. */
     
    41574443
    41584444    /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware-assisted SVM. */
    4159     Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
     4445    Assert(pVCpu->hm.s.svm.NstGstVmcbCache.fCacheValid);
    41604446
    41614447    /*
     
    42604546    if (pVCpu->hm.s.svm.fSyncVTpr)
    42614547    {
     4548        PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    42624549        if (pVM->hm.s.fTPRPatchingActive)
    4263             pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
     4550            pSvmTransient->u8GuestTpr = pVmcb->guest.u64LSTAR;
    42644551        else
    4265         {
    4266             PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
    42674552            pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
    4268         }
    42694553    }
    42704554
     
    43234607 * @remarks No-long-jump zone!!!
    43244608 */
    4325 static void hmR0SvmPreRunGuestCommitted(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     4609static void hmR0SvmPreRunGuestCommitted(PVMCPU pVCpu, PCCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    43264610{
    43274611    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     
    44554739DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    44564740{
     4741    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
     4742    pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
     4743
    44574744    /*
    44584745     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
     
    44714758#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    44724759/**
    4473  * Undoes the TSC offset applied for an SVM nested-guest and returns the TSC
    4474  * value for the guest.
    4475  *
    4476  * @returns The TSC offset after undoing any nested-guest TSC offset.
    4477  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    4478  * @param   uTicks      The nested-guest TSC.
    4479  *
    4480  * @note    If you make any changes to this function, please check if
    4481  *          hmR0SvmNstGstUndoTscOffset() needs adjusting.
    4482  *
    4483  * @sa      HMSvmNstGstApplyTscOffset().
    4484  */
    4485 DECLINLINE(uint64_t) hmR0SvmNstGstUndoTscOffset(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uTicks)
    4486 {
    4487     Assert(pCtx->hwvirt.svm.fHMCachedVmcb); RT_NOREF(pCtx);
    4488     PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    4489     return uTicks - pVmcbNstGstCache->u64TSCOffset;
    4490 }
    4491 
    4492 
    4493 /**
    44944760 * Wrapper for running the nested-guest code in AMD-V.
    44954761 *
     
    45034769DECLINLINE(int) hmR0SvmRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    45044770{
     4771    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
     4772    pCtx->fExtrn |= HMSVM_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
     4773
    45054774    /*
    45064775     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
     
    45154784#endif
    45164785}
     4786
     4787
     4788/**
     4789 * Undoes the TSC offset applied for an SVM nested-guest and returns the TSC
     4790 * value for the guest.
     4791 *
     4792 * @returns The TSC offset after undoing any nested-guest TSC offset.
     4793 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     4794 * @param   uTicks      The nested-guest TSC.
     4795 *
     4796 * @note    If you make any changes to this function, please check if
     4797 *          hmR0SvmNstGstUndoTscOffset() needs adjusting.
     4798 *
     4799 * @sa      HMSvmNstGstApplyTscOffset().
     4800 */
     4801DECLINLINE(uint64_t) hmR0SvmNstGstUndoTscOffset(PVMCPU pVCpu, uint64_t uTicks)
     4802{
     4803    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     4804    Assert(pVmcbNstGstCache->fCacheValid);
     4805    return uTicks - pVmcbNstGstCache->u64TSCOffset;
     4806}
    45174807#endif
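hmR0SvmNstGstUndoTscOffset() no longer takes the guest context; the cached u64TSCOffset in the nested-VMCB cache is all it needs. The arithmetic is straightforward: the nested-guest reads host TSC plus the combined (guest + nested-guest) offset programmed into the nested-guest VMCB, so subtracting the cached nested-guest offset again gives the value the outer guest should see, which hmR0SvmPostRunGuest() then feeds to TMCpuTickSetLastSeen(). A standalone sketch of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* What the nested-guest observes via RDTSC: host TSC plus the combined offset. */
    static uint64_t applyNstGstTscOffset(uint64_t uHostTsc, uint64_t uGstOffset, uint64_t uNstGstOffset)
    {
        return uHostTsc + uGstOffset + uNstGstOffset;
    }

    /* Undo only the nested-guest part to get back to the (outer) guest's view. */
    static uint64_t undoNstGstTscOffset(uint64_t uTicks, uint64_t uNstGstOffsetCached)
    {
        return uTicks - uNstGstOffsetCached;
    }

    int main(void)
    {
        uint64_t const uSeen = applyNstGstTscOffset(1000000, 500, 42);
        printf("nested-guest TSC=%llu outer-guest TSC=%llu\n",
               (unsigned long long)uSeen, (unsigned long long)undoNstGstTscOffset(uSeen, 42));
        return 0;
    }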
    45184808
     
    45224812 *
    45234813 * @param   pVCpu           The cross context virtual CPU structure.
    4524  * @param   pMixedCtx       Pointer to the guest-CPU context. The data maybe
     4814 * @param   pCtx            Pointer to the guest-CPU context. The data may be
    45254815 *                          out-of-sync. Make sure to update the required fields
    45264816 *                          before using them.
     
    45324822 *          unconditionally when it is safe to do so.
    45334823 */
    4534 static void hmR0SvmPostRunGuest(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
     4824static void hmR0SvmPostRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
    45354825{
    45364826    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     
    45524842        {
    45534843            /* The nested-guest VMCB TSC offset shall eventually be restored on #VMEXIT via HMSvmNstGstVmExitNotify(). */
    4554             uint64_t const uGstTsc = hmR0SvmNstGstUndoTscOffset(pVCpu, pMixedCtx, uHostTsc + pVmcbCtrl->u64TSCOffset);
     4844            uint64_t const uGstTsc = hmR0SvmNstGstUndoTscOffset(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
    45554845            TMCpuTickSetLastSeen(pVCpu, uGstTsc);
    45564846        }
     
    45814871    }
    45824872
    4583     pSvmTransient->u64ExitCode  = pVmcbCtrl->u64ExitCode;      /* Save the #VMEXIT reason. */
     4873    pSvmTransient->u64ExitCode        = pVmcbCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
    45844874    pVmcbCtrl->u32VmcbCleanBits       = HMSVM_VMCB_CLEAN_ALL;   /* Mark the VMCB-state cache as unmodified by VMM. */
    45854875    pSvmTransient->fVectoringDoublePF = false;                  /* Vectoring double page-fault needs to be determined later. */
    45864876    pSvmTransient->fVectoringPF       = false;                  /* Vectoring page-fault needs to be determined later. */
    45874877
    4588     hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcb);             /* Save the guest state from the VMCB to the guest-CPU context. */
     4878    hmR0SvmSaveGuestState(pVCpu, pCtx, pVmcb);                  /* Save the guest state from the VMCB to the guest-CPU context. */
    45894879
    45904880    if (   pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
     
    45944884        /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
    45954885        if (   pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive
    4596             && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
    4597         {
    4598             int rc = APICSetTpr(pVCpu, pMixedCtx->msrLSTAR & 0xff);
     4886            && (pVmcb->guest.u64LSTAR & 0xff) != pSvmTransient->u8GuestTpr)
     4887        {
     4888            int rc = APICSetTpr(pVCpu, pVmcb->guest.u64LSTAR & 0xff);
    45994889            AssertRC(rc);
    46004890            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
     
    46094899    }
    46104900
     4901    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    46114902    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK),
    4612                      pMixedCtx->cs.u64Base + pMixedCtx->rip, uHostTsc);
     4903                     pCtx->cs.u64Base + pCtx->rip, uHostTsc);
    46134904}
    46144905
     
    48585149
    48595150            /* Invalid nested-guest state. Cause a #VMEXIT but assert on strict builds. */
     5151            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    48605152            AssertMsgFailed(("Invalid nested-guest state. rc=%Rrc u64ExitCode=%#RX64\n", rc, SvmTransient.u64ExitCode));
    48615153            rc = VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0, 0));
     
    49795271    Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
    49805272
    4981 #define HM_SVM_VMEXIT_NESTED(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
    4982             VBOXSTRICTRC_TODO(IEMExecSvmVmexit(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2))
     5273    /** @todo Use IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK instead of
     5274     *        HMSVM_CPUMCTX_EXTRN_ALL below. See todo in
     5275     *        HMSvmNstGstVmExitNotify(). */
     5276#define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_pCtx, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     5277    do { \
     5278        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); \
     5279        return VBOXSTRICTRC_TODO(IEMExecSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2))); \
     5280    } while (0)
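Compared with the old HM_SVM_VMEXIT_NESTED macro, NST_GST_VMEXIT_CALL_RET additionally imports the full guest state first, since IEMExecSvmVmexit() needs an up-to-date context to synthesize the nested #VMEXIT. Every case in the switch below then follows the same shape: if the nested-guest's VMCB set the intercept, reflect the exit into the nested hypervisor; otherwise handle it as an ordinary guest exit. A compact self-contained sketch of that dispatch shape, with simplified stand-in names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int (*PFNOUTEREXITHANDLER)(void);

    /* Stand-in for importing the context and calling IEMExecSvmVmexit(). */
    static int reflectVmexitToNestedHypervisor(uint64_t uExitCode, uint64_t uInfo1, uint64_t uInfo2)
    {
        printf("nested #VMEXIT: code=%#llx info1=%#llx info2=%#llx\n",
               (unsigned long long)uExitCode, (unsigned long long)uInfo1, (unsigned long long)uInfo2);
        return 0;
    }

    static int handleCpuidExitAsOuterGuest(void) { return 0; }

    /* The per-exit-code pattern used by the switch: reflect if intercepted, else handle as outer guest. */
    static int handleNestedExit(bool fNstGstInterceptSet, uint64_t uExitCode, uint64_t uInfo1,
                                uint64_t uInfo2, PFNOUTEREXITHANDLER pfnOuterHandler)
    {
        if (fNstGstInterceptSet)
            return reflectVmexitToNestedHypervisor(uExitCode, uInfo1, uInfo2);
        return pfnOuterHandler();
    }

    int main(void)
    {
        /* E.g. a CPUID exit intercepted by the nested-guest gets reflected. */
        return handleNestedExit(true, 0x72, 0, 0, handleCpuidExitAsOuterGuest);
    }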
    49835281
    49845282    /*
     
    49865284     * by the nested-guest. If it isn't, it should be handled by the (outer) guest.
    49875285     */
    4988     PSVMVMCB            pVmcbNstGst      = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
    4989     PSVMVMCBCTRL        pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
    4990     uint64_t const      uExitCode        = pVmcbNstGstCtrl->u64ExitCode;
    4991     uint64_t const      uExitInfo1       = pVmcbNstGstCtrl->u64ExitInfo1;
    4992     uint64_t const      uExitInfo2       = pVmcbNstGstCtrl->u64ExitInfo2;
     5286    PSVMVMCB       pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     5287    PSVMVMCBCTRL   pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
     5288    uint64_t const uExitCode       = pVmcbNstGstCtrl->u64ExitCode;
     5289    uint64_t const uExitInfo1      = pVmcbNstGstCtrl->u64ExitInfo1;
     5290    uint64_t const uExitInfo2      = pVmcbNstGstCtrl->u64ExitInfo2;
    49935291
    49945292    Assert(uExitCode == pVmcbNstGstCtrl->u64ExitCode);
     
    49975295        case SVM_EXIT_CPUID:
    49985296        {
    4999             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CPUID))
    5000                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5297            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
     5298                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    50015299            return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);
    50025300        }
     
    50045302        case SVM_EXIT_RDTSC:
    50055303        {
    5006             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSC))
    5007                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5304            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
     5305                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    50085306            return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
    50095307        }
     
    50115309        case SVM_EXIT_RDTSCP:
    50125310        {
    5013             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDTSCP))
    5014                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5311            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
     5312                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    50155313            return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
    50165314        }
     
    50185316        case SVM_EXIT_MONITOR:
    50195317        {
    5020             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MONITOR))
    5021                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5318            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
     5319                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    50225320            return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
    50235321        }
     
    50255323        case SVM_EXIT_MWAIT:
    50265324        {
    5027             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MWAIT))
    5028                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5325            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
     5326                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    50295327            return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
    50305328        }
     
    50325330        case SVM_EXIT_HLT:
    50335331        {
    5034             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_HLT))
    5035                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5332            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_HLT))
     5333                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    50365334            return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient);
    50375335        }
     
    50395337        case SVM_EXIT_MSR:
    50405338        {
    5041             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_MSR_PROT))
     5339            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
    50425340            {
    50435341                uint32_t const idMsr = pCtx->ecx;
     
    50585356                        || (fInterceptRead  && pVmcbNstGstCtrl->u64ExitInfo1 == SVM_EXIT1_MSR_READ))
    50595357                    {
    5060                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5358                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    50615359                    }
    50625360                }
     
    50685366                     */
    50695367                    Assert(rc == VERR_OUT_OF_RANGE);
    5070                     return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5368                    NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    50715369                }
    50725370            }
     
    50765374        case SVM_EXIT_IOIO:
    50775375        {
    5078             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT))
     5376            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
    50795377            {
    50805378                void *pvIoBitmap = pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);
     
    50835381                bool const fIntercept = hmR0SvmIsIoInterceptActive(pvIoBitmap, &IoExitInfo);
    50845382                if (fIntercept)
    5085                     return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5383                    NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    50865384            }
    50875385            return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
     
    50975395
    50985396                /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
    5099                 if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
    5100                     return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, u32ErrCode, uFaultAddress);
     5397                if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF))
     5398                    NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, u32ErrCode, uFaultAddress);
    51015399
    51025400                /* If the nested-guest is not intercepting #PFs, forward the #PF to the guest. */
     5401                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR2);
    51035402                hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress);
    51045403                return VINF_SUCCESS;
     
    51095408        case SVM_EXIT_XCPT_UD:
    51105409        {
    5111             if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_UD))
    5112                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5410            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_UD))
     5411                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51135412            hmR0SvmSetPendingXcptUD(pVCpu);
    51145413            return VINF_SUCCESS;
     
    51175416        case SVM_EXIT_XCPT_MF:
    51185417        {
    5119             if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_MF))
    5120                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5418            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_MF))
     5419                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51215420            return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
    51225421        }
     
    51245423        case SVM_EXIT_XCPT_DB:
    51255424        {
    5126             if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_DB))
    5127                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5425            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_DB))
     5426                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51285427            return hmR0SvmNestedExitXcptDB(pVCpu, pCtx, pSvmTransient);
    51295428        }
     
    51315430        case SVM_EXIT_XCPT_AC:
    51325431        {
    5133             if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_AC))
    5134                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5432            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_AC))
     5433                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51355434            return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient);
    51365435        }
     
    51385437        case SVM_EXIT_XCPT_BP:
    51395438        {
    5140             if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_BP))
    5141                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5439            if (HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_BP))
     5440                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51425441            return hmR0SvmNestedExitXcptBP(pVCpu, pCtx, pSvmTransient);
    51435442        }
     
    51485447        {
    51495448            uint8_t const uCr = uExitCode - SVM_EXIT_READ_CR0;
    5150             if (HMIsGuestSvmReadCRxInterceptSet(pVCpu, pCtx, uCr))
    5151                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5449            if (HMIsGuestSvmReadCRxInterceptSet(pVCpu, uCr))
     5450                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51525451            return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
    51535452        }
     
    51555454        case SVM_EXIT_CR0_SEL_WRITE:
    51565455        {
    5157             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
    5158                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5456            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
     5457                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51595458            return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
    51605459        }
     
    51685467            Log4(("hmR0SvmHandleExitNested: Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));
    51695468
    5170             if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, pCtx, uCr))
    5171                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5469            if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr))
     5470                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51725471            return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
    51735472        }
     
    51755474        case SVM_EXIT_PAUSE:
    51765475        {
    5177             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_PAUSE))
    5178                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5476            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
     5477                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51795478            return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient);
    51805479        }
     
    51825481        case SVM_EXIT_VINTR:
    51835482        {
    5184             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
    5185                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5483            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VINTR))
     5484                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    51865485            return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
    51875486        }
     
    51895488        case SVM_EXIT_INTR:
    51905489        case SVM_EXIT_NMI:
    5191         case SVM_EXIT_XCPT_NMI: /* Shouldn't ever happen, SVM_EXIT_NMI is used instead. */
    51925490        case SVM_EXIT_SMI:
     5491        case SVM_EXIT_XCPT_NMI:     /* Should not occur, SVM_EXIT_NMI is used instead. */
    51935492        {
    51945493            /*
     
    52045503        case SVM_EXIT_FERR_FREEZE:
    52055504        {
    5206             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_FERR_FREEZE))
    5207                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5505            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_FERR_FREEZE))
     5506                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    52085507            return hmR0SvmExitFerrFreeze(pVCpu, pCtx, pSvmTransient);
    52095508        }
     
    52115510        case SVM_EXIT_INVLPG:
    52125511        {
    5213             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPG))
    5214                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5512            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
     5513                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    52155514            return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
    52165515        }
     
    52185517        case SVM_EXIT_WBINVD:
    52195518        {
    5220             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_WBINVD))
    5221                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5519            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_WBINVD))
     5520                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    52225521            return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
    52235522        }
     
    52255524        case SVM_EXIT_INVD:
    52265525        {
    5227             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVD))
    5228                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5526            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVD))
     5527                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    52295528            return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
    52305529        }
     
    52325531        case SVM_EXIT_RDPMC:
    52335532        {
    5234             if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RDPMC))
    5235                 return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5533            if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
     5534                NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    52365535            return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
    52375536        }
     
    52475546                {
    52485547                    uint8_t const uDr = uExitCode - SVM_EXIT_READ_DR0;
    5249                     if (HMIsGuestSvmReadDRxInterceptSet(pVCpu, pCtx, uDr))
    5250                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5548                    if (HMIsGuestSvmReadDRxInterceptSet(pVCpu, uDr))
     5549                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    52515550                    return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
    52525551                }
     
    52585557                {
    52595558                    uint8_t const uDr = uExitCode - SVM_EXIT_WRITE_DR0;
    5260                     if (HMIsGuestSvmWriteDRxInterceptSet(pVCpu, pCtx, uDr))
    5261                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5559                    if (HMIsGuestSvmWriteDRxInterceptSet(pVCpu, uDr))
     5560                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    52625561                    return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
    52635562                }
     
    52885587                {
    52895588                    uint8_t const uVector = uExitCode - SVM_EXIT_XCPT_0;
    5290                     if (HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, uVector))
    5291                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5589                    if (HMIsGuestSvmXcptInterceptSet(pVCpu, uVector))
     5590                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    52925591                    return hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient);
    52935592                }
     
    52955594                case SVM_EXIT_XSETBV:
    52965595                {
    5297                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_XSETBV))
    5298                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5596                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
     5597                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    52995598                    return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient);
    53005599                }
     
    53025601                case SVM_EXIT_TASK_SWITCH:
    53035602                {
    5304                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_TASK_SWITCH))
    5305                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5603                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
     5604                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53065605                    return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
    53075606                }
     
    53095608                case SVM_EXIT_IRET:
    53105609                {
    5311                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_IRET))
    5312                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5610                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_IRET))
     5611                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53135612                    return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
    53145613                }
     
    53165615                case SVM_EXIT_SHUTDOWN:
    53175616                {
    5318                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN))
    5319                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5617                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
     5618                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53205619                    return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
    53215620                }
     
    53235622                case SVM_EXIT_VMMCALL:
    53245623                {
    5325                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMMCALL))
    5326                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5624                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
     5625                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53275626                    return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
    53285627                }
     
    53305629                case SVM_EXIT_CLGI:
    53315630                {
    5332                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CLGI))
    5333                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5631                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
     5632                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53345633                     return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
    53355634                }
     
    53375636                case SVM_EXIT_STGI:
    53385637                {
    5339                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_STGI))
    5340                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5638                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_STGI))
     5639                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53415640                     return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
    53425641                }
     
    53445643                case SVM_EXIT_VMLOAD:
    53455644                {
    5346                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMLOAD))
    5347                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5645                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
     5646                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53485647                    return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient);
    53495648                }
     
    53515650                case SVM_EXIT_VMSAVE:
    53525651                {
    5353                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMSAVE))
    5354                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5652                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
     5653                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53555654                    return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);
    53565655                }
     
    53585657                case SVM_EXIT_INVLPGA:
    53595658                {
    5360                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INVLPGA))
    5361                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5659                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
     5660                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53625661                    return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient);
    53635662                }
     
    53655664                case SVM_EXIT_VMRUN:
    53665665                {
    5367                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VMRUN))
    5368                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5666                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
     5667                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53695668                    return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient);
    53705669                }
     
    53725671                case SVM_EXIT_RSM:
    53735672                {
    5374                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_RSM))
    5375                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5673                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_RSM))
     5674                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53765675                    hmR0SvmSetPendingXcptUD(pVCpu);
    53775676                    return VINF_SUCCESS;
     
    53805679                case SVM_EXIT_SKINIT:
    53815680                {
    5382                     if (HMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SKINIT))
    5383                         return HM_SVM_VMEXIT_NESTED(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
     5681                    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
     5682                        NST_GST_VMEXIT_CALL_RET(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);
    53845683                    hmR0SvmSetPendingXcptUD(pVCpu);
    53855684                    return VINF_SUCCESS;
     
    54065705    /* not reached */
    54075706
    5408 #undef HM_SVM_VMEXIT_NESTED
     5707#undef NST_GST_VMEXIT_CALL_RET
    54095708}
    54105709#endif
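The nested-guest dispatch above applies the same decision to every #VMEXIT reason: each case first consults the nested-guest's intercept settings (through the HM-cached VMCB state, per the HMIsGuestSvm*InterceptSet calls) and reflects the exit into the nested guest with NST_GST_VMEXIT_CALL_RET when the intercept is set, otherwise the ordinary ring-0 handler runs. A minimal standalone sketch of that decision follows; the type, bit value and function names are illustrative placeholders, not the real VirtualBox definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder for one nested-guest VMCB control-intercept bit (value is made up). */
    #define NESTED_INTERCEPT_RDTSC   (UINT64_C(1) << 14)

    typedef struct { uint64_t fCtrlIntercepts; } NESTEDVMCB;

    /* Reflect the exit to the nested hypervisor if it asked for it, else handle it in ring-0. */
    static const char *dispatchRdtscExit(NESTEDVMCB const *pVmcb)
    {
        if (pVmcb->fCtrlIntercepts & NESTED_INTERCEPT_RDTSC)
            return "#VMEXIT reflected to the nested guest";
        return "RDTSC handled by the host VMM";
    }

    int main(void)
    {
        NESTEDVMCB Vmcb = { NESTED_INTERCEPT_RDTSC };
        puts(dispatchRdtscExit(&Vmcb));        /* nested guest intercepts RDTSC */
        Vmcb.fCtrlIntercepts = 0;
        puts(dispatchRdtscExit(&Vmcb));        /* nested guest does not care, host handles it */
        return 0;
    }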
     
    54245723    Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);
    54255724
     5725#ifdef DEBUG_ramshankar
     5726# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) \
     5727        do { \
     5728            if ((a_fDbg) == 1) \
     5729                HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); \
     5730            int rc = a_CallExpr; \
     5731            /* if ((a_fDbg) == 1) */ \
     5732            /*     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); */ \
     5733            return rc; \
     5734        } while (0)
     5735#else
     5736# define VMEXIT_CALL_RET(a_fDbg, a_CallExpr) return a_CallExpr
     5737#endif
     5738
    54265739    /*
    5427      * The ordering of the case labels is based on most-frequently-occurring #VMEXITs for most guests under
    5428      * normal workloads (for some definition of "normal").
     5740     * The ordering of the case labels is based on most-frequently-occurring #VMEXITs
     5741     * for most guests under normal workloads (for some definition of "normal").
    54295742     */
    54305743    uint64_t const uExitCode = pSvmTransient->u64ExitCode;
    54315744    switch (uExitCode)
    54325745    {
    5433         case SVM_EXIT_NPF:
    5434             return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
    5435 
    5436         case SVM_EXIT_IOIO:
    5437             return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);
    5438 
    5439         case SVM_EXIT_RDTSC:
    5440             return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
    5441 
    5442         case SVM_EXIT_RDTSCP:
    5443             return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
    5444 
    5445         case SVM_EXIT_CPUID:
    5446             return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);
    5447 
    5448         case SVM_EXIT_XCPT_14:  /* X86_XCPT_PF */
    5449             return hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient);
    5450 
    5451         case SVM_EXIT_XCPT_6:   /* X86_XCPT_UD */
    5452             return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
    5453 
    5454         case SVM_EXIT_XCPT_16:  /* X86_XCPT_MF */
    5455             return hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient);
    5456 
    5457         case SVM_EXIT_XCPT_1:   /* X86_XCPT_DB */
    5458             return hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient);
    5459 
    5460         case SVM_EXIT_XCPT_17:  /* X86_XCPT_AC */
    5461             return hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient);
    5462 
    5463         case SVM_EXIT_XCPT_3:   /* X86_XCPT_BP */
    5464             return hmR0SvmExitXcptBP(pVCpu, pCtx, pSvmTransient);
    5465 
    5466         case SVM_EXIT_MONITOR:
    5467             return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
    5468 
    5469         case SVM_EXIT_MWAIT:
    5470             return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
    5471 
    5472         case SVM_EXIT_HLT:
    5473             return hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient);
     5746        case SVM_EXIT_NPF:          VMEXIT_CALL_RET(0, hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient));
     5747        case SVM_EXIT_IOIO:         VMEXIT_CALL_RET(0, hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient));
     5748        case SVM_EXIT_RDTSC:        VMEXIT_CALL_RET(0, hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient));
     5749        case SVM_EXIT_RDTSCP:       VMEXIT_CALL_RET(0, hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient));
     5750        case SVM_EXIT_CPUID:        VMEXIT_CALL_RET(0, hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient));
     5751        case SVM_EXIT_XCPT_PF:      VMEXIT_CALL_RET(0, hmR0SvmExitXcptPF(pVCpu, pCtx, pSvmTransient));
     5752        case SVM_EXIT_MSR:          VMEXIT_CALL_RET(0, hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient));
     5753        case SVM_EXIT_MONITOR:      VMEXIT_CALL_RET(0, hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient));
     5754        case SVM_EXIT_MWAIT:        VMEXIT_CALL_RET(0, hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient));
     5755        case SVM_EXIT_HLT:          VMEXIT_CALL_RET(0, hmR0SvmExitHlt(pVCpu, pCtx, pSvmTransient));
     5756
     5757        case SVM_EXIT_XCPT_NMI:     /* Should not occur, SVM_EXIT_NMI is used instead. */
     5758        case SVM_EXIT_INTR:
     5759        case SVM_EXIT_NMI:          VMEXIT_CALL_RET(0, hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient));
    54745760
    54755761        case SVM_EXIT_READ_CR0:
    54765762        case SVM_EXIT_READ_CR3:
    5477         case SVM_EXIT_READ_CR4:
    5478             return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
     5763        case SVM_EXIT_READ_CR4:     VMEXIT_CALL_RET(0, hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient));
    54795764
    54805765        case SVM_EXIT_CR0_SEL_WRITE:
     
    54825767        case SVM_EXIT_WRITE_CR3:
    54835768        case SVM_EXIT_WRITE_CR4:
    5484         case SVM_EXIT_WRITE_CR8:
    5485         {
    5486             uint8_t const uCr = uExitCode == SVM_EXIT_CR0_SEL_WRITE ? 0 : uExitCode - SVM_EXIT_WRITE_CR0;
    5487             Log4(("hmR0SvmHandleExit: Write CR%u\n", uCr)); NOREF(uCr);
    5488             return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
    5489         }
    5490 
    5491         case SVM_EXIT_PAUSE:
    5492             return hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient);
    5493 
    5494         case SVM_EXIT_VMMCALL:
    5495             return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
    5496 
    5497         case SVM_EXIT_VINTR:
    5498             return hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient);
    5499 
    5500         case SVM_EXIT_FERR_FREEZE:
    5501             return hmR0SvmExitFerrFreeze(pVCpu, pCtx, pSvmTransient);
    5502 
    5503         case SVM_EXIT_INTR:
    5504         case SVM_EXIT_NMI:
    5505         case SVM_EXIT_XCPT_NMI: /* Shouldn't ever happen, SVM_EXIT_NMI is used instead. */
    5506             return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
    5507 
    5508         case SVM_EXIT_MSR:
    5509             return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);
    5510 
    5511         case SVM_EXIT_INVLPG:
    5512             return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
    5513 
    5514         case SVM_EXIT_WBINVD:
    5515             return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);
    5516 
    5517         case SVM_EXIT_INVD:
    5518             return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient);
    5519 
    5520         case SVM_EXIT_RDPMC:
    5521             return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
     5769        case SVM_EXIT_WRITE_CR8:    VMEXIT_CALL_RET(0, hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient));
     5770
     5771        case SVM_EXIT_VINTR:        VMEXIT_CALL_RET(0, hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient));
     5772        case SVM_EXIT_PAUSE:        VMEXIT_CALL_RET(0, hmR0SvmExitPause(pVCpu, pCtx, pSvmTransient));
     5773        case SVM_EXIT_VMMCALL:      VMEXIT_CALL_RET(0, hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient));
     5774        case SVM_EXIT_INVLPG:       VMEXIT_CALL_RET(0, hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient));
     5775        case SVM_EXIT_WBINVD:       VMEXIT_CALL_RET(0, hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient));
     5776        case SVM_EXIT_INVD:         VMEXIT_CALL_RET(0, hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient));
     5777        case SVM_EXIT_RDPMC:        VMEXIT_CALL_RET(0, hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient));
     5778        case SVM_EXIT_IRET:         VMEXIT_CALL_RET(0, hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient));
     5779        case SVM_EXIT_XCPT_UD:      VMEXIT_CALL_RET(0, hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient));
     5780        case SVM_EXIT_XCPT_MF:      VMEXIT_CALL_RET(0, hmR0SvmExitXcptMF(pVCpu, pCtx, pSvmTransient));
     5781        case SVM_EXIT_XCPT_DB:      VMEXIT_CALL_RET(0, hmR0SvmExitXcptDB(pVCpu, pCtx, pSvmTransient));
     5782        case SVM_EXIT_XCPT_AC:      VMEXIT_CALL_RET(0, hmR0SvmExitXcptAC(pVCpu, pCtx, pSvmTransient));
     5783        case SVM_EXIT_XCPT_BP:      VMEXIT_CALL_RET(0, hmR0SvmExitXcptBP(pVCpu, pCtx, pSvmTransient));
     5784        case SVM_EXIT_XSETBV:       VMEXIT_CALL_RET(0, hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient));
     5785        case SVM_EXIT_FERR_FREEZE:  VMEXIT_CALL_RET(0, hmR0SvmExitFerrFreeze(pVCpu, pCtx, pSvmTransient));
    55225786
    55235787        default:
     
    55295793                case SVM_EXIT_READ_DR10:    case SVM_EXIT_READ_DR11:    case SVM_EXIT_READ_DR12:    case SVM_EXIT_READ_DR13:
    55305794                case SVM_EXIT_READ_DR14:    case SVM_EXIT_READ_DR15:
    5531                     return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
     5795                    VMEXIT_CALL_RET(0, hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient));
    55325796
    55335797                case SVM_EXIT_WRITE_DR0:    case SVM_EXIT_WRITE_DR1:    case SVM_EXIT_WRITE_DR2:    case SVM_EXIT_WRITE_DR3:
     
    55355799                case SVM_EXIT_WRITE_DR10:   case SVM_EXIT_WRITE_DR11:   case SVM_EXIT_WRITE_DR12:   case SVM_EXIT_WRITE_DR13:
    55365800                case SVM_EXIT_WRITE_DR14:   case SVM_EXIT_WRITE_DR15:
    5537                     return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
    5538 
    5539                 case SVM_EXIT_XSETBV:
    5540                     return hmR0SvmExitXsetbv(pVCpu, pCtx, pSvmTransient);
    5541 
    5542                 case SVM_EXIT_TASK_SWITCH:
    5543                     return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
    5544 
    5545                 case SVM_EXIT_IRET:
    5546                     return hmR0SvmExitIret(pVCpu, pCtx, pSvmTransient);
    5547 
    5548                 case SVM_EXIT_SHUTDOWN:
    5549                     return hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient);
     5801                    VMEXIT_CALL_RET(0, hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient));
     5802
     5803                case SVM_EXIT_TASK_SWITCH:  VMEXIT_CALL_RET(0, hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient));
     5804                case SVM_EXIT_SHUTDOWN:     VMEXIT_CALL_RET(0, hmR0SvmExitShutdown(pVCpu, pCtx, pSvmTransient));
    55505805
    55515806                case SVM_EXIT_SMI:
     
    55565811                     * If it ever does, we want to know about it so log the exit code and bail.
    55575812                     */
    5558                     return hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient);
     5813                    VMEXIT_CALL_RET(0, hmR0SvmExitUnexpected(pVCpu, pCtx, pSvmTransient));
    55595814                }
    55605815
    55615816#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    5562                 case SVM_EXIT_CLGI:     return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
    5563                 case SVM_EXIT_STGI:     return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
    5564                 case SVM_EXIT_VMLOAD:   return hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient);
    5565                 case SVM_EXIT_VMSAVE:   return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);
    5566                 case SVM_EXIT_INVLPGA:  return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient);
    5567                 case SVM_EXIT_VMRUN:    return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient);
     5817                case SVM_EXIT_CLGI:     VMEXIT_CALL_RET(0, hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient));
     5818                case SVM_EXIT_STGI:     VMEXIT_CALL_RET(0, hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient));
     5819                case SVM_EXIT_VMLOAD:   VMEXIT_CALL_RET(0, hmR0SvmExitVmload(pVCpu, pCtx, pSvmTransient));
     5820                case SVM_EXIT_VMSAVE:   VMEXIT_CALL_RET(0, hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient));
     5821                case SVM_EXIT_INVLPGA:  VMEXIT_CALL_RET(0, hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient));
     5822                case SVM_EXIT_VMRUN:    VMEXIT_CALL_RET(0, hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient));
    55685823#else
    55695824                case SVM_EXIT_CLGI:
     
    55825837
    55835838#ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    5584                 case SVM_EXIT_XCPT_0:      /* #DE                   */
    5585                 /*   SVM_EXIT_XCPT_1: */   /* #DB  - Handled above. */
    5586                 /*   SVM_EXIT_XCPT_2: */   /* #NMI - Handled above. */
    5587                 /*   SVM_EXIT_XCPT_3: */   /* #BP  - Handled above. */
    5588                 case SVM_EXIT_XCPT_4:      /* #OF                   */
    5589                 case SVM_EXIT_XCPT_5:      /* #BR                   */
    5590                 /*   SVM_EXIT_XCPT_6: */   /* #UD  - Handled above. */
    5591                 case SVM_EXIT_XCPT_7:      /* #NM                   */
    5592                 case SVM_EXIT_XCPT_8:      /* #DF                   */
    5593                 case SVM_EXIT_XCPT_9:      /* #CO_SEG_OVERRUN       */
    5594                 case SVM_EXIT_XCPT_10:     /* #TS                   */
    5595                 case SVM_EXIT_XCPT_11:     /* #NP                   */
    5596                 case SVM_EXIT_XCPT_12:     /* #SS                   */
    5597                 case SVM_EXIT_XCPT_13:     /* #GP                   */
    5598                 /*   SVM_EXIT_XCPT_14: */  /* #PF  - Handled above. */
    5599                 case SVM_EXIT_XCPT_15:     /* Reserved.            */
    5600                 /*   SVM_EXIT_XCPT_16: */  /* #MF  - Handled above. */
    5601                 /*   SVM_EXIT_XCPT_17: */  /* #AC  - Handled above. */
    5602                 case SVM_EXIT_XCPT_18:     /* #MC                   */
    5603                 case SVM_EXIT_XCPT_19:     /* #XF                   */
     5839                case SVM_EXIT_XCPT_DE:
     5840                /*   SVM_EXIT_XCPT_DB: */       /* Handled above. */
     5841                /*   SVM_EXIT_XCPT_NMI: */      /* Handled above. */
     5842                /*   SVM_EXIT_XCPT_BP: */       /* Handled above. */
     5843                case SVM_EXIT_XCPT_OF:
     5844                case SVM_EXIT_XCPT_BR:
     5845                /*   SVM_EXIT_XCPT_UD: */       /* Handled above. */
     5846                case SVM_EXIT_XCPT_NM:
     5847                case SVM_EXIT_XCPT_DF:
     5848                case SVM_EXIT_XCPT_CO_SEG_OVERRUN:
     5849                case SVM_EXIT_XCPT_TS:
     5850                case SVM_EXIT_XCPT_NP:
     5851                case SVM_EXIT_XCPT_SS:
     5852                case SVM_EXIT_XCPT_GP:
      5853                /*   SVM_EXIT_XCPT_PF: */       /* Handled above. */
     5854                case SVM_EXIT_XCPT_15:          /* Reserved. */
     5855                /*   SVM_EXIT_XCPT_MF: */       /* Handled above. */
     5856                /*   SVM_EXIT_XCPT_AC: */       /* Handled above. */
     5857                case SVM_EXIT_XCPT_MC:
     5858                case SVM_EXIT_XCPT_XF:
    56045859                case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23:
    56055860                case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27:
    56065861                case SVM_EXIT_XCPT_28: case SVM_EXIT_XCPT_29: case SVM_EXIT_XCPT_30: case SVM_EXIT_XCPT_31:
    5607                     return hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient);
     5862                    VMEXIT_CALL_RET(0, hmR0SvmExitXcptGeneric(pVCpu, pCtx, pSvmTransient));
    56085863#endif  /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
    56095864
     
    56185873    }
    56195874    /* not reached */
     5875#undef VMEXIT_CALL_RET
    56205876}
    56215877
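The VMEXIT_CALL_RET macro introduced above is a plain return of the handler call in release builds; only when DEBUG_ramshankar is defined does a case tagged with 1 first import the complete guest state (HMSVM_CPUMCTX_EXTRN_ALL) so the handler can be debugged with a fully populated context. A standalone sketch of that debug/release split follows; the macro and function names here are invented for illustration only.

    #include <stdio.h>

    /* Debug builds may do extra bookkeeping before dispatching; release builds dispatch directly. */
    #ifdef MY_DEBUG_BUILD
    # define DISPATCH_RET(a_fDbg, a_CallExpr) \
        do { \
            if (a_fDbg) \
                puts("importing full guest state for easier debugging"); \
            return (a_CallExpr); \
        } while (0)
    #else
    # define DISPATCH_RET(a_fDbg, a_CallExpr) return (a_CallExpr)
    #endif

    static int handleHlt(void) { puts("handling HLT exit"); return 0; }

    static int handleExit(unsigned uExitCode)
    {
        switch (uExitCode)
        {
            case 1:  DISPATCH_RET(0, handleHlt());
            default: return -1;
        }
    }

    int main(void)
    {
        return handleExit(1);
    }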
     
    58286084    int rc = VINF_SUCCESS;
    58296085    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     6086    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR2);
    58306087
    58316088    Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n",
     
    61206377        Assert(pVmcb);
    61216378        Assert(pVmcb->ctrl.u64NextRIP);
     6379        Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
    61226380        AssertRelease(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb);    /* temporary, remove later */
    61236381        pCtx->rip = pVmcb->ctrl.u64NextRIP;
     
    62386496{
    62396497    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     6498
    62406499    PVM pVM = pVCpu->CTX_SUFF(pVM);
    62416500    int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
     
    62616520{
    62626521    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     6522    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
    62636523    VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 2));
    62646524    if (rcStrict == VINF_SUCCESS)
     
    62796539    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    62806540    VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3));
     6541    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
    62816542    if (rcStrict == VINF_SUCCESS)
    62826543        pSvmTransient->fUpdateTscOffsetting = true;
     
    62956556{
    62966557    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     6558    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,   CPUMCTX_EXTRN_CR0
     6559                                    | CPUMCTX_EXTRN_CR4
     6560                                    | CPUMCTX_EXTRN_SS);
     6561
    62976562    int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    62986563    if (RT_LIKELY(rc == VINF_SUCCESS))
     
    63266591        && fSupportsNextRipSave)
    63276592    {
     6593        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
    63286594        PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
    63296595        uint8_t const cbInstr   = pVmcb->ctrl.u64NextRIP - pCtx->rip;
     
    63346600    }
    63356601
     6602    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    63366603    int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, pCtx);    /* Updates RIP if successful. */
    63376604    Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
     
    63646631{
    63656632    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     6633    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,   CPUMCTX_EXTRN_CR0
     6634                                            | CPUMCTX_EXTRN_SS);
     6635
    63666636    int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    63676637    if (RT_LIKELY(rc == VINF_SUCCESS))
     
    63866656{
    63876657    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     6658    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,   CPUMCTX_EXTRN_CR0
     6659                                            | CPUMCTX_EXTRN_SS);
     6660
    63886661    VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    63896662    int rc = VBOXSTRICTRC_VAL(rc2);
     
    64196692{
    64206693    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     6694    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    64216695    return VINF_EM_RESET;
    64226696}
     
    64306704    RT_NOREF(pCtx);
    64316705    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     6706    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    64326707    AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pSvmTransient->u64ExitCode,
    64336708                     pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2));
     
    64576732        if (fMovCRx)
    64586733        {
     6734            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
    64596735            uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
    64606736            uint8_t const iCrReg  = pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0;
     
    64676743    }
    64686744
     6745    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    64696746    VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
    64706747    int rc = VBOXSTRICTRC_VAL(rc2);
     
    64996776        if (fMovCRx)
    65006777        {
     6778            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
    65016779            uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip;
    65026780            uint8_t const iGReg   = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER;
     
    65106788    if (!fDecodedInstr)
    65116789    {
     6790        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
    65126791        Log4(("hmR0SvmExitWriteCRx: iCrReg=%#x\n", iCrReg));
    65136792        rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(pCtx), NULL);
     
    65216800        switch (iCrReg)
    65226801        {
    6523             case 0:     /* CR0. */
    6524                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    6525                 break;
    6526 
    6527             case 3:     /* CR3. */
    6528                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
    6529                 break;
    6530 
    6531             case 4:     /* CR4. */
    6532                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
    6533                 break;
    6534 
    6535             case 8:     /* CR8 (TPR). */
    6536                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
    6537                 break;
    6538 
     6802            case 0: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);        break;
     6803            case 2: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR2);        break;
     6804            case 3: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);        break;
     6805            case 4: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);        break;
     6806            case 8: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE); break;
    65396807            default:
     6808            {
    65406809                AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x\n",
    65416810                                 pSvmTransient->u64ExitCode, iCrReg));
    65426811                break;
     6812            }
    65436813        }
    65446814        HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
     
    65576827{
    65586828    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     6829    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,   CPUMCTX_EXTRN_CR0
     6830                                            | CPUMCTX_EXTRN_RFLAGS
     6831                                            | CPUMCTX_EXTRN_SS
     6832                                            | CPUMCTX_EXTRN_ALL_MSRS);
     6833
    65596834    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
    65606835    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
     
    65986873        else
    65996874        {
     6875            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    66006876            rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */));
    66016877            if (RT_LIKELY(rc == VINF_SUCCESS))
     
    66566932        else
    66576933        {
     6934            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    66586935            rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0));
    66596936            if (RT_UNLIKELY(rc != VINF_SUCCESS))
     
    66786955{
    66796956    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     6957    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
     6958
    66806959    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    66816960
     
    67607039{
    67617040    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7041    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
    67627042
    67637043    /** @todo decode assists... */
     
    67817061{
    67827062    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7063    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK);
    67837064
    67847065    /* I/O operation lookup arrays. */
     
    69077188        /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
    69087189         *  execution engines about whether hyper BPs and such are pending. */
     7190        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_DR7);
    69097191        uint32_t const uDr7 = pCtx->dr[7];
    69107192        if (RT_UNLIKELY(   (   (uDr7 & X86_DR7_ENABLED_MASK)
     
    69737255{
    69747256    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7257    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
     7258    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
    69757259
    69767260    PVM pVM = pVCpu->CTX_SUFF(pVM);
    69777261    Assert(pVM->hm.s.fNestedPaging);
    6978 
    6979     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
    69807262
    69817263    /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
     
    71037385{
    71047386    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    7105 
    71067387    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
    71077388
     
    71357416{
    71367417    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7418    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
     7419
    71377420    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
    71387421
     
    71897472{
    71907473    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7474    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR0);
    71917475    Assert(!(pCtx->cr0 & X86_CR0_NE));
    71927476
     
    72237507{
    72247508    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    7225 
     7509    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    72267510    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
    72277511
     
    73297613            /* If the nested-guest is intercepting #PFs, cause a #PF #VMEXIT. */
    73307614            if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
    7331                 && HMIsGuestSvmXcptInterceptSet(pVCpu, pCtx, X86_XCPT_PF))
     7615                && HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF))
    73327616                return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_XCPT_PF, uErrCode, uFaultAddress));
    73337617#endif
     
    73697653    if (pVCpu->hm.s.fGIMTrapXcptUD)
    73707654    {
     7655        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    73717656        uint8_t cbInstr = 0;
    73727657        VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, pCtx, NULL /* pDis */, &cbInstr);
     
    74057690{
    74067691    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7692    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    74077693
    74087694    /* Paranoia; Ensure we cannot be called as a result of event delivery. */
     
    74427728{
    74437729    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    7444 
    7445     /* If this #DB is the result of delivering an event, go back to the interpreter. */
     7730    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    74467731    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
     7732
    74477733    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
    74487734    {
     
    74957781{
    74967782    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    7497 
    74987783    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
    74997784
     
    75167801{
    75177802    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    7518 
     7803    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    75197804    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
    75207805
     
    75427827{
    75437828    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    7544 
    75457829    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
    75467830
     
    75867870{
    75877871    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7872    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,   IEM_CPUMCTX_EXTRN_MUST_MASK
     7873                                            | CPUMCTX_EXTRN_HWVIRT);
    75887874
    75897875#ifdef VBOX_STRICT
     
    76087894{
    76097895    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7896    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,   IEM_CPUMCTX_EXTRN_MUST_MASK
     7897                                            | CPUMCTX_EXTRN_HWVIRT);
    76107898
    76117899    /*
     
    76297917{
    76307918    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7919    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,   IEM_CPUMCTX_EXTRN_MUST_MASK
     7920                                            | CPUMCTX_EXTRN_FS
     7921                                            | CPUMCTX_EXTRN_GS
     7922                                            | CPUMCTX_EXTRN_TR
     7923                                            | CPUMCTX_EXTRN_LDTR
     7924                                            | CPUMCTX_EXTRN_KERNEL_GS_BASE
     7925                                            | CPUMCTX_EXTRN_SYSCALL_MSRS
     7926                                            | CPUMCTX_EXTRN_SYSENTER_MSRS);
    76317927
    76327928#ifdef VBOX_STRICT
     
    76447940        HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_SEGMENT_REGS
    76457941                            | HM_CHANGED_GUEST_TR
    7646                             | HM_CHANGED_GUEST_LDTR);
     7942                            | HM_CHANGED_GUEST_LDTR
     7943                            | HM_CHANGED_GUEST_SYSENTER_CS_MSR
     7944                            | HM_CHANGED_GUEST_SYSENTER_EIP_MSR
     7945                            | HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
    76477946    }
    76487947    return VBOXSTRICTRC_VAL(rcStrict);
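Judging by the register set it imports, the hunk above is the VMLOAD handler, and it shows both halves of the new scheme in one place: before emulation it imports exactly the registers VMLOAD will overwrite (FS, GS, TR, LDTR, KERNEL_GS_BASE and the SYSCALL/SYSENTER MSRs), and on success it flags the touched segment registers, TR, LDTR and the SYSENTER MSRs as changed so they are written back before the next VM-entry. A compact sketch of that import-then-mark-dirty bracket, using made-up flag names rather than the real CPUMCTX_EXTRN_xxx / HM_CHANGED_xxx values:

    #include <stdint.h>

    #define MY_EXTRN_FS     UINT64_C(0x1)   /* "still in hardware" flag (illustrative) */
    #define MY_CHANGED_FS   UINT64_C(0x1)   /* "must be written back" flag (illustrative) */

    typedef struct { uint64_t fExtrn, fChanged, fsBase; } VCPU;

    static void emulateVmload(VCPU *pVCpu, uint64_t fsBaseFromGuestVmcb)
    {
        /* 1) Make sure the current value is in the software context before touching it. */
        if (pVCpu->fExtrn & MY_EXTRN_FS)
            pVCpu->fExtrn &= ~MY_EXTRN_FS;          /* a real import would copy the VMCB value here */
        /* 2) Emulate VMLOAD: load the new value from the guest's VMCB. */
        pVCpu->fsBase = fsBaseFromGuestVmcb;
        /* 3) Flag it dirty so the run loop flushes it to the hardware VMCB on the next entry. */
        pVCpu->fChanged |= MY_CHANGED_FS;
    }

    int main(void)
    {
        VCPU VCpu = { MY_EXTRN_FS, 0, 0 };
        emulateVmload(&VCpu, 0x1000);
        return (VCpu.fChanged & MY_CHANGED_FS) ? 0 : 1;
    }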
     
    76567955{
    76577956    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7957    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
    76587958
    76597959#ifdef VBOX_STRICT
     
    76767976{
    76777977    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7978    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
     7979
    76787980    /** @todo Stat. */
    76797981    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpga); */
     
    76907992{
    76917993    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     7994    /** @todo Only save and reload what VMRUN changes (e.g. skip LDTR, TR etc). */
     7995    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);
    76927996
    76937997    VBOXSTRICTRC rcStrict;
     
    77118015{
    77128016    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    7713 
    7714     /* If this #DB is the result of delivering an event, go back to the interpreter. */
    77158017    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
     8018
    77168019    if (pVCpu->hm.s.Event.fPending)
    77178020    {
     
    77328035{
    77338036    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    7734 
    77358037    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
    77368038
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h

    r69474 r72643  
    3838#ifdef IN_RING0
    3939
    40 VMMR0DECL(int)  SVMR0GlobalInit(void);
    41 VMMR0DECL(void) SVMR0GlobalTerm(void);
    42 VMMR0DECL(int)  SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
    43 VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
    44 VMMR0DECL(int)  SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem,
    45                                void *pvArg);
    46 VMMR0DECL(int)  SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
    47 VMMR0DECL(int)  SVMR0InitVM(PVM pVM);
    48 VMMR0DECL(int)  SVMR0TermVM(PVM pVM);
    49 VMMR0DECL(int)  SVMR0SetupVM(PVM pVM);
     40VMMR0DECL(int)          SVMR0GlobalInit(void);
     41VMMR0DECL(void)         SVMR0GlobalTerm(void);
     42VMMR0DECL(int)          SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
     43VMMR0DECL(void)         SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
     44VMMR0DECL(int)          SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
     45                                       bool fEnabledBySystem, void *pvArg);
     46VMMR0DECL(int)          SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     47VMMR0DECL(int)          SVMR0InitVM(PVM pVM);
     48VMMR0DECL(int)          SVMR0TermVM(PVM pVM);
     49VMMR0DECL(int)          SVMR0SetupVM(PVM pVM);
    5050VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    51 VMMR0DECL(int)  SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu);
     51VMMR0DECL(int)          SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu);
     52VMMR0DECL(int)          SVMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat);
     53VMMR0DECL(int)          SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
    5254
    5355#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    54 DECLASM(int)   SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
    55 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
    56                                          uint32_t *paParam);
     56DECLASM(int)            SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
     57VMMR0DECL(int)          SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
     58                                                  uint32_t *paParam);
    5759#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) */
    5860
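The header change above newly exports SVMR0ImportStateOnDemand, which presumably backs the HMSVM_CPUMCTX_IMPORT_STATE calls added throughout the SVM exit handlers: each handler names only the CPUMCTX_EXTRN_xxx bits it actually touches, and any of those still marked external in pCtx->fExtrn are pulled in from the VMCB at that point. A self-contained model of the idea, with placeholder flag names and a fake "hardware" value in place of a real VMCB read:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder flags modelling the CPUMCTX_EXTRN_xxx idea: a set bit means the register
       still lives in the hardware VMCB and has not been copied into the software context yet. */
    #define MY_EXTRN_CR0   UINT64_C(0x1)
    #define MY_EXTRN_SS    UINT64_C(0x2)

    typedef struct
    {
        uint64_t fExtrn;   /* which fields are still "external" (not yet imported) */
        uint64_t cr0;
    } GUESTCTX;

    /* Import only what the caller asked for and is still marked external. */
    static void importStateOnDemand(GUESTCTX *pCtx, uint64_t fWhat)
    {
        uint64_t const fToImport = pCtx->fExtrn & fWhat;
        if (fToImport & MY_EXTRN_CR0)
            pCtx->cr0 = 0x80000011;      /* stand-in for reading the value out of the VMCB */
        pCtx->fExtrn &= ~fToImport;      /* these fields are now valid in the software context */
    }

    int main(void)
    {
        GUESTCTX Ctx = { MY_EXTRN_CR0 | MY_EXTRN_SS, 0 };
        importStateOnDemand(&Ctx, MY_EXTRN_CR0);   /* an exit handler asks only for CR0 */
        printf("cr0=%#llx still-external=%#llx\n",
               (unsigned long long)Ctx.cr0, (unsigned long long)Ctx.fExtrn);
        return 0;
    }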
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r72642 r72643  
    131131 * are used. Maybe later this can be extended (i.e. Nested Virtualization).
    132132 */
    133 #define HMVMX_VMCS_STATE_CLEAR                         RT_BIT(0)
    134 #define HMVMX_VMCS_STATE_ACTIVE                        RT_BIT(1)
    135 #define HMVMX_VMCS_STATE_LAUNCHED                      RT_BIT(2)
     133#define HMVMX_VMCS_STATE_CLEAR                              RT_BIT(0)
     134#define HMVMX_VMCS_STATE_ACTIVE                             RT_BIT(1)
     135#define HMVMX_VMCS_STATE_LAUNCHED                           RT_BIT(2)
    136136/** @} */
     137
     138/**
     139 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
     140 * guest using hardware-assisted VMX.
     141 *
      142 * This excludes state like GPRs (other than RSP) which are always
      143 * swapped and restored across the world-switch, and also registers like the
      144 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
     145 */
     146#define HMVMX_CPUMCTX_EXTRN_ALL                             (  CPUMCTX_EXTRN_RIP            \
     147                                                             | CPUMCTX_EXTRN_RFLAGS         \
     148                                                             | CPUMCTX_EXTRN_SREG_MASK      \
     149                                                             | CPUMCTX_EXTRN_TABLE_MASK     \
     150                                                             | CPUMCTX_EXTRN_SYSENTER_MSRS  \
     151                                                             | CPUMCTX_EXTRN_SYSCALL_MSRS   \
     152                                                             | CPUMCTX_EXTRN_KERNEL_GS_BASE \
     153                                                             | CPUMCTX_EXTRN_TSC_AUX        \
     154                                                             | CPUMCTX_EXTRN_OTHER_MSRS     \
     155                                                             | CPUMCTX_EXTRN_CR0            \
     156                                                             | CPUMCTX_EXTRN_CR3            \
     157                                                             | CPUMCTX_EXTRN_CR4            \
     158                                                             | CPUMCTX_EXTRN_DR7)
    137159
    138160/**
     
    191213        return VERR_VMX_UNEXPECTED_EXIT; \
    192214    } while (0)
     215
     216/** Macro for saving segment registers from VMCS into the guest-CPU
     217 *  context. */
     218#ifdef VMX_USE_CACHED_VMCS_ACCESSES
     219# define HMVMX_SAVE_SREG(Sel, a_pCtxSelReg) \
     220    hmR0VmxSaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
     221                          VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
     222#else
     223# define HMVMX_SAVE_SREG(Sel, a_pCtxSelReg) \
     224    hmR0VmxSaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
     225                          VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
     226#endif
    193227
    194228
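The HMVMX_SAVE_SREG macro added above uses token pasting to pick the per-segment VMCS field indices at compile time (and switches to the cached base index when VMX_USE_CACHED_VMCS_ACCESSES is defined). A minimal standalone illustration of the token-pasting technique, with invented field constants rather than the real VMX_VMCS16_GUEST_xx_SEL values:

    #include <stdio.h>

    /* Invented per-segment "field indices"; the real VMCS encodings differ. */
    #define GUEST_CS_SEL    0x0802u
    #define GUEST_SS_SEL    0x0804u

    static unsigned readVmcsField(unsigned idxField)
    {
        printf("reading VMCS field %#x\n", idxField);
        return idxField;
    }

    /* Token pasting turns SAVE_SREG(CS) into readVmcsField(GUEST_CS_SEL) at compile time. */
    #define SAVE_SREG(Sel)  readVmcsField(GUEST_##Sel##_SEL)

    int main(void)
    {
        SAVE_SREG(CS);
        SAVE_SREG(SS);
        return 0;
    }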
     
    19291963
    19301964/**
    1931  * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
    1932  * otherwise there is nothing really to invalidate.
    1933  *
    1934  * @returns VBox status code.
    1935  * @param   pVM         The cross context VM structure.
    1936  * @param   pVCpu       The cross context virtual CPU structure.
    1937  * @param   GCPhys      Guest physical address of the page to invalidate.
    1938  */
    1939 VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
    1940 {
    1941     NOREF(pVM); NOREF(GCPhys);
    1942     LogFlowFunc(("%RGp\n", GCPhys));
    1943 
    1944     /*
    1945      * We cannot flush a page by guest-physical address. invvpid takes only a linear address while invept only flushes
    1946      * by EPT not individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().
    1947      * This function might be called in a loop. This should cause a flush-by-EPT if EPT is in use. See @bugref{6568}.
    1948      */
    1949     VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    1950     STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
    1951     return VINF_SUCCESS;
    1952 }
    1953 
    1954 
    1955 /**
    19561965 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
    19571966 * case where neither EPT nor VPID is supported by the CPU.
     
    67476756
    67486757/**
    6749  * Reads a guest segment register from the current VMCS into the guest-CPU
     6758 * Saves a guest segment register from the current VMCS into the guest-CPU
    67506759 * context.
    67516760 *
     
    67596768 *
    67606769 * @remarks No-long-jump zone!!!
    6761  * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
    6762  *          macro as that takes care of whether to read from the VMCS cache or
    6763  *          not.
    6764  */
    6765 DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
     6770 * @remarks Never call this function directly!!! Use the
     6771 *          HMVMX_SAVE_SREG() macro as that takes care of whether to read
     6772 *          from the VMCS cache or not.
     6773 */
     6774static int hmR0VmxSaveSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
    67666775                                      PCPUMSELREG pSelReg)
    67676776{
     
    68246833}
    68256834
    6826 
    6827 #ifdef VMX_USE_CACHED_VMCS_ACCESSES
    6828 # define VMXLOCAL_READ_SEG(Sel, CtxSel) \
    6829     hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
    6830                           VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
    6831 #else
    6832 # define VMXLOCAL_READ_SEG(Sel, CtxSel) \
    6833     hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
    6834                           VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
    6835 #endif
    6836 
    6837 
    68386835/**
    68396836 * Saves the guest segment registers from the current VMCS into the guest-CPU
     
    68536850    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
    68546851    {
     6852        /** @todo r=ramshankar: Why do we save CR0 here? */
    68556853        Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS));
    68566854        int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    68576855        AssertRCReturn(rc, rc);
    68586856
    6859         rc  = VMXLOCAL_READ_SEG(CS, cs);
    6860         rc |= VMXLOCAL_READ_SEG(SS, ss);
    6861         rc |= VMXLOCAL_READ_SEG(DS, ds);
    6862         rc |= VMXLOCAL_READ_SEG(ES, es);
    6863         rc |= VMXLOCAL_READ_SEG(FS, fs);
    6864         rc |= VMXLOCAL_READ_SEG(GS, gs);
     6857        rc  = HMVMX_SAVE_SREG(CS, &pMixedCtx->cs);
     6858        rc |= HMVMX_SAVE_SREG(SS, &pMixedCtx->ss);
     6859        rc |= HMVMX_SAVE_SREG(DS, &pMixedCtx->ds);
     6860        rc |= HMVMX_SAVE_SREG(ES, &pMixedCtx->es);
     6861        rc |= HMVMX_SAVE_SREG(FS, &pMixedCtx->fs);
     6862        rc |= HMVMX_SAVE_SREG(GS, &pMixedCtx->gs);
    68656863        AssertRCReturn(rc, rc);
    68666864
     
    69166914    {
    69176915        Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR));
    6918         rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
     6916        rc = HMVMX_SAVE_SREG(LDTR, &pMixedCtx->ldtr);
    69196917        AssertRCReturn(rc, rc);
    69206918        HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
     
    69556953        if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    69566954        {
    6957             rc = VMXLOCAL_READ_SEG(TR, tr);
     6955            rc = HMVMX_SAVE_SREG(TR, &pMixedCtx->tr);
    69586956            AssertRCReturn(rc, rc);
    69596957        }
     
    69626960    return rc;
    69636961}
    6964 
    6965 #undef VMXLOCAL_READ_SEG
    69666962
    69676963
     
    70147010    HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
    70157011    return VINF_SUCCESS;
     7012}
     7013
     7014
     7015/**
     7016 * Worker for VMXR0ImportStateOnDemand.
     7017 *
     7018 * @returns VBox status code.
     7019 * @param   pVCpu   The cross context virtual CPU structure.
     7020 * @param   pCtx    Pointer to the guest-CPU context.
     7021 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
     7022 */
     7023static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
     7024{
     7025    int      rc = VINF_SUCCESS;
     7026    PVM      pVM = pVCpu->CTX_SUFF(pVM);
     7027    uint64_t u64Val;
     7028    uint32_t u32Val;
     7029    uint32_t u32Shadow;
     7030
     7031    /*
     7032     * Though we can longjmp to ring-3 due to log-flushes here and get re-invoked
      7033     * on the ring-3 callback path, there is no real need to do so.
     7034     */
     7035    if (VMMRZCallRing3IsEnabled(pVCpu))
     7036        VMMR0LogFlushDisable(pVCpu);
     7037    else
     7038        Assert(VMMR0IsLogFlushDisabled(pVCpu));
     7039    Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));
     7040
     7041    /*
     7042     * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
     7043     * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
     7044     * -> continue with VM-exit handling -> hmR0VmxImportGuestState() and here we are.
     7045     *
      7046     * The reason for such complicated handling is that VM-exits which call into PGM expect CR3 to be up-to-date. Thus,
      7047     * if a CR3 save done -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler
      7048     * that calls into PGM will end up here when it re-saves CR3, and we call PGMUpdateCR3(). This is why the code below
      7049     * must -NOT- check whether HMVMX_UPDATED_GUEST_CR3 is already set!
     7050     *
     7051     * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
     7052     */
     7053    if (VMMRZCallRing3IsEnabled(pVCpu))
     7054    {
     7055        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     7056            PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
     7057
     7058        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
     7059            PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
     7060
     7061        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     7062        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
     7063
     7064        VMMR0LogFlushEnable(pVCpu);
     7065    }
     7066
     7067    Assert(!(fWhat & CPUMCTX_EXTRN_KEEPER_HM));
     7068    fWhat &= pCtx->fExtrn;
     7069
     7070    /* If there is nothing more to import, bail early. */
     7071    if (!(fWhat & HMVMX_CPUMCTX_EXTRN_ALL))
     7072        return VINF_SUCCESS;
     7073
     7074    /* RIP required while saving interruptibility-state below, see EMSetInhibitInterruptsPC(). */
     7075    if (fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_HM_VMX_INT_STATE))
     7076    {
     7077        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
     7078        AssertRCReturn(rc, rc);
     7079        pCtx->rip = u64Val;
     7080        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RIP);
     7081    }
     7082
     7083    /* RFLAGS and interruptibility-state required while re-evaluating interrupt injection, see hmR0VmxGetGuestIntrState(). */
     7084    if (fWhat & (CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_HM_VMX_INT_STATE))
     7085    {
     7086        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RFLAGS, &u64Val);
     7087        AssertRCReturn(rc, rc);
     7088        pCtx->eflags.u32 = u64Val;
     7089        /* Restore eflags for real-on-v86-mode hack. */
     7090        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     7091        {
     7092            Assert(pVM->hm.s.vmx.pRealModeTSS);
     7093            pCtx->eflags.Bits.u1VM   = 0;
     7094            pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
     7095        }
     7096        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RFLAGS);
     7097    }
     7098
     7099    if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)
     7100    {
     7101        rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val);
     7102        AssertRCReturn(rc, rc);
     7103        if (!u32Val)
     7104        {
     7105            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     7106                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     7107
     7108            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
     7109                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     7110        }
     7111        else
     7112        {
     7113            if (u32Val & (  VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
     7114                          | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
     7115            {
     7116                EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
     7117                Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     7118            }
     7119            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     7120                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     7121
     7122            if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
     7123            {
     7124                if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
     7125                    VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     7126            }
     7127            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
     7128                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     7129        }
     7130        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_VMX_INT_STATE);
     7131    }
     7132
     7133    if (fWhat & CPUMCTX_EXTRN_RSP)
     7134    {
     7135        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
     7136        AssertRCReturn(rc, rc);
     7137        pCtx->rsp = u64Val;
     7138        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RSP);
     7139    }
     7140
     7141    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
     7142    {
     7143        if (fWhat & CPUMCTX_EXTRN_CS)
     7144        {
     7145            rc = HMVMX_SAVE_SREG(CS, &pCtx->cs);
     7146            AssertRCReturn(rc, rc);
     7147            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     7148                pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
     7149            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CS);
     7150        }
     7151        if (fWhat & CPUMCTX_EXTRN_SS)
     7152        {
      7153            rc = HMVMX_SAVE_SREG(SS, &pCtx->ss);
                      AssertRCReturn(rc, rc);
     7154            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     7155                pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
     7156            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SS);
     7157        }
     7158        if (fWhat & CPUMCTX_EXTRN_DS)
     7159        {
     7160            rc = HMVMX_SAVE_SREG(DS, &pCtx->ds);
     7161            AssertRCReturn(rc, rc);
     7162            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     7163                pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
     7164            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DS);
     7165        }
     7166        if (fWhat & CPUMCTX_EXTRN_ES)
     7167        {
     7168            rc = HMVMX_SAVE_SREG(ES, &pCtx->es);
     7169            AssertRCReturn(rc, rc);
     7170            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     7171                pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
     7172            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_ES);
     7173        }
      7174        if (fWhat & CPUMCTX_EXTRN_FS)
      7175        {
      7176            rc = HMVMX_SAVE_SREG(FS, &pCtx->fs);
      7177            AssertRCReturn(rc, rc);
      7178            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
      7179                pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
      7180            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_FS);
      7181        }
     7182        if (fWhat & CPUMCTX_EXTRN_GS)
     7183        {
     7184            rc = HMVMX_SAVE_SREG(GS, &pCtx->gs);
     7185            AssertRCReturn(rc, rc);
     7186            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     7187                pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
     7188            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GS);
     7189        }
     7190    }
     7191
     7192    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
     7193    {
     7194        if (fWhat & CPUMCTX_EXTRN_LDTR)
     7195        {
     7196            rc = HMVMX_SAVE_SREG(LDTR, &pCtx->ldtr);
     7197            AssertRCReturn(rc, rc);
     7198            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_LDTR);
     7199        }
     7200
     7201        if (fWhat & CPUMCTX_EXTRN_GDTR)
     7202        {
     7203            rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  &u64Val);
     7204            rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
     7205            AssertRCReturn(rc, rc);
     7206            pCtx->gdtr.pGdt  = u64Val;
     7207            pCtx->gdtr.cbGdt = u32Val;
     7208            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GDTR);
     7209        }
     7210
     7211        /* Guest IDTR. */
     7212        if (fWhat & CPUMCTX_EXTRN_IDTR)
     7213        {
     7214            rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  &u64Val);
     7215            rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
     7216            AssertRCReturn(rc, rc);
     7217            pCtx->idtr.pIdt  = u64Val;
     7218            pCtx->idtr.cbIdt = u32Val;
     7219            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_IDTR);
     7220        }
     7221
     7222        /* Guest TR. */
     7223        if (fWhat & CPUMCTX_EXTRN_TR)
     7224        {
     7225            /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */
     7226            if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     7227            {
     7228                rc = HMVMX_SAVE_SREG(TR, &pCtx->tr);
     7229                AssertRCReturn(rc, rc);
     7230            }
     7231            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_TR);
     7232        }
     7233    }
     7234
     7235    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
     7236    {
     7237        rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);
     7238        rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);
     7239        rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS,  &u32Val);
     7240        pCtx->SysEnter.cs = u32Val;
     7241        AssertRCReturn(rc, rc);
     7242        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSENTER_MSRS);
     7243    }
     7244
     7245#if HC_ARCH_BITS == 64
     7246    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
     7247    {
     7248        if (   pVM->hm.s.fAllow64BitGuests
     7249            && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
     7250            pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
     7251        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KERNEL_GS_BASE);
     7252    }
     7253
     7254    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
     7255    {
     7256        if (   pVM->hm.s.fAllow64BitGuests
     7257            && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
     7258        {
     7259            pCtx->msrLSTAR  = ASMRdMsr(MSR_K8_LSTAR);
     7260            pCtx->msrSTAR   = ASMRdMsr(MSR_K6_STAR);
     7261            pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
     7262        }
     7263        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSCALL_MSRS);
     7264    }
     7265#endif
     7266
     7267    if (   (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
     7268#if HC_ARCH_BITS == 32
     7269        || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))
     7270#endif
     7271        )
     7272    {
     7273        PCVMXAUTOMSR   pMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     7274        uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;
     7275        for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
     7276        {
     7277            switch (pMsr->u32Msr)
     7278            {
     7279#if HC_ARCH_BITS == 32
     7280                case MSR_K8_LSTAR:          pCtx->msrLSTAR        = pMsr->u64Value;         break;
     7281                case MSR_K6_STAR:           pCtx->msrSTAR         = pMsr->u64Value;         break;
     7282                case MSR_K8_SF_MASK:        pCtx->msrSFMASK       = pMsr->u64Value;         break;
     7283                case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value;         break;
     7284#endif
     7285                case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value);    break;
     7286                case MSR_K8_TSC_AUX:
     7287                {
     7288                    /* CPUMSetGuestTscAux alters fExtrn without using atomics, so disable preemption temporarily. */
     7289                    HM_DISABLE_PREEMPT();
     7290                    CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);
     7291                    HM_RESTORE_PREEMPT();
     7292                    break;
     7293                }
     7294                default:
     7295                {
     7296                    AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
     7297                    pVCpu->hm.s.u32HMError = pMsr->u32Msr;
     7298                    return VERR_HM_UNEXPECTED_LD_ST_MSR;
     7299                }
     7300            }
     7301        }
     7302        ASMAtomicUoAndU64(&pCtx->fExtrn, ~(  CPUMCTX_EXTRN_TSC_AUX
     7303                                           | CPUMCTX_EXTRN_OTHER_MSRS
     7304#if HC_ARCH_BITS == 32
     7305                                           | CPUMCTX_EXTRN_KERNEL_GS_BASE
     7306                                           | CPUMCTX_EXTRN_SYSCALL_MSRS
     7307#endif
     7308                                           ));
     7309    }
     7310
     7311    if (fWhat & CPUMCTX_EXTRN_DR7)
     7312    {
     7313        if (!pVCpu->hm.s.fUsingHyperDR7)
     7314        {
     7315            /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
     7316            rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
     7317            AssertRCReturn(rc, rc);
     7318            pCtx->dr[7] = u32Val;
     7319        }
     7320        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR7);
     7321    }
     7322
     7323    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
     7324    {
     7325        /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */
     7326        if (fWhat & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3))
     7327        {
     7328            rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR0,            &u32Val);
     7329            rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
     7330            AssertRCReturn(rc, rc);
     7331            u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR0Mask)
     7332                   | (u32Shadow & pVCpu->hm.s.vmx.u32CR0Mask);
     7333            CPUMSetGuestCR0(pVCpu, u32Val);
     7334            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR0);
     7335        }
     7336
     7337        /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). */
     7338        if (fWhat & (CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3))
     7339        {
     7340            rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR4,            &u32Val);
     7341            rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
     7342            AssertRCReturn(rc, rc);
     7343            u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR4Mask)
     7344                   | (u32Shadow & pVCpu->hm.s.vmx.u32CR4Mask);
     7345            CPUMSetGuestCR4(pVCpu, u32Val);
     7346            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR4);
     7347        }
     7348
     7349        if (fWhat & CPUMCTX_EXTRN_CR3)
     7350        {
     7351            if (   pVM->hm.s.vmx.fUnrestrictedGuest
     7352                || (   pVM->hm.s.fNestedPaging
     7353                    && CPUMIsGuestPagingEnabledEx(pCtx)))
     7354            {
     7355                rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
     7356                if (pCtx->cr3 != u64Val)
     7357                {
     7358                    CPUMSetGuestCR3(pVCpu, u64Val);
     7359                    if (VMMRZCallRing3IsEnabled(pVCpu))
     7360                    {
     7361                        PGMUpdateCR3(pVCpu, u64Val);
     7362                        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     7363                    }
     7364                    else
     7365                    {
     7366                        /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
     7367                        VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
     7368                    }
     7369                }
     7370
     7371                /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
     7372                if (CPUMIsGuestInPAEModeEx(pCtx))
     7373                {
     7374                    rc  = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
     7375                    rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
     7376                    rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
     7377                    rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
     7378                    AssertRCReturn(rc, rc);
     7379
     7380                    if (VMMRZCallRing3IsEnabled(pVCpu))
     7381                    {
     7382                        PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
     7383                        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
     7384                    }
     7385                    else
     7386                    {
     7387                        /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
     7388                        VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
     7389                    }
     7390                }
     7391            }
     7392            ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR3);
     7393        }
     7394    }
     7395
     7396    /* If everything has been imported, clear the HM keeper bit. */
     7397    if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
     7398    {
     7399        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KEEPER_HM);
     7400        Assert(!pCtx->fExtrn);
     7401    }
     7402
     7403    return VINF_SUCCESS;
     7404}
     7405
     7406
     7407/**
      7408 * Imports the guest state from the VMCS into the guest-CPU context on demand.
     7409 *
     7410 * @returns VBox status code.
     7411 * @param   pVCpu   The cross context virtual CPU structure.
     7412 * @param   pCtx    Pointer to the guest-CPU or nested-guest-CPU context.
     7413 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
     7414 */
     7415VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
     7416{
     7417    return hmR0VmxImportGuestState(pVCpu, pCtx, fWhat);
    70167418}
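The import-on-demand API above lets VM-exit handlers pull in only the state they actually touch. A minimal caller-side sketch, not part of the changeset: a hypothetical exit handler that needs just RIP and RFLAGS requests exactly those CPUMCTX_EXTRN_XXX bits; the handler name is invented for illustration:

    /* Sketch: import only RIP and RFLAGS before using them; the rest of the context stays external. */
    static int sketchHandleExitNeedingRipOnly(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        int rc = VMXR0ImportStateOnDemand(pVCpu, pCtx, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
        AssertRCReturn(rc, rc);

        /* ... pCtx->rip and pCtx->eflags are now valid and may be used or updated here ... */
        return VINF_SUCCESS;
    }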
    70177419
     
    72367638    AssertRCReturn(rc3, rc3);
    72377639
     7640    /** @todo  r=ramshankar: VMCPU_FF_HM_UPDATE_CR3 and VMCPU_FF_HM_UPDATE_PAE_PDPES
     7641     *         are not part of VMCPU_FF_HP_R0_PRE_HM_MASK. Hence, the two if
      7642     *         statements below won't ever be entered. Consider removing them or
      7643     *         determining whether these flags need to be added to VMCPU_FF_HP_R0_PRE_HM_MASK. */
    72387644    /* Pending HM CR3 sync. */
    72397645    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     
    1081811224{
    1081911225# ifdef DEBUG_ramshankar
    10820 #  define RETURN_EXIT_CALL(a_CallExpr) \
     11226#  define VMEXIT_CALL_RET(a_CallExpr) \
    1082111227       do { \
    1082211228            int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); \
     
    1082611232        } while (0)
    1082711233# else
    10828 #  define RETURN_EXIT_CALL(a_CallExpr) return a_CallExpr
     11234#  define VMEXIT_CALL_RET(a_CallExpr) return a_CallExpr
    1082911235# endif
    1083011236    switch (rcReason)
    1083111237    {
    10832         case VMX_EXIT_EPT_MISCONFIG:           RETURN_EXIT_CALL(hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));
    10833         case VMX_EXIT_EPT_VIOLATION:           RETURN_EXIT_CALL(hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));
    10834         case VMX_EXIT_IO_INSTR:                RETURN_EXIT_CALL(hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));
    10835         case VMX_EXIT_CPUID:                   RETURN_EXIT_CALL(hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));
    10836         case VMX_EXIT_RDTSC:                   RETURN_EXIT_CALL(hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));
    10837         case VMX_EXIT_RDTSCP:                  RETURN_EXIT_CALL(hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));
    10838         case VMX_EXIT_APIC_ACCESS:             RETURN_EXIT_CALL(hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));
    10839         case VMX_EXIT_XCPT_OR_NMI:             RETURN_EXIT_CALL(hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));
    10840         case VMX_EXIT_MOV_CRX:                 RETURN_EXIT_CALL(hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));
    10841         case VMX_EXIT_EXT_INT:                 RETURN_EXIT_CALL(hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));
    10842         case VMX_EXIT_INT_WINDOW:              RETURN_EXIT_CALL(hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));
    10843         case VMX_EXIT_MWAIT:                   RETURN_EXIT_CALL(hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient));
    10844         case VMX_EXIT_MONITOR:                 RETURN_EXIT_CALL(hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));
    10845         case VMX_EXIT_TASK_SWITCH:             RETURN_EXIT_CALL(hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));
    10846         case VMX_EXIT_PREEMPT_TIMER:           RETURN_EXIT_CALL(hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));
    10847         case VMX_EXIT_RDMSR:                   RETURN_EXIT_CALL(hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));
    10848         case VMX_EXIT_WRMSR:                   RETURN_EXIT_CALL(hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));
    10849         case VMX_EXIT_MOV_DRX:                 RETURN_EXIT_CALL(hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));
    10850         case VMX_EXIT_TPR_BELOW_THRESHOLD:     RETURN_EXIT_CALL(hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));
    10851         case VMX_EXIT_HLT:                     RETURN_EXIT_CALL(hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));
    10852         case VMX_EXIT_INVD:                    RETURN_EXIT_CALL(hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));
    10853         case VMX_EXIT_INVLPG:                  RETURN_EXIT_CALL(hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));
    10854         case VMX_EXIT_RSM:                     RETURN_EXIT_CALL(hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));
    10855         case VMX_EXIT_MTF:                     RETURN_EXIT_CALL(hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));
    10856         case VMX_EXIT_PAUSE:                   RETURN_EXIT_CALL(hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));
    10857         case VMX_EXIT_XDTR_ACCESS:             RETURN_EXIT_CALL(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
    10858         case VMX_EXIT_TR_ACCESS:               RETURN_EXIT_CALL(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
    10859         case VMX_EXIT_WBINVD:                  RETURN_EXIT_CALL(hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));
    10860         case VMX_EXIT_XSETBV:                  RETURN_EXIT_CALL(hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));
    10861         case VMX_EXIT_RDRAND:                  RETURN_EXIT_CALL(hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));
    10862         case VMX_EXIT_INVPCID:                 RETURN_EXIT_CALL(hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));
    10863         case VMX_EXIT_GETSEC:                  RETURN_EXIT_CALL(hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));
    10864         case VMX_EXIT_RDPMC:                   RETURN_EXIT_CALL(hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));
    10865         case VMX_EXIT_VMCALL:                  RETURN_EXIT_CALL(hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));
     11238        case VMX_EXIT_EPT_MISCONFIG:           VMEXIT_CALL_RET(hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));
     11239        case VMX_EXIT_EPT_VIOLATION:           VMEXIT_CALL_RET(hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));
     11240        case VMX_EXIT_IO_INSTR:                VMEXIT_CALL_RET(hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));
     11241        case VMX_EXIT_CPUID:                   VMEXIT_CALL_RET(hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));
     11242        case VMX_EXIT_RDTSC:                   VMEXIT_CALL_RET(hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));
     11243        case VMX_EXIT_RDTSCP:                  VMEXIT_CALL_RET(hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));
     11244        case VMX_EXIT_APIC_ACCESS:             VMEXIT_CALL_RET(hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));
     11245        case VMX_EXIT_XCPT_OR_NMI:             VMEXIT_CALL_RET(hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));
     11246        case VMX_EXIT_MOV_CRX:                 VMEXIT_CALL_RET(hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));
     11247        case VMX_EXIT_EXT_INT:                 VMEXIT_CALL_RET(hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));
     11248        case VMX_EXIT_INT_WINDOW:              VMEXIT_CALL_RET(hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));
     11249        case VMX_EXIT_MWAIT:                   VMEXIT_CALL_RET(hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient));
     11250        case VMX_EXIT_MONITOR:                 VMEXIT_CALL_RET(hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));
     11251        case VMX_EXIT_TASK_SWITCH:             VMEXIT_CALL_RET(hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));
     11252        case VMX_EXIT_PREEMPT_TIMER:           VMEXIT_CALL_RET(hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));
     11253        case VMX_EXIT_RDMSR:                   VMEXIT_CALL_RET(hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));
     11254        case VMX_EXIT_WRMSR:                   VMEXIT_CALL_RET(hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));
     11255        case VMX_EXIT_MOV_DRX:                 VMEXIT_CALL_RET(hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));
     11256        case VMX_EXIT_TPR_BELOW_THRESHOLD:     VMEXIT_CALL_RET(hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));
     11257        case VMX_EXIT_HLT:                     VMEXIT_CALL_RET(hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));
     11258        case VMX_EXIT_INVD:                    VMEXIT_CALL_RET(hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));
     11259        case VMX_EXIT_INVLPG:                  VMEXIT_CALL_RET(hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));
     11260        case VMX_EXIT_RSM:                     VMEXIT_CALL_RET(hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));
     11261        case VMX_EXIT_MTF:                     VMEXIT_CALL_RET(hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));
     11262        case VMX_EXIT_PAUSE:                   VMEXIT_CALL_RET(hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));
     11263        case VMX_EXIT_XDTR_ACCESS:             VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
     11264        case VMX_EXIT_TR_ACCESS:               VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
     11265        case VMX_EXIT_WBINVD:                  VMEXIT_CALL_RET(hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));
     11266        case VMX_EXIT_XSETBV:                  VMEXIT_CALL_RET(hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));
     11267        case VMX_EXIT_RDRAND:                  VMEXIT_CALL_RET(hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));
     11268        case VMX_EXIT_INVPCID:                 VMEXIT_CALL_RET(hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));
     11269        case VMX_EXIT_GETSEC:                  VMEXIT_CALL_RET(hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));
     11270        case VMX_EXIT_RDPMC:                   VMEXIT_CALL_RET(hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));
     11271        case VMX_EXIT_VMCALL:                  VMEXIT_CALL_RET(hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));
    1086611272
    1086711273        case VMX_EXIT_TRIPLE_FAULT:            return hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient);
     
    1089611302            return hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
    1089711303    }
    10898 #undef RETURN_EXIT_CALL
     11304#undef VMEXIT_CALL_RET
    1089911305}
    1090011306#endif /* !HMVMX_USE_FUNCTION_TABLE */
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h

    r69474 r72643  
    2929#ifdef IN_RING0
    3030
    31 VMMR0DECL(int)  VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
    32 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
    33 VMMR0DECL(int)  VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem,
    34                                void *pvMsrs);
    35 VMMR0DECL(int)  VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
    36 VMMR0DECL(int)  VMXR0GlobalInit(void);
    37 VMMR0DECL(void) VMXR0GlobalTerm(void);
    38 VMMR0DECL(int)  VMXR0InitVM(PVM pVM);
    39 VMMR0DECL(int)  VMXR0TermVM(PVM pVM);
    40 VMMR0DECL(int)  VMXR0SetupVM(PVM pVM);
    41 VMMR0DECL(int)  VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu);
     31VMMR0DECL(int)          VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
     32VMMR0DECL(void)         VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
     33VMMR0DECL(int)          VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
     34                                       bool fEnabledBySystem, void *pvMsrs);
     35VMMR0DECL(int)          VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     36VMMR0DECL(int)          VMXR0GlobalInit(void);
     37VMMR0DECL(void)         VMXR0GlobalTerm(void);
     38VMMR0DECL(int)          VMXR0InitVM(PVM pVM);
     39VMMR0DECL(int)          VMXR0TermVM(PVM pVM);
     40VMMR0DECL(int)          VMXR0SetupVM(PVM pVM);
     41VMMR0DECL(int)          VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu);
     42VMMR0DECL(int)          VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
     43VMMR0DECL(int)          VMXR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat);
    4244VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    43 DECLASM(int)    VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    44 DECLASM(int)    VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    45 
     45DECLASM(int)            VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
     46DECLASM(int)            VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    4647
    4748# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    48 DECLASM(int)    VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    49 VMMR0DECL(int)  VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
    50                                          uint32_t *paParam);
     49DECLASM(int)            VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
     50VMMR0DECL(int)          VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
     51                                                  uint32_t *paParam);
    5152# endif
    5253
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r72178 r72643  
    14331433            SSMR3PutU16(pSSM,    pGstCtx->hwvirt.svm.cPauseFilterThreshold);
    14341434            SSMR3PutBool(pSSM,   pGstCtx->hwvirt.svm.fInterceptEvents);
    1435             SSMR3PutBool(pSSM,   pGstCtx->hwvirt.svm.fHMCachedVmcb);
    14361435            SSMR3PutStructEx(pSSM, &pGstCtx->hwvirt.svm.HostState, sizeof(pGstCtx->hwvirt.svm.HostState), 0 /* fFlags */,
    14371436                             g_aSvmHwvirtHostState, NULL /* pvUser */);
     
    16741673                        SSMR3GetU16(pSSM,      &pGstCtx->hwvirt.svm.cPauseFilterThreshold);
    16751674                        SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.svm.fInterceptEvents);
    1676                         SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.svm.fHMCachedVmcb);
    16771675                        SSMR3GetStructEx(pSSM, &pGstCtx->hwvirt.svm.HostState, sizeof(pGstCtx->hwvirt.svm.HostState),
    16781676                                         0 /* fFlags */, g_aSvmHwvirtHostState, NULL /* pvUser */);
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r72642 r72643  
    18861886                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
    18871887                    {
     1888                        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
    18881889                        VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
    18891890                        if (RT_SUCCESS(rcStrict))
     
    19041905                    /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
    19051906                    /** @todo this really isn't nice, should properly handle this */
     1907                    CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    19061908                    int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
    19071909                    Assert(rc != VINF_PGM_CHANGE_MODE);
     
    19261928                if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
    19271929                {
     1930                    CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
    19281931                    VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
    19291932                    if (RT_SUCCESS(rcStrict))
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r72598 r72643  
    34913491        {
    34923492            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVM->aCpus[i].hm.s.svm.NstGstVmcbCache;
    3493             rc  = SSMR3PutU16(pSSM,  pVmcbNstGstCache->u16InterceptRdCRx);
     3493            rc  = SSMR3PutBool(pSSM, pVmcbNstGstCache->fCacheValid);
     3494            rc |= SSMR3PutU16(pSSM,  pVmcbNstGstCache->u16InterceptRdCRx);
    34943495            rc |= SSMR3PutU16(pSSM,  pVmcbNstGstCache->u16InterceptWrCRx);
    34953496            rc |= SSMR3PutU16(pSSM,  pVmcbNstGstCache->u16InterceptRdDRx);
     
    35763577            {
    35773578                PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVM->aCpus[i].hm.s.svm.NstGstVmcbCache;
    3578                 rc  = SSMR3GetU16(pSSM,  &pVmcbNstGstCache->u16InterceptRdCRx);
     3579                rc  = SSMR3GetBool(pSSM, &pVmcbNstGstCache->fCacheValid);
     3580                rc |= SSMR3GetU16(pSSM,  &pVmcbNstGstCache->u16InterceptRdCRx);
    35793581                rc |= SSMR3GetU16(pSSM,  &pVmcbNstGstCache->u16InterceptWrCRx);
    35803582                rc |= SSMR3GetU16(pSSM,  &pVmcbNstGstCache->u16InterceptRdDRx);
     
    36873689
    36883690/**
    3689  * Displays the guest VM-exit history.
     3691 * Displays HM info.
    36903692 *
    36913693 * @param   pVM         The cross context VM structure.
     
    37613763        && pVM->cpum.ro.GuestFeatures.fSvm)
    37623764    {
    3763         PCCPUMCTX            pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    37643765        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    37653766        pHlp->pfnPrintf(pHlp, "CPU[%u]: HM SVM nested-guest VMCB cache\n", pVCpu->idCpu);
    3766         pHlp->pfnPrintf(pHlp, "  fHMCachedVmcb           = %#RTbool\n", pCtx->hwvirt.svm.fHMCachedVmcb);
     3767        pHlp->pfnPrintf(pHlp, "  fCacheValid             = %#RTbool\n", pVmcbNstGstCache->fCacheValid);
    37673768        pHlp->pfnPrintf(pHlp, "  u16InterceptRdCRx       = %#RX16\n",   pVmcbNstGstCache->u16InterceptRdCRx);
    37683769        pHlp->pfnPrintf(pHlp, "  u16InterceptWrCRx       = %#RX16\n",   pVmcbNstGstCache->u16InterceptWrCRx);
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r71833 r72643  
    243243    .Guest.hwvirt.svm.cPauseFilterThreshold  resw         1
    244244    .Guest.hwvirt.svm.fInterceptEvents       resb         1
    245     .Guest.hwvirt.svm.fHMCachedVmcb          resb         1
    246245    alignb 8
    247246    .Guest.hwvirt.svm.pvMsrBitmapR0          RTR0PTR_RES  1
     
    531530    .Hyper.hwvirt.svm.cPauseFilterThreshold  resw         1
    532531    .Hyper.hwvirt.svm.fInterceptEvents       resb         1
    533     .Hyper.hwvirt.svm.fHMCachedVmcb          resb         1
    534532    alignb 8
    535533    .Hyper.hwvirt.svm.pvMsrBitmapR0          RTR0PTR_RES  1
  • trunk/src/VBox/VMM/include/HMInternal.h

    r72560 r72643  
    143143 * @{
    144144 */
     145#if 0
     146#define HM_CHANGED_HOST_CONTEXT                    UINT64_C(0x0000000000000001)
     147#define HM_CHANGED_GUEST_RIP                       UINT64_C(0x0000000000000004)
     148#define HM_CHANGED_GUEST_RFLAGS                    UINT64_C(0x0000000000000008)
     149#define HM_CHANGED_GUEST_RAX                       UINT64_C(0x0000000000000010)
     150#define HM_CHANGED_GUEST_RCX                       UINT64_C(0x0000000000000020)
     151#define HM_CHANGED_GUEST_RDX                       UINT64_C(0x0000000000000040)
     152#define HM_CHANGED_GUEST_RBX                       UINT64_C(0x0000000000000080)
     153#define HM_CHANGED_GUEST_RSP                       UINT64_C(0x0000000000000100)
     154#define HM_CHANGED_GUEST_RBP                       UINT64_C(0x0000000000000200)
     155#define HM_CHANGED_GUEST_RSI                       UINT64_C(0x0000000000000400)
     156#define HM_CHANGED_GUEST_RDI                       UINT64_C(0x0000000000000800)
     157#define HM_CHANGED_GUEST_R8_R15                    UINT64_C(0x0000000000001000)
     158#define HM_CHANGED_GUEST_GPRS_MASK                 UINT64_C(0x0000000000001ff0)
     159#define HM_CHANGED_GUEST_ES                        UINT64_C(0x0000000000002000)
     160#define HM_CHANGED_GUEST_CS                        UINT64_C(0x0000000000004000)
     161#define HM_CHANGED_GUEST_SS                        UINT64_C(0x0000000000008000)
     162#define HM_CHANGED_GUEST_DS                        UINT64_C(0x0000000000010000)
     163#define HM_CHANGED_GUEST_FS                        UINT64_C(0x0000000000020000)
     164#define HM_CHANGED_GUEST_GS                        UINT64_C(0x0000000000040000)
     165#define HM_CHANGED_GUEST_SREG_MASK                 UINT64_C(0x000000000007e000)
     166#define HM_CHANGED_GUEST_GDTR                      UINT64_C(0x0000000000080000)
     167#define HM_CHANGED_GUEST_IDTR                      UINT64_C(0x0000000000100000)
     168#define HM_CHANGED_GUEST_LDTR                      UINT64_C(0x0000000000200000)
     169#define HM_CHANGED_GUEST_TR                        UINT64_C(0x0000000000400000)
     170#define HM_CHANGED_GUEST_TABLE_MASK                UINT64_C(0x0000000000780000)
     171#define HM_CHANGED_GUEST_CR0                       UINT64_C(0x0000000000800000)
     172#define HM_CHANGED_GUEST_CR2                       UINT64_C(0x0000000001000000)
     173#define HM_CHANGED_GUEST_CR3                       UINT64_C(0x0000000002000000)
     174#define HM_CHANGED_GUEST_CR4                       UINT64_C(0x0000000004000000)
     175#define HM_CHANGED_GUEST_CR_MASK                   UINT64_C(0x0000000007800000)
     176#define HM_CHANGED_GUEST_APIC_TPR                  UINT64_C(0x0000000008000000)
     177#define HM_CHANGED_GUEST_EFER                      UINT64_C(0x0000000010000000)
     178#define HM_CHANGED_GUEST_DR0_DR3                   UINT64_C(0x0000000020000000)
     179#define HM_CHANGED_GUEST_DR6                       UINT64_C(0x0000000040000000)
     180#define HM_CHANGED_GUEST_DR7                       UINT64_C(0x0000000080000000)
     181#define HM_CHANGED_GUEST_DR_MASK                   UINT64_C(0x00000000e0000000)
     182#define HM_CHANGED_GUEST_X87                       UINT64_C(0x0000000100000000)
     183#define HM_CHANGED_GUEST_SSE_AVX                   UINT64_C(0x0000000200000000)
     184#define HM_CHANGED_GUEST_OTHER_XSAVE               UINT64_C(0x0000000400000000)
     185#define HM_CHANGED_GUEST_XCRx                      UINT64_C(0x0000000800000000)
     186#define HM_CHANGED_GUEST_KERNEL_GS_BASE            UINT64_C(0x0000001000000000)
     187#define HM_CHANGED_GUEST_SYSCALL_MSRS              UINT64_C(0x0000002000000000)
     188#define HM_CHANGED_GUEST_SYSENTER_MSRS             UINT64_C(0x0000004000000000)
     189#define HM_CHANGED_GUEST_TSC_AUX                   UINT64_C(0x0000008000000000)
     190#define HM_CHANGED_GUEST_OTHER_MSRS                UINT64_C(0x0000010000000000)
     191#define HM_CHANGED_GUEST_ALL_MSRS                  (  HM_CHANGED_GUEST_EFER            \
     192                                                    | HM_CHANGED_GUEST_KERNEL_GS_BASE  \
     193                                                    | HM_CHANGED_GUEST_SYSCALL_MSRS    \
     194                                                    | HM_CHANGED_GUEST_SYSENTER_MSRS   \
     195                                                    | HM_CHANGED_GUEST_TSC_AUX         \
     196                                                    | HM_CHANGED_GUEST_OTHER_MSRS)
     197#define HM_CHANGED_GUEST_HWVIRT                    UINT64_C(0x0000020000000000)
     198#define HM_CHANGED_GUEST_CONTEXT                   UINT64_C(0x000003fffffffffc)
     199
     200#define HM_CHANGED_KEEPER_STATE_MASK               UINT64_C(0xffff000000000000)
     201
     202#define HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS       UINT64_C(0x0001000000000000)
     203#define HM_CHANGED_VMX_GUEST_AUTO_MSRS             UINT64_C(0x0002000000000000)
     204#define HM_CHANGED_VMX_GUEST_LAZY_MSRS             UINT64_C(0x0004000000000000)
     205#define HM_CHANGED_VMX_ENTRY_CTLS                  UINT64_C(0x0008000000000000)
     206#define HM_CHANGED_VMX_EXIT_CTLS                   UINT64_C(0x0010000000000000)
     207#define HM_CHANGED_VMX_MASK                        UINT64_C(0x001f000000000000)
     208
     209#define HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS       UINT64_C(0x0001000000000000)
     210#define HM_CHANGED_SVM_MASK                        UINT64_C(0x0001000000000000)
     211
     212#define HM_CHANGED_HOST_GUEST_SHARED_STATE         (  HM_CHANGED_GUEST_CR0             \
     213                                                    | HM_CHANGED_GUEST_DR_MASK         \
     214                                                    | HM_CHANGED_VMX_GUEST_LAZY_MSRS)
     215
      216#define HM_CHANGED_ALL_GUEST                       (  HM_CHANGED_GUEST_CONTEXT        \
     217                                                    | HM_CHANGED_KEEPER_STATE_MASK)
     218#endif
     219
    145220#define HM_CHANGED_GUEST_CR0                     RT_BIT(0)      /* Shared */
    146221#define HM_CHANGED_GUEST_CR3                     RT_BIT(1)
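The large #if 0 block above sketches the planned 64-bit HM_CHANGED_* layout that mirrors the CPUMCTX_EXTRN_XXX scheme, replacing the RT_BIT-based flags that follow. Purely as an illustration, not part of the changeset and assuming the proposed flags were enabled, a minimal sketch of how such a change mask is typically maintained and consumed:

    /* Sketch: record what changed after emulation, then test masks before the next VM-entry. */
    static void sketchTrackGuestChanges(void)
    {
        uint64_t fCtxChanged = 0;

        /* e.g. after emulating an instruction that advanced RIP and updated RFLAGS: */
        fCtxChanged |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;

        /* Before VM-entry, only re-export what is marked dirty: */
        if (fCtxChanged & HM_CHANGED_GUEST_RIP)
        {
            /* ... write the new RIP into the VMCS/VMCB ... */
        }
    }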
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r72496 r72643  
    639639 * @param   a_fExtrnMbz     The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
    640640 */
    641 #define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz)     Assert(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)))
     641#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz)    AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
     642                                                          ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, \
     643                                                          (a_fExtrnMbz)))
    642644
    643645/** @def IEM_CTX_IMPORT_RET
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r72634 r72643  
    141141    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilterThreshold);
    142142    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fInterceptEvents);
    143     GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fHMCachedVmcb);
    144143    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR0);
    145144    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR3);