VirtualBox

Timestamp:
Jun 29, 2018 7:36:19 AM (6 years ago)
Author:
vboxsync
Message:

VMM: Extend HM changed flags. bugref:9193 [HM, CPUM]

File:
1 edited

Legend:

Unmodified — both the r72655 (left) and r72744 (right) line numbers are shown
Added — only the r72744 (right) line number is shown
Removed — only the r72655 (left) line number is shown
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r72655 r72744  
    4242#include "dtrace/VBoxVMM.h"
    4343
    44 #define HMVMX_USE_IEM_EVENT_REFLECTION
    4544#ifdef DEBUG_ramshankar
    4645# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
     
    6766#define HMVMX_FLUSH_TAGGED_TLB_NONE               3
    6867
    69 /** @name Updated-guest-state flags.
    70  * @{ */
    71 #define HMVMX_UPDATED_GUEST_RIP                   RT_BIT(0)
    72 #define HMVMX_UPDATED_GUEST_RSP                   RT_BIT(1)
    73 #define HMVMX_UPDATED_GUEST_RFLAGS                RT_BIT(2)
    74 #define HMVMX_UPDATED_GUEST_CR0                   RT_BIT(3)
    75 #define HMVMX_UPDATED_GUEST_CR3                   RT_BIT(4)
    76 #define HMVMX_UPDATED_GUEST_CR4                   RT_BIT(5)
    77 #define HMVMX_UPDATED_GUEST_GDTR                  RT_BIT(6)
    78 #define HMVMX_UPDATED_GUEST_IDTR                  RT_BIT(7)
    79 #define HMVMX_UPDATED_GUEST_LDTR                  RT_BIT(8)
    80 #define HMVMX_UPDATED_GUEST_TR                    RT_BIT(9)
    81 #define HMVMX_UPDATED_GUEST_SEGMENT_REGS          RT_BIT(10)
    82 #define HMVMX_UPDATED_GUEST_DR7                   RT_BIT(11)
    83 #define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR       RT_BIT(12)
    84 #define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR      RT_BIT(13)
    85 #define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR      RT_BIT(14)
    86 #define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  RT_BIT(15)
    87 #define HMVMX_UPDATED_GUEST_LAZY_MSRS             RT_BIT(16)
    88 #define HMVMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(17)
    89 #define HMVMX_UPDATED_GUEST_INTR_STATE            RT_BIT(18)
    90 #define HMVMX_UPDATED_GUEST_APIC_STATE            RT_BIT(19)
    91 #define HMVMX_UPDATED_GUEST_ALL                   (  HMVMX_UPDATED_GUEST_RIP                   \
    92                                                    | HMVMX_UPDATED_GUEST_RSP                   \
    93                                                    | HMVMX_UPDATED_GUEST_RFLAGS                \
    94                                                    | HMVMX_UPDATED_GUEST_CR0                   \
    95                                                    | HMVMX_UPDATED_GUEST_CR3                   \
    96                                                    | HMVMX_UPDATED_GUEST_CR4                   \
    97                                                    | HMVMX_UPDATED_GUEST_GDTR                  \
    98                                                    | HMVMX_UPDATED_GUEST_IDTR                  \
    99                                                    | HMVMX_UPDATED_GUEST_LDTR                  \
    100                                                    | HMVMX_UPDATED_GUEST_TR                    \
    101                                                    | HMVMX_UPDATED_GUEST_SEGMENT_REGS          \
    102                                                    | HMVMX_UPDATED_GUEST_DR7                   \
    103                                                    | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR       \
    104                                                    | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR      \
    105                                                    | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR      \
    106                                                    | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  \
    107                                                    | HMVMX_UPDATED_GUEST_LAZY_MSRS             \
    108                                                    | HMVMX_UPDATED_GUEST_ACTIVITY_STATE        \
    109                                                    | HMVMX_UPDATED_GUEST_INTR_STATE            \
    110                                                    | HMVMX_UPDATED_GUEST_APIC_STATE)
     68/** @name HMVMX_READ_XXX
     69 * Flags to skip redundant reads of some common VMCS fields that are not part of
     70 * the guest-CPU or VCPU state but are needed while handling VM-exits.
     71 */
     72#define HMVMX_READ_IDT_VECTORING_INFO            RT_BIT_32(0)
     73#define HMVMX_READ_IDT_VECTORING_ERROR_CODE      RT_BIT_32(1)
     74#define HMVMX_READ_EXIT_QUALIFICATION            RT_BIT_32(2)
     75#define HMVMX_READ_EXIT_INSTR_LEN                RT_BIT_32(3)
     76#define HMVMX_READ_EXIT_INTERRUPTION_INFO        RT_BIT_32(4)
     77#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE  RT_BIT_32(5)
     78#define HMVMX_READ_EXIT_INSTR_INFO               RT_BIT_32(6)
    11179/** @} */
    11280
    113 /** @name
    114  * Flags to skip redundant reads of some common VMCS fields that are not part of
    115  * the guest-CPU state but are in the transient structure.
    116  */
    117 #define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO            RT_BIT(0)
    118 #define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE      RT_BIT(1)
    119 #define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION            RT_BIT(2)
    120 #define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN                RT_BIT(3)
    121 #define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO        RT_BIT(4)
    122 #define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE  RT_BIT(5)
    123 #define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO               RT_BIT(6)
    124 /** @} */
    125 
    126 /** @name
     81/**
    12782 * States of the VMCS.
    12883 *
     
    13186 * are used. Maybe later this can be extended (i.e. Nested Virtualization).
    13287 */
    133 #define HMVMX_VMCS_STATE_CLEAR                              RT_BIT(0)
    134 #define HMVMX_VMCS_STATE_ACTIVE                             RT_BIT(1)
    135 #define HMVMX_VMCS_STATE_LAUNCHED                           RT_BIT(2)
    136 /** @} */
     88#define HMVMX_VMCS_STATE_CLEAR                   RT_BIT(0)
     89#define HMVMX_VMCS_STATE_ACTIVE                  RT_BIT(1)
     90#define HMVMX_VMCS_STATE_LAUNCHED                RT_BIT(2)
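A minimal usage sketch of these state flags (the holder variable below is hypothetical): the launched bit is what decides whether the next VM-entry must use VMLAUNCH or may use VMRESUME.

    /* Hypothetical state holder: a VMCS that has been VMPTRLDed and launched once. */
    uint32_t const fMyVmcsState = HMVMX_VMCS_STATE_ACTIVE | HMVMX_VMCS_STATE_LAUNCHED;
    bool     const fResume      = RT_BOOL(fMyVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
    /* fResume == false: first entry after VMCLEAR/VMPTRLD, must use VMLAUNCH.
       fResume == true:  the VMCS was already launched on this CPU, use VMRESUME. */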
    13791
    13892/**
     
    14498 * MSR which cannot be modified by the guest without causing a VM-exit.
    14599 */
    146 #define HMVMX_CPUMCTX_EXTRN_ALL                             (  CPUMCTX_EXTRN_RIP            \
    147                                                              | CPUMCTX_EXTRN_RFLAGS         \
    148                                                              | CPUMCTX_EXTRN_SREG_MASK      \
    149                                                              | CPUMCTX_EXTRN_TABLE_MASK     \
    150                                                              | CPUMCTX_EXTRN_SYSENTER_MSRS  \
    151                                                              | CPUMCTX_EXTRN_SYSCALL_MSRS   \
    152                                                              | CPUMCTX_EXTRN_KERNEL_GS_BASE \
    153                                                              | CPUMCTX_EXTRN_TSC_AUX        \
    154                                                              | CPUMCTX_EXTRN_OTHER_MSRS     \
    155                                                              | CPUMCTX_EXTRN_CR0            \
    156                                                              | CPUMCTX_EXTRN_CR3            \
    157                                                              | CPUMCTX_EXTRN_CR4            \
    158                                                              | CPUMCTX_EXTRN_DR7)
     100#define HMVMX_CPUMCTX_EXTRN_ALL                  (  CPUMCTX_EXTRN_RIP             \
     101                                                  | CPUMCTX_EXTRN_RFLAGS          \
     102                                                  | CPUMCTX_EXTRN_RSP             \
     103                                                  | CPUMCTX_EXTRN_SREG_MASK       \
     104                                                  | CPUMCTX_EXTRN_TABLE_MASK      \
     105                                                  |  CPUMCTX_EXTRN_KERNEL_GS_BASE \
     106                                                  | CPUMCTX_EXTRN_SYSCALL_MSRS    \
     107                                                  | CPUMCTX_EXTRN_SYSENTER_MSRS   \
     108                                                  | CPUMCTX_EXTRN_TSC_AUX         \
     109                                                  | CPUMCTX_EXTRN_OTHER_MSRS      \
     110                                                  | CPUMCTX_EXTRN_CR0             \
     111                                                  | CPUMCTX_EXTRN_CR3             \
     112                                                  | CPUMCTX_EXTRN_CR4             \
     113                                                  | CPUMCTX_EXTRN_DR7             \
     114                                                  | CPUMCTX_EXTRN_HM_VMX_MASK)
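As a hedged sketch of how a CPUMCTX_EXTRN_XXX mask like this is consumed (the helper name is hypothetical; hmR0VmxImportGuestState is the importer forward-declared further down): a set bit in the context's fExtrn field means that piece of guest state still lives only in the VMCS and must be imported before use.

    /* Illustrative on-demand import, assuming CPUMCTX::fExtrn tracks not-yet-imported state. */
    static int myEnsureGuestCr3(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        if (pCtx->fExtrn & CPUMCTX_EXTRN_CR3)    /* CR3 still only in the VMCS? */
            return hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR3);
        return VINF_SUCCESS;                     /* Already available in pCtx.  */
    }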
    159115
    160116/**
     
    217173 *  context. */
    218174#ifdef VMX_USE_CACHED_VMCS_ACCESSES
    219 # define HMVMX_SAVE_SREG(Sel, a_pCtxSelReg) \
    220     hmR0VmxSaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
    221                           VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
     175# define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
     176    hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
     177                                 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
    222178#else
    223 # define HMVMX_SAVE_SREG(Sel, a_pCtxSelReg) \
    224     hmR0VmxSaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
    225                           VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
     179# define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
     180    hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
     181                                 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
    226182#endif
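Assumed usage form of the importer macro (pCtx pointing to the guest CPUMCTX is an assumption here); the token pasting selects the matching VMCS field encodings for the given segment mnemonic:

    /* Illustrative only: import the guest CS register from the VMCS. */
    int rc = HMVMX_IMPORT_SREG(CS, &pCtx->cs);
    AssertRCReturn(rc, rc);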
    227183
     
    318274    uint32_t        uIdtVectoringErrorCode;
    319275
    320     /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
     276    /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
    321277    uint32_t        fVmcsFieldsRead;
    322278
     
    417373static void               hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
    418374static void               hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
    419 static VBOXSTRICTRC       hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
    420                                                  uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
    421                                                  bool fStepping, uint32_t *puIntState);
     375static int                hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat);
     376static VBOXSTRICTRC       hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
     377                                                 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState);
    422378#if HC_ARCH_BITS == 32
    423379static int                hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
     
    649605}
    650606
    651 
    652607#ifdef VBOX_STRICT
    653608/**
     
    666621    return VINF_SUCCESS;
    667622}
    668 #endif /* VBOX_STRICT */
    669 
    670 
    671 #ifdef VBOX_STRICT
     623
     624
    672625/**
    673626 * Reads the VM-entry exception error code field from the VMCS into
     
    697650DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
    698651{
    699     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
     652    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
    700653    {
    701654        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
    702         AssertRCReturn(rc, rc);
    703         pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
     655        AssertRCReturn(rc,rc);
     656        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
    704657    }
    705658    return VINF_SUCCESS;
     
    716669DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
    717670{
    718     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
     671    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
    719672    {
    720673        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
    721674        AssertRCReturn(rc, rc);
    722         pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
     675        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
    723676    }
    724677    return VINF_SUCCESS;
     
    735688DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
    736689{
    737     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
     690    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
    738691    {
    739692        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
    740693        AssertRCReturn(rc, rc);
    741         pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
     694        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
    742695    }
    743696    return VINF_SUCCESS;
     
    754707DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
    755708{
    756     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
     709    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
    757710    {
    758711        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
    759712        AssertRCReturn(rc, rc);
    760         pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
     713        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
    761714    }
    762715    return VINF_SUCCESS;
     
    775728DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    776729{
    777     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
     730    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
    778731    {
    779732        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
    780733        AssertRCReturn(rc, rc);
    781         pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
     734        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
    782735    }
    783736    return VINF_SUCCESS;
     
    796749DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
    797750{
    798     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
    799     {
    800         int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
     751    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
     752    {
     753        int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
    801754        AssertRCReturn(rc, rc);
    802         pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
     755        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
    803756    }
    804757    return VINF_SUCCESS;
     
    815768DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
    816769{
    817     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
    818     {
    819         int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
     770    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
     771    {
     772        int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
    820773        AssertRCReturn(rc, rc);
    821         pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
     774        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
    822775    }
    823776    return VINF_SUCCESS;
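For illustration, a hypothetical VM-exit handler would combine these read helpers as below; each helper performs its VMREAD at most once per exit, so calling them from several code paths stays cheap.

    /* Sketch only; the handler name is made up, everything else is declared above. */
    static VBOXSTRICTRC mySketchHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    {
        int rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
        rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
        AssertRCReturn(rc, rc);

        /* The transient fields are now valid for this VM-exit. */
        Log4Func(("intinfo=%#RX32 qual=%#RX64\n", pVmxTransient->uExitIntInfo,
                  (uint64_t)pVmxTransient->uExitQualification));
        return VINF_SUCCESS;
    }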
     
    913866 *                          allocation.
    914867 */
    915 DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
     868static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
    916869{
    917870    AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
     
    938891 *                          allocation as 0.
    939892 */
    940 DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
     893static void hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
    941894{
    942895    AssertPtr(pMemObj);
     
    11631116
    11641117    /*
    1165      * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been using EPTPs) so
    1166      * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
     1118     * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been
     1119     * using EPTPs) so we don't retain any stale guest-physical mappings which won't get
     1120     * invalidated when flushing by VPID.
    11671121     */
    11681122    PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
     
    13031257 * @param   cMsrs       The number of MSRs.
    13041258 */
    1305 DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
     1259static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
    13061260{
    13071261    /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
     
    13821336     * Update the host MSR only when requested by the caller AND when we're
    13831337     * adding it to the auto-load/store area. Otherwise, it would have been
    1384      * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons.
     1338     * updated by hmR0VmxExportHostMsrs(). We do this for performance reasons.
    13851339     */
    13861340    bool fUpdatedMsrValue = false;
     
    14511405            hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
    14521406
    1453         Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
     1407        Log4Func(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
    14541408        return VINF_SUCCESS;
    14551409    }
     
    15721526#endif
    15731527    return false;
    1574 }
    1575 
    1576 
    1577 /**
    1578  * Saves a set of guest MSRs back into the guest-CPU context.
    1579  *
    1580  * @param   pVCpu       The cross context virtual CPU structure.
    1581  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    1582  *                      out-of-sync. Make sure to update the required fields
    1583  *                      before using them.
    1584  *
    1585  * @remarks No-long-jump zone!!!
    1586  */
    1587 static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    1588 {
    1589     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    1590     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    1591 
    1592     if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
    1593     {
    1594         Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
    1595 #if HC_ARCH_BITS == 64
    1596         if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
    1597         {
    1598             pMixedCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
    1599             pMixedCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
    1600             pMixedCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
    1601             pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
    1602         }
    1603 #else
    1604         NOREF(pMixedCtx);
    1605 #endif
    1606     }
    16071528}
    16081529
     
    16781599 * @remarks No-long-jump zone!!!
    16791600 * @remarks The guest MSRs should have been saved back into the guest-CPU
    1680  *          context by hmR0VmxSaveGuestLazyMsrs()!!!
     1601 *          context by hmR0VmxImportGuestState()!!!
    16811602 */
    16821603static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
     
    19261847    {
    19271848        /*
    1928          * We must invalidate the guest TLB entry in either case, we cannot ignore it even for the EPT case
    1929          * See @bugref{6043} and @bugref{6177}.
     1849         * We must invalidate the guest TLB entry in either case, we cannot ignore it even for
     1850         * the EPT case. See @bugref{6043} and @bugref{6177}.
    19301851         *
    1931          * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
    1932          * function maybe called in a loop with individual addresses.
     1852         * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*()
     1853         * as this function maybe called in a loop with individual addresses.
    19331854         */
    19341855        if (pVM->hm.s.vmx.fVpid)
     
    20241945
    20251946    /*
    2026      * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
    2027      * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
    2028      * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
     1947     * Force a TLB flush for the first world-switch if the current CPU differs from the one we
     1948     * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
     1949     * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
     1950     * cannot reuse the current ASID anymore.
    20291951     */
    20301952    if (   pVCpu->hm.s.idLastCpu   != pCpu->idCpu
     
    20571979    {
    20581980        /*
    2059          * Changes to the EPT paging structure by VMM requires flushing by EPT as the CPU creates
    2060          * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
    2061          * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
    2062          * but not guest-physical mappings.
    2063          * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
     1981         * Changes to the EPT paging structure by VMM requires flushing-by-EPT as the CPU
     1982         * creates guest-physical (ie. only EPT-tagged) mappings while traversing the EPT
     1983         * tables when EPT is in use. Flushing-by-VPID will only flush linear (only
     1984         * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical
     1985         * mappings, see @bugref{6568}.
     1986         *
     1987         * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information".
    20641988         */
    20651989        hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
     
    22462170            break;
    22472171    }
    2248 
    22492172    /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
    22502173}
     
    23312254        {
    23322255            /*  Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
    2333             Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVEPT support. Ignoring VPID.\n"));
     2256            Log4Func(("VPID supported without INVEPT support. Ignoring VPID.\n"));
    23342257            pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
    23352258            pVM->hm.s.vmx.fVpid = false;
     
    23642287    AssertPtr(pVCpu);
    23652288
    2366     uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;         /* Bits set here must always be set. */
    2367     uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;            /* Bits cleared here must always be cleared. */
    2368 
    2369     val |=   VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT           /* External interrupts cause a VM-exit. */
    2370            | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;              /* Non-maskable interrupts (NMIs) cause a VM-exit. */
     2289    uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;         /* Bits set here must always be set. */
     2290    uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;            /* Bits cleared here must always be cleared. */
     2291
     2292    fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT              /* External interrupts cause a VM-exit. */
     2293         |  VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;                 /* Non-maskable interrupts (NMIs) cause a VM-exit. */
    23712294
    23722295    if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
    2373         val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI;         /* Use virtual NMIs and virtual-NMI blocking features. */
     2296        fVal |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI;         /* Use virtual NMIs and virtual-NMI blocking features. */
    23742297
    23752298    /* Enable the VMX preemption timer. */
     
    23772300    {
    23782301        Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
    2379         val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
     2302        fVal |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
    23802303    }
    23812304
     
    23862309        Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR);
    23872310        Assert(pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
    2388         val |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR;
     2311        fVal |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR;
    23892312    }
    23902313#endif
    23912314
    2392     if ((val & zap) != val)
    2393     {
    2394         LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
    2395                 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));
     2315    if ((fVal & fZap) != fVal)
     2316    {
     2317        LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
     2318                pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap));
    23962319        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
    23972320        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    23982321    }
    23992322
    2400     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
     2323    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
    24012324    AssertRCReturn(rc, rc);
    24022325
    2403     pVCpu->hm.s.vmx.u32PinCtls = val;
     2326    pVCpu->hm.s.vmx.u32PinCtls = fVal;
    24042327    return rc;
    24052328}
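The fVal/fZap pattern used above recurs for every VM-execution control field, so here is a minimal self-contained sketch with made-up values: disallowed0 contributes the bits the CPU forces to 1, while allowed1 limits which bits may be 1 at all.

    uint32_t const fDisallowed0 = UINT32_C(0x00000016);      /* hypothetical MSR half */
    uint32_t const fAllowed1    = UINT32_C(0x0000ff7f);      /* hypothetical MSR half */
    uint32_t const fVal         = fDisallowed0 | RT_BIT(7);  /* mandatory bits + one wanted feature */
    uint32_t const fZap         = fAllowed1;
    bool     const fOk          = (fVal & fZap) == fVal;     /* false here: bit 7 is outside allowed1,
                                                                so the combination must be rejected. */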
     
    24192342
    24202343    int rc = VERR_INTERNAL_ERROR_5;
    2421     uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;        /* Bits set here must be set in the VMCS. */
    2422     uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;           /* Bits cleared here must be cleared in the VMCS. */
    2423 
    2424     val |=   VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                  /* HLT causes a VM-exit. */
    2425            | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING        /* Use TSC-offsetting. */
    2426            | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT               /* MOV DRx causes a VM-exit. */
    2427            | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT            /* All IO instructions cause a VM-exit. */
    2428            | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT                /* RDPMC causes a VM-exit. */
    2429            | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT              /* MONITOR causes a VM-exit. */
    2430            | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT;               /* MWAIT causes a VM-exit. */
     2344    uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
     2345    uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
     2346
     2347    fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                      /* HLT causes a VM-exit. */
     2348         |  VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING            /* Use TSC-offsetting. */
     2349         |  VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT                   /* MOV DRx causes a VM-exit. */
     2350         |  VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT                /* All IO instructions cause a VM-exit. */
     2351         |  VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT                    /* RDPMC causes a VM-exit. */
     2352         |  VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT                  /* MONITOR causes a VM-exit. */
     2353         |  VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT;                   /* MWAIT causes a VM-exit. */
    24312354
    24322355    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */
     
    24422365    if (!pVM->hm.s.fNestedPaging)
    24432366    {
    2444         Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);                      /* Paranoia. */
    2445         val |=  VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
    2446                | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
    2447                | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
     2367        Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);                /* Paranoia. */
     2368        fVal |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
     2369             | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     2370             | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
    24482371    }
    24492372
     
    24532376    {
    24542377        Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
    2455         Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff));              /* Bits 11:0 MBZ. */
     2378        Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff));        /* Bits 11:0 MBZ. */
    24562379        rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
    24572380        rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
    24582381        AssertRCReturn(rc, rc);
    24592382
    2460         val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW;         /* CR8 reads from the Virtual-APIC page. */
    2461                                                                /* CR8 writes cause a VM-exit based on TPR threshold. */
    2462         Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
    2463         Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
     2383        fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW;           /* CR8 reads from the Virtual-APIC page. */
     2384                                                                  /* CR8 writes cause a VM-exit based on TPR threshold. */
     2385        Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
     2386        Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
    24642387    }
    24652388    else
     
    24712394        if (pVM->hm.s.fAllow64BitGuests)
    24722395        {
    2473             val |=   VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT    /* CR8 reads cause a VM-exit. */
    2474                    | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;    /* CR8 writes cause a VM-exit. */
     2396            fVal |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT        /* CR8 reads cause a VM-exit. */
     2397                 |  VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;        /* CR8 writes cause a VM-exit. */
    24752398        }
    24762399    }
     
    24792402    if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    24802403    {
    2481         val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
     2404        fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
    24822405
    24832406        Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
    2484         Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff));             /* Bits 11:0 MBZ. */
     2407        Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff));       /* Bits 11:0 MBZ. */
    24852408        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
    24862409        AssertRCReturn(rc, rc);
     
    25202443    /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
    25212444    if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    2522         val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
    2523 
    2524     if ((val & zap) != val)
    2525     {
    2526         LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
    2527                 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));
     2445        fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
     2446
     2447    if ((fVal & fZap) != fVal)
     2448    {
     2449        LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
     2450                pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap));
    25282451        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
    25292452        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    25302453    }
    25312454
    2532     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
     2455    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
    25332456    AssertRCReturn(rc, rc);
    25342457
    2535     pVCpu->hm.s.vmx.u32ProcCtls = val;
     2458    pVCpu->hm.s.vmx.u32ProcCtls = fVal;
    25362459
    25372460    /*
     
    25402463    if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
    25412464    {
    2542         val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;            /* Bits set here must be set in the VMCS. */
    2543         zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;               /* Bits cleared here must be cleared in the VMCS. */
     2465        fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;     /* Bits set here must be set in the VMCS. */
     2466        fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;        /* Bits cleared here must be cleared in the VMCS. */
    25442467
    25452468        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
    2546             val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;                /* WBINVD causes a VM-exit. */
     2469            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;         /* WBINVD causes a VM-exit. */
    25472470
    25482471        if (pVM->hm.s.fNestedPaging)
    2549             val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;                        /* Enable EPT. */
     2472            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;                 /* Enable EPT. */
    25502473
    25512474        /*
     
    25562479            && pVM->cpum.ro.GuestFeatures.fInvpcid)
    25572480        {
    2558             val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
     2481            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
    25592482        }
    25602483
    25612484        if (pVM->hm.s.vmx.fVpid)
    2562             val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;                       /* Enable VPID. */
     2485            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;                /* Enable VPID. */
    25632486
    25642487        if (pVM->hm.s.vmx.fUnrestrictedGuest)
    2565             val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;         /* Enable Unrestricted Execution. */
     2488            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;  /* Enable Unrestricted Execution. */
    25662489
    25672490#if 0
     
    25692492        {
    25702493            Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
    2571             val |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;              /* Enable APIC-register virtualization. */
     2494            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;       /* Enable APIC-register virtualization. */
    25722495
    25732496            Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
    2574             val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;         /* Enable virtual-interrupt delivery. */
     2497            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;  /* Enable virtual-interrupt delivery. */
    25752498        }
    25762499#endif
     
    25822505        {
    25832506            Assert(pVM->hm.s.vmx.HCPhysApicAccess);
    2584             Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff));          /* Bits 11:0 MBZ. */
    2585             val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;                  /* Virtualize APIC accesses. */
     2507            Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff));    /* Bits 11:0 MBZ. */
     2508            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;           /* Virtualize APIC accesses. */
    25862509            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
    25872510            AssertRCReturn(rc, rc);
     
    25892512
    25902513        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    2591             val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;                     /* Enable RDTSCP support. */
     2514            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;              /* Enable RDTSCP support. */
    25922515
    25932516        if (   pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
     
    25952518            && pVM->hm.s.vmx.cPleWindowTicks)
    25962519        {
    2597             val |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;            /* Enable pause-loop exiting. */
     2520            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;     /* Enable pause-loop exiting. */
    25982521
    25992522            rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
     
    26022525        }
    26032526
    2604         if ((val & zap) != val)
     2527        if ((fVal & fZap) != fVal)
    26052528        {
    26062529            LogRel(("hmR0VmxSetupProcCtls: Invalid secondary processor-based VM-execution controls combo! "
    2607                     "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));
     2530                    "cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap));
    26082531            pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
    26092532            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    26102533        }
    26112534
    2612         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
     2535        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
    26132536        AssertRCReturn(rc, rc);
    26142537
    2615         pVCpu->hm.s.vmx.u32ProcCtls2 = val;
     2538        pVCpu->hm.s.vmx.u32ProcCtls2 = fVal;
    26162539    }
    26172540    else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
     
    26452568    /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
    26462569#if 0
    2647     /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
     2570    /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxExportGuestCR3AndCR4())*/
    26482571    rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
    26492572    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
     
    26572580    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
    26582581
    2659     /** @todo Explore possibility of using IO-bitmaps. */
    26602582    /* All IO & IOIO instructions cause VM-exits. */
    26612583    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
     
    26882610#if 0
    26892611    /* Setup debug controls */
    2690     rc  = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);       /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
     2612    rc  = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);
    26912613    rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
    26922614    AssertRCReturn(rc, rc);
     
    27332655    AssertRCReturn(rc, rc);
    27342656    return rc;
    2735 }
    2736 
    2737 
    2738 /**
    2739  * Sets up the initial guest-state mask. The guest-state mask is consulted
    2740  * before reading guest-state fields from the VMCS as VMREADs can be expensive
    2741  * for the nested virtualization case (as it would cause a VM-exit).
    2742  *
    2743  * @param   pVCpu       The cross context virtual CPU structure.
    2744  */
    2745 static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
    2746 {
    2747     /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
    2748     HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
    2749     return VINF_SUCCESS;
    27502657}
    27512658
     
    28062713
    28072714    /*
    2808      * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
    2809      * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0Intel().
     2715     * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be
     2716     * allocated. We no longer support the highly unlikely case of UnrestrictedGuest without
     2717     * pRealModeTSS, see hmR3InitFinalizeR0Intel().
    28102718     */
    28112719    if (   !pVM->hm.s.vmx.fUnrestrictedGuest
     
    28522760
    28532761        /* Log the VCPU pointers, useful for debugging SMP VMs. */
    2854         Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
     2762        Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
    28552763
    28562764        /* Set revision dword at the beginning of the VMCS structure. */
     
    28812789        rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
    28822790        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    2883                                     hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    2884 
    2885         rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
    2886         AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
    28872791                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    28882792
     
    29122816 *
    29132817 * @returns VBox status code.
    2914  * @param   pVM         The cross context VM structure.
    2915  * @param   pVCpu       The cross context virtual CPU structure.
    2916  */
    2917 DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
    2918 {
    2919     NOREF(pVM); NOREF(pVCpu);
    2920 
     2818 */
     2819static int hmR0VmxExportHostControlRegs(void)
     2820{
    29212821    RTCCUINTREG uReg = ASMGetCR0();
    29222822    int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
     
    29342834
    29352835
     2836/**
     2837 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
     2838 * the host-state area in the VMCS.
     2839 *
     2840 * @returns VBox status code.
     2841 * @param   pVCpu       The cross context virtual CPU structure.
     2842 */
     2843static int hmR0VmxExportHostSegmentRegs(PVMCPU pVCpu)
     2844{
    29362845#if HC_ARCH_BITS == 64
    29372846/**
    29382847 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
    2939  * requirements. See hmR0VmxSaveHostSegmentRegs().
     2848 * requirements. See hmR0VmxExportHostSegmentRegs().
    29402849 */
    29412850# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue)  \
     
    29552864        (selValue) = 0; \
    29562865    }
    2957 #endif
    2958 
    2959 
    2960 /**
    2961  * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
    2962  * the host-state area in the VMCS.
    2963  *
    2964  * @returns VBox status code.
    2965  * @param   pVM         The cross context VM structure.
    2966  * @param   pVCpu       The cross context virtual CPU structure.
    2967  */
    2968 DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
    2969 {
    2970     int rc = VERR_INTERNAL_ERROR_5;
    2971 
    2972 #if HC_ARCH_BITS == 64
     2866
    29732867    /*
    29742868     * If we've executed guest code using VT-x, the host-state bits will be messed up. We
    2975      * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}.
     2869     * should -not- save the messed up state without restoring the original host-state,
     2870     * see @bugref{7240}.
    29762871     *
    2977      * This apparently can happen (most likely the FPU changes), deal with it rather than asserting.
    2978      * Was observed booting Solaris10u10 32-bit guest.
     2872     * This apparently can happen (most likely the FPU changes), deal with it rather than
     2873     * asserting. Was observed booting Solaris 10u10 32-bit guest.
    29792874     */
    29802875    if (   (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
     
    30182913#if HC_ARCH_BITS == 64
    30192914    /*
    3020      * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to gain VM-entry and restore them
    3021      * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
     2915     * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to
     2916     * gain VM-entry and restore them before we get preempted.
     2917     *
     2918     * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
    30222919     */
    30232920    VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
     
    30462943
    30472944    /* Write these host selector fields into the host-state area in the VMCS. */
    3048     rc  = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);
    3049     rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);
     2945    int rc  = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);
     2946    rc     |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);
    30502947#if HC_ARCH_BITS == 64
    3051     rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);
    3052     rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);
    3053     rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);
    3054     rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);
     2948    rc     |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);
     2949    rc     |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);
     2950    rc     |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);
     2951    rc     |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);
    30552952#else
    30562953    NOREF(uSelDS);
     
    30592956    NOREF(uSelGS);
    30602957#endif
    3061     rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);
     2958    rc     |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);
    30622959    AssertRCReturn(rc, rc);
    30632960
     
    30772974#if HC_ARCH_BITS == 64
    30782975    /*
    3079      * Determine if we need to manually need to restore the GDTR and IDTR limits as VT-x zaps them to the
    3080      * maximum limit (0xffff) on every VM-exit.
     2976     * Determine if we need to manually need to restore the GDTR and IDTR limits as VT-x zaps
     2977     * them to the maximum limit (0xffff) on every VM-exit.
    30812978     */
    30822979    if (Gdtr.cbGdt != 0xffff)
     
    30852982    /*
    30862983     * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
    3087      * and Intel spec. 6.2 "Exception and Interrupt Vectors".)  Therefore if the host has the limit as 0xfff, VT-x
    3088      * bloating the limit to 0xffff shouldn't cause any different CPU behavior.  However, several hosts either insists
    3089      * on 0xfff being the limit (Windows Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there
    3090      * but botches sidt alignment in at least one consumer).  So, we're only allowing IDTR.LIMIT to be left at 0xffff on
    3091      * hosts where we are pretty sure it won't cause trouble.
     2984     * and Intel spec. 6.2 "Exception and Interrupt Vectors".)  Therefore if the host has the
     2985     * limit as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU
     2986     * behavior.  However, several hosts either insists on 0xfff being the limit (Windows
     2987     * Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there
     2988     * but botches sidt alignment in at least one consumer).  So, we're only allowing the
     2989     * IDTR.LIMIT to be left at 0xffff on hosts where we are sure it won't cause trouble.
    30922990     */
    30932991# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
     
    31043002
    31053003    /*
    3106      * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
    3107      * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
     3004     * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI
     3005     * and RPL bits is effectively what the CPU does for "scaling by 8". TI is always 0 and
     3006     * RPL should be too in most cases.
    31083007     */
    31093008    AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
    3110                     ("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt),
    3111                     VERR_VMX_INVALID_HOST_STATE);
     3009                    ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), VERR_VMX_INVALID_HOST_STATE);
    31123010
    31133011    PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
     
    31163014
    31173015    /*
    3118      * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
    3119      * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
    3120      * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
    3121      * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
     3016     * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on
     3017     * all VM-exits. The type is the same for 64-bit busy TSS[1]. The limit needs manual
     3018     * restoration if the host has something else. Task switching is not supported in 64-bit
     3019     * mode[2], but the limit still matters as IOPM is supported in 64-bit mode. Restoring the
     3020     * limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
    31223021     *
    31233022     * [1] See Intel spec. 3.5 "System Descriptor Types".
    31243023     * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
    31253024     */
     3025    PVM pVM = pVCpu->CTX_SUFF(pVM);
    31263026    Assert(pDesc->System.u4Type == 11);
    31273027    if (   pDesc->System.u16LimitLow != 0x67
     
    31523052    }
    31533053#else
    3154     NOREF(pVM);
    31553054    uintptr_t uTRBase = X86DESC_BASE(pDesc);
    31563055#endif
     
    31743073        pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
    31753074#endif
    3176     return rc;
    3177 }
    3178 
    3179 
    3180 /**
    3181  * Saves certain host MSRs in the VM-exit MSR-load area and some in the
    3182  * host-state area of the VMCS. Theses MSRs will be automatically restored on
    3183  * the host after every successful VM-exit.
     3075    return VINF_SUCCESS;
     3076}
     3077
     3078
     3079/**
     3080 * Exports certain host MSRs in the VM-exit MSR-load area and some in the
     3081 * host-state area of the VMCS.
     3082 *
     3083 * Theses MSRs will be automatically restored on the host after every successful
     3084 * VM-exit.
    31843085 *
    31853086 * @returns VBox status code.
    3186  * @param   pVM         The cross context VM structure.
    31873087 * @param   pVCpu       The cross context virtual CPU structure.
    31883088 *
    31893089 * @remarks No-long-jump zone!!!
    31903090 */
    3191 DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
    3192 {
    3193     NOREF(pVM);
    3194 
     3091static int hmR0VmxExportHostMsrs(PVMCPU pVCpu)
     3092{
    31953093    AssertPtr(pVCpu);
    31963094    AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
     
    32053103     * Host Sysenter MSRs.
    32063104     */
    3207     int rc  = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,  ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
     3105    int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
    32083106#if HC_ARCH_BITS == 32
    3209     rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP,        ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
    3210     rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP,        ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     3107    rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     3108    rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
    32113109#else
    3212     rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP,        ASMRdMsr(MSR_IA32_SYSENTER_ESP));
    3213     rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP,        ASMRdMsr(MSR_IA32_SYSENTER_EIP));
     3110    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
     3111    rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
    32143112#endif
    32153113    AssertRCReturn(rc, rc);
     
    32173115    /*
    32183116     * Host EFER MSR.
    3219      * If the CPU supports the newer VMCS controls for managing EFER, use it.
    3220      * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
     3117     *
     3118     * If the CPU supports the newer VMCS controls for managing EFER, use it. Otherwise it's
     3119     * done as part of auto-load/store MSR area in the VMCS, see hmR0VmxExportGuestMsrs().
    32213120     */
     3121    PVM pVM = pVCpu->CTX_SUFF(pVM);
    32223122    if (pVM->hm.s.vmx.fSupportsVmcsEfer)
    32233123    {
     
    32263126    }
    32273127
    3228     /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
    3229      *        hmR0VmxLoadGuestExitCtls() !! */
    3230 
    3231     return rc;
     3128    /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see hmR0VmxExportGuestExitCtls(). */
     3129
     3130    return VINF_SUCCESS;
    32323131}
    32333132
     
    32373136 *
    32383137 * We check all relevant bits. For now, that's everything besides LMA/LME, as
    3239  * these two bits are handled by VM-entry, see hmR0VmxLoadGuestExitCtls() and
    3240  * hmR0VMxLoadGuestEntryCtls().
     3138 * these two bits are handled by VM-entry, see hmR0VmxExportGuestExitCtls() and
     3139 * hmR0VMxExportGuestEntryCtls().
    32413140 *
    32423141 * @returns true if we need to load guest EFER, false otherwise.
     
    32493148 * @remarks No-long-jump zone!!!
    32503149 */
    3251 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     3150static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    32523151{
    32533152#ifdef HMVMX_ALWAYS_SWAP_EFER
     
    32573156#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    32583157    /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
    3259     if (CPUMIsGuestInLongMode(pVCpu))
     3158    if (CPUMIsGuestInLongModeEx(pMixedCtx))
    32603159        return false;
    32613160#endif
     
    32663165
    32673166    /*
    3268      * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
    3269      * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
     3167     * For 64-bit guests, if EFER.SCE bit differs, we need to swap EFER to ensure that the
     3168     * guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
    32703169     */
    3271     if (   CPUMIsGuestInLongMode(pVCpu)
     3170    if (   CPUMIsGuestInLongModeEx(pMixedCtx)
    32723171        && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
    32733172    {
     
    32893188    }
    32903189
    3291     /** @todo Check the latest Intel spec. for any other bits,
    3292      *        like SMEP/SMAP? */
    32933190    return false;
    32943191}
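The SCE check above boils down to a single bit comparison. A standalone sketch follows; only the architectural EFER bit positions are taken as given, the names and sample values are made up.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_EFER_SCE  UINT64_C(0x001)   /* bit 0:  SYSCALL enable */
#define SKETCH_EFER_LME  UINT64_C(0x100)   /* bit 8:  long mode enable */
#define SKETCH_EFER_LMA  UINT64_C(0x400)   /* bit 10: long mode active */

/* Swap EFER for the guest when a 64-bit guest's SYSCALL-enable bit differs from the host's. */
static bool sketchShouldSwapEfer(bool fGuestInLongMode, uint64_t uGuestEfer, uint64_t uHostEfer)
{
    return fGuestInLongMode
        && ((uGuestEfer ^ uHostEfer) & SKETCH_EFER_SCE) != 0;
}

int main(void)
{
    /* Host has SCE set, 64-bit guest has it clear -> swap so the guest's SYSCALL faults as expected. */
    assert( sketchShouldSwapEfer(true,  SKETCH_EFER_LME | SKETCH_EFER_LMA,
                                 SKETCH_EFER_LME | SKETCH_EFER_LMA | SKETCH_EFER_SCE));
    assert(!sketchShouldSwapEfer(false, 0, SKETCH_EFER_SCE));
    return 0;
}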
     
    32963193
    32973194/**
    3298  * Sets up VM-entry controls in the VMCS. These controls can affect things done
    3299  * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
    3300  * controls".
     3195 * Exports the guest state with appropriate VM-entry controls in the VMCS.
     3196 *
     3197 * These controls can affect things done on VM-exit; e.g. "load debug controls",
     3198 * see Intel spec. 24.8.1 "VM-entry controls".
    33013199 *
    33023200 * @returns VBox status code.
     
    33093207 * @remarks No-long-jump zone!!!
    33103208 */
    3311 DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    3312 {
    3313     int rc = VINF_SUCCESS;
    3314     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
    3315     {
    3316         PVM pVM      = pVCpu->CTX_SUFF(pVM);
    3317         uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;       /* Bits set here must be set in the VMCS. */
    3318         uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;          /* Bits cleared here must be cleared in the VMCS. */
     3209static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
     3210{
     3211    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS)
     3212    {
     3213        PVM pVM       = pVCpu->CTX_SUFF(pVM);
     3214        uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;       /* Bits set here must be set in the VMCS. */
     3215        uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;          /* Bits cleared here must be cleared in the VMCS. */
    33193216
    33203217        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
    3321         val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
     3218        fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
    33223219
    33233220        /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
    33243221        if (CPUMIsGuestInLongModeEx(pMixedCtx))
    33253222        {
    3326             val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
    3327             Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));
     3223            fVal |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
     3224            Log4Func(("VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
    33283225        }
    33293226        else
    3330             Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
     3227            Assert(!(fVal & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
    33313228
    33323229        /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
     
    33343231            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
    33353232        {
    3336             val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
    3337             Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));
     3233            fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
     3234            Log4Func(("VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n"));
    33383235        }
    33393236
     
    33473244         *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
    33483245
    3349         if ((val & zap) != val)
    3350         {
    3351             LogRel(("hmR0VmxLoadGuestEntryCtls: Invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
    3352                     pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));
     3246        if ((fVal & fZap) != fVal)
     3247        {
     3248            Log4Func(("Invalid VM-entry controls combo! Cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
     3249                      pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, fVal, fZap));
    33533250            pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
    33543251            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    33553252        }
    33563253
    3357         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
     3254        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
    33583255        AssertRCReturn(rc, rc);
    33593256
    3360         pVCpu->hm.s.vmx.u32EntryCtls = val;
    3361         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
    3362     }
    3363     return rc;
    3364 }
    3365 
    3366 
    3367 /**
    3368  * Sets up the VM-exit controls in the VMCS.
     3257        pVCpu->hm.s.vmx.u32EntryCtls = fVal;
     3258        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS);
     3259    }
     3260    return VINF_SUCCESS;
     3261}
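The fVal/fZap handling above is the usual VMX capability-MSR pattern: start from the bits that must be 1 (disallowed0), OR in the optional bits you want, and verify the result stays within the bits that are allowed to be 1 (allowed1). A standalone sketch with made-up capability values (the real ones come from the IA32_VMX_*_CTLS MSRs):

#include <stdint.h>
#include <stdio.h>

/* Returns 0 on success, -1 if a wanted/required control bit is not supported by the CPU. */
static int sketchBuildCtls(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fWanted, uint32_t *pfCtls)
{
    uint32_t const fVal = fDisallowed0 | fWanted;   /* must-be-one bits plus the optional bits we want */
    uint32_t const fZap = fAllowed1;                /* bits that are allowed to be 1 at all */
    if ((fVal & fZap) != fVal)
        return -1;                                  /* some bit we need cannot be set on this CPU */
    *pfCtls = fVal;
    return 0;
}

int main(void)
{
    uint32_t fEntryCtls;
    /* Capability values below are made up purely for illustration. */
    if (sketchBuildCtls(0x000011ff, 0x0007ffff, 0x00000200 /* e.g. IA-32e mode guest */, &fEntryCtls) == 0)
        printf("VM-entry controls = %#x\n", fEntryCtls);
    else
        printf("unsupported control combination\n");
    return 0;
}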
     3262
     3263
     3264/**
     3265 * Exports the guest state with appropriate VM-exit controls in the VMCS.
    33693266 *
    33703267 * @returns VBox status code.
     
    33763273 * @remarks Requires EFER.
    33773274 */
    3378 DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    3379 {
    3380     NOREF(pMixedCtx);
    3381 
    3382     int rc = VINF_SUCCESS;
    3383     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
    3384     {
    3385         PVM pVM      = pVCpu->CTX_SUFF(pVM);
    3386         uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;        /* Bits set here must be set in the VMCS. */
    3387         uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;           /* Bits cleared here must be cleared in the VMCS. */
     3275static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
     3276{
     3277    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS)
     3278    {
     3279        PVM pVM       = pVCpu->CTX_SUFF(pVM);
     3280        uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;        /* Bits set here must be set in the VMCS. */
     3281        uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;           /* Bits cleared here must be cleared in the VMCS. */
    33883282
    33893283        /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
    3390         val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
     3284        fVal |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
    33913285
    33923286        /*
    33933287         * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
    3394          * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
     3288         * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bits to this value. See assertion in
     3289         * hmR0VmxExportHostMsrs().
    33953290         */
    33963291#if HC_ARCH_BITS == 64
    3397         val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
    3398         Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
     3292        fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
     3293        Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
    33993294#else
    34003295        Assert(   pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64
     
    34043299        {
    34053300            /* The switcher returns to long mode, EFER is managed by the switcher. */
    3406             val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
    3407             Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
     3301            fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
     3302            Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
    34083303        }
    34093304        else
    3410             Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
     3305            Assert(!(fVal & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
    34113306#endif
    34123307
     
    34153310            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
    34163311        {
    3417             val |=  VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
    3418                    | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
    3419             Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));
     3312            fVal |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
     3313                 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
     3314            Log4Func(("VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR and VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n"));
    34203315        }
    34213316
    34223317        /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
    3423         Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
     3318        Assert(!(fVal & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
    34243319
    34253320        /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
     
    34293324        if (    pVM->hm.s.vmx.fUsePreemptTimer
    34303325            && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
    3431             val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
    3432 
    3433         if ((val & zap) != val)
    3434         {
    3435             LogRel(("hmR0VmxSetupProcCtls: Invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
    3436                     pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));
     3326            fVal |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
     3327
     3328        if ((fVal & fZap) != fVal)
     3329        {
     3330            LogRel(("hmR0VmxExportGuestExitCtls: Invalid VM-exit controls combo! cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
     3331                    pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap));
    34373332            pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
    34383333            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    34393334        }
    34403335
    3441         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
     3336        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
    34423337        AssertRCReturn(rc, rc);
    34433338
    3444         pVCpu->hm.s.vmx.u32ExitCtls = val;
    3445         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
    3446     }
    3447     return rc;
     3339        pVCpu->hm.s.vmx.u32ExitCtls = fVal;
     3340        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS);
     3341    }
     3342    return VINF_SUCCESS;
    34483343}
    34493344
     
    34653360
    34663361/**
    3467  * Loads the guest APIC and related state.
     3362 * Exports the guest APIC TPR state into the VMCS.
    34683363 *
    34693364 * @returns VBox status code.
    34703365 * @param   pVCpu       The cross context virtual CPU structure.
    3471  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    3472  *                      out-of-sync. Make sure to update the required fields
    3473  *                      before using them.
    34743366 *
    34753367 * @remarks No-long-jump zone!!!
    34763368 */
    3477 DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    3478 {
    3479     NOREF(pMixedCtx);
    3480 
    3481     int rc = VINF_SUCCESS;
    3482     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE))
     3369static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu)
     3370{
     3371    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
    34833372    {
    34843373        if (   PDMHasApic(pVCpu->CTX_SUFF(pVM))
     
    34953384                uint8_t u8Tpr         = 0;
    34963385                uint8_t u8PendingIntr = 0;
    3497                 rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
     3386                int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
    34983387                AssertRCReturn(rc, rc);
    34993388
    35003389                /*
    3501                  * If there are interrupts pending but masked by the TPR, instruct VT-x to cause a TPR-below-threshold VM-exit
    3502                  * when the guest lowers its TPR below the priority of the pending interrupt so we can deliver the interrupt.
    3503                  * If there are no interrupts pending, set threshold to 0 to not cause any TPR-below-threshold VM-exits.
     3390                 * If there are interrupts pending but masked by the TPR, instruct VT-x to
     3391                 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
     3392                 * priority of the pending interrupt so we can deliver the interrupt. If there
     3393                 * are no interrupts pending, set threshold to 0 to not cause any
     3394                 * TPR-below-threshold VM-exits.
    35043395                 */
    35053396                pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR] = u8Tpr;
     
    35183409            }
    35193410        }
    3520         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
    3521     }
    3522 
    3523     return rc;
     3411        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
     3412    }
     3413    return VINF_SUCCESS;
    35243414}
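A standalone sketch of the TPR-threshold idea described in the comment above: the threshold holds the priority class (TPR bits 7:4) of a pending interrupt that is currently masked by the TPR, so a TPR-below-threshold VM-exit fires as soon as the guest lowers its TPR far enough to unmask it. This shows the idea only, not the exact VirtualBox code path; the helper and values are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Compute the VMCS TPR-threshold value: its bits 3:0 correspond to bits 7:4 of the TPR. */
static uint32_t sketchTprThreshold(bool fPendingIntr, uint8_t u8Tpr, uint8_t u8PendingIntr)
{
    uint32_t uThreshold = 0;
    if (fPendingIntr)
    {
        uint8_t const uPendingPrio = u8PendingIntr >> 4;  /* priority class of the pending vector */
        uint8_t const uTprPrio     = u8Tpr >> 4;          /* current task-priority class */
        if (uPendingPrio <= uTprPrio)                     /* pending interrupt is masked by the TPR */
            uThreshold = uPendingPrio;                    /* VM-exit once the guest drops TPR below it */
    }
    return uThreshold;
}

int main(void)
{
    /* TPR 0x80 (class 8) masks pending vector 0x61 (class 6) -> threshold 6. */
    printf("threshold=%u\n", sketchTprThreshold(true /*pending*/, 0x80 /*TPR*/, 0x61 /*vector*/));
    return 0;
}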
    35253415
     
    35363426 * @remarks No-long-jump zone!!!
    35373427 */
    3538 DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     3428static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    35393429{
    35403430    /*
    35413431     * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
    35423432     */
    3543     uint32_t uIntrState = 0;
     3433    uint32_t fIntrState = 0;
    35443434    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    35453435    {
    3546         /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
    3547         AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
    3548                   ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
     3436        /* If inhibition is active, RIP & RFLAGS should've been accessed
     3437           (i.e. read previously from the VMCS or from ring-3). */
     3438#ifdef VBOX_STRICT
     3439        uint64_t const fExtrn = ASMAtomicUoReadU64(&pMixedCtx->fExtrn);
     3440        AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn));
     3441#endif
    35493442        if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
    35503443        {
    35513444            if (pMixedCtx->eflags.Bits.u1IF)
    3552                 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
     3445                fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
    35533446            else
    3554                 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
     3447                fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
    35553448        }
    35563449        else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    35573450        {
    35583451            /*
    3559              * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
    3560              * VT-x, the flag's condition to be cleared is met and thus the cleared state is correct.
     3452             * We can clear the inhibit force flag as even if we go back to the recompiler
     3453             * without executing guest code in VT-x, the flag's condition to be cleared is
     3454             * met and thus the cleared state is correct.
    35613455             */
    35623456            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     
    35743468        && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
    35753469    {
    3576         uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
    3577     }
    3578 
    3579     return uIntrState;
    3580 }
    3581 
    3582 
    3583 /**
    3584  * Loads the guest's interruptibility-state into the guest-state area in the
     3470        fIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
     3471    }
     3472
     3473    return fIntrState;
     3474}
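For reference, the interruptibility-state bits produced here are architectural: bit 0 is blocking by STI, bit 1 blocking by MOV SS/POP SS, bit 3 blocking by NMI. A minimal sketch of the derivation above follows; the inhibition only holds for the instruction immediately after the STI/MOV SS, i.e. while RIP still matches the recorded address. Everything except the bit values is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_INT_STATE_BLOCK_STI    UINT32_C(0x1)   /* bit 0: blocking by STI */
#define SKETCH_INT_STATE_BLOCK_MOVSS  UINT32_C(0x2)   /* bit 1: blocking by MOV SS / POP SS */
#define SKETCH_INT_STATE_BLOCK_NMI    UINT32_C(0x8)   /* bit 3: blocking by NMI */

static uint32_t sketchGetIntrState(bool fInhibitPending, uint64_t uInhibitRip, uint64_t uRip,
                                   bool fIF, bool fNmiBlocked, bool fVirtualNmis)
{
    uint32_t fState = 0;
    /* Inhibition only applies on the instruction right after STI / MOV SS (same RIP). */
    if (fInhibitPending && uRip == uInhibitRip)
        fState |= fIF ? SKETCH_INT_STATE_BLOCK_STI : SKETCH_INT_STATE_BLOCK_MOVSS;
    if (fNmiBlocked && fVirtualNmis)
        fState |= SKETCH_INT_STATE_BLOCK_NMI;
    return fState;
}

int main(void)
{
    printf("%#x\n", sketchGetIntrState(true, 0x1000, 0x1000, true, false, false));  /* -> 0x1 (STI) */
    return 0;
}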
     3475
     3476
     3477/**
     3478 * Exports the guest's interruptibility-state into the guest-state area in the
    35853479 * VMCS.
    35863480 *
    35873481 * @returns VBox status code.
    35883482 * @param   pVCpu       The cross context virtual CPU structure.
    3589  * @param   uIntrState  The interruptibility-state to set.
    3590  */
    3591 static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
     3483 * @param   fIntrState  The interruptibility-state to set.
     3484 */
     3485static int hmR0VmxExportGuestIntrState(PVMCPU pVCpu, uint32_t fIntrState)
    35923486{
    35933487    NOREF(pVCpu);
    3594     AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState));   /* Bits 31:4 MBZ. */
    3595     Assert((uIntrState & 0x3) != 0x3);                              /* Block-by-STI and MOV SS cannot be simultaneously set. */
    3596     int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
    3597     AssertRC(rc);
    3598     return rc;
    3599 }
    3600 
    3601 
    3602 /**
    3603  * Loads the exception intercepts required for guest execution in the VMCS.
     3488    AssertMsg(!(fIntrState & 0xfffffff0), ("%#x\n", fIntrState));   /* Bits 31:4 MBZ. */
     3489    Assert((fIntrState & 0x3) != 0x3);                              /* Block-by-STI and MOV SS cannot be simultaneously set. */
     3490    return VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, fIntrState);
     3491}
     3492
     3493
     3494/**
     3495 * Exports the exception intercepts required for guest execution in the VMCS.
    36043496 *
    36053497 * @returns VBox status code.
    36063498 * @param   pVCpu       The cross context virtual CPU structure.
    3607  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    3608  *                      out-of-sync. Make sure to update the required fields
    3609  *                      before using them.
    3610  */
    3611 static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    3612 {
    3613     NOREF(pMixedCtx);
    3614     int rc = VINF_SUCCESS;
    3615     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS))
    3616     {
    3617         /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */
     3499 *
     3500 * @remarks No-long-jump zone!!!
     3501 */
     3502static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu)
     3503{
     3504    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS)
     3505    {
     3506        /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxExportSharedCR0(). */
    36183507        if (pVCpu->hm.s.fGIMTrapXcptUD)
    36193508            pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD);
     
    36263515        Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
    36273516
    3628         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
     3517        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    36293518        AssertRCReturn(rc, rc);
    36303519
    3631         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
    3632         Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
    3633               pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));
    3634     }
    3635     return rc;
    3636 }
    3637 
    3638 
    3639 /**
    3640  * Loads the guest's RIP into the guest-state area in the VMCS.
     3520        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
     3521        Log4Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX32\n", pVCpu->hm.s.vmx.u32XcptBitmap));
     3522    }
     3523    return VINF_SUCCESS;
     3524}
     3525
     3526
     3527/**
     3528 * Exports the guest's RIP into the guest-state area in the VMCS.
    36413529 *
    36423530 * @returns VBox status code.
     
    36483536 * @remarks No-long-jump zone!!!
    36493537 */
    3650 static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     3538static int hmR0VmxExportGuestRip(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    36513539{
    36523540    int rc = VINF_SUCCESS;
    3653     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
     3541    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
    36543542    {
    36553543        rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
    36563544        AssertRCReturn(rc, rc);
    36573545
    3658         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
    3659         Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,
    3660               HMCPU_CF_VALUE(pVCpu)));
    3661 
    36623546        /* Update the exit history entry with the correct CS.BASE + RIP or just RIP. */
    3663         if (HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
     3547        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
    36643548            EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true);
    36653549        else
    36663550            EMR0HistoryUpdatePC(pVCpu, pMixedCtx->rip, false);
     3551
     3552        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
     3553        Log4Func(("RIP=%#RX64\n", pMixedCtx->rip));
    36673554    }
    36683555    return rc;
     
    36713558
    36723559/**
    3673  * Loads the guest's RSP into the guest-state area in the VMCS.
     3560 * Exports the guest's RSP into the guest-state area in the VMCS.
    36743561 *
    36753562 * @returns VBox status code.
     
    36813568 * @remarks No-long-jump zone!!!
    36823569 */
    3683 static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    3684 {
    3685     int rc = VINF_SUCCESS;
    3686     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
    3687     {
    3688         rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
     3570static int hmR0VmxExportGuestRsp(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
     3571{
     3572    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
     3573    {
     3574        int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
    36893575        AssertRCReturn(rc, rc);
    36903576
    3691         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
    3692         Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp));
    3693     }
    3694     return rc;
    3695 }
    3696 
    3697 
    3698 /**
    3699  * Loads the guest's RFLAGS into the guest-state area in the VMCS.
     3577        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP);
     3578    }
     3579    return VINF_SUCCESS;
     3580}
     3581
     3582
     3583/**
     3584 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
    37003585 *
    37013586 * @returns VBox status code.
     
    37073592 * @remarks No-long-jump zone!!!
    37083593 */
    3709 static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    3710 {
    3711     int rc = VINF_SUCCESS;
    3712     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
     3594static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
     3595{
     3596    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
    37133597    {
    37143598        /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
    37153599           Let us assert it as such and use 32-bit VMWRITE. */
    37163600        Assert(!(pMixedCtx->rflags.u64 >> 32));
    3717         X86EFLAGS Eflags = pMixedCtx->eflags;
    3718         /** @todo r=bird: There shall be no need to OR in X86_EFL_1 here, nor
    3719          * shall there be any reason for clearing bits 63:22, 15, 5 and 3.
    3720          * These will never be cleared/set, unless some other part of the VMM
    3721          * code is buggy - in which case we're better of finding and fixing
    3722          * those bugs than hiding them. */
    3723         Assert(Eflags.u32 & X86_EFL_RA1_MASK);
    3724         Assert(!(Eflags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
    3725         Eflags.u32 &= VMX_EFLAGS_RESERVED_0;                   /* Bits 22-31, 15, 5 & 3 MBZ. */
    3726         Eflags.u32 |= VMX_EFLAGS_RESERVED_1;                   /* Bit 1 MB1. */
     3601        X86EFLAGS fEFlags = pMixedCtx->eflags;
     3602        Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
     3603        Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
    37273604
    37283605        /*
    3729          * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit.
    3730          * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
     3606         * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
     3607         * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
     3608         * can run the real-mode guest code under Virtual 8086 mode.
    37313609         */
    37323610        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     
    37343612            Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
    37353613            Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
    3736             pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32;  /* Save the original eflags of the real-mode guest. */
    3737             Eflags.Bits.u1VM   = 1;                            /* Set the Virtual 8086 mode bit. */
    3738             Eflags.Bits.u2IOPL = 0;                            /* Change IOPL to 0, otherwise certain instructions won't fault. */
    3739         }
    3740 
    3741         rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
     3614            pVCpu->hm.s.vmx.RealMode.Eflags.u32 = fEFlags.u32;  /* Save the original eflags of the real-mode guest. */
     3615            fEFlags.Bits.u1VM   = 1;                            /* Set the Virtual 8086 mode bit. */
     3616            fEFlags.Bits.u2IOPL = 0;                            /* Change IOPL to 0, otherwise certain instructions won't fault. */
     3617        }
     3618
     3619        int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
    37423620        AssertRCReturn(rc, rc);
    37433621
    3744         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
    3745         Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32));
    3746     }
    3747     return rc;
    3748 }
    3749 
    3750 
    3751 /**
    3752  * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
     3622        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
     3623        Log4Func(("EFlags=%#RX32\n", fEFlags.u32));
     3624    }
     3625    return VINF_SUCCESS;
     3626}
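A standalone sketch of the real-mode-on-virtual-8086 EFLAGS mangling described above: remember the guest's original EFLAGS, set the VM bit and force IOPL to 0 so privileged and I/O instructions fault and can be emulated. The EFLAGS bit positions are architectural; the helper itself is illustrative.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_EFL_VM    UINT32_C(0x00020000)   /* bit 17: virtual-8086 mode */
#define SKETCH_EFL_IOPL  UINT32_C(0x00003000)   /* bits 13:12: I/O privilege level */

/* Mangle the real-mode guest's EFLAGS so VT-x can run it under virtual-8086 mode. */
static uint32_t sketchMakeV86Eflags(uint32_t uGuestEfl, uint32_t *puSavedEfl)
{
    *puSavedEfl = uGuestEfl;          /* keep the original to restore on VM-exit */
    uGuestEfl |= SKETCH_EFL_VM;       /* run under virtual-8086 mode */
    uGuestEfl &= ~SKETCH_EFL_IOPL;    /* IOPL 0 so privileged/IO instructions fault */
    return uGuestEfl;
}

int main(void)
{
    uint32_t uSaved;
    printf("%#x\n", sketchMakeV86Eflags(0x00000202, &uSaved));   /* -> 0x20202 */
    return 0;
}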
     3627
     3628
     3629/**
     3630 * Exports the guest CR0 control register into the guest-state area in the VMCS.
     3631 *
     3632 * The guest FPU state is always pre-loaded, hence we don't need to bother about
     3633 * sharing FPU-related CR0 bits between the guest and host.
    37533634 *
    37543635 * @returns VBox status code.
     
    37603641 * @remarks No-long-jump zone!!!
    37613642 */
    3762 DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    3763 {
    3764     int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
    3765     rc    |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
    3766     rc    |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
    3767     AssertRCReturn(rc, rc);
    3768     return rc;
    3769 }
    3770 
    3771 
    3772 /**
    3773  * Loads the guest CR0 control register into the guest-state area in the VMCS.
    3774  * CR0 is partially shared with the host and we have to consider the FPU bits.
    3775  *
    3776  * @returns VBox status code.
    3777  * @param   pVCpu       The cross context virtual CPU structure.
    3778  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    3779  *                      out-of-sync. Make sure to update the required fields
    3780  *                      before using them.
    3781  *
    3782  * @remarks No-long-jump zone!!!
    3783  */
    3784 static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    3785 {
    3786     Assert(CPUMIsGuestFPUStateActive(pVCpu));
    3787 
    3788     /*
    3789      * Guest CR0.
    3790      * Guest FPU.
    3791      */
    3792     int rc = VINF_SUCCESS;
    3793     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
    3794     {
    3795         Assert(!(pMixedCtx->cr0 >> 32));
    3796         uint32_t u32GuestCR0 = pMixedCtx->cr0;
    3797         PVM      pVM         = pVCpu->CTX_SUFF(pVM);
    3798 
    3799         /* The guest's view (read access) of its CR0 is unblemished. */
    3800         rc  = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
    3801         AssertRCReturn(rc, rc);
    3802         Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0));
    3803 
    3804         /* Setup VT-x's view of the guest CR0. */
    3805         /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
     3643static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
     3644{
     3645    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
     3646    {
     3647        PVM pVM = pVCpu->CTX_SUFF(pVM);
     3648        Assert(!RT_HI_U32(pMixedCtx->cr0));
     3649        uint32_t const uShadowCR0 = pMixedCtx->cr0;
     3650        uint32_t       uGuestCR0  = pMixedCtx->cr0;
     3651
     3652        /*
     3653         * Setup VT-x's view of the guest CR0.
     3654         * Minimize VM-exits due to CR3 changes when we have NestedPaging.
     3655         */
     3656        uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls;
    38063657        if (pVM->hm.s.fNestedPaging)
    38073658        {
    3808             if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
     3659            if (CPUMIsGuestPagingEnabled(pVCpu))
    38093660            {
    38103661                /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
    3811                 pVCpu->hm.s.vmx.u32ProcCtls &= ~(  VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
    3812                                                  | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
     3662                uProcCtls &= ~(  VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     3663                               | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
    38133664            }
    38143665            else
    38153666            {
    38163667                /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
    3817                 pVCpu->hm.s.vmx.u32ProcCtls |=  VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
    3818                                                | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
     3668                uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
     3669                          | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
    38193670            }
    38203671
    38213672            /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
    38223673            if (pVM->hm.s.vmx.fUnrestrictedGuest)
    3823                 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
    3824 
    3825             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    3826             AssertRCReturn(rc, rc);
     3674                uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
    38273675        }
    38283676        else
    3829             u32GuestCR0 |= X86_CR0_WP;     /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
     3677        {
     3678            /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
     3679            uGuestCR0 |= X86_CR0_WP;
     3680        }
    38303681
    38313682        /*
    38323683         * Guest FPU bits.
    3833          * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first
    3834          * CPUs to support VT-x and no mention of with regards to UX in VM-entry checks.
     3684         *
     3685         * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
     3686         * using CR0.TS.
     3687         *
     3688         * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
     3689         * set on the first CPUs to support VT-x; nothing is mentioned with regards to UX in the VM-entry checks.
    38353690         */
    3836         u32GuestCR0 |= X86_CR0_NE;
    3837 
    3838         /* Catch floating point exceptions if we need to report them to the guest in a different way. */
    3839         bool fInterceptMF = false;
    3840         if (!(pMixedCtx->cr0 & X86_CR0_NE))
    3841             fInterceptMF = true;
    3842 
    3843         /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
     3691        uGuestCR0 |= X86_CR0_NE;
     3692
     3693        /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
     3694        bool const fInterceptMF = !(uShadowCR0 & X86_CR0_NE);
     3695
     3696        /*
     3697         * Update exception intercepts.
     3698         */
     3699        uint32_t uXcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap;
    38443700        if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    38453701        {
    38463702            Assert(PDMVmmDevHeapIsEnabled(pVM));
    38473703            Assert(pVM->hm.s.vmx.pRealModeTSS);
    3848             pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
     3704            uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
    38493705        }
    38503706        else
    38513707        {
    38523708            /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
    3853             pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
     3709            uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
    38543710            if (fInterceptMF)
    3855                 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
    3856         }
    3857         HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
     3711                uXcptBitmap |= RT_BIT(X86_XCPT_MF);
     3712        }
    38583713
    38593714        /* Additional intercepts for debugging, define these yourself explicitly. */
    38603715#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    3861         pVCpu->hm.s.vmx.u32XcptBitmap |= 0
    3862                                          | RT_BIT(X86_XCPT_BP)
    3863                                          | RT_BIT(X86_XCPT_DE)
    3864                                          | RT_BIT(X86_XCPT_NM)
    3865                                          | RT_BIT(X86_XCPT_TS)
    3866                                          | RT_BIT(X86_XCPT_UD)
    3867                                          | RT_BIT(X86_XCPT_NP)
    3868                                          | RT_BIT(X86_XCPT_SS)
    3869                                          | RT_BIT(X86_XCPT_GP)
    3870                                          | RT_BIT(X86_XCPT_PF)
    3871                                          | RT_BIT(X86_XCPT_MF)
    3872                                          ;
     3716        uXcptBitmap |= 0
     3717                    | RT_BIT(X86_XCPT_BP)
     3718                    | RT_BIT(X86_XCPT_DE)
     3719                    | RT_BIT(X86_XCPT_NM)
     3720                    | RT_BIT(X86_XCPT_TS)
     3721                    | RT_BIT(X86_XCPT_UD)
     3722                    | RT_BIT(X86_XCPT_NP)
     3723                    | RT_BIT(X86_XCPT_SS)
     3724                    | RT_BIT(X86_XCPT_GP)
     3725                    | RT_BIT(X86_XCPT_PF)
     3726                    | RT_BIT(X86_XCPT_MF)
     3727                    ;
    38733728#elif defined(HMVMX_ALWAYS_TRAP_PF)
    3874         pVCpu->hm.s.vmx.u32XcptBitmap    |= RT_BIT(X86_XCPT_PF);
     3729        uXcptBitmap |= RT_BIT(X86_XCPT_PF);
    38753730#endif
    3876 
     3731        if (uXcptBitmap != pVCpu->hm.s.vmx.u32XcptBitmap)
     3732        {
     3733            pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
     3734            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
     3735        }
    38773736        Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
    38783737
    3879         /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
    3880         uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
    3881         uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
    3882         if (pVM->hm.s.vmx.fUnrestrictedGuest)               /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
    3883             uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
     3738        /*
     3739         * Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW).
     3740         */
     3741        uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
     3742        uint32_t fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
     3743        if (pVM->hm.s.vmx.fUnrestrictedGuest)             /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
     3744            fSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
    38843745        else
    3885             Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
    3886 
    3887         u32GuestCR0 |= uSetCR0;
    3888         u32GuestCR0 &= uZapCR0;
    3889         u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);          /* Always enable caching. */
    3890 
    3891         /* Write VT-x's view of the guest CR0 into the VMCS. */
    3892         rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
    3893         AssertRCReturn(rc, rc);
    3894         Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
    3895               uZapCR0));
     3746            Assert((fSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
     3747
     3748        uGuestCR0 |= fSetCR0;
     3749        uGuestCR0 &= fZapCR0;
     3750        uGuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);          /* Always enable caching. */
    38963751
    38973752        /*
     
    39003755         * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
    39013756         */
    3902         uint32_t u32CR0Mask = 0;
    3903         u32CR0Mask =  X86_CR0_PE
    3904                     | X86_CR0_NE
    3905                     | X86_CR0_WP
    3906                     | X86_CR0_PG
    3907                     | X86_CR0_ET    /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
    3908                     | X86_CR0_CD    /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
    3909                     | X86_CR0_NW;   /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
     3757        uint32_t uCR0Mask = X86_CR0_PE
     3758                          | X86_CR0_NE
     3759                          | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
     3760                          | X86_CR0_PG
     3761                          | X86_CR0_ET    /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
     3762                          | X86_CR0_CD    /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
     3763                          | X86_CR0_NW;   /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
    39103764
    39113765        /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
     
    39143768#if 0
    39153769        if (pVM->hm.s.vmx.fUnrestrictedGuest)
    3916             u32CR0Mask &= ~X86_CR0_PE;
     3770            uCR0Mask &= ~X86_CR0_PE;
    39173771#endif
    3918         if (pVM->hm.s.fNestedPaging)
    3919             u32CR0Mask &= ~X86_CR0_WP;
    3920 
    3921         /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
    3922         pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
    3923         rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
     3772        /* Update the HMCPU's copy of the CR0 mask. */
     3773        pVCpu->hm.s.vmx.u32CR0Mask = uCR0Mask;
     3774
     3775        /*
     3776         * Finally, update VMCS fields with the CR0 values.
     3777         */
     3778        int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, uGuestCR0);
     3779        rc    |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, uShadowCR0);
     3780        rc    |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, uCR0Mask);
     3781        if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
     3782            rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
    39243783        AssertRCReturn(rc, rc);
    3925         Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask));
    3926 
    3927         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
    3928     }
    3929     return rc;
    3930 }
    3931 
    3932 
    3933 /**
    3934  * Loads the guest control registers (CR3, CR4) into the guest-state area
     3784        pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
     3785
     3786        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
     3787
     3788        Log4Func(("uCR0Mask=%#RX32 uShadowCR0=%#RX32 uGuestCR0=%#RX32 (fSetCR0=%#RX32 fZapCR0=%#RX32)\n", uCR0Mask, uShadowCR0,
     3789                  uGuestCR0, fSetCR0, fZapCR0));
     3790    }
     3791
     3792    return VINF_SUCCESS;
     3793}
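A standalone sketch of the CR0 fixed-bits handling above: IA32_VMX_CR0_FIXED0 & FIXED1 gives the bits that must be 1, FIXED0 | FIXED1 the bits that may be 1; with unrestricted guest execution PE and PG are exempt, and CD/NW are always cleared so the guest runs with caching enabled. The MSR values in main() are made up; only the CR0 bit positions are architectural.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_CR0_PE UINT32_C(0x00000001)
#define SKETCH_CR0_NW UINT32_C(0x20000000)
#define SKETCH_CR0_CD UINT32_C(0x40000000)
#define SKETCH_CR0_PG UINT32_C(0x80000000)

/* Apply the VMX CR0 fixed-bit constraints to the value VT-x will see. */
static uint32_t sketchApplyCr0FixedBits(uint32_t uGuestCr0, uint64_t uCr0Fixed0, uint64_t uCr0Fixed1,
                                        bool fUnrestrictedGuest)
{
    uint32_t fSet = (uint32_t)(uCr0Fixed0 & uCr0Fixed1);   /* bits that must be 1 */
    uint32_t fZap = (uint32_t)(uCr0Fixed0 | uCr0Fixed1);   /* bits that may be 1  */
    if (fUnrestrictedGuest)
        fSet &= ~(SKETCH_CR0_PE | SKETCH_CR0_PG);          /* PE/PG need not be 1 with unrestricted guest */
    uGuestCr0 |= fSet;
    uGuestCr0 &= fZap;
    uGuestCr0 &= ~(SKETCH_CR0_CD | SKETCH_CR0_NW);         /* always run with caching enabled */
    return uGuestCr0;
}

int main(void)
{
    /* Typical-looking but made-up FIXED0/FIXED1 values: PE, NE and PG must be 1, everything may be 1. */
    printf("%#x\n", sketchApplyCr0FixedBits(0x00000011, 0x80000021, 0xFFFFFFFF, false /* not unrestricted */));
    return 0;
}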
     3794
     3795
     3796/**
     3797 * Exports the guest control registers (CR3, CR4) into the guest-state area
    39353798 * in the VMCS.
    39363799 *
     
    39473810 * @remarks No-long-jump zone!!!
    39483811 */
    3949 static VBOXSTRICTRC hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     3812static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    39503813{
    39513814    int rc  = VINF_SUCCESS;
     
    39603823     * Guest CR3.
    39613824     */
    3962     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
     3825    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
    39633826    {
    39643827        RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
     
    39863849            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
    39873850            AssertRCReturn(rc, rc);
    3988             Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));
    39893851
    39903852            if (   pVM->hm.s.vmx.fUnrestrictedGuest
     
    40033865                }
    40043866
    4005                 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
    4006                    have Unrestricted Execution to handle the guest when it's not using paging. */
     3867                /*
     3868                 * The guest's view of its CR3 is unblemished with Nested Paging when the
     3869                 * guest is using paging or we have unrestricted guest execution to handle
     3870                 * the guest when it's not using paging.
     3871                 */
    40073872                GCPhysGuestCR3 = pMixedCtx->cr3;
    40083873            }
     
    40103875            {
    40113876                /*
    4012                  * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
    4013                  * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
    4014                  * EPT takes care of translating it to host-physical addresses.
     3877                 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
     3878                 * thinks it accesses physical memory directly, we use our identity-mapped
     3879                 * page table to map guest-linear to guest-physical addresses. EPT takes care
     3880                 * of translating it to host-physical addresses.
    40153881                 */
    40163882                RTGCPHYS GCPhys;
     
    40233889                else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
    40243890                {
    4025                     Log4(("Load[%RU32]: VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n", pVCpu->idCpu));
     3891                    Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
    40263892                    return VINF_EM_RESCHEDULE_REM;  /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
    40273893                }
     
    40323898            }
    40333899
    4034             Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGp (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));
     3900            Log4Func(("uGuestCR3=%#RGp (GstN)\n", GCPhysGuestCR3));
    40353901            rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
     3902            AssertRCReturn(rc, rc);
    40363903        }
    40373904        else
     
    40403907            RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
    40413908
    4042             Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));
     3909            Log4Func(("uGuestCR3=%#RHv (HstN)\n", HCPhysGuestCR3));
    40433910            rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
    4044         }
    4045         AssertRCReturn(rc, rc);
    4046 
    4047         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
     3911            AssertRCReturn(rc, rc);
     3912        }
     3913
     3914        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);
    40483915    }
    40493916
     
    40523919     * ASSUMES this is done every time we get in from ring-3! (XCR0)
    40533920     */
    4054     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
    4055     {
    4056         Assert(!(pMixedCtx->cr4 >> 32));
    4057         uint32_t u32GuestCR4 = pMixedCtx->cr4;
     3921    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
     3922    {
     3923        Assert(!RT_HI_U32(pMixedCtx->cr4));
     3924        uint32_t uGuestCR4 = pMixedCtx->cr4;
    40583925
    40593926        /* The guest's view of its CR4 is unblemished. */
    4060         rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
     3927        rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uGuestCR4);
    40613928        AssertRCReturn(rc, rc);
    4062         Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4));
    4063 
    4064         /* Setup VT-x's view of the guest CR4. */
     3929        Log4Func(("uShadowCR4=%#RX32\n", uGuestCR4));
     3930
    40653931        /*
    4066          * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
    4067          * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
     3932         * Setup VT-x's view of the guest CR4.
     3933         *
     3934         * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
     3935         * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
     3936         * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
     3937         *
    40683938         * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
    40693939         */
     
    40723942            Assert(pVM->hm.s.vmx.pRealModeTSS);
    40733943            Assert(PDMVmmDevHeapIsEnabled(pVM));
    4074             u32GuestCR4 &= ~X86_CR4_VME;
     3944            uGuestCR4 &= ~X86_CR4_VME;
    40753945        }
    40763946
     
    40813951            {
    40823952                /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
    4083                 u32GuestCR4 |= X86_CR4_PSE;
     3953                uGuestCR4 |= X86_CR4_PSE;
    40843954                /* Our identity mapping is a 32-bit page directory. */
    4085                 u32GuestCR4 &= ~X86_CR4_PAE;
     3955                uGuestCR4 &= ~X86_CR4_PAE;
    40863956            }
    40873957            /* else use guest CR4.*/
     
    40993969                case PGMMODE_32_BIT:            /* 32-bit paging. */
    41003970                {
    4101                     u32GuestCR4 &= ~X86_CR4_PAE;
     3971                    uGuestCR4 &= ~X86_CR4_PAE;
    41023972                    break;
    41033973                }
     
    41063976                case PGMMODE_PAE_NX:            /* PAE paging with NX. */
    41073977                {
    4108                     u32GuestCR4 |= X86_CR4_PAE;
     3978                    uGuestCR4 |= X86_CR4_PAE;
    41093979                    break;
    41103980                }
     
    41223992
    41233993        /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
    4124         uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
    4125         uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
    4126         u32GuestCR4 |= uSetCR4;
    4127         u32GuestCR4 &= uZapCR4;
     3994        uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
     3995        uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
     3996        uGuestCR4 |= fSetCR4;
     3997        uGuestCR4 &= fZapCR4;
    41283998
    41293999        /* Write VT-x's view of the guest CR4 into the VMCS. */
    4130         Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));
    4131         rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
     4000        Log4Func(("uGuestCR4=%#RX32 (fSetCR4=%#RX32 fZapCR4=%#RX32)\n", uGuestCR4, fSetCR4, fZapCR4));
     4001        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4);
    41324002        AssertRCReturn(rc, rc);
    41334003
     
    41494019        pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
    41504020
    4151         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
     4021        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
    41524022    }
    41534023    return rc;
     
    41564026
    41574027/**
    4158  * Loads the guest debug registers into the guest-state area in the VMCS.
     4028 * Exports the guest debug registers into the guest-state area in the VMCS.
     4029 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
    41594030 *
    41604031 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
    4161  *
    4162  * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
    41634032 *
    41644033 * @returns VBox status code.
     
    41704039 * @remarks No-long-jump zone!!!
    41714040 */
    4172 static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    4173 {
    4174     if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
    4175         return VINF_SUCCESS;
     4041static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4042{
     4043    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    41764044
    41774045#ifdef VBOX_STRICT
     
    41854053#endif
    41864054
    4187     int  rc;
    4188     PVM  pVM              = pVCpu->CTX_SUFF(pVM);
    4189     bool fSteppingDB      = false;
    4190     bool fInterceptMovDRx = false;
     4055    bool     fSteppingDB      = false;
     4056    bool     fInterceptMovDRx = false;
     4057    uint32_t uProcCtls        = pVCpu->hm.s.vmx.u32ProcCtls;
    41914058    if (pVCpu->hm.s.fSingleInstruction)
    41924059    {
    41934060        /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
     4061        PVM pVM = pVCpu->CTX_SUFF(pVM);
    41944062        if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
    41954063        {
    4196             pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
    4197             rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    4198             AssertRCReturn(rc, rc);
     4064            uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
    41994065            Assert(fSteppingDB == false);
    42004066        }
     
    42024068        {
    42034069            pMixedCtx->eflags.u32 |= X86_EFL_TF;
     4070            pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
    42044071            pVCpu->hm.s.fClearTrapFlag = true;
    4205             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
    42064072            fSteppingDB = true;
    42074073        }
    42084074    }
    42094075
     4076    uint32_t uGuestDR7;
    42104077    if (   fSteppingDB
    42114078        || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
    42124079    {
    42134080        /*
    4214          * Use the combined guest and host DRx values found in the hypervisor
    4215          * register set because the debugger has breakpoints active or someone
    4216          * is single stepping on the host side without a monitor trap flag.
     4081         * Use the combined guest and host DRx values found in the hypervisor register set
     4082         * because the debugger has breakpoints active or someone is single stepping on the
     4083         * host side without a monitor trap flag.
    42174084         *
    42184085         * Note! DBGF expects a clean DR6 state before executing guest code.
    42194086         */
    42204087#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    4221         if (   CPUMIsGuestInLongModeEx(pMixedCtx)
     4088        if (    CPUMIsGuestInLongModeEx(pMixedCtx)
    42224089            && !CPUMIsHyperDebugStateActivePending(pVCpu))
    42234090        {
     
    42354102        }
    42364103
    4237         /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
    4238         rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
    4239         AssertRCReturn(rc, rc);
    4240 
     4104        /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
     4105        uGuestDR7 = (uint32_t)CPUMGetHyperDR7(pVCpu);
    42414106        pVCpu->hm.s.fUsingHyperDR7 = true;
    42424107        fInterceptMovDRx = true;
     
    42484113         * executing guest code so they'll trigger at the right time.
    42494114         */
    4250         if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
     4115        if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK)
    42514116        {
    42524117#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    4253             if (   CPUMIsGuestInLongModeEx(pMixedCtx)
     4118            if (    CPUMIsGuestInLongModeEx(pMixedCtx)
    42544119                && !CPUMIsGuestDebugStateActivePending(pVCpu))
    42554120            {
     
    42864151        }
    42874152
    4288         /* Update guest DR7. */
    4289         rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
    4290         AssertRCReturn(rc, rc);
    4291 
     4153        /* Update DR7 with the actual guest value. */
     4154        uGuestDR7 = pMixedCtx->dr[7];
    42924155        pVCpu->hm.s.fUsingHyperDR7 = false;
    42934156    }
    42944157
     4158    if (fInterceptMovDRx)
     4159        uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
     4160    else
     4161        uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
     4162
    42954163    /*
    4296      * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
     4164     * Update the processor-based VM-execution controls for MOV-DRx intercepts and the monitor-trap flag.
    42974165     */
    4298     if (fInterceptMovDRx)
    4299         pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
    4300     else
    4301         pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
    4302     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
     4166    if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
     4167    {
      4168        int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
     4169        AssertRCReturn(rc2, rc2);
     4170        pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
     4171    }
     4172
     4173    /*
     4174     * Update guest DR7.
     4175     */
     4176    int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, uGuestDR7);
    43034177    AssertRCReturn(rc, rc);
    43044178
    4305     HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
    43064179    return VINF_SUCCESS;
    43074180}
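
The reworked hmR0VmxExportSharedDebugState() above accumulates its control-bit changes in a local uProcCtls and touches the VMCS only when the value actually differs from the cached pVCpu->hm.s.vmx.u32ProcCtls. A minimal sketch of that read-modify-write pattern, using the field and function names from the diff (illustrative only, not part of the changeset):

    /* Sketch: set or clear the MOV-DRx intercept and write the VMCS field only on change. */
    uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls;
    if (fInterceptMovDRx)
        uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
    else
        uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
    if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)            /* skip the VMWRITE when nothing changed */
    {
        int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
        AssertRCReturn(rc2, rc2);
        pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;             /* keep the cached copy in sync */
    }
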
     
    43124185 * Strict function to validate segment registers.
    43134186 *
    4314  * @remarks ASSUMES CR0 is up to date.
    4315  */
    4316 static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    4317 {
    4318     /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
    4319     /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
    4320      * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
     4187 * @remarks Will import guest CR0 on strict builds during validation of
     4188 *          segments.
     4189 */
     4190static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx)
     4191{
     4192    /*
     4193     * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
     4194     *
     4195     * The reason we check for attribute value 0 in this function and not just the unusable bit is
     4196     * because hmR0VmxWriteSegmentReg() only updates the VMCS' copy of the value with the unusable bit
     4197     * and doesn't change the guest-context value.
     4198     */
     4199    hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
    43214200    if (   !pVM->hm.s.vmx.fUnrestrictedGuest
    43224201        && (   !CPUMIsGuestInRealModeEx(pCtx)
     
    44924371 */
    44934372static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
    4494                                        uint32_t idxAccess, PCPUMSELREG pSelReg)
     4373                                       uint32_t idxAccess, PCCPUMSELREG pSelReg)
    44954374{
    44964375    int rc = VMXWriteVmcs32(idxSel,    pSelReg->Sel);       /* 16-bit guest selector field. */
     
    45104389    {
    45114390        /*
    4512          * The way to differentiate between whether this is really a null selector or was just a selector loaded with 0 in
    4513          * real-mode is using the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
    4514          * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures NULL selectors
    4515          * loaded in protected-mode have their attribute as 0.
     4391         * The way to differentiate between whether this is really a null selector or was just
     4392         * a selector loaded with 0 in real-mode is using the segment attributes. A selector
     4393         * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
      4394         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure
     4395         * NULL selectors loaded in protected-mode have their attribute as 0.
    45164396         */
    45174397        if (!u32Access)
     
    45304410
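
The null-selector comment above is the reason hmR0VmxWriteSegmentReg() only substitutes the VT-x "unusable" bit when the whole attribute dword is zero. A short sketch of that check (X86DESCATTR_UNUSABLE is assumed to be the unusable access-rights bit; only the relevant lines are shown):

    /* Sketch: a selector loaded with 0 in real mode keeps non-zero attributes and stays usable;
       a genuine NULL selector has Attr.u == 0 and must be marked unusable for VT-x. */
    uint32_t u32Access = pSelReg->Attr.u;
    if (!u32Access)
        u32Access = X86DESCATTR_UNUSABLE;
    rc |= VMXWriteVmcs32(idxAccess, u32Access);              /* 32-bit guest access-rights field */
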
    45314411/**
    4532  * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
     4412 * Exports the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
    45334413 * into the guest-state area in the VMCS.
    45344414 *
     
    45394419 *                      before using them.
    45404420 *
    4541  * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation).
     4421 * @remarks Will import guest CR0 on strict builds during validation of
     4422 *          segments.
    45424423 * @remarks No-long-jump zone!!!
    45434424 */
    4544 static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4425static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    45454426{
    45464427    int rc  = VERR_INTERNAL_ERROR_5;
     
    45504431     * Guest Segment registers: CS, SS, DS, ES, FS, GS.
    45514432     */
    4552     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
     4433    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
    45534434    {
    45544435        /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
     
    45744455                   in real-mode (e.g. OpenBSD 4.0) */
    45754456                REMFlushTBs(pVM);
    4576                 Log4(("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));
     4457                Log4Func(("Switch to protected mode detected!\n"));
    45774458                pVCpu->hm.s.vmx.fWasInRealMode = false;
    45784459            }
     
    46034484#endif
    46044485
    4605         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
    4606         Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,
    4607               pMixedCtx->cs.u64Base, pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
    4608 
    46094486        /* Update the exit history entry with the correct CS.BASE + RIP. */
    4610         if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
     4487        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
    46114488            EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true);
     4489
     4490        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SREG_MASK);
     4491        Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
     4492                  pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
    46124493    }
    46134494
     
    46154496     * Guest TR.
    46164497     */
    4617     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
     4498    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
    46184499    {
    46194500        /*
     
    46754556        AssertRCReturn(rc, rc);
    46764557
    4677         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
    4678         Log4(("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu, u64Base));
     4558        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
     4559        Log4Func(("TR base=%#RX64\n", pMixedCtx->tr.u64Base));
    46794560    }
    46804561
     
    46824563     * Guest GDTR.
    46834564     */
    4684     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
     4565    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
    46854566    {
    46864567        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
     
    46914572        Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000));          /* Bits 31:16 MBZ. */
    46924573
    4693         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
    4694         Log4(("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));
     4574        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
     4575        Log4Func(("GDTR base=%#RX64\n", pMixedCtx->gdtr.pGdt));
    46954576    }
    46964577
     
    46984579     * Guest LDTR.
    46994580     */
    4700     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
     4581    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
    47014582    {
    47024583        /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
     
    47284609        }
    47294610
    4730         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
    4731         Log4(("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));
     4611        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
     4612        Log4Func(("LDTR base=%#RX64\n", pMixedCtx->ldtr.u64Base));
    47324613    }
    47334614
     
    47354616     * Guest IDTR.
    47364617     */
    4737     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
     4618    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
    47384619    {
    47394620        rc  = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
     
    47444625        Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000));          /* Bits 31:16 MBZ. */
    47454626
    4746         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
    4747         Log4(("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));
     4627        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
     4628        Log4Func(("IDTR base=%#RX64\n", pMixedCtx->idtr.pIdt));
    47484629    }
    47494630
     
    47534634
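
Every guest-state group above follows the same protocol that this changeset introduces in place of the HMCPU_CF_* macros: read the 64-bit pVCpu->hm.s.fCtxChanged mask with an unordered atomic load, export the state if its HM_CHANGED_* bit is set, then atomically clear just that bit. A generic sketch of the pattern, using GDTR as the example group (illustrative, not the committed code):

    /* Sketch: check/export/clear for one guest-state group. */
    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
    {
        int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
        rc    |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  pMixedCtx->gdtr.pGdt);
        AssertRCReturn(rc, rc);
        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);   /* clear only this bit */
    }
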
    47544635/**
    4755  * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
     4636 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
    47564637 * areas.
    47574638 *
     
    47594640 * VM-entry and stored from the host CPU on every successful VM-exit. This also
    47604641 * creates/updates MSR slots for the host MSRs. The actual host MSR values are
    4761  * -not- updated here for performance reasons. See hmR0VmxSaveHostMsrs().
    4762  *
    4763  * Also loads the sysenter MSRs into the guest-state area in the VMCS.
     4642 * -not- updated here for performance reasons. See hmR0VmxExportHostMsrs().
     4643 *
     4644 * Also exports the guest sysenter MSRs into the guest-state area in the VMCS.
    47644645 *
    47654646 * @returns VBox status code.
     
    47714652 * @remarks No-long-jump zone!!!
    47724653 */
    4773 static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4654static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    47744655{
    47754656    AssertPtr(pVCpu);
     
    47784659    /*
    47794660     * MSRs for which we use the auto-load/store MSR area in the VMCS.
     4661     * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs().
    47804662     */
    47814663    PVM pVM = pVCpu->CTX_SUFF(pVM);
    4782     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
    4783     {
    4784         /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
     4664    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
     4665    {
     4666        if (pVM->hm.s.fAllow64BitGuests)
     4667        {
    47854668#if HC_ARCH_BITS == 32
    4786         if (pVM->hm.s.fAllow64BitGuests)
    4787         {
    47884669            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false, NULL);
    47894670            rc    |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false, NULL);
     
    47924673            AssertRCReturn(rc, rc);
    47934674# ifdef LOG_ENABLED
    4794             PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
      4675            PCVMXAUTOMSR pMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    47954676            for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
    4796             {
    4797                 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr,
    4798                       pMsr->u64Value));
    4799             }
     4677                Log4Func(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
    48004678# endif
    4801         }
    48024679#endif
    4803         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     4680        }
     4681        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    48044682    }
    48054683
     
    48094687     * VM-exits on WRMSRs for these MSRs.
    48104688     */
    4811     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
    4812     {
    4813         int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);      AssertRCReturn(rc, rc);
    4814         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
    4815     }
    4816 
    4817     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
    4818     {
    4819         int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);    AssertRCReturn(rc, rc);
    4820         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
    4821     }
    4822 
    4823     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
    4824     {
    4825         int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);    AssertRCReturn(rc, rc);
    4826         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
    4827     }
    4828 
    4829     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
     4689    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
     4690    {
     4691        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
     4692        {
     4693            int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
     4694            AssertRCReturn(rc, rc);
     4695            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
     4696        }
     4697
     4698        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
     4699        {
     4700            int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
     4701            AssertRCReturn(rc, rc);
     4702            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
     4703        }
     4704
     4705        if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
     4706        {
     4707            int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
     4708            AssertRCReturn(rc, rc);
     4709            ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
     4710        }
     4711    }
     4712
     4713    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
    48304714    {
    48314715        if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
     
    48394723                int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
    48404724                AssertRCReturn(rc,rc);
    4841                 Log4(("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));
     4725                Log4Func(("EFER=%#RX64\n", pMixedCtx->msrEFER));
    48424726            }
    48434727            else
     
    48504734                if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    48514735                    hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
    4852                 Log4(("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,
    4853                       pMixedCtx->msrEFER, pVCpu->hm.s.vmx.cMsrs));
     4736                Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
     4737                          pVCpu->hm.s.vmx.cMsrs));
    48544738            }
    48554739        }
    48564740        else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
    48574741            hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
    4858         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
     4742        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
    48594743    }
    48604744
     
    48634747
    48644748
    4865 /**
    4866  * Loads the guest activity state into the guest-state area in the VMCS.
     4749#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
     4750/**
     4751 * Check if guest state allows safe use of 32-bit switcher again.
     4752 *
     4753 * Segment bases and protected mode structures must be 32-bit addressable
      4754 * because the 32-bit switcher will ignore the high dword when writing these VMCS
     4755 * fields.  See @bugref{8432} for details.
     4756 *
     4757 * @returns true if safe, false if must continue to use the 64-bit switcher.
     4758 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
     4759 *                      out-of-sync. Make sure to update the required fields
     4760 *                      before using them.
     4761 *
     4762 * @remarks No-long-jump zone!!!
     4763 */
     4764static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pMixedCtx)
     4765{
     4766    if (pMixedCtx->gdtr.pGdt    & UINT64_C(0xffffffff00000000))     return false;
     4767    if (pMixedCtx->idtr.pIdt    & UINT64_C(0xffffffff00000000))     return false;
     4768    if (pMixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000))     return false;
     4769    if (pMixedCtx->tr.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     4770    if (pMixedCtx->es.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     4771    if (pMixedCtx->cs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     4772    if (pMixedCtx->ss.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     4773    if (pMixedCtx->ds.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     4774    if (pMixedCtx->fs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     4775    if (pMixedCtx->gs.u64Base   & UINT64_C(0xffffffff00000000))     return false;
     4776
     4777    /* All good, bases are 32-bit. */
     4778    return true;
     4779}
     4780#endif
     4781
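
hmR0VmxIs32BitSwitcherSafe() reduces to masking each base with the upper-dword mask; any bit set above the low 4 GiB forces the 64-bit switcher to stay in use. A tiny worked example under those assumptions (the two base values are hypothetical):

    /* Sketch: the addressability test above applied to two hypothetical GDT bases. */
    uint64_t const fHighMask = UINT64_C(0xffffffff00000000);
    uint64_t uGdtBase1 = UINT64_C(0x00000000fffe0000);  /* below 4 GiB: (uGdtBase1 & fHighMask) == 0 -> safe                  */
    uint64_t uGdtBase2 = UINT64_C(0x0000000100000000);  /* at 4 GiB:    high dword set               -> keep 64-bit switcher  */
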
     4782
     4783/**
      4784 * Selects the appropriate function to run guest code.
    48674785 *
    48684786 * @returns VBox status code.
     
    48744792 * @remarks No-long-jump zone!!!
    48754793 */
    4876 static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    4877 {
    4878     NOREF(pMixedCtx);
    4879     /** @todo See if we can make use of other states, e.g.
    4880      *        VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT.  */
    4881     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
    4882     {
    4883         int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
    4884         AssertRCReturn(rc, rc);
    4885 
    4886         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
    4887     }
    4888     return VINF_SUCCESS;
    4889 }
    4890 
    4891 
    4892 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    4893 /**
    4894  * Check if guest state allows safe use of 32-bit switcher again.
    4895  *
    4896  * Segment bases and protected mode structures must be 32-bit addressable
    4897  * because the  32-bit switcher will ignore high dword when writing these VMCS
    4898  * fields.  See @bugref{8432} for details.
    4899  *
    4900  * @returns true if safe, false if must continue to use the 64-bit switcher.
    4901  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    4902  *                      out-of-sync. Make sure to update the required fields
    4903  *                      before using them.
    4904  *
    4905  * @remarks No-long-jump zone!!!
    4906  */
    4907 static bool hmR0VmxIs32BitSwitcherSafe(PCPUMCTX pMixedCtx)
    4908 {
    4909     if (pMixedCtx->gdtr.pGdt    & UINT64_C(0xffffffff00000000))
    4910         return false;
    4911     if (pMixedCtx->idtr.pIdt    & UINT64_C(0xffffffff00000000))
    4912         return false;
    4913     if (pMixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000))
    4914         return false;
    4915     if (pMixedCtx->tr.u64Base   & UINT64_C(0xffffffff00000000))
    4916         return false;
    4917     if (pMixedCtx->es.u64Base   & UINT64_C(0xffffffff00000000))
    4918         return false;
    4919     if (pMixedCtx->cs.u64Base   & UINT64_C(0xffffffff00000000))
    4920         return false;
    4921     if (pMixedCtx->ss.u64Base   & UINT64_C(0xffffffff00000000))
    4922         return false;
    4923     if (pMixedCtx->ds.u64Base   & UINT64_C(0xffffffff00000000))
    4924         return false;
    4925     if (pMixedCtx->fs.u64Base   & UINT64_C(0xffffffff00000000))
    4926         return false;
    4927     if (pMixedCtx->gs.u64Base   & UINT64_C(0xffffffff00000000))
    4928         return false;
    4929     /* All good, bases are 32-bit. */
    4930     return true;
    4931 }
    4932 #endif
    4933 
    4934 
    4935 /**
    4936  * Sets up the appropriate function to run guest code.
    4937  *
    4938  * @returns VBox status code.
    4939  * @param   pVCpu       The cross context virtual CPU structure.
    4940  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    4941  *                      out-of-sync. Make sure to update the required fields
    4942  *                      before using them.
    4943  *
    4944  * @remarks No-long-jump zone!!!
    4945  */
    4946 static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4794static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    49474795{
    49484796    if (CPUMIsGuestInLongModeEx(pMixedCtx))
     
    49564804        if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
    49574805        {
     4806#ifdef VBOX_STRICT
    49584807            if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
    49594808            {
     49604809                /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
    4961                 AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_VMX_EXIT_CTLS
    4962                                                  | HM_CHANGED_VMX_ENTRY_CTLS
    4963                                                  | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
     4810                uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
     4811                AssertMsg(fCtxChanged & (  HM_CHANGED_VMX_EXIT_CTLS
     4812                                         | HM_CHANGED_VMX_ENTRY_CTLS
     4813                                         | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
    49644814            }
     4815#endif
    49654816            pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
    49664817
     
    49684819               the rest of the VM run (until VM reset). See @bugref{8432#c7}. */
    49694820            pVCpu->hm.s.vmx.fSwitchedTo64on32 = true;
    4970             Log4(("Load[%RU32]: hmR0VmxSetupVMRunHandler: selected 64-bit switcher\n", pVCpu->idCpu));
     4821            Log4Func(("Selected 64-bit switcher\n"));
    49714822        }
    49724823#else
     
    49834834            &&  pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
    49844835        {
     4836# ifdef VBOX_STRICT
     49854837            /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
    4986             AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_VMX_EXIT_CTLS
    4987                                              | HM_CHANGED_VMX_ENTRY_CTLS
    4988                                              | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
     4838            uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
     4839            AssertMsg(fCtxChanged & (  HM_CHANGED_VMX_EXIT_CTLS
     4840                                     | HM_CHANGED_VMX_ENTRY_CTLS
     4841                                     | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
     4842# endif
    49894843        }
    49904844# ifdef VBOX_ENABLE_64_BITS_GUESTS
    49914845        /*
    4992          * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel design, see @bugref{8432#c7}.
    4993          * If real-on-v86 mode is active, clear the 64-bit switcher flag because now we know the guest is in a sane
    4994          * state where it's safe to use the 32-bit switcher. Otherwise check the guest state if it's safe to use
     4846         * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel
     4847         * design, see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit
     4848         * switcher flag because now we know the guest is in a sane state where it's safe
      4849         * to use the 32-bit switcher. Otherwise, check whether the guest state allows using
    49954850         * the much faster 32-bit switcher again.
    49964851         */
     
    49984853        {
    49994854            if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
    5000                 Log4(("Load[%RU32]: hmR0VmxSetupVMRunHandler: selected 32-bit switcher\n", pVCpu->idCpu));
     4855                Log4Func(("Selected 32-bit switcher\n"));
    50014856            pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
    50024857        }
     
    50094864                pVCpu->hm.s.vmx.fSwitchedTo64on32 = false;
    50104865                pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
    5011                 HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_EFER_MSR
    5012                                     | HM_CHANGED_VMX_ENTRY_CTLS
    5013                                     | HM_CHANGED_VMX_EXIT_CTLS
    5014                                     | HM_CHANGED_HOST_CONTEXT);
    5015                 Log4(("Load[%RU32]: hmR0VmxSetupVMRunHandler: selected 32-bit switcher (safe)\n", pVCpu->idCpu));
     4866                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_EFER_MSR
     4867                                                           | HM_CHANGED_VMX_ENTRY_CTLS
     4868                                                           | HM_CHANGED_VMX_EXIT_CTLS
     4869                                                           | HM_CHANGED_HOST_CONTEXT);
     4870                Log4Func(("Selected 32-bit switcher (safe)\n"));
    50164871            }
    50174872        }
     
    50404895DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    50414896{
     4897    /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
     4898    pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
     4899
    50424900    /*
    5043      * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
    5044      * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
    5045      * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
     4901     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
     4902     * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
     4903     * callee-saved and thus the need for this XMM wrapper.
     4904     *
     4905     * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
    50464906     */
    50474907    bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
     
    50754935    HMVMX_ASSERT_PREEMPT_SAFE();
    50764936
    5077     Log4(("VM-entry failure: %Rrc\n", rcVMRun));
     4937    Log4Func(("VM-entry failure: %Rrc\n", rcVMRun));
    50784938    switch (rcVMRun)
    50794939    {
     
    53225182 *
    53235183 * @returns VBox status code (no informational status codes).
    5324  * @param   pVM         The cross context VM structure.
    53255184 * @param   pVCpu       The cross context virtual CPU structure.
    5326  * @param   pCtx        Pointer to the guest CPU context.
    53275185 * @param   enmOp       The operation to perform.
    53285186 * @param   cParams     Number of parameters.
    53295187 * @param   paParam     Array of 32-bit parameters.
    53305188 */
    5331 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
     5189VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp,
    53325190                                         uint32_t cParams, uint32_t *paParam)
    53335191{
    5334     NOREF(pCtx);
    5335 
     5192    PVM pVM = pVCpu->CTX_SUFF(pVM);
    53365193    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
    53375194    Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
     
    54535310    *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
    54545311#endif
    5455     int rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
     5312    int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
    54565313
    54575314#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    55375394    VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
    55385395
    5539     /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
     5396    /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for
     5397       these 64-bit fields (using "FULL" and "HIGH" fields). */
    55405398#if 0
    55415399    VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
     
    57745632
    57755633
    5776 #ifdef HMVMX_USE_IEM_EVENT_REFLECTION
    57775634/**
    57785635 * Gets the IEM exception flags for the specified vector and IDT vectoring /
     
    58325689}
    58335690
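
The flags produced by the helper ending above are what hmR0VmxCheckExitDueToEventDelivery() later passes to IEMEvaluateRecursiveXcpt() to classify the original and the new event. A hedged sketch of that call sequence (IEMXCPTRAISE_DOUBLE_FAULT is assumed to be one of the IEMXCPTRAISE values alongside those visible further down; error handling omitted):

    /* Sketch: let IEM decide how a nested event should be raised. */
    IEMXCPTRAISEINFO fRaiseInfo;
    IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector,
                                                     fExitVectorFlags, uExitVector, &fRaiseInfo);
    if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)          /* e.g. #GP raised while delivering #GP */
        hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);      /* queue a #DF instead of the original event */
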
    5834 #else
    5835 /**
    5836  * Determines if an exception is a contributory exception.
    5837  *
    5838  * Contributory exceptions are ones which can cause double-faults unless the
    5839  * original exception was a benign exception. Page-fault is intentionally not
    5840  * included here as it's a conditional contributory exception.
    5841  *
    5842  * @returns true if the exception is contributory, false otherwise.
    5843  * @param   uVector     The exception vector.
    5844  */
    5845 DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
    5846 {
    5847     switch (uVector)
    5848     {
    5849         case X86_XCPT_GP:
    5850         case X86_XCPT_SS:
    5851         case X86_XCPT_NP:
    5852         case X86_XCPT_TS:
    5853         case X86_XCPT_DE:
    5854             return true;
    5855         default:
    5856             break;
    5857     }
    5858     return false;
    5859 }
    5860 #endif /* HMVMX_USE_IEM_EVENT_REFLECTION */
    5861 
    58625691
    58635692/**
     
    59305759    uint32_t const uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    59315760
    5932     int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);       AssertRCReturn(rc2, rc2);
    5933     rc2     = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);            AssertRCReturn(rc2, rc2);
     5761    int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
     5762    rc2    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     5763    AssertRCReturn(rc2, rc2);
    59345764
    59355765    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
     
    59385768        uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
    59395769        uint32_t const uIdtVector     = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
    5940 #ifdef HMVMX_USE_IEM_EVENT_REFLECTION
     5770
    59415771        /*
    5942          * If the event was a software interrupt (generated with INT n) or a software exception (generated
    5943          * by INT3/INTO) or a privileged software exception (generated by INT1), we can handle the VM-exit
    5944          * and continue guest execution which will re-execute the instruction rather than re-injecting the
    5945          * exception, as that can cause premature trips to ring-3 before injection and involve TRPM which
    5946          * currently has no way of storing that these exceptions were caused by these instructions
    5947          * (ICEBP's #DB poses the problem).
     5772         * If the event was a software interrupt (generated with INT n) or a software exception
     5773         * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
     5774         * can handle the VM-exit and continue guest execution which will re-execute the
     5775         * instruction rather than re-injecting the exception, as that can cause premature
     5776         * trips to ring-3 before injection and involve TRPM which currently has no way of
     5777         * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
     5778         * the problem).
    59485779         */
    59495780        IEMXCPTRAISE     enmRaise;
     
    59655796                            ("hmR0VmxCheckExitDueToEventDelivery: Unexpected VM-exit interruption info. %#x!\n",
    59665797                             uExitVectorType), VERR_VMX_IPE_5);
     5798
    59675799            enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
    59685800
     
    60095841            case IEMXCPTRAISE_CURRENT_XCPT:
    60105842            {
    6011                 Log4(("IDT: vcpu[%RU32] Pending secondary xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n", pVCpu->idCpu,
    6012                       pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));
     5843                Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n",
     5844                          pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));
    60135845                Assert(rcStrict == VINF_SUCCESS);
    60145846                break;
     
    60325864                                       0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
    60335865
    6034                 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
    6035                       pVCpu->hm.s.Event.u32ErrCode));
     5866                Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
     5867                          pVCpu->hm.s.Event.u32ErrCode));
    60365868                Assert(rcStrict == VINF_SUCCESS);
    60375869                break;
     
    60515883                {
    60525884                    pVmxTransient->fVectoringDoublePF = true;
    6053                     Log4(("IDT: vcpu[%RU32] Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
     5885                    Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
    60545886                          pMixedCtx->cr2));
    60555887                    rcStrict = VINF_SUCCESS;
     
    60595891                    STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
    60605892                    hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
    6061                     Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
    6062                           pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
     5893                    Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
     5894                              uIdtVector, uExitVector));
    60635895                    rcStrict = VINF_HM_DOUBLE_FAULT;
    60645896                }
     
    60685900            case IEMXCPTRAISE_TRIPLE_FAULT:
    60695901            {
    6070                 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
    6071                       uExitVector));
     5902                Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
    60725903                rcStrict = VINF_EM_RESET;
    60735904                break;
     
    60765907            case IEMXCPTRAISE_CPU_HANG:
    60775908            {
    6078                 Log4(("IDT: vcpu[%RU32] Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", pVCpu->idCpu, fRaiseInfo));
     5909                Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
    60795910                rcStrict = VERR_EM_GUEST_CPU_HANG;
    60805911                break;
     
    60885919            }
    60895920        }
    6090 #else
    6091         typedef enum
    6092         {
    6093             VMXREFLECTXCPT_XCPT,    /* Reflect the exception to the guest or for further evaluation by VMM. */
    6094             VMXREFLECTXCPT_DF,      /* Reflect the exception as a double-fault to the guest. */
    6095             VMXREFLECTXCPT_TF,      /* Indicate a triple faulted state to the VMM. */
    6096             VMXREFLECTXCPT_HANG,    /* Indicate bad VM trying to deadlock the CPU. */
    6097             VMXREFLECTXCPT_NONE     /* Nothing to reflect. */
    6098         } VMXREFLECTXCPT;
    6099 
    6100         /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
    6101         VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
    6102         if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
    6103         {
    6104             if (uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
    6105             {
    6106                 enmReflect = VMXREFLECTXCPT_XCPT;
    6107 #ifdef VBOX_STRICT
    6108                 if (   hmR0VmxIsContributoryXcpt(uIdtVector)
    6109                     && uExitVector == X86_XCPT_PF)
    6110                 {
    6111                     Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
    6112                 }
    6113 #endif
    6114                 if (   uExitVector == X86_XCPT_PF
    6115                     && uIdtVector == X86_XCPT_PF)
    6116                 {
    6117                     pVmxTransient->fVectoringDoublePF = true;
    6118                     Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
    6119                 }
    6120                 else if (   uExitVector == X86_XCPT_AC
    6121                          && uIdtVector == X86_XCPT_AC)
    6122                 {
    6123                     enmReflect = VMXREFLECTXCPT_HANG;
    6124                     Log4(("IDT: Nested #AC - Bad guest\n"));
    6125                 }
    6126                 else if (   (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
    6127                          && hmR0VmxIsContributoryXcpt(uExitVector)
    6128                          && (   hmR0VmxIsContributoryXcpt(uIdtVector)
    6129                              || uIdtVector == X86_XCPT_PF))
    6130                 {
    6131                     enmReflect = VMXREFLECTXCPT_DF;
    6132                 }
    6133                 else if (uIdtVector == X86_XCPT_DF)
    6134                     enmReflect = VMXREFLECTXCPT_TF;
    6135             }
    6136             else if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
    6137                      || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
    6138             {
    6139                 /*
    6140                  * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and
    6141                  * privileged software exception (#DB from ICEBP) as they reoccur when restarting the instruction.
    6142                  */
    6143                 enmReflect = VMXREFLECTXCPT_XCPT;
    6144 
    6145                 if (uExitVector == X86_XCPT_PF)
    6146                 {
    6147                     pVmxTransient->fVectoringPF = true;
    6148                     Log4(("IDT: vcpu[%RU32] Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
    6149                 }
    6150             }
    6151         }
    6152         else if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
    6153                  || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
    6154                  || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
    6155         {
    6156             /*
    6157              * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
    6158              * interruption-information will not be valid as it's not an exception and we end up here. In such cases,
    6159              * it is sufficient to reflect the original exception to the guest after handling the VM-exit.
    6160              */
    6161             enmReflect = VMXREFLECTXCPT_XCPT;
    6162         }
    6163 
    6164         /*
    6165          * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig etc.) occurred
    6166          * while delivering the NMI, we need to clear the block-by-NMI field in the guest interruptibility-state before
    6167          * re-delivering the NMI after handling the VM-exit. Otherwise the subsequent VM-entry would fail.
    6168          *
    6169          * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
    6170          */
    6171         if (   uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
    6172             && enmReflect == VMXREFLECTXCPT_XCPT
    6173             && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
    6174             && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
    6175         {
    6176             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6177         }
    6178 
    6179         switch (enmReflect)
    6180         {
    6181             case VMXREFLECTXCPT_XCPT:
    6182             {
    6183                 Assert(   uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
    6184                        && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
    6185                        && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
    6186 
    6187                 uint32_t u32ErrCode = 0;
    6188                 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
    6189                 {
    6190                     rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
    6191                     AssertRCReturn(rc2, rc2);
    6192                     u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
    6193                 }
    6194 
    6195                 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
    6196                 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
    6197                 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
    6198                                        0 /* cbInstr */,  u32ErrCode, pMixedCtx->cr2);
    6199                 rcStrict = VINF_SUCCESS;
    6200                 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
    6201                       pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));
    6202 
    6203                 break;
    6204             }
    6205 
    6206             case VMXREFLECTXCPT_DF:
    6207             {
    6208                 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
    6209                 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
    6210                 rcStrict = VINF_HM_DOUBLE_FAULT;
    6211                 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
    6212                       pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
    6213 
    6214                 break;
    6215             }
    6216 
    6217             case VMXREFLECTXCPT_TF:
    6218             {
    6219                 rcStrict = VINF_EM_RESET;
    6220                 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
    6221                       uExitVector));
    6222                 break;
    6223             }
    6224 
    6225             case VMXREFLECTXCPT_HANG:
    6226             {
    6227                 rcStrict = VERR_EM_GUEST_CPU_HANG;
    6228                 break;
    6229             }
    6230 
    6231             default:
    6232                 Assert(rcStrict == VINF_SUCCESS);
    6233                 break;
    6234         }
    6235 #endif /* HMVMX_USE_IEM_EVENT_REFLECTION */
    62365921    }
    62375922    else if (   VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
     
    62475932        if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
    62485933        {
    6249             Log4(("hmR0VmxCheckExitDueToEventDelivery: vcpu[%RU32] Setting VMCPU_FF_BLOCK_NMIS. Valid=%RTbool uExitReason=%u\n",
    6250                   pVCpu->idCpu, VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
     5934            Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n",
     5935                      VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
    62515936            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    62525937        }
     
    62605945
    62615946/**
    6262  * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
    6263  *
    6264  * @returns VBox status code.
    6265  * @param   pVCpu       The cross context virtual CPU structure.
    6266  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
    6267  *                      out-of-sync. Make sure to update the required fields
    6268  *                      before using them.
    6269  *
    6270  * @remarks No-long-jump zone!!!
    6271  */
    6272 static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6273 {
    6274     NOREF(pMixedCtx);
    6275 
    6276     /*
    6277      * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook,
    6278      * see hmR0VmxLeave(). Safer to just make this code non-preemptible.
    6279      */
    6280     VMMRZCallRing3Disable(pVCpu);
    6281     HM_DISABLE_PREEMPT();
    6282 
    6283     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
    6284     {
    6285 #ifndef DEBUG_bird /** @todo this triggers running bs3-cpu-generated-1.img with --debug-command-line
    6286                     * and 'dbgc-init' containing:
    6287                     *     sxe "xcpt_de"
    6288                     *     sxe "xcpt_bp"
    6289                     *     sxi "xcpt_gp"
    6290                     *     sxi "xcpt_ss"
    6291                     *     sxi "xcpt_np"
    6292                     */
    6293         /** @todo r=ramshankar: Should be fixed after r119291. */
    6294         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
    6295 #endif
    6296         uint32_t uVal    = 0;
    6297         uint32_t uShadow = 0;
    6298         int rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR0,            &uVal);
    6299         rc     |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
    6300         AssertRCReturn(rc, rc);
    6301 
    6302         uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
    6303         CPUMSetGuestCR0(pVCpu, uVal);
    6304         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
    6305     }
    6306 
    6307     HM_RESTORE_PREEMPT();
    6308     VMMRZCallRing3Enable(pVCpu);
    6309     return VINF_SUCCESS;
    6310 }
    6311 
    6312 
    6313 /**
    6314  * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
    6315  *
    6316  * @returns VBox status code.
    6317  * @param   pVCpu       The cross context virtual CPU structure.
    6318  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
    6319  *                      out-of-sync. Make sure to update the required fields
    6320  *                      before using them.
    6321  *
    6322  * @remarks No-long-jump zone!!!
    6323  */
    6324 static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6325 {
    6326     NOREF(pMixedCtx);
    6327 
    6328     int rc = VINF_SUCCESS;
    6329     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
    6330     {
    6331         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4));
    6332         uint32_t uVal    = 0;
    6333         uint32_t uShadow = 0;
    6334         rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR4,            &uVal);
    6335         rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
    6336         AssertRCReturn(rc, rc);
    6337 
    6338         uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
    6339         CPUMSetGuestCR4(pVCpu, uVal);
    6340         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
    6341     }
    6342     return rc;
    6343 }
    6344 
    6345 
    6346 /**
    6347  * Saves the guest's RIP register from the VMCS into the guest-CPU context.
    6348  *
    6349  * @returns VBox status code.
    6350  * @param   pVCpu       The cross context virtual CPU structure.
    6351  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
    6352  *                      out-of-sync. Make sure to update the required fields
    6353  *                      before using them.
    6354  *
    6355  * @remarks No-long-jump zone!!!
    6356  */
    6357 static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6358 {
    6359     int rc = VINF_SUCCESS;
    6360     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
    6361     {
    6362         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP));
    6363         uint64_t u64Val = 0;
    6364         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
    6365         AssertRCReturn(rc, rc);
    6366 
    6367         pMixedCtx->rip = u64Val;
    6368         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
    6369     }
    6370     return rc;
    6371 }
    6372 
    6373 
    6374 /**
    6375  * Saves the guest's RSP register from the VMCS into the guest-CPU context.
    6376  *
    6377  * @returns VBox status code.
    6378  * @param   pVCpu       The cross context virtual CPU structure.
    6379  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
    6380  *                      out-of-sync. Make sure to update the required fields
    6381  *                      before using them.
    6382  *
    6383  * @remarks No-long-jump zone!!!
    6384  */
    6385 static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6386 {
    6387     int rc = VINF_SUCCESS;
    6388     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
    6389     {
    6390         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP));
    6391         uint64_t u64Val = 0;
    6392         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
    6393         AssertRCReturn(rc, rc);
    6394 
    6395         pMixedCtx->rsp = u64Val;
    6396         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
    6397     }
    6398     return rc;
    6399 }
    6400 
    6401 
    6402 /**
    6403  * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
    6404  *
    6405  * @returns VBox status code.
    6406  * @param   pVCpu       The cross context virtual CPU structure.
    6407  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
    6408  *                      out-of-sync. Make sure to update the required fields
    6409  *                      before using them.
    6410  *
    6411  * @remarks No-long-jump zone!!!
    6412  */
    6413 static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6414 {
    6415     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
    6416     {
    6417         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS));
    6418         uint32_t uVal = 0;
    6419         int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
    6420         AssertRCReturn(rc, rc);
    6421 
    6422         pMixedCtx->eflags.u32 = uVal;
    6423         if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)        /* Undo our real-on-v86-mode changes to eflags if necessary. */
    6424         {
    6425             Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
    6426             Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
    6427 
    6428             pMixedCtx->eflags.Bits.u1VM   = 0;
    6429             pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
    6430         }
    6431 
    6432         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
    6433     }
    6434     return VINF_SUCCESS;
    6435 }
    6436 
    6437 
    6438 /**
    6439  * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
    6440  * guest-CPU context.
    6441  */
    6442 DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6443 {
    6444     int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    6445     rc    |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
    6446     rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    6447     return rc;
    6448 }
    6449 
    6450 
    6451 /**
    6452  * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
    6453  * from the guest-state area in the VMCS.
    6454  *
    6455  * @param   pVCpu       The cross context virtual CPU structure.
    6456  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    6457  *                      out-of-sync. Make sure to update the required fields
    6458  *                      before using them.
    6459  *
    6460  * @remarks No-long-jump zone!!!
    6461  */
    6462 static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6463 {
    6464     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE))
    6465     {
    6466         uint32_t uIntrState = 0;
    6467         int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
    6468         AssertRC(rc);
    6469 
    6470         if (!uIntrState)
    6471         {
    6472             if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    6473                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    6474 
    6475             if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
    6476                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6477         }
    6478         else
    6479         {
    6480             if (uIntrState & (  VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
    6481                               | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
    6482             {
    6483                 rc  = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    6484                 AssertRC(rc);
    6485                 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);    /* for hmR0VmxGetGuestIntrState(). */
    6486                 AssertRC(rc);
    6487 
    6488                 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
    6489                 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    6490             }
    6491             else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    6492                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    6493 
    6494             if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
    6495             {
    6496                 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
    6497                     VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6498             }
    6499             else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
    6500                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6501         }
    6502 
    6503         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE);
    6504     }
    6505 }
    6506 
    6507 
    6508 /**
    6509  * Saves the guest's activity state.
    6510  *
    6511  * @returns VBox status code.
    6512  * @param   pVCpu       The cross context virtual CPU structure.
    6513  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    6514  *                      out-of-sync. Make sure to update the required fields
    6515  *                      before using them.
    6516  *
    6517  * @remarks No-long-jump zone!!!
    6518  */
    6519 static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6520 {
    6521     NOREF(pMixedCtx);
    6522     /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
    6523     HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
    6524     return VINF_SUCCESS;
    6525 }
    6526 
    6527 
    6528 /**
    6529  * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
    6530  * the current VMCS into the guest-CPU context.
    6531  *
    6532  * @returns VBox status code.
    6533  * @param   pVCpu       The cross context virtual CPU structure.
    6534  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    6535  *                      out-of-sync. Make sure to update the required fields
    6536  *                      before using them.
    6537  *
    6538  * @remarks No-long-jump zone!!!
    6539  */
    6540 static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6541 {
    6542     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
    6543     {
    6544         Assert(!HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR));
    6545         uint32_t u32Val = 0;
    6546         int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);     AssertRCReturn(rc, rc);
    6547         pMixedCtx->SysEnter.cs = u32Val;
    6548         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
    6549     }
    6550 
    6551     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
    6552     {
    6553         Assert(!HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR));
    6554         uint64_t u64Val = 0;
    6555         int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val);    AssertRCReturn(rc, rc);
    6556         pMixedCtx->SysEnter.eip = u64Val;
    6557         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
    6558     }
    6559     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
    6560     {
    6561         Assert(!HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR));
    6562         uint64_t u64Val = 0;
    6563         int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val);    AssertRCReturn(rc, rc);
    6564         pMixedCtx->SysEnter.esp = u64Val;
    6565         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
    6566     }
    6567     return VINF_SUCCESS;
    6568 }
    6569 
    6570 
    6571 /**
    6572  * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
    6573  * the CPU back into the guest-CPU context.
    6574  *
    6575  * @returns VBox status code.
    6576  * @param   pVCpu       The cross context virtual CPU structure.
    6577  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    6578  *                      out-of-sync. Make sure to update the required fields
    6579  *                      before using them.
    6580  *
    6581  * @remarks No-long-jump zone!!!
    6582  */
    6583 static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6584 {
    6585     /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
    6586     VMMRZCallRing3Disable(pVCpu);
    6587     HM_DISABLE_PREEMPT();
    6588 
    6589     /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
    6590     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
    6591     {
    6592         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS));
    6593         hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
    6594         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
    6595     }
    6596 
    6597     HM_RESTORE_PREEMPT();
    6598     VMMRZCallRing3Enable(pVCpu);
    6599 
    6600     return VINF_SUCCESS;
    6601 }
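
/*
 * Editor's note: the function above brackets the actual MSR save in a "no ring-3
 * calls, no preemption" section so the thread cannot migrate or longjmp while the
 * lazily-loaded guest MSRs are being read back from the CPU, and so a preemption
 * hook that already saved them is not overwritten.  The sketch below shows only that
 * bracketing shape; disablePreemption(), restorePreemption() and
 * readGuestMsrsFromCpu() are hypothetical stand-ins, not VirtualBox APIs.
 */
#include <stdint.h>
#include <stdbool.h>

typedef struct MsrState { bool fSaved; uint64_t kernelGsBase; } MsrState;

static void disablePreemption(void) { /* platform-specific in real code */ }
static void restorePreemption(void) { /* platform-specific in real code */ }
static uint64_t readGuestMsrsFromCpu(void) { return 0x7fffffff0000ull; } /* stand-in for RDMSR */

static void saveLazyMsrs(MsrState *pMsrs)
{
    disablePreemption();                 /* keep the check and the save atomic */
    if (!pMsrs->fSaved)                  /* don't overwrite values a hook already saved */
    {
        pMsrs->kernelGsBase = readGuestMsrsFromCpu();
        pMsrs->fSaved = true;
    }
    restorePreemption();
}

int main(void)
{
    MsrState Msrs = { false, 0 };
    saveLazyMsrs(&Msrs);
    return Msrs.fSaved ? 0 : 1;
}
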
    6602 
    6603 
    6604 /**
    6605  * Saves the auto load/store'd guest MSRs from the current VMCS into
     5947 * Imports a guest segment register from the current VMCS into
    66065948 * the guest-CPU context.
    6607  *
    6608  * @returns VBox status code.
    6609  * @param   pVCpu       The cross context virtual CPU structure.
    6610  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    6611  *                      out-of-sync. Make sure to update the required fields
    6612  *                      before using them.
    6613  *
    6614  * @remarks No-long-jump zone!!!
    6615  */
    6616 static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6617 {
    6618     if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
    6619         return VINF_SUCCESS;
    6620 
    6621     Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS));
    6622     PVMXAUTOMSR pMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    6623     uint32_t    cMsrs = pVCpu->hm.s.vmx.cMsrs;
    6624     Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
    6625     for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
    6626     {
    6627         switch (pMsr->u32Msr)
    6628         {
    6629             case MSR_K8_TSC_AUX:        CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);      break;
    6630             case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR        = pMsr->u64Value;    break;
    6631             case MSR_K6_STAR:           pMixedCtx->msrSTAR         = pMsr->u64Value;    break;
    6632             case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK       = pMsr->u64Value;    break;
    6633             case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value;    break;
    6634             case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value);    break;
    6635             case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
    6636                 break;
    6637 
    6638             default:
    6639             {
    6640                 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
    6641                 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
    6642                 return VERR_HM_UNEXPECTED_LD_ST_MSR;
    6643             }
    6644         }
    6645     }
    6646 
    6647     HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
    6648     return VINF_SUCCESS;
    6649 }
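
/*
 * Editor's note: the VM-exit MSR-store area walked above is simply an array of
 * { MSR index, value } entries that the CPU filled in on VM-exit; each entry is
 * routed to the matching guest-CPU context field, and an unexpected MSR is treated
 * as a fatal error.  Minimal sketch of that walk below; MyMsrEntry, MyCtx and the
 * MSR_*_IDX names are hypothetical, not the VirtualBox definitions.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct MyMsrEntry { uint32_t idMsr; uint32_t uReserved; uint64_t u64Value; } MyMsrEntry;
typedef struct MyCtx      { uint64_t msrLstar; uint64_t msrStar; } MyCtx;

#define MSR_LSTAR_IDX 0xC0000082u   /* architectural MSR numbers, used here only as examples */
#define MSR_STAR_IDX  0xC0000081u

static int importAutoStoreMsrs(MyCtx *pCtx, const MyMsrEntry *paMsrs, uint32_t cMsrs)
{
    for (uint32_t i = 0; i < cMsrs; i++)
        switch (paMsrs[i].idMsr)
        {
            case MSR_LSTAR_IDX: pCtx->msrLstar = paMsrs[i].u64Value; break;
            case MSR_STAR_IDX:  pCtx->msrStar  = paMsrs[i].u64Value; break;
            default:            return -1;   /* an MSR we never put in the area: fail hard */
        }
    return 0;
}

int main(void)
{
    MyMsrEntry aMsrs[] = { { MSR_LSTAR_IDX, 0, 0xffffffff81000000ull } };
    MyCtx      Ctx     = { 0, 0 };
    printf("rc=%d lstar=%#llx\n", importAutoStoreMsrs(&Ctx, aMsrs, 1), (unsigned long long)Ctx.msrLstar);
    return 0;
}
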
    6650 
    6651 
    6652 /**
    6653  * Saves the guest control registers from the current VMCS into the guest-CPU
    6654  * context.
    6655  *
    6656  * @returns VBox status code.
    6657  * @param   pVCpu       The cross context virtual CPU structure.
    6658  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    6659  *                      out-of-sync. Make sure to update the required fields
    6660  *                      before using them.
    6661  *
    6662  * @remarks No-long-jump zone!!!
    6663  */
    6664 static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6665 {
    6666     /* Guest CR0. Guest FPU. */
    6667     int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    6668     AssertRCReturn(rc, rc);
    6669 
    6670     /* Guest CR4. */
    6671     rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
    6672     AssertRCReturn(rc, rc);
    6673 
    6674     /* Guest CR2 - updated always during the world-switch or in #PF. */
    6675     /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
    6676     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
    6677     {
    6678         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3));
    6679         Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
    6680         Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));
    6681 
    6682         PVM pVM = pVCpu->CTX_SUFF(pVM);
    6683         if (   pVM->hm.s.vmx.fUnrestrictedGuest
    6684             || (   pVM->hm.s.fNestedPaging
    6685                 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
    6686         {
    6687             uint64_t u64Val = 0;
    6688             rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
    6689             if (pMixedCtx->cr3 != u64Val)
    6690             {
    6691                 CPUMSetGuestCR3(pVCpu, u64Val);
    6692                 if (VMMRZCallRing3IsEnabled(pVCpu))
    6693                 {
    6694                     PGMUpdateCR3(pVCpu, u64Val);
    6695                     Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    6696                 }
    6697                 else
    6698                 {
    6699                     /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
    6700                     VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
    6701                 }
    6702             }
    6703 
    6704             /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
    6705             if (CPUMIsGuestInPAEModeEx(pMixedCtx))  /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
    6706             {
    6707                 rc  = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
    6708                 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
    6709                 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
    6710                 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
    6711                 AssertRCReturn(rc, rc);
    6712 
    6713                 if (VMMRZCallRing3IsEnabled(pVCpu))
    6714                 {
    6715                     PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
    6716                     Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
    6717                 }
    6718                 else
    6719                 {
    6720                     /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
    6721                     VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
    6722                 }
    6723             }
    6724         }
    6725 
    6726         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
    6727     }
    6728 
    6729     /*
    6730      * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
    6731      * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
    6732      * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
    6733      *
    6734      * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
    6735      * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
    6736      * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
    6737      * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!
    6738      *
    6739      * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
    6740      */
    6741     if (VMMRZCallRing3IsEnabled(pVCpu))
    6742     {
    6743         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    6744             PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
    6745 
    6746         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
    6747             PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
    6748 
    6749         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    6750         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
    6751     }
    6752 
    6753     return rc;
    6754 }
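
/*
 * Editor's note: the CR3 handling above either informs PGM immediately (when ring-3
 * calls are allowed) or records a force flag so the update is replayed later, which
 * is exactly the scenario the long comment describes.  The sketch below only shows
 * that defer-or-apply shape; VCpuSketch, FF_UPDATE_CR3, pgmUpdateCr3() and
 * canCallPgmNow are hypothetical names, not the VirtualBox ones.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define FF_UPDATE_CR3 (1u << 0)                      /* hypothetical force flag */

typedef struct VCpuSketch { uint32_t fForceFlags; uint64_t cr3; } VCpuSketch;

static void pgmUpdateCr3(VCpuSketch *pVCpu) { printf("PGM sees cr3=%#llx\n", (unsigned long long)pVCpu->cr3); }

static void syncGuestCr3(VCpuSketch *pVCpu, uint64_t cr3FromVmcs, bool canCallPgmNow)
{
    if (pVCpu->cr3 != cr3FromVmcs)
    {
        pVCpu->cr3 = cr3FromVmcs;
        if (canCallPgmNow)
            pgmUpdateCr3(pVCpu);                     /* apply immediately */
        else
            pVCpu->fForceFlags |= FF_UPDATE_CR3;     /* defer; replayed once calls are allowed again */
    }
}

int main(void)
{
    VCpuSketch VCpu = { 0, 0 };
    syncGuestCr3(&VCpu, 0x1000, false);              /* e.g. on the longjmp path: only the flag is set */
    if (VCpu.fForceFlags & FF_UPDATE_CR3)            /* later, with ring-3 calls enabled, honor it */
    {
        pgmUpdateCr3(&VCpu);
        VCpu.fForceFlags &= ~FF_UPDATE_CR3;
    }
    return 0;
}
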
    6755 
    6756 
    6757 /**
    6758  * Saves a guest segment register from the current VMCS into the guest-CPU
    6759  * context.
    67605949 *
    67615950 * @returns VBox status code.
     
    67685957 *
    67695958 * @remarks No-long-jump zone!!!
     5959 *
    67705960 * @remarks Never call this function directly!!! Use the
    6771  *          HMVMX_SAVE_SREG() macro as that takes care of whether to read
    6772  *          from the VMCS cache or not.
    6773  */
    6774 static int hmR0VmxSaveSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
    6775                                       PCPUMSELREG pSelReg)
     5961 *          HMVMX_IMPORT_SREG() macro as that takes care
     5962 *          of whether to read from the VMCS cache or not.
     5963 */
     5964static int hmR0VmxImportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
     5965                                        PCPUMSELREG pSelReg)
    67765966{
    67775967    NOREF(pVCpu);
    67785968
    6779     uint32_t u32Val = 0;
    6780     int rc = VMXReadVmcs32(idxSel, &u32Val);
     5969    uint32_t u32Sel;
     5970    uint32_t u32Limit;
     5971    uint32_t u32Attr;
     5972    uint64_t u64Base;
     5973    int rc = VMXReadVmcs32(idxSel, &u32Sel);
     5974    rc    |= VMXReadVmcs32(idxLimit, &u32Limit);
     5975    rc    |= VMXReadVmcs32(idxAccess, &u32Attr);
     5976    rc    |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base);
    67815977    AssertRCReturn(rc, rc);
    6782     pSelReg->Sel      = (uint16_t)u32Val;
    6783     pSelReg->ValidSel = (uint16_t)u32Val;
     5978
     5979    pSelReg->Sel      = (uint16_t)u32Sel;
     5980    pSelReg->ValidSel = (uint16_t)u32Sel;
    67845981    pSelReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    6785 
    6786     rc = VMXReadVmcs32(idxLimit, &u32Val);
    6787     AssertRCReturn(rc, rc);
    6788     pSelReg->u32Limit = u32Val;
    6789 
    6790     uint64_t u64Val = 0;
    6791     rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
    6792     AssertRCReturn(rc, rc);
    6793     pSelReg->u64Base = u64Val;
    6794 
    6795     rc = VMXReadVmcs32(idxAccess, &u32Val);
    6796     AssertRCReturn(rc, rc);
    6797     pSelReg->Attr.u = u32Val;
     5982    pSelReg->u32Limit = u32Limit;
     5983    pSelReg->u64Base  = u64Base;
     5984    pSelReg->Attr.u   = u32Attr;
    67985985
    67995986    /*
     
    68206007
    68216008        /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
    6822         pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
    6823                          | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
    6824 
    6825         Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
     6009        pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L    | X86DESCATTR_D | X86DESCATTR_G
     6010                         | X86DESCATTR_DPL      | X86DESCATTR_TYPE | X86DESCATTR_DT;
     6011
     6012        Log4Func(("Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Sel, pSelReg->Attr.u));
    68266013#ifdef DEBUG_bird
    6827         AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
     6014        AssertMsg((u32Attr & ~X86DESCATTR_P) == pSelReg->Attr.u,
    68286015                  ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
    68296016                   idxSel, u32Attr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
     
    68336020}
    68346021
    6835 /**
    6836  * Saves the guest segment registers from the current VMCS into the guest-CPU
    6837  * context.
    6838  *
    6839  * @returns VBox status code.
    6840  * @param   pVCpu       The cross context virtual CPU structure.
    6841  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    6842  *                      out-of-sync. Make sure to update the required fields
    6843  *                      before using them.
    6844  *
    6845  * @remarks No-long-jump zone!!!
    6846  */
    6847 static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6848 {
    6849     /* Guest segment registers. */
    6850     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
    6851     {
    6852         /** @todo r=ramshankar: Why do we save CR0 here? */
    6853         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS));
    6854         int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    6855         AssertRCReturn(rc, rc);
    6856 
    6857         rc  = HMVMX_SAVE_SREG(CS, &pMixedCtx->cs);
    6858         rc |= HMVMX_SAVE_SREG(SS, &pMixedCtx->ss);
    6859         rc |= HMVMX_SAVE_SREG(DS, &pMixedCtx->ds);
    6860         rc |= HMVMX_SAVE_SREG(ES, &pMixedCtx->es);
    6861         rc |= HMVMX_SAVE_SREG(FS, &pMixedCtx->fs);
    6862         rc |= HMVMX_SAVE_SREG(GS, &pMixedCtx->gs);
    6863         AssertRCReturn(rc, rc);
    6864 
    6865         /* Restore segment attributes for real-on-v86 mode hack. */
    6866         if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    6867         {
    6868             pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
    6869             pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
    6870             pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
    6871             pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
    6872             pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
    6873             pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
    6874         }
    6875         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
    6876     }
    6877 
    6878     return VINF_SUCCESS;
    6879 }
    6880 
    6881 
    6882 /**
    6883  * Saves the guest CS register from the current VMCS into the guest-CPU context.
    6884  *
    6885  * @returns VBox status code.
    6886  * @param   pVCpu       The cross context virtual CPU structure.
    6887  * @remarks No-long-jump zone!!!
    6888  */
    6889 static int hmR0VmxSaveGuestCs(PVMCPU pVCpu)
    6890 {
    6891     /** @todo optimize this? */
    6892     return hmR0VmxSaveGuestSegmentRegs(pVCpu, &pVCpu->cpum.GstCtx);
    6893 }
    6894 
    6895 
    6896 /**
    6897  * Saves the guest descriptor table registers and task register from the current
    6898  * VMCS into the guest-CPU context.
    6899  *
    6900  * @returns VBox status code.
    6901  * @param   pVCpu       The cross context virtual CPU structure.
    6902  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    6903  *                      out-of-sync. Make sure to update the required fields
    6904  *                      before using them.
    6905  *
    6906  * @remarks No-long-jump zone!!!
    6907  */
    6908 static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6909 {
    6910     int rc = VINF_SUCCESS;
    6911 
    6912     /* Guest LDTR. */
    6913     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
    6914     {
    6915         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR));
    6916         rc = HMVMX_SAVE_SREG(LDTR, &pMixedCtx->ldtr);
    6917         AssertRCReturn(rc, rc);
    6918         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
    6919     }
    6920 
    6921     /* Guest GDTR. */
    6922     uint64_t u64Val = 0;
    6923     uint32_t u32Val = 0;
    6924     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
    6925     {
    6926         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR));
    6927         rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
    6928         rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);      AssertRCReturn(rc, rc);
    6929         pMixedCtx->gdtr.pGdt  = u64Val;
    6930         pMixedCtx->gdtr.cbGdt = u32Val;
    6931         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
    6932     }
    6933 
    6934     /* Guest IDTR. */
    6935     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
    6936     {
    6937         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR));
    6938         rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
    6939         rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);      AssertRCReturn(rc, rc);
    6940         pMixedCtx->idtr.pIdt  = u64Val;
    6941         pMixedCtx->idtr.cbIdt = u32Val;
    6942         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
    6943     }
    6944 
    6945     /* Guest TR. */
    6946     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
    6947     {
    6948         Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR));
    6949         rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    6950         AssertRCReturn(rc, rc);
    6951 
    6952         /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
    6953         if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    6954         {
    6955             rc = HMVMX_SAVE_SREG(TR, &pMixedCtx->tr);
    6956             AssertRCReturn(rc, rc);
    6957         }
    6958         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
    6959     }
    6960     return rc;
    6961 }
    6962 
    6963 
    6964 /**
    6965  * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
    6966  * context.
    6967  *
    6968  * @returns VBox status code.
    6969  * @param   pVCpu       The cross context virtual CPU structure.
    6970  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    6971  *                      out-of-sync. Make sure to update the required fields
    6972  *                      before using them.
    6973  *
    6974  * @remarks No-long-jump zone!!!
    6975  */
    6976 static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    6977 {
    6978     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DR7))
    6979     {
    6980         if (!pVCpu->hm.s.fUsingHyperDR7)
    6981         {
    6982             /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
    6983             uint32_t u32Val;
    6984             int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);    AssertRCReturn(rc, rc);
    6985             pMixedCtx->dr[7] = u32Val;
    6986         }
    6987 
    6988         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DR7);
    6989     }
    6990     return VINF_SUCCESS;
    6991 }
    6992 
    6993 
    6994 /**
    6995  * Saves the guest APIC state from the current VMCS into the guest-CPU context.
    6996  *
    6997  * @returns VBox status code.
    6998  * @param   pVCpu       The cross context virtual CPU structure.
    6999  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    7000  *                      out-of-sync. Make sure to update the required fields
    7001  *                      before using them.
    7002  *
    7003  * @remarks No-long-jump zone!!!
    7004  */
    7005 static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    7006 {
    7007     NOREF(pMixedCtx);
    7008 
    7009     /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
    7010     HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
    7011     return VINF_SUCCESS;
    7012 }
    7013 
    7014 
    7015 /**
    7016  * Worker for VMXR0ImportStateOnDemand.
     6022
     6023/**
     6024 * Imports the guest RIP from the VMCS back into the guest-CPU context.
    70176025 *
    70186026 * @returns VBox status code.
    70196027 * @param   pVCpu   The cross context virtual CPU structure.
    7020  * @param   pCtx    Pointer to the guest-CPU context.
     6028 *
     6029 * @remarks Called with interrupts and/or preemption disabled, should not assert!
     6030 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
     6031 *          instead!!!
     6032 */
     6033DECLINLINE(int) hmR0VmxImportGuestRip(PVMCPU pVCpu)
     6034{
     6035    uint64_t u64Val;
     6036    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     6037    if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
     6038    {
     6039        int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
     6040        if (RT_SUCCESS(rc))
     6041        {
     6042            pCtx->rip = u64Val;
     6043            pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
     6044        }
     6045        return rc;
     6046    }
     6047    return VINF_SUCCESS;
     6048}
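
/*
 * Editor's note: with the new scheme a set CPUMCTX_EXTRN_XXX bit means the register
 * still lives only in the VMCS; importing it copies the value into the guest-CPU
 * context and clears the bit so later callers simply use the cached copy.  Minimal
 * sketch of that idea; CtxSketch, EXTRN_RIP and readVmcsRip() are hypothetical
 * stand-ins rather than VirtualBox names.
 */
#include <stdint.h>
#include <stdio.h>

#define EXTRN_RIP (1ull << 0)           /* "RIP not yet imported" marker */

typedef struct CtxSketch { uint64_t fExtrn; uint64_t rip; } CtxSketch;

static uint64_t readVmcsRip(void) { return 0x401000; }   /* stand-in for a VMCS read */

static void importRip(CtxSketch *pCtx)
{
    if (pCtx->fExtrn & EXTRN_RIP)       /* only touch the VMCS if the value is still external */
    {
        pCtx->rip     = readVmcsRip();
        pCtx->fExtrn &= ~EXTRN_RIP;     /* from now on pCtx->rip is authoritative */
    }
}

int main(void)
{
    CtxSketch Ctx = { EXTRN_RIP, 0 };
    importRip(&Ctx);
    printf("rip=%#llx fExtrn=%#llx\n", (unsigned long long)Ctx.rip, (unsigned long long)Ctx.fExtrn);
    return 0;
}
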
     6049
     6050
     6051/**
     6052 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
     6053 *
     6054 * @returns VBox status code.
     6055 * @param   pVCpu   The cross context virtual CPU structure.
     6056 *
     6057 * @remarks Called with interrupts and/or preemption disabled, should not assert!
     6058 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
     6059 *          instead!!!
     6060 */
     6061DECLINLINE(int) hmR0VmxImportGuestRFlags(PVMCPU pVCpu)
     6062{
     6063    uint32_t u32Val;
     6064    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     6065    if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
     6066    {
     6067        int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);
     6068        if (RT_SUCCESS(rc))
     6069        {
     6070            pCtx->eflags.u32 = u32Val;
     6071
     6072            /* Restore eflags for real-on-v86-mode hack. */
     6073            if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     6074            {
     6075                pCtx->eflags.Bits.u1VM   = 0;
     6076                pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
     6077            }
     6078        }
     6079        pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
     6080        return rc;
     6081    }
     6082    return VINF_SUCCESS;
     6083}
     6084
     6085
     6086/**
     6087 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
     6088 * context.
     6089 *
     6090 * @returns VBox status code.
     6091 * @param   pVCpu   The cross context virtual CPU structure.
     6092 *
     6093 * @remarks Called with interrupts and/or preemption disabled, should not assert!
     6094 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
     6095 *          instead!!!
     6096 */
     6097DECLINLINE(int) hmR0VmxImportGuestIntrState(PVMCPU pVCpu)
     6098{
     6099    uint32_t u32Val;
     6100    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     6101    int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val);
     6102    if (RT_SUCCESS(rc))
     6103    {
     6104        /*
      6105         * We additionally need to import RIP and RFLAGS here, since they may be needed
      6106         * by hmR0VmxEvaluatePendingEvent().
     6107         */
     6108        if (!u32Val)
     6109        {
     6110            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     6111            {
     6112                rc =  hmR0VmxImportGuestRip(pVCpu);
     6113                rc |= hmR0VmxImportGuestRFlags(pVCpu);
     6114                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     6115            }
     6116
     6117            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
     6118                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     6119        }
     6120        else
     6121        {
     6122            rc =  hmR0VmxImportGuestRip(pVCpu);
     6123            rc |= hmR0VmxImportGuestRFlags(pVCpu);
     6124
     6125            if (u32Val & (  VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
     6126                          | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
     6127            {
     6128                EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
     6129            }
     6130            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     6131                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     6132
     6133            if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
     6134            {
     6135                if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
     6136                    VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     6137            }
     6138            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
     6139                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     6140        }
     6141    }
     6142    return rc;
     6143}
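
/*
 * Editor's note: the interruptibility import above folds the VMCS bits into two
 * VMCPU force flags: MOV SS/STI blocking becomes "inhibit interrupts at this RIP",
 * and NMI blocking becomes "block NMIs".  The sketch below shows only that mapping;
 * the INTSTATE_* bits and IntrFlags struct are hypothetical names.
 */
#include <stdint.h>
#include <stdbool.h>

#define INTSTATE_BLOCK_STI   (1u << 0)   /* stand-ins for the VMCS interruptibility bits */
#define INTSTATE_BLOCK_MOVSS (1u << 1)
#define INTSTATE_BLOCK_NMI   (1u << 3)

typedef struct IntrFlags { bool fInhibitInts; bool fBlockNmis; uint64_t inhibitRip; } IntrFlags;

static void importIntrState(IntrFlags *pFlags, uint32_t uIntrState, uint64_t rip)
{
    if (uIntrState & (INTSTATE_BLOCK_STI | INTSTATE_BLOCK_MOVSS))
    {
        pFlags->fInhibitInts = true;     /* interrupts stay inhibited until RIP moves past this one */
        pFlags->inhibitRip   = rip;
    }
    else
        pFlags->fInhibitInts = false;

    pFlags->fBlockNmis = (uIntrState & INTSTATE_BLOCK_NMI) != 0;
}

int main(void)
{
    IntrFlags Flags = { false, false, 0 };
    importIntrState(&Flags, INTSTATE_BLOCK_STI, 0x7000);
    return Flags.fInhibitInts ? 0 : 1;
}
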
     6144
     6145
     6146/**
     6147 * Worker for VMXR0ImportStateOnDemand.
     6148 *
     6149 * @returns VBox status code.
     6150 * @param   pVCpu   The cross context virtual CPU structure.
    70216151 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
    70226152 */
    7023 static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
    7024 {
    7025     int      rc = VINF_SUCCESS;
    7026     PVM      pVM = pVCpu->CTX_SUFF(pVM);
     6153static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat)
     6154{
     6155#define VMXLOCAL_BREAK_RC(a_rc) \
     6156    if (RT_FAILURE(a_rc))       \
     6157        break
     6158
     6159    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
     6160
     6161    int      rc   = VINF_SUCCESS;
     6162    PVM      pVM  = pVCpu->CTX_SUFF(pVM);
     6163    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    70276164    uint64_t u64Val;
    70286165    uint32_t u32Val;
    7029     uint32_t u32Shadow;
     6166
     6167    Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));
    70306168
    70316169    /*
    7032      * Though we can longjmp to ring-3 due to log-flushes here and get re-invoked
    7033      * on the ring-3 callback path, there is no real need to.
     6170     * We disable interrupts to make the updating of the state and in particular
      6171     * the fExtrn modification atomic with respect to preemption hooks.
    70346172     */
    7035     if (VMMRZCallRing3IsEnabled(pVCpu))
    7036         VMMR0LogFlushDisable(pVCpu);
     6173    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
     6174
     6175    fWhat &= pCtx->fExtrn;
     6176    if (fWhat & pCtx->fExtrn)
     6177    {
     6178        do
     6179        {
     6180            if (fWhat & CPUMCTX_EXTRN_RIP)
     6181            {
     6182                rc = hmR0VmxImportGuestRip(pVCpu);
     6183                VMXLOCAL_BREAK_RC(rc);
     6184            }
     6185
     6186            if (fWhat & CPUMCTX_EXTRN_RFLAGS)
     6187            {
     6188                rc = hmR0VmxImportGuestRFlags(pVCpu);
     6189                VMXLOCAL_BREAK_RC(rc);
     6190            }
     6191
     6192            if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)
     6193            {
     6194                rc = hmR0VmxImportGuestIntrState(pVCpu);
     6195                VMXLOCAL_BREAK_RC(rc);
     6196            }
     6197
     6198            if (fWhat & CPUMCTX_EXTRN_RSP)
     6199            {
     6200                rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
     6201                VMXLOCAL_BREAK_RC(rc);
     6202                pCtx->rsp = u64Val;
     6203            }
     6204
     6205            if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
     6206            {
     6207                if (fWhat & CPUMCTX_EXTRN_CS)
     6208                {
     6209                    rc = HMVMX_IMPORT_SREG(CS, &pCtx->cs);
     6210                    VMXLOCAL_BREAK_RC(rc);
     6211                    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     6212                        pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
     6213                }
     6214                if (fWhat & CPUMCTX_EXTRN_SS)
     6215                {
     6216                    rc = HMVMX_IMPORT_SREG(SS, &pCtx->ss);
     6217                    VMXLOCAL_BREAK_RC(rc);
     6218                    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     6219                        pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
     6220                }
     6221                if (fWhat & CPUMCTX_EXTRN_DS)
     6222                {
     6223                    rc = HMVMX_IMPORT_SREG(DS, &pCtx->ds);
     6224                    VMXLOCAL_BREAK_RC(rc);
     6225                    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     6226                        pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
     6227                }
     6228                if (fWhat & CPUMCTX_EXTRN_ES)
     6229                {
     6230                    rc = HMVMX_IMPORT_SREG(ES, &pCtx->es);
     6231                    VMXLOCAL_BREAK_RC(rc);
     6232                    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     6233                        pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
     6234                }
      6235                if (fWhat & CPUMCTX_EXTRN_FS)
      6236                {
      6237                    rc = HMVMX_IMPORT_SREG(FS, &pCtx->fs);
      6238                    VMXLOCAL_BREAK_RC(rc);
      6239                    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
      6240                        pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
      6241                }
     6242                if (fWhat & CPUMCTX_EXTRN_GS)
     6243                {
     6244                    rc = HMVMX_IMPORT_SREG(GS, &pCtx->gs);
     6245                    VMXLOCAL_BREAK_RC(rc);
     6246                    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     6247                        pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
     6248                }
     6249            }
     6250
     6251            if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
     6252            {
     6253                if (fWhat & CPUMCTX_EXTRN_LDTR)
     6254                {
     6255                    rc = HMVMX_IMPORT_SREG(LDTR, &pCtx->ldtr);
     6256                    VMXLOCAL_BREAK_RC(rc);
     6257                }
     6258
     6259                if (fWhat & CPUMCTX_EXTRN_GDTR)
     6260                {
     6261                    rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  &u64Val);
     6262                    rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
     6263                    VMXLOCAL_BREAK_RC(rc);
     6264                    pCtx->gdtr.pGdt  = u64Val;
     6265                    pCtx->gdtr.cbGdt = u32Val;
     6266                }
     6267
     6268                /* Guest IDTR. */
     6269                if (fWhat & CPUMCTX_EXTRN_IDTR)
     6270                {
     6271                    rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  &u64Val);
     6272                    rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
     6273                    VMXLOCAL_BREAK_RC(rc);
     6274                    pCtx->idtr.pIdt  = u64Val;
     6275                    pCtx->idtr.cbIdt = u32Val;
     6276                }
     6277
     6278                /* Guest TR. */
     6279                if (fWhat & CPUMCTX_EXTRN_TR)
     6280                {
     6281                    /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */
     6282                    if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     6283                    {
     6284                        rc = HMVMX_IMPORT_SREG(TR, &pCtx->tr);
     6285                        VMXLOCAL_BREAK_RC(rc);
     6286                    }
     6287                }
     6288            }
     6289
     6290            if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
     6291            {
     6292                rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);
     6293                rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);
     6294                rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS,  &u32Val);
     6295                pCtx->SysEnter.cs = u32Val;
     6296                VMXLOCAL_BREAK_RC(rc);
     6297            }
     6298
     6299#if HC_ARCH_BITS == 64
     6300            if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
     6301            {
     6302                if (   pVM->hm.s.fAllow64BitGuests
     6303                    && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
     6304                    pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
     6305            }
     6306
     6307            if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
     6308            {
     6309                if (   pVM->hm.s.fAllow64BitGuests
     6310                    && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
     6311                {
     6312                    pCtx->msrLSTAR  = ASMRdMsr(MSR_K8_LSTAR);
     6313                    pCtx->msrSTAR   = ASMRdMsr(MSR_K6_STAR);
     6314                    pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
     6315                }
     6316            }
     6317#endif
     6318
     6319            if (   (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
     6320#if HC_ARCH_BITS == 32
     6321                || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))
     6322#endif
     6323                )
     6324            {
     6325                PCVMXAUTOMSR   pMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     6326                uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;
     6327                for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
     6328                {
     6329                    switch (pMsr->u32Msr)
     6330                    {
     6331#if HC_ARCH_BITS == 32
     6332                        case MSR_K8_LSTAR:          pCtx->msrLSTAR        = pMsr->u64Value;         break;
     6333                        case MSR_K6_STAR:           pCtx->msrSTAR         = pMsr->u64Value;         break;
     6334                        case MSR_K8_SF_MASK:        pCtx->msrSFMASK       = pMsr->u64Value;         break;
     6335                        case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value;         break;
     6336#endif
     6337                        case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value);    break;
     6338                        case MSR_K8_TSC_AUX:        CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);      break;
     6339                        default:
     6340                        {
     6341                            AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr,
     6342                                             cMsrs));
     6343                            pVCpu->hm.s.u32HMError = pMsr->u32Msr;
     6344                            return VERR_HM_UNEXPECTED_LD_ST_MSR;
     6345                        }
     6346                    }
     6347                }
     6348            }
     6349
     6350            if (fWhat & CPUMCTX_EXTRN_DR7)
     6351            {
     6352                if (!pVCpu->hm.s.fUsingHyperDR7)
     6353                {
     6354                    /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
     6355                    rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
     6356                    VMXLOCAL_BREAK_RC(rc);
     6357                    pCtx->dr[7] = u32Val;
     6358                }
     6359            }
     6360
     6361            if (fWhat & CPUMCTX_EXTRN_CR_MASK)
     6362            {
     6363                uint32_t u32Shadow;
     6364                /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */
     6365                if (fWhat & CPUMCTX_EXTRN_CR0)
     6366                {
     6367                    rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR0,            &u32Val);
     6368                    rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
     6369                    VMXLOCAL_BREAK_RC(rc);
     6370                    u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR0Mask)
     6371                           | (u32Shadow & pVCpu->hm.s.vmx.u32CR0Mask);
     6372                    VMMRZCallRing3Disable(pVCpu);   /* Calls into PGM which has Log statements. */
     6373                    CPUMSetGuestCR0(pVCpu, u32Val);
     6374                    VMMRZCallRing3Enable(pVCpu);
     6375                }
     6376
     6377                /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). */
     6378                if (fWhat & CPUMCTX_EXTRN_CR4)
     6379                {
     6380                    rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR4,            &u32Val);
     6381                    rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
     6382                    VMXLOCAL_BREAK_RC(rc);
     6383                    u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR4Mask)
     6384                           | (u32Shadow & pVCpu->hm.s.vmx.u32CR4Mask);
     6385                    CPUMSetGuestCR4(pVCpu, u32Val);
     6386                }
     6387
     6388                if (fWhat & CPUMCTX_EXTRN_CR3)
     6389                {
     6390                    if (   pVM->hm.s.vmx.fUnrestrictedGuest
     6391                        || (   pVM->hm.s.fNestedPaging
     6392                            && CPUMIsGuestPagingEnabledEx(pCtx))) /* PG bit changes are always intercepted, so it's up to date. */
     6393                    {
     6394                        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
     6395                        if (pCtx->cr3 != u64Val)
     6396                        {
     6397                            CPUMSetGuestCR3(pVCpu, u64Val);
     6398                            VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
     6399                        }
     6400
     6401                        /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
     6402                        if (CPUMIsGuestInPAEModeEx(pCtx))
     6403                        {
     6404                            rc  = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
     6405                            rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
     6406                            rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
     6407                            rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
     6408                            VMXLOCAL_BREAK_RC(rc);
     6409                            VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
     6410                        }
     6411                    }
     6412                }
     6413            }
     6414        } while (0);
     6415
     6416        if (RT_SUCCESS(rc))
     6417        {
     6418            /* Update fExtrn. */
     6419            pCtx->fExtrn &= ~fWhat;
     6420
     6421            /* If everything has been imported, clear the HM keeper bit. */
     6422            if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
     6423            {
     6424                pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
     6425                Assert(!pCtx->fExtrn);
     6426            }
     6427        }
     6428    }
    70376429    else
    7038         Assert(VMMR0IsLogFlushDisabled(pVCpu));
    7039     Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));
     6430        AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
     6431
     6432    ASMSetFlags(fEFlags);
     6433
     6434    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
    70406435
    70416436    /*
     6437     * Honor any pending CR3 updates.
     6438     *
    70426439     * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
    70436440     * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
     
    70476444     * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
    70486445     * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
    7049      * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!
     6446     * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
    70506447     *
    70516448     * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
     
    70536450    if (VMMRZCallRing3IsEnabled(pVCpu))
    70546451    {
     6452        VMMR0LogFlushDisable(pVCpu);
     6453
    70556454        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    70566455            PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
     
    70656464    }
    70666465
    7067     Assert(!(fWhat & CPUMCTX_EXTRN_KEEPER_HM));
    7068     fWhat &= pCtx->fExtrn;
    7069 
    7070     /* If there is nothing more to import, bail early. */
    7071     if (!(fWhat & HMVMX_CPUMCTX_EXTRN_ALL))
    7072         return VINF_SUCCESS;
    7073 
    7074     /* RIP required while saving interruptibility-state below, see EMSetInhibitInterruptsPC(). */
    7075     if (fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_HM_VMX_INT_STATE))
    7076     {
    7077         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
    7078         AssertRCReturn(rc, rc);
    7079         pCtx->rip = u64Val;
    7080         ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RIP);
    7081     }
    7082 
    7083     /* RFLAGS and interruptibility-state required while re-evaluating interrupt injection, see hmR0VmxGetGuestIntrState(). */
    7084     if (fWhat & (CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_HM_VMX_INT_STATE))
    7085     {
    7086         rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);
    7087         AssertRCReturn(rc, rc);
    7088         pCtx->eflags.u32 = u32Val;
    7089         /* Restore eflags for real-on-v86-mode hack. */
    7090         if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    7091         {
    7092             Assert(pVM->hm.s.vmx.pRealModeTSS);
    7093             pCtx->eflags.Bits.u1VM   = 0;
    7094             pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
    7095         }
    7096         ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RFLAGS);
    7097     }
    7098 
    7099     if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)
    7100     {
    7101         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val);
    7102         AssertRCReturn(rc, rc);
    7103         if (!u32Val)
    7104         {
    7105             if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    7106                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    7107 
    7108             if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
    7109                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    7110         }
    7111         else
    7112         {
    7113             if (u32Val & (  VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
    7114                           | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
    7115             {
    7116                 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
    7117                 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    7118             }
    7119             else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    7120                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    7121 
    7122             if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
    7123             {
    7124                 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
    7125                     VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    7126             }
    7127             else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
    7128                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    7129         }
    7130         ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_VMX_INT_STATE);
    7131     }
    7132 
    7133     if (fWhat & CPUMCTX_EXTRN_RSP)
    7134     {
    7135         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
    7136         AssertRCReturn(rc, rc);
    7137         pCtx->rsp = u64Val;
    7138         ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RSP);
    7139     }
    7140 
    7141     if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    7142     {
    7143         if (fWhat & CPUMCTX_EXTRN_CS)
    7144         {
    7145             rc = HMVMX_SAVE_SREG(CS, &pCtx->cs);
    7146             AssertRCReturn(rc, rc);
    7147             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    7148                 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
    7149             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CS);
    7150         }
    7151         if (fWhat & CPUMCTX_EXTRN_SS)
    7152         {
    7153             rc = HMVMX_SAVE_SREG(SS, &pCtx->ss);
    7154             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    7155                 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
    7156             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SS);
    7157         }
    7158         if (fWhat & CPUMCTX_EXTRN_DS)
    7159         {
    7160             rc = HMVMX_SAVE_SREG(DS, &pCtx->ds);
    7161             AssertRCReturn(rc, rc);
    7162             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    7163                 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
    7164             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DS);
    7165         }
    7166         if (fWhat & CPUMCTX_EXTRN_ES)
    7167         {
    7168             rc = HMVMX_SAVE_SREG(ES, &pCtx->es);
    7169             AssertRCReturn(rc, rc);
    7170             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    7171                 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
    7172             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_ES);
    7173         }
    7174        if (fWhat & CPUMCTX_EXTRN_FS)
    7175        {
    7176             rc = HMVMX_SAVE_SREG(FS, &pCtx->fs);
    7177             AssertRCReturn(rc, rc);
    7178             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    7179                 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
    7180             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_FS);
    7181        }
    7182         if (fWhat & CPUMCTX_EXTRN_GS)
    7183         {
    7184             rc = HMVMX_SAVE_SREG(GS, &pCtx->gs);
    7185             AssertRCReturn(rc, rc);
    7186             if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    7187                 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
    7188             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GS);
    7189         }
    7190     }
    7191 
    7192     if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    7193     {
    7194         if (fWhat & CPUMCTX_EXTRN_LDTR)
    7195         {
    7196             rc = HMVMX_SAVE_SREG(LDTR, &pCtx->ldtr);
    7197             AssertRCReturn(rc, rc);
    7198             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_LDTR);
    7199         }
    7200 
    7201         if (fWhat & CPUMCTX_EXTRN_GDTR)
    7202         {
    7203             rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE,  &u64Val);
    7204             rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
    7205             AssertRCReturn(rc, rc);
    7206             pCtx->gdtr.pGdt  = u64Val;
    7207             pCtx->gdtr.cbGdt = u32Val;
    7208             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GDTR);
    7209         }
    7210 
    7211         /* Guest IDTR. */
    7212         if (fWhat & CPUMCTX_EXTRN_IDTR)
    7213         {
    7214             rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE,  &u64Val);
    7215             rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
    7216             AssertRCReturn(rc, rc);
    7217             pCtx->idtr.pIdt  = u64Val;
    7218             pCtx->idtr.cbIdt = u32Val;
    7219             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_IDTR);
    7220         }
    7221 
    7222         /* Guest TR. */
    7223         if (fWhat & CPUMCTX_EXTRN_TR)
    7224         {
    7225             /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */
    7226             if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    7227             {
    7228                 rc = HMVMX_SAVE_SREG(TR, &pCtx->tr);
    7229                 AssertRCReturn(rc, rc);
    7230             }
    7231             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_TR);
    7232         }
    7233     }
    7234 
    7235     if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    7236     {
    7237         rc  = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);
    7238         rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);
    7239         rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS,  &u32Val);
    7240         pCtx->SysEnter.cs = u32Val;
    7241         AssertRCReturn(rc, rc);
    7242         ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSENTER_MSRS);
    7243     }
    7244 
    7245 #if HC_ARCH_BITS == 64
    7246     if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
    7247     {
    7248         if (   pVM->hm.s.fAllow64BitGuests
    7249             && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
    7250             pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
    7251         ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KERNEL_GS_BASE);
    7252     }
    7253 
    7254     if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    7255     {
    7256         if (   pVM->hm.s.fAllow64BitGuests
    7257             && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
    7258         {
    7259             pCtx->msrLSTAR  = ASMRdMsr(MSR_K8_LSTAR);
    7260             pCtx->msrSTAR   = ASMRdMsr(MSR_K6_STAR);
    7261             pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
    7262         }
    7263         ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSCALL_MSRS);
    7264     }
    7265 #endif
    7266 
    7267     if (   (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
    7268 #if HC_ARCH_BITS == 32
    7269         || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))
    7270 #endif
    7271         )
    7272     {
    7273         PCVMXAUTOMSR   pMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    7274         uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;
    7275         for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
    7276         {
    7277             switch (pMsr->u32Msr)
    7278             {
    7279 #if HC_ARCH_BITS == 32
    7280                 case MSR_K8_LSTAR:          pCtx->msrLSTAR        = pMsr->u64Value;         break;
    7281                 case MSR_K6_STAR:           pCtx->msrSTAR         = pMsr->u64Value;         break;
    7282                 case MSR_K8_SF_MASK:        pCtx->msrSFMASK       = pMsr->u64Value;         break;
    7283                 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value;         break;
    7284 #endif
    7285                 case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value);    break;
    7286                 case MSR_K8_TSC_AUX:
    7287                 {
    7288                     /* CPUMSetGuestTscAux alters fExtrn without using atomics, so disable preemption temporarily. */
    7289                     HM_DISABLE_PREEMPT();
    7290                     CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);
    7291                     HM_RESTORE_PREEMPT();
    7292                     break;
    7293                 }
    7294                 default:
    7295                 {
    7296                     AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
    7297                     pVCpu->hm.s.u32HMError = pMsr->u32Msr;
    7298                     return VERR_HM_UNEXPECTED_LD_ST_MSR;
    7299                 }
    7300             }
    7301         }
    7302         ASMAtomicUoAndU64(&pCtx->fExtrn, ~(  CPUMCTX_EXTRN_TSC_AUX
    7303                                            | CPUMCTX_EXTRN_OTHER_MSRS
    7304 #if HC_ARCH_BITS == 32
    7305                                            | CPUMCTX_EXTRN_KERNEL_GS_BASE
    7306                                            | CPUMCTX_EXTRN_SYSCALL_MSRS
    7307 #endif
    7308                                            ));
    7309     }
    7310 
    7311     if (fWhat & CPUMCTX_EXTRN_DR7)
    7312     {
    7313         if (!pVCpu->hm.s.fUsingHyperDR7)
    7314         {
    7315             /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
    7316             rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
    7317             AssertRCReturn(rc, rc);
    7318             pCtx->dr[7] = u32Val;
    7319         }
    7320         ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR7);
    7321     }
    7322 
    7323     if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    7324     {
    7325         /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */
    7326         if (fWhat & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3))
    7327         {
    7328             rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR0,            &u32Val);
    7329             rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
    7330             AssertRCReturn(rc, rc);
    7331             u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR0Mask)
    7332                    | (u32Shadow & pVCpu->hm.s.vmx.u32CR0Mask);
    7333             CPUMSetGuestCR0(pVCpu, u32Val);
    7334             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR0);
    7335         }
    7336 
    7337         /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). */
    7338         if (fWhat & (CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3))
    7339         {
    7340             rc  = VMXReadVmcs32(VMX_VMCS_GUEST_CR4,            &u32Val);
    7341             rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
    7342             AssertRCReturn(rc, rc);
    7343             u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR4Mask)
    7344                    | (u32Shadow & pVCpu->hm.s.vmx.u32CR4Mask);
    7345             CPUMSetGuestCR4(pVCpu, u32Val);
    7346             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR4);
    7347         }
    7348 
    7349         if (fWhat & CPUMCTX_EXTRN_CR3)
    7350         {
    7351             if (   pVM->hm.s.vmx.fUnrestrictedGuest
    7352                 || (   pVM->hm.s.fNestedPaging
    7353                     && CPUMIsGuestPagingEnabledEx(pCtx)))
    7354             {
    7355                 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
    7356                 if (pCtx->cr3 != u64Val)
    7357                 {
    7358                     CPUMSetGuestCR3(pVCpu, u64Val);
    7359                     if (VMMRZCallRing3IsEnabled(pVCpu))
    7360                     {
    7361                         PGMUpdateCR3(pVCpu, u64Val);
    7362                         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    7363                     }
    7364                     else
    7365                     {
    7366                         /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3(). */
    7367                         VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
    7368                     }
    7369                 }
    7370 
    7371                 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
    7372                 if (CPUMIsGuestInPAEModeEx(pCtx))
    7373                 {
    7374                     rc  = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
    7375                     rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
    7376                     rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
    7377                     rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
    7378                     AssertRCReturn(rc, rc);
    7379 
    7380                     if (VMMRZCallRing3IsEnabled(pVCpu))
    7381                     {
    7382                         PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
    7383                         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
    7384                     }
    7385                     else
    7386                     {
    7387                         /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
    7388                         VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
    7389                     }
    7390                 }
    7391             }
    7392             ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR3);
    7393         }
    7394     }
    7395 
    7396     /* If everything has been imported, clear the HM keeper bit. */
    7397     if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
    7398     {
    7399         ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KEEPER_HM);
    7400         Assert(!pCtx->fExtrn);
    7401     }
    7402 
    74036466    return VINF_SUCCESS;
     6467#undef VMXLOCAL_BREAK_RC
    74046468}
    74056469
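The import routine above is driven entirely by pCtx->fExtrn: a set CPUMCTX_EXTRN_* bit means that piece of guest state still lives only in the VMCS, each block VMREADs it into the guest-CPU context and atomically clears its bit, and the CPUMCTX_EXTRN_KEEPER_HM bit is dropped once nothing remains. A minimal sketch of that pattern for a single register follows; it is illustrative only and not part of the function above, though it reuses the same read helpers shown there:

    /* Sketch: import the guest RSP on demand if it has not been imported yet. */
    if (pCtx->fExtrn & CPUMCTX_EXTRN_RSP)
    {
        uint64_t u64Val;
        int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);    /* read the natural-width VMCS field */
        AssertRCReturn(rc, rc);
        pCtx->rsp = u64Val;
        ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RSP);     /* mark RSP as imported */
    }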
     
    74106474 * @returns VBox status code.
    74116475 * @param   pVCpu   The cross context virtual CPU structure.
    7412  * @param   pCtx    Pointer to the guest-CPU or nested-guest-CPU context.
    74136476 * @param   fWhat   What to import, CPUMCTX_EXTRN_XXX.
    74146477 */
    7415 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
    7416 {
    7417     return hmR0VmxImportGuestState(pVCpu, pCtx, fWhat);
    7418 }
    7419 
    7420 
    7421 /**
    7422  * Saves the entire guest state from the currently active VMCS into the
    7423  * guest-CPU context.
    7424  *
    7425  * This essentially VMREADs all guest-data.
    7426  *
    7427  * @returns VBox status code.
    7428  * @param   pVCpu       The cross context virtual CPU structure.
    7429  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    7430  *                      out-of-sync. Make sure to update the required fields
    7431  *                      before using them.
    7432  */
    7433 static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    7434 {
    7435     Assert(pVCpu);
    7436     Assert(pMixedCtx);
    7437 
    7438     if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
    7439         return VINF_SUCCESS;
    7440 
    7441     /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
    7442        again on the ring-3 callback path, there is no real need to. */
    7443     if (VMMRZCallRing3IsEnabled(pVCpu))
    7444         VMMR0LogFlushDisable(pVCpu);
    7445     else
    7446         Assert(VMMR0IsLogFlushDisabled(pVCpu));
    7447     Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
    7448 
    7449     int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
    7450     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7451 
    7452     rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    7453     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7454 
    7455     rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    7456     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7457 
    7458     rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
    7459     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7460 
    7461     rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
    7462     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7463 
    7464     rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
    7465     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7466 
    7467     rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
    7468     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7469 
    7470     rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    7471     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7472 
    7473     rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
    7474     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7475 
    7476     rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
    7477     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    7478 
    7479     AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
    7480               ("Missed guest state bits while saving state; missing %RX32 (got %RX32, want %RX32) - check log for any previous errors!\n",
    7481                HMVMX_UPDATED_GUEST_ALL ^ HMVMXCPU_GST_VALUE(pVCpu), HMVMXCPU_GST_VALUE(pVCpu), HMVMX_UPDATED_GUEST_ALL));
    7482 
    7483     if (VMMRZCallRing3IsEnabled(pVCpu))
    7484         VMMR0LogFlushEnable(pVCpu);
    7485 
    7486     return VINF_SUCCESS;
    7487 }
    7488 
    7489 
    7490 /**
    7491  * Saves basic guest registers needed for IEM instruction execution.
    7492  *
    7493  * @returns VBox status code (OR-able).
    7494  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    7495  * @param   pMixedCtx   Pointer to the CPU context of the guest.
    7496  * @param   fMemory     Whether the instruction being executed operates on
    7497  *                      memory or not.  Only CR0 is synced up if clear.
    7498  * @param   fNeedRsp    Need RSP (any instruction working on GPRs or stack).
    7499  */
    7500 static int hmR0VmxSaveGuestRegsForIemExec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fMemory, bool fNeedRsp)
    7501 {
    7502     /*
    7503      * We assume all general purpose registers other than RSP are available.
    7504      *
    7505      *   - RIP is a must, as it will be incremented or otherwise changed.
    7506      *   - RFLAGS are always required to figure the CPL.
    7507      *   - RSP isn't always required, however it's a GPR, so frequently required.
    7508      *   - SS and CS are the only segment register needed if IEM doesn't do memory
    7509      *     access (CPL + 16/32/64-bit mode), but we can only get all segment registers.
    7510      *   - CR0 is always required by IEM for the CPL, while CR3 and CR4 will only
    7511      *     be required for memory accesses.
    7512      *
    7513      * Note! Before IEM dispatches an exception, it will call us to sync in everything.
    7514      */
    7515     int rc  = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    7516     rc     |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    7517     if (fNeedRsp)
    7518         rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
    7519     rc     |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /** @todo Only CS and SS are required here. */
    7520     if (!fMemory)
    7521         rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    7522     else
    7523         rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    7524     AssertRCReturn(rc, rc);
    7525     return rc;
    7526 }
    7527 
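As a hedged usage sketch of the helper above, a VM-exit handler would typically call it before handing the current instruction to IEM; the IEMExecOne() call and the changed-flag handling below are representative only, not copied from a specific handler in this file:

    /* Sketch: sync the minimum state IEM needs, interpret one instruction, then mark everything dirty. */
    int rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, true /* fMemory */, true /* fNeedRsp */);
    AssertRCReturn(rc, rc);
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VINF_SUCCESS)
        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);   /* IEM may have modified any guest register */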
    7528 
    7529 /**
    7530  * Saves guest registers needed for IEM instruction interpretation.
    7531  *
    7532  * @returns VBox status code (OR-able).
    7533  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    7534  */
    7535 static int hmR0VmxSaveGuestRegsForIemInterpreting(PVMCPU pVCpu)
    7536 {
    7537     /*
    7538      * Our goal here is IEM_CPUMCTX_EXTRN_MUST_MASK.
    7539      *
    7540      * Note! Before IEM dispatches an exception, it will call us to sync in everything.
    7541      */
    7542 #if 0 /* later with CPUMCTX_EXTRN_XXX */
    7543     int rc  = hmR0VmxSaveGuestRip(pVCpu, &pVCpu->cpum.GstCtx);
    7544     rc     |= hmR0VmxSaveGuestRflags(pVCpu, &pVCpu->cpum.GstCtx);
    7545     rc     |= hmR0VmxSaveGuestRsp(pVCpu, &pVCpu->cpum.GstCtx);
    7546     rc     |= hmR0VmxSaveGuestSegmentRegs(pVCpu, &pVCpu->cpum.GstCtx); /** @todo Only CS and SS are strictly required here. */
    7547     rc     |= hmR0VmxSaveGuestControlRegs(pVCpu, &pVCpu->cpum.GstCtx); /** @todo We don't need CR2 here. */
    7548     rc     |= hmR0VmxSaveGuestApicState(pVCpu, &pVCpu->cpum.GstCtx);   /** @todo Only TPR is needed here. */
    7549     rc     |= hmR0VmxSaveGuestDR7(pVCpu, &pVCpu->cpum.GstCtx);
    7550     /* EFER is always up to date. */
    7551     AssertRCReturn(rc, rc);
    7552     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST - fixme); /** @todo fix me */
    7553 #else
    7554     int rc = hmR0VmxSaveGuestState(pVCpu, &pVCpu->cpum.GstCtx);
    7555     AssertRCReturn(rc, rc);
    7556     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    7557 #endif
    7558 
    7559     return rc;
    7560 }
    7561 
    7562 
    7563 /**
    7564  * Ensures that we've got a complete basic guest-context.
    7565  *
    7566  * This excludes the FPU, SSE, AVX, and similar extended state.  The interface
    7567  * is for the interpreter.
    7568  *
    7569  * @returns VBox status code.
    7570  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    7571  * @param   pMixedCtx       Pointer to the guest-CPU context which may have data
    7572  *                          needing to be synced in.
    7573  * @thread  EMT(pVCpu)
    7574  */
    7575 VMMR0_INT_DECL(int) HMR0EnsureCompleteBasicContext(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    7576 {
    7577     /* Note! Since this is only applicable to VT-x, the implementation is placed
    7578              in the VT-x part of the sources instead of the generic stuff. */
    7579     int rc;
    7580     PVM pVM = pVCpu->CTX_SUFF(pVM);
    7581     if (   pVM->hm.s.vmx.fSupported
    7582         && VM_IS_HM_ENABLED(pVM))
    7583         rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    7584     else
    7585         rc = VINF_SUCCESS;
    7586 
    7587     /*
    7588      * For now, imply that the caller might change everything too. Do this after
    7589      * saving the guest state so as to not trigger assertions.
    7590      *
    7591      * This is required for AMD-V too as it too only selectively re-loads changed
    7592      * guest state back in to the VMCB.
    7593      */
    7594     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    7595     return rc;
     6478VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
     6479{
     6480    return hmR0VmxImportGuestState(pVCpu, fWhat);
    75966481}
    75976482
     
    76346519        return VINF_SUCCESS;
    76356520
     6521#if 0
    76366522    /* We need the control registers now, make sure the guest-CPU context is updated. */
    7637     int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     6523    int rc3 = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
    76386524    AssertRCReturn(rc3, rc3);
    76396525
     
    76576543        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
    76586544    }
     6545#endif
    76596546
    76606547    /* Pending PGM CR3 sync. */
    76616548    if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    76626549    {
     6550        Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
    76636551        VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
    76646552                                            VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
     
    76666554        {
    76676555            AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
    7668             Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
     6556            Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
    76696557            return rcStrict2;
    76706558        }
     
    76776565        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
    76786566        int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
    7679         Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
     6567        Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
    76806568        return rc2;
    76816569    }
     
    76856573        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
    76866574    {
    7687         Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
     6575        Log4Func(("Pending VM request forcing us back to ring-3\n"));
    76886576        return VINF_EM_PENDING_REQUEST;
    76896577    }
     
    76926580    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
    76936581    {
    7694         Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
     6582        Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
    76956583        return VINF_PGM_POOL_FLUSH_PENDING;
    76966584    }
     
    76996587    if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
    77006588    {
    7701         Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
     6589        Log4Func(("Pending DMA request forcing us back to ring-3\n"));
    77026590        return VINF_EM_RAW_TO_R3;
    77036591    }
     
    77666654    AssertRC(rc);
    77676655    Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
    7768          u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
     6656          u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
    77696657
    77706658    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
     
    78506738 *
    78516739 * @returns VBox status code.
    7852  * @param   pVCpu               The cross context virtual CPU structure.
    7853  * @param   pMixedCtx           Pointer to the guest-CPU context. The data may
    7854  *                              be out-of-sync. Make sure to update the required
    7855  *                              fields before using them.
    7856  * @param   fSaveGuestState     Whether to save the guest state or not.
     6740 * @param   pVCpu           The cross context virtual CPU structure.
     6741 * @param   fImportState    Whether to import the guest state from the VMCS back
     6742 *                          to the guest-CPU context.
    78576743 *
    78586744 * @remarks No-long-jmp zone!!!
    78596745 */
    7860 static int hmR0VmxLeave(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)
     6746static int hmR0VmxLeave(PVMCPU pVCpu, bool fImportState)
    78616747{
    78626748    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    78726758
    78736759    /* Save the guest state if necessary. */
    7874     if (   fSaveGuestState
    7875         && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
    7876     {
    7877         int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     6760    if (fImportState)
     6761    {
     6762        int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    78786763        AssertRCReturn(rc, rc);
    7879         Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
    7880     }
    7881 
    7882     /* Restore host FPU state if necessary and resync on next R0 reentry .*/
    7883     if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu))
    7884     {
    7885         /* We shouldn't reload CR0 without saving it first. */
    7886         if (!fSaveGuestState)
    7887         {
    7888             int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    7889             AssertRCReturn(rc, rc);
    7890         }
    7891         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    7892     }
    7893 
    7894     /* Restore host debug registers if necessary and resync on next R0 reentry. */
     6764    }
     6765
     6766    /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
     6767    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
     6768    Assert(!CPUMIsGuestFPUStateActive(pVCpu));
     6769
     6770    /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
    78956771#ifdef VBOX_STRICT
    78966772    if (CPUMIsHyperDebugStateActive(pVCpu))
    78976773        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
    78986774#endif
    7899     if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
    7900         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
     6775    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
    79016776    Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
    79026777    Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
     
    79146789
    79156790    /* Restore the lazy host MSRs as we're leaving VT-x context. */
    7916     if (pVCpu->hm.s.vmx.fLazyMsrs)
    7917     {
    7918         /* We shouldn't reload the guest MSRs without saving it first. */
    7919         if (!fSaveGuestState)
    7920         {
    7921             int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
     6791    if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
     6792    {
     6793        /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
     6794        if (!fImportState)
     6795        {
     6796            int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_KERNEL_GS_BASE
     6797                                                    | CPUMCTX_EXTRN_SYSCALL_MSRS);
    79226798            AssertRCReturn(rc, rc);
    79236799        }
    7924         Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
    79256800        hmR0VmxLazyRestoreHostMsrs(pVCpu);
    79266801        Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
    79276802    }
     6803    else
     6804        pVCpu->hm.s.vmx.fLazyMsrs = 0;
    79286805
    79296806    /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
     
    79316808
    79326809    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
    7933     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
     6810    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
     6811    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
    79346812    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
    79356813    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
     
    79726850 * @remarks No-long-jmp zone!!!
    79736851 */
    7974 DECLINLINE(int) hmR0VmxLeaveSession(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     6852static int hmR0VmxLeaveSession(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    79756853{
    79766854    HM_DISABLE_PREEMPT();
     
    79836861    if (!pVCpu->hm.s.fLeaveDone)
    79846862    {
    7985         int rc2 = hmR0VmxLeave(pVCpu, pMixedCtx, true /* fSaveGuestState */);
     6863        int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */);
    79866864        AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
    79876865        pVCpu->hm.s.fLeaveDone = true;
    79886866    }
    7989     Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
     6867    Assert(!pMixedCtx->fExtrn);
    79906868
    79916869    /*
     
    80596937    /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
    80606938    VMMRZCallRing3Disable(pVCpu);
    8061     Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcExit)));
     6939    Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
    80626940
    80636941    /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
     
    80746952       and if we're injecting an event we should have a TRPM trap pending. */
    80756953    AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
    8076 #ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a tripple fault in progress. */
     6954#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
    80776955    AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
    80786956#endif
     
    80926970                              | CPUM_CHANGED_TR
    80936971                              | CPUM_CHANGED_HIDDEN_SEL_REGS);
    8094     Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
    80956972    if (   pVM->hm.s.fNestedPaging
    80966973        && CPUMIsGuestPagingEnabledEx(pMixedCtx))
     
    81016978    Assert(!pVCpu->hm.s.fClearTrapFlag);
    81026979
     6980    /* Update the exit-to-ring-3 reason. */
     6981    pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
     6982
    81036983    /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
    81046984    if (rcExit != VINF_EM_RAW_INTERRUPT)
    8105         HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     6985        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    81066986
    81076987    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
     
    81507030        pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
    81517031#endif
     7032
    81527033        /* Restore the lazy host MSRs as we're leaving VT-x context. */
    8153         if (pVCpu->hm.s.vmx.fLazyMsrs)
     7034        if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
    81547035            hmR0VmxLazyRestoreHostMsrs(pVCpu);
    81557036
     
    81787059    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    81797060
    8180     Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu,
    8181           enmOperation));
     7061    Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation));
    81827062
    81837063    int rc = hmR0VmxLongJmpToRing3(pVCpu, (PCPUMCTX)pvUser);
     
    82047084            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    82057085            AssertRC(rc);
    8206             Log4(("Setup interrupt-window exiting\n"));
     7086            Log4Func(("Setup interrupt-window exiting\n"));
    82077087        }
    82087088    } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
     
    82217101    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    82227102    AssertRC(rc);
    8223     Log4(("Cleared interrupt-window exiting\n"));
     7103    Log4Func(("Cleared interrupt-window exiting\n"));
    82247104}
    82257105
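Both the set and clear helpers in this area follow the same pattern for toggling a processor-based VM-execution control: update the cached u32ProcCtls value and write it back with VMXWriteVmcs32(). A minimal sketch of the "set" side for interrupt-window exiting (assuming the control is supported, which the callers above already verify):

    /* Sketch: request an interrupt-window VM-exit so we regain control as soon as the guest can take interrupts. */
    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
    {
        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
        AssertRC(rc);
    }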
     
    82407120            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    82417121            AssertRC(rc);
    8242             Log4(("Setup NMI-window exiting\n"));
     7122            Log4Func(("Setup NMI-window exiting\n"));
    82437123        }
    82447124    } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
     
    82577137    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    82587138    AssertRC(rc);
    8259     Log4(("Cleared NMI-window exiting\n"));
     7139    Log4Func(("Cleared NMI-window exiting\n"));
    82607140}
    82617141
     
    82747154{
    82757155    /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
    8276     uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
    8277     bool const fBlockMovSS    = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
    8278     bool const fBlockSti      = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
    8279     bool const fBlockNmi      = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
    8280 
    8281     Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
    8282     Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet.*/
     7156    uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
     7157    bool const fBlockMovSS    = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
     7158    bool const fBlockSti      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
     7159    bool const fBlockNmi      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
     7160
     7161    Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
     7162    Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet.*/
    82837163    Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);     /* Cannot set block-by-STI when interrupts are disabled. */
    82847164    Assert(!TRPMHasTrap(pVCpu));
     
    83007180            && !fBlockMovSS)
    83017181        {
    8302             Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
     7182            Log4Func(("Pending NMI\n"));
    83037183            uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
    83047184            u32IntInfo         |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    83187198    {
    83197199        Assert(!DBGFIsStepping(pVCpu));
    8320         int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    8321         AssertRC(rc);
     7200        int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
     7201        AssertRCReturn(rc, 0);
    83227202        bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
    83237203        if (   !pVCpu->hm.s.Event.fPending
     
    83307210            if (RT_SUCCESS(rc))
    83317211            {
    8332                 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
    8333                 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
    8334                 u32IntInfo         |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     7212                Log4Func(("Pending external interrupt u8Interrupt=%#x\n", u8Interrupt));
     7213                uint32_t u32IntInfo = u8Interrupt
     7214                                    | VMX_EXIT_INTERRUPTION_INFO_VALID
     7215                                    | (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    83357216
    83367217                hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrfaultAddress */);
     
    83557236    }
    83567237
    8357     return uIntrState;
     7238    return fIntrState;
    83587239}
    83597240
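The interruptibility-state returned above is consumed by the injection path further down. A condensed sketch of how the pre-run code is expected to chain the two calls (the evaluator's name is assumed here; hmR0VmxInjectPendingEvent() is declared later in this changeset):

    /* Sketch: evaluate what can be injected, then inject it before the next VM-entry. */
    uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);    /* the function above; name assumed */
    VBOXSTRICTRC rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fIntrState, false /* fStepping */);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;    /* e.g. VINF_EM_RESET, or VINF_EM_DBG_STEPPED when stepping */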
     
    83647245 *
    83657246 * @param   pVCpu           The cross context virtual CPU structure.
    8366  */
    8367 DECLINLINE(void) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu)
    8368 {
    8369     Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS)); NOREF(pVCpu);
    8370     int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
    8371     AssertRC(rc);
     7247 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     7248 *                          out-of-sync. Make sure to update the required fields
     7249 *                          before using them.
     7250 */
     7251DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     7252{
     7253    RT_NOREF(pVCpu);
     7254    Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
     7255    return VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
    83727256}
    83737257
     
    83827266 *                          out-of-sync. Make sure to update the required fields
    83837267 *                          before using them.
    8384  * @param   uIntrState      The VT-x guest-interruptibility state.
     7268 * @param   fIntrState      The VT-x guest-interruptibility state.
    83857269 * @param   fStepping       Running in hmR0VmxRunGuestCodeStep() and we should
    83867270 *                          return VINF_EM_DBG_STEPPED if the event was
    83877271 *                          dispatched directly.
    83887272 */
    8389 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t uIntrState, bool fStepping)
     7273static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t fIntrState, bool fStepping)
    83907274{
    83917275    HMVMX_ASSERT_PREEMPT_SAFE();
    83927276    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    83937277
    8394     bool fBlockMovSS    = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
    8395     bool fBlockSti      = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
    8396 
    8397     Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
    8398     Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));     /* We don't support block-by-SMI yet.*/
     7278    bool fBlockMovSS    = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
     7279    bool fBlockSti      = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
     7280
     7281    Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
     7282    Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));     /* We don't support block-by-SMI yet.*/
    83997283    Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF);       /* Cannot set block-by-STI when interrupts are disabled. */
    84007284    Assert(!TRPMHasTrap(pVCpu));
     
    84217305        else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
    84227306        {
    8423             bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
     7307            bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
    84247308            Assert(!fBlockSti);
    84257309            Assert(!fBlockMovSS);
     
    84297313        Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
    84307314              (uint8_t)uIntType));
    8431         rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
    8432                                           pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress,
    8433                                           fStepping, &uIntrState);
     7315        rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
     7316                                          pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping,
     7317                                          &fIntrState);
    84347318        AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
    84357319
    84367320        /* Update the interruptibility-state as it could have been changed by
    84377321           hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
    8438         fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
    8439         fBlockSti   = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
     7322        fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
     7323        fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
    84407324
    84417325        if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
     
    84577341             */
    84587342            Assert(!DBGFIsStepping(pVCpu));
    8459             int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    8460             AssertRCReturn(rc2, rc2);
     7343            int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
     7344            AssertRCReturn(rc, rc);
    84617345            if (pMixedCtx->eflags.Bits.u1TF)
    8462                 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
     7346            {
     7347                int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     7348                AssertRCReturn(rc2, rc2);
     7349            }
    84637350        }
    84647351        else if (pMixedCtx->eflags.Bits.u1TF)
     
    84697356             */
    84707357            Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
    8471             uIntrState = 0;
     7358            fIntrState = 0;
    84727359        }
    84737360    }
     
    84777364     * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
    84787365     */
    8479     int rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
    8480     AssertRC(rc2);
     7366    int rc3 = hmR0VmxExportGuestIntrState(pVCpu, fIntrState);
     7367    AssertRCReturn(rc3, rc3);
    84817368
    84827369    Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
     
    85147401 *                          is injected directly (register modified by us, not
    85157402 *                          by hardware on VM-entry).
    8516  * @param   puIntrState     Pointer to the current guest interruptibility-state.
     7403 * @param   pfIntrState     Pointer to the current guest interruptibility-state.
    85177404 *                          This interruptibility-state will be updated if
    85187405 *                          necessary. This cannot not be NULL.
    85197406 */
    8520 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
    8521 {
     7407DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fStepping, uint32_t *pfIntrState)
     7408{
     7409    NOREF(pMixedCtx);
    85227410    uint32_t u32IntInfo  = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
    85237411    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    85247412    u32IntInfo          |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    8525     return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
    8526                                   fStepping, puIntrState);
     7413    return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */, fStepping,
     7414                                  pfIntrState);
    85277415}
    85287416
     
    85807468 *                              directly (register modified by us, not by
    85817469 *                              hardware on VM-entry).
    8582  * @param   puIntrState         Pointer to the current guest interruptibility-state.
     7470 * @param   pfIntrState         Pointer to the current guest interruptibility-state.
    85837471 *                              This interruptibility-state will be updated if
    85847472 *                              necessary. This cannot not be NULL.
    85857473 */
    8586 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
    8587                                              bool fStepping, uint32_t *puIntrState)
    8588 {
     7474DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
     7475                                             bool fStepping, uint32_t *pfIntrState)
     7476{
     7477    NOREF(pMixedCtx);
    85897478    uint32_t u32IntInfo  = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
    85907479    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    85917480    if (fErrorCodeValid)
    85927481        u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    8593     return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
    8594                                   fStepping, puIntrState);
     7482    return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */, fStepping,
     7483                                  pfIntrState);
    85957484}
    85967485
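Both wrappers above assemble the 32-bit VM-entry interruption-information field the same way: the vector in bits 7:0, the event type in bits 10:8, the deliver-error-code bit (bit 11) and the valid bit (bit 31). A worked sketch for a page fault, where uErrCode and GCPtrFault are placeholders for values taken from the VM-exit information (the real callers queue the event via hmR0VmxSetPendingEvent(), as shown earlier in this changeset):

    /* Sketch: queue a #PF (vector 14) with an error code for injection on the next VM-entry. */
    uint32_t u32IntInfo  = X86_XCPT_PF | VMX_EXIT_INTERRUPTION_INFO_VALID;
    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    u32IntInfo          |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, uErrCode, GCPtrFault);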
     
    86777566 *
    86787567 * @param   pVCpu               The cross context virtual CPU structure.
    8679  * @param   pMixedCtx           Pointer to the guest-CPU context. The data may
    8680  *                              be out-of-sync. Make sure to update the required
    8681  *                              fields before using them.
    86827568 * @param   u64IntInfo          The VM-entry interruption-information field.
    86837569 * @param   cbInstr             The VM-entry instruction length in bytes (for
     
    86867572 * @param   u32ErrCode          The VM-entry exception error code.
    86877573 * @param   GCPtrFaultAddress   The page-fault address for \#PF exceptions.
    8688  * @param   puIntrState         Pointer to the current guest interruptibility-state.
     7574 * @param   pfIntrState         Pointer to the current guest interruptibility-state.
    86897575 *                              This interruptibility-state will be updated if
    86907576 *                              necessary. This cannot be NULL.
     
    86977583 * @remarks Requires CR0!
    86987584 */
    8699 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
    8700                                            uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping,
    8701                                            uint32_t *puIntrState)
     7585static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
     7586                                           RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState)
    87027587{
    87037588    /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
    87047589    AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));
    8705     Assert(puIntrState);
    8706     uint32_t u32IntInfo = (uint32_t)u64IntInfo;
    8707 
    8708     uint32_t const uVector  = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
    8709     uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
     7590    Assert(pfIntrState);
     7591
     7592    PCPUMCTX       pMixedCtx  = &pVCpu->cpum.GstCtx;
     7593    uint32_t       u32IntInfo = (uint32_t)u64IntInfo;
     7594    uint32_t const uVector    = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
     7595    uint32_t const uIntType   = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
    87107596
    87117597#ifdef VBOX_STRICT
     
    87377623    /* Cannot inject an NMI when block-by-MOV SS is in effect. */
    87387624    Assert(   uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
    8739            || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
     7625           || !(*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
    87407626
    87417627    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
    87427628
    8743     /* We require CR0 to check if the guest is in real-mode. */
    8744     int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    8745     AssertRCReturn(rc, rc);
    8746 
    8747     /*
    8748      * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
    8749      * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
    8750      * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
    8751      * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
    8752      */
    8753     if (CPUMIsGuestInRealModeEx(pMixedCtx))
    8754     {
    8755         PVM pVM = pVCpu->CTX_SUFF(pVM);
    8756         if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    8757         {
     7629    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest)
     7630    {
     7631        /*
     7632         * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
     7633         * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
     7634         */
     7635        u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
     7636    }
     7637    else
     7638    {
     7639        /* We require CR0 to check if the guest is in real-mode. */
     7640        int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
     7641        AssertRCReturn(rc, rc);
     7642
     7643        /*
     7644         * Hardware interrupts & exceptions cannot be delivered through the software interrupt
     7645         * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
     7646         * interrupt handler in the (real-mode) guest.
     7647         *
     7648         * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
     7649         * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
     7650         */
     7651        if (CPUMIsGuestInRealModeEx(pMixedCtx))
     7652        {
     7653            PVM pVM = pVCpu->CTX_SUFF(pVM);
    87587654            Assert(PDMVmmDevHeapIsEnabled(pVM));
    87597655            Assert(pVM->hm.s.vmx.pRealModeTSS);
    87607656
    8761             /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
    8762             rc  = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    8763             rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
    8764             rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
     7657            /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
     7658            rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_SREG_MASK
     7659                                                | CPUMCTX_EXTRN_TABLE_MASK
     7660                                                | CPUMCTX_EXTRN_RIP
     7661                                                | CPUMCTX_EXTRN_RSP
     7662                                                | CPUMCTX_EXTRN_RFLAGS);
    87657663            AssertRCReturn(rc, rc);
    8766             Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));
    87677664
    87687665            /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
     
    87767673                /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
    87777674                if (uVector == X86_XCPT_GP)
    8778                     return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, puIntrState);
     7675                    return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, pfIntrState);
    87797676
    87807677                /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
    87817678                /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
    87827679                return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */,
    8783                                            fStepping, puIntrState);
     7680                                           fStepping, pfIntrState);
    87847681            }
    87857682
     
    88037700            /* Construct the stack frame for the interrupt/exception handler. */
    88047701            VBOXSTRICTRC rcStrict;
    8805             rcStrict  = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
     7702            rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
    88067703            if (rcStrict == VINF_SUCCESS)
    88077704                rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
     
    88237720                /* If any other guest-state bits are changed here, make sure to update
    88247721                   hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
    8825                 HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_SEGMENT_REGS
    8826                                     | HM_CHANGED_GUEST_RIP
    8827                                     | HM_CHANGED_GUEST_RFLAGS
    8828                                     | HM_CHANGED_GUEST_RSP);
     7722                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_CS
     7723                                                           | HM_CHANGED_GUEST_CR2
     7724                                                           | HM_CHANGED_GUEST_RIP
     7725                                                           | HM_CHANGED_GUEST_RFLAGS
     7726                                                           | HM_CHANGED_GUEST_RSP);
    88297727
    88307728                /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
    8831                 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
     7729                if (*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
    88327730                {
    88337731                    Assert(   uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
    88347732                           && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
    8835                     Log4(("Clearing inhibition due to STI.\n"));
    8836                     *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
     7733                    Log4Func(("Clearing inhibition due to STI\n"));
     7734                    *pfIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
    88377735                }
    88387736                Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
     
    88437741                pVCpu->hm.s.Event.fPending = false;
    88447742
    8845                 /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */
     7743                /* Make hmR0VmxPreRunGuest() return if we're stepping since we've changed cs:rip. */
    88467744                if (fStepping)
    88477745                    rcStrict = VINF_EM_DBG_STEPPED;
     
    88517749            return rcStrict;
    88527750        }
    8853 
    8854         /*
    8855          * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
    8856          * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
    8857          */
    8858         u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    88597751    }
    88607752
     
    88657757
    88667758    /* Inject. */
    8867     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
     7759    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
    88687760    if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
    88697761        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
    88707762    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
     7763    AssertRCReturn(rc, rc);
    88717764
    88727765    if (   VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
     
    88747767        pMixedCtx->cr2 = GCPtrFaultAddress;
    88757768
    8876     Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
    8877           u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
    8878 
    8879     AssertRCReturn(rc, rc);
     7769    Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr,
     7770          pMixedCtx->cr2));
     7771
    88807772    return VINF_SUCCESS;
    88817773}
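
For reference, a minimal sketch of how the 32-bit value written into VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO above is laid out per the Intel SDM (vector in bits 7:0, interruption type in bits 10:8, deliver-error-code in bit 11, valid in bit 31). The helper name and constant below are hypothetical, not the VBox VMX_EXIT_INTERRUPTION_INFO_* macros:

#include <stdint.h>

#define MY_INTINFO_TYPE_HW_XCPT  3u   /* hardware exception (#GP, #PF, ...) */

static uint32_t myMakeEntryIntInfo(uint8_t uVector, uint32_t uType, int fErrCodeValid)
{
    uint32_t u32IntInfo = uVector;               /* bits 7:0  - vector */
    u32IntInfo |= (uType & 0x7u) << 8;           /* bits 10:8 - interruption type */
    if (fErrCodeValid)
        u32IntInfo |= UINT32_C(1) << 11;         /* bit 11    - deliver error code on entry */
    u32IntInfo |= UINT32_C(1) << 31;             /* bit 31    - field is valid */
    return u32IntInfo;
}

/* Example: #GP(0) injected as a hardware exception with an error code yields 0x80000b0d.
   For real-mode guests on unrestricted-guest capable CPUs the deliver-error-code bit must
   then be masked off again before VM-entry, which is what the code above does. */
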
     
    88957787static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu)
    88967788{
    8897     Log4Func(("vcpu[%d]\n", pVCpu->idCpu));
    8898 
    88997789    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
     7790    {
    89007791        hmR0VmxClearIntWindowExitVmcs(pVCpu);
     7792        Log4Func(("Cleared interrupt widow\n"));
     7793    }
    89017794
    89027795    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
     7796    {
    89037797        hmR0VmxClearNmiWindowExitVmcs(pVCpu);
     7798        Log4Func(("Cleared interrupt widow\n"));
     7799    }
    89047800}
    89057801
     
    89227818
    89237819    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
    8924     Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
     7820    Assert((pVCpu->hm.s.fCtxChanged &  (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
     7821                                    == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
    89257822
    89267823#ifdef VBOX_STRICT
     
    89707867            VMCPU_ASSERT_EMT(pVCpu);
    89717868
    8972             PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
    8973 
    89747869            /* No longjmps (logger flushes, locks) in this fragile context. */
    89757870            VMMRZCallRing3Disable(pVCpu);
     
    89817876            if (!pVCpu->hm.s.fLeaveDone)
    89827877            {
    8983                 /* Do -not- save guest-state here as we might already be in the middle of saving it (esp. bad if we are
    8984                    holding the PGM lock while saving the guest state (see hmR0VmxSaveGuestControlRegs()). */
    8985                 hmR0VmxLeave(pVCpu, pMixedCtx, false /* fSaveGuestState */);
     7878                /*
     7879                 * Do -not- import the guest-state here as we might already be in the middle of importing
     7880                 * it, esp. bad if we're holding the PGM lock, see comment in hmR0VmxImportGuestState().
     7881                 */
     7882                hmR0VmxLeave(pVCpu, false /* fImportState */);
    89867883                pVCpu->hm.s.fLeaveDone = true;
    89877884            }
     
    90117908            int rc = HMR0EnterCpu(pVCpu);
    90127909            AssertRC(rc);
    9013             Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
     7910            Assert((pVCpu->hm.s.fCtxChanged &  (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
     7911                                            == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
    90147912
    90157913            /* Load the active VMCS as the current one. */
     
    90357933
    90367934/**
     7935 * Exports the host state into the VMCS host-state area.
     7936 * Sets up the VM-exit MSR-load area.
     7937 *
     7938 * The CPU state will be loaded from these fields on every successful VM-exit.
     7939 *
     7940 * @returns VBox status code.
     7941 * @param   pVCpu       The cross context virtual CPU structure.
     7942 *
     7943 * @remarks No-long-jump zone!!!
     7944 */
     7945static int hmR0VmxExportHostState(PVMCPU pVCpu)
     7946{
     7947    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     7948
     7949    int rc = VINF_SUCCESS;
     7950    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
     7951    {
     7952        rc = hmR0VmxExportHostControlRegs();
     7953        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     7954
     7955        rc = hmR0VmxExportHostSegmentRegs(pVCpu);
     7956        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     7957
     7958        rc = hmR0VmxExportHostMsrs(pVCpu);
     7959        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     7960
     7961        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT;
     7962    }
     7963    return rc;
     7964}
     7965
     7966
     7967/**
    90377968 * Saves the host state in the VMCS host-state.
    9038  * Sets up the VM-exit MSR-load area.
    9039  *
    9040  * The CPU state will be loaded from these fields on every successful VM-exit.
    90417969 *
    90427970 * @returns VBox status code.
     
    90467974 * @remarks No-long-jump zone!!!
    90477975 */
    9048 static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
    9049 {
     7976VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu)
     7977{
     7978    AssertPtr(pVCpu);
    90507979    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    90517980
    9052     int rc = VINF_SUCCESS;
    9053     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
    9054     {
    9055         rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
    9056         AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9057 
    9058         rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
    9059         AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9060 
    9061         rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
    9062         AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9063 
    9064         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
    9065     }
    9066     return rc;
    9067 }
    9068 
    9069 
    9070 /**
    9071  * Saves the host state in the VMCS host-state.
    9072  *
    9073  * @returns VBox status code.
    9074  * @param   pVM         The cross context VM structure.
    9075  * @param   pVCpu       The cross context virtual CPU structure.
    9076  *
    9077  * @remarks No-long-jump zone!!!
    9078  */
    9079 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
    9080 {
    9081     AssertPtr(pVM);
    9082     AssertPtr(pVCpu);
    9083 
    9084     LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
    9085 
    9086     /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted
    9087        and have to resave the host state but most of the time we won't be, so do it here before we disable interrupts. */
    9088     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    9089     return hmR0VmxSaveHostState(pVM, pVCpu);
    9090 }
    9091 
    9092 
    9093 /**
    9094  * Loads the guest state into the VMCS guest-state area.
     7981    /*
     7982     * Export the host state here while entering HM context.
     7983     * When thread-context hooks are used, we might get preempted and have to re-save the host
     7984     * state but most of the time we won't be, so do it here before we disable interrupts.
     7985     */
     7986    return hmR0VmxExportHostState(pVCpu);
     7987}
     7988
     7989
     7990/**
     7991 * Exports the guest state into the VMCS guest-state area.
    90957992 *
    90967993 * This will typically be done before VM-entry when the guest-CPU state and the
     
    91158012 * @remarks No-long-jump zone!!!
    91168013 */
    9117 static VBOXSTRICTRC hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     8014static VBOXSTRICTRC hmR0VmxExportGuestState(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    91188015{
    91198016    AssertPtr(pVM);
     
    91248021    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
    91258022
    9126     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
     8023    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
    91278024
    91288025    /* Determine real-on-v86 mode. */
     
    91358032
    91368033    /*
    9137      * Load the guest-state into the VMCS.
    91388034     * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
    91398035     * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
    91408036     */
    9141     int rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
    9142     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9143 
    9144     /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
    9145     rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
    9146     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9147 
    9148     /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
    9149     rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
    9150     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9151 
    9152     rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
    9153     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9154 
    9155     VBOXSTRICTRC rcStrict = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
     8037    int rc = hmR0VmxSelectVMRunHandler(pVCpu, pMixedCtx);
     8038    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     8039
     8040    /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
     8041    rc = hmR0VmxExportGuestEntryCtls(pVCpu, pMixedCtx);
     8042    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     8043
     8044    /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
     8045    rc = hmR0VmxExportGuestExitCtls(pVCpu, pMixedCtx);
     8046    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     8047
     8048    rc = hmR0VmxExportGuestCR0(pVCpu, pMixedCtx);
     8049    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     8050
     8051    VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pMixedCtx);
    91568052    if (rcStrict == VINF_SUCCESS)
    91578053    { /* likely */ }
     
    91628058    }
    91638059
    9164     /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */
    9165     rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
    9166     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9167 
    9168     /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
    9169        determine we don't have to swap EFER after all. */
    9170     rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
    9171     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9172 
    9173     rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
    9174     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9175 
    9176     rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx);
    9177     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9178 
    9179     /*
    9180      * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
    9181      * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
    9182      */
    9183     rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
    9184     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9185 
    9186     /* Clear any unused and reserved bits. */
    9187     HMCPU_CF_CLEAR(pVCpu,   HM_CHANGED_GUEST_CR2
    9188                           | HM_CHANGED_GUEST_HWVIRT);
    9189 
    9190     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     8060    rc = hmR0VmxExportGuestSegmentRegs(pVCpu, pMixedCtx);
     8061    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     8062
     8063    /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it
     8064       may alter controls if we determine we don't have to swap EFER after all. */
     8065    rc = hmR0VmxExportGuestMsrs(pVCpu, pMixedCtx);
     8066    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     8067
     8068    rc = hmR0VmxExportGuestApicTpr(pVCpu);
     8069    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     8070
     8071    /* This needs to be done after hmR0VmxExportGuestCR0() as it may alter intercepted exceptions. */
     8072    rc = hmR0VmxExportGuestXcptIntercepts(pVCpu);
     8073    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     8074
     8075    /* Exporting RFLAGS here is fine, even though RFLAGS.TF might depend on guest debug state which is
     8076       not exported here. It is re-evaluated and updated if necessary in hmR0VmxExportSharedState(). */
     8077    rc  = hmR0VmxExportGuestRip(pVCpu, pMixedCtx);
     8078    rc |= hmR0VmxExportGuestRsp(pVCpu, pMixedCtx);
     8079    rc |= hmR0VmxExportGuestRflags(pVCpu, pMixedCtx);
     8080    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
     8081
     8082    /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
     8083    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(  (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
     8084                                                  |  HM_CHANGED_GUEST_CR2
     8085                                                  | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
     8086                                                  |  HM_CHANGED_GUEST_X87
     8087                                                  |  HM_CHANGED_GUEST_SSE_AVX
     8088                                                  |  HM_CHANGED_GUEST_OTHER_XSAVE
     8089                                                  |  HM_CHANGED_GUEST_XCRx
     8090                                                  |  HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
     8091                                                  |  HM_CHANGED_GUEST_SYSCALL_MSRS   /* Part of lazy or auto load-store MSRs. */
     8092                                                  |  HM_CHANGED_GUEST_TSC_AUX
     8093                                                  |  HM_CHANGED_GUEST_OTHER_MSRS
     8094                                                  |  HM_CHANGED_GUEST_HWVIRT
     8095                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
     8096
     8097    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
    91918098    return rc;
    91928099}
     
    91948101
    91958102/**
    9196  * Loads the state shared between the host and guest into the VMCS.
     8103 * Exports the state shared between the host and guest into the VMCS.
    91978104 *
    91988105 * @param   pVM         The cross context VM structure.
     
    92028109 * @remarks No-long-jump zone!!!
    92038110 */
    9204 static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     8111static void hmR0VmxExportSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    92058112{
    92068113    NOREF(pVM);
     
    92098116    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    92108117
    9211     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
    9212     {
    9213         int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
     8118    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
     8119    {
     8120        int rc = hmR0VmxExportSharedDebugState(pVCpu, pCtx);
    92148121        AssertRC(rc);
    9215     }
    9216 
    9217     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
    9218     {
    9219         int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
    9220         AssertRC(rc);
     8122        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
    92218123
    92228124        /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
    9223         if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
    9224         {
    9225             rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
     8125        if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
     8126        {
     8127            rc = hmR0VmxExportGuestRflags(pVCpu, pCtx);
    92268128            AssertRC(rc);
    92278129        }
    92288130    }
    92298131
    9230     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS))
     8132    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
    92318133    {
    92328134        hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
    9233         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);
    9234     }
    9235 
    9236     /* Loading CR0, debug state might have changed intercepts, update VMCS. */
    9237     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS))
    9238     {
    9239         Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));
    9240         Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
    9241         int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    9242         AssertRC(rc);
    9243         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);
    9244     }
    9245 
    9246     AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
    9247               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
     8135        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
     8136    }
     8137
     8138    AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
     8139              ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
    92488140}
    92498141
     
    92658157 * @remarks No-long-jump zone!!!
    92668158 */
    9267 static VBOXSTRICTRC hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     8159static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
    92688160{
    92698161    HMVMX_ASSERT_PREEMPT_SAFE();
     
    92718163    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    92728164
    9273     Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
    92748165#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
    9275     HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     8166    pVCpu->hm.s.fCtxChanged |= HM_CHANGED_ALL_GUEST;
    92768167#endif
    92778168
    92788169    /*
    9279      * RIP is what changes the most often and hence if it's the only bit needing to be
    9280      * updated, we shall handle it early for performance reasons.
     8170     * For many exits it's only RIP that changes and hence try to export it first
     8171     * without going through a lot of change flag checks.
    92818172     */
    9282     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    9283     if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
    9284     {
    9285         rcStrict = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
     8173    VBOXSTRICTRC rcStrict;
     8174    uint64_t     fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
     8175    if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP)
     8176    {
     8177        rcStrict = hmR0VmxExportGuestRip(pVCpu, pMixedCtx);
    92868178        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    92878179        { /* likely */}
    92888180        else
    9289         {
    9290             AssertMsgFailedReturn(("hmR0VmxLoadGuestStateOptimal: hmR0VmxLoadGuestRip failed! rc=%Rrc\n",
    9291                                    VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
    9292         }
    9293         STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
    9294     }
    9295     else if (HMCPU_CF_VALUE(pVCpu))
    9296     {
    9297         rcStrict = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
     8181            AssertMsgFailedReturn(("hmR0VmxExportGuestRip failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
     8182        STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
     8183    }
     8184    else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
     8185    {
     8186        rcStrict = hmR0VmxExportGuestState(pVM, pVCpu, pMixedCtx);
    92988187        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    92998188        { /* likely */}
    93008189        else
    93018190        {
    9302             AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM,
    9303                       ("hmR0VmxLoadGuestStateOptimal: hmR0VmxLoadGuestState failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     8191            AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("hmR0VmxExportGuestState failed! rc=%Rrc\n",
     8192                                                          VBOXSTRICTRC_VAL(rcStrict)));
    93048193            Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    93058194            return rcStrict;
    93068195        }
    9307         STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
    9308     }
    9309 
     8196        STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
     8197    }
     8198    else
     8199        rcStrict = VINF_SUCCESS;
     8200
     8201#ifdef VBOX_STRICT
    93108202    /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
    9311     AssertMsg(   !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
    9312               ||  HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
    9313               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
     8203    fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
     8204    AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)),
     8205              ("fCtxChanged=%#RX64\n", fCtxChanged));
     8206#endif
    93148207    return rcStrict;
    93158208}
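
A worked example of the fast-path test in hmR0VmxExportGuestStateOptimal(), using hypothetical HM_CHANGED_* bit values; it shows the comparison succeeds exactly when RIP is the only pending non-shared guest bit, while host and shared host/guest bits (exported later in the pre-run path) are ignored by the mask:

#include <assert.h>
#include <stdint.h>

#define MY_CHANGED_GUEST_RIP     UINT64_C(0x0001)
#define MY_CHANGED_GUEST_RSP     UINT64_C(0x0002)
#define MY_CHANGED_GUEST_DR7     UINT64_C(0x0004)  /* shared host/guest state */
#define MY_CHANGED_HOST_CONTEXT  UINT64_C(0x0008)
#define MY_CHANGED_ALL_GUEST     (MY_CHANGED_GUEST_RIP | MY_CHANGED_GUEST_RSP | MY_CHANGED_GUEST_DR7)
#define MY_CHANGED_SHARED_STATE  MY_CHANGED_GUEST_DR7

static int myOnlyRipIsDirty(uint64_t fCtxChanged)
{
    return (fCtxChanged & (MY_CHANGED_ALL_GUEST & ~MY_CHANGED_SHARED_STATE)) == MY_CHANGED_GUEST_RIP;
}

int main(void)
{
    assert( myOnlyRipIsDirty(MY_CHANGED_GUEST_RIP));                           /* RIP only -> fast path */
    assert( myOnlyRipIsDirty(MY_CHANGED_GUEST_RIP | MY_CHANGED_GUEST_DR7));    /* shared bits are masked out */
    assert( myOnlyRipIsDirty(MY_CHANGED_GUEST_RIP | MY_CHANGED_HOST_CONTEXT)); /* host bits are not guest bits */
    assert(!myOnlyRipIsDirty(MY_CHANGED_GUEST_RIP | MY_CHANGED_GUEST_RSP));    /* anything more -> full export */
    return 0;
}
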
     
    93708263     * in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}.
    93718264     *
    9372      * This is the reason we do it here and not in hmR0VmxLoadGuestState().
     8265     * This is the reason we do it here and not in hmR0VmxExportGuestState().
    93738266     */
    93748267    if (   !pVCpu->hm.s.vmx.u64MsrApicBase
     
    93878280
    93888281        /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
    9389         Log4(("hmR0VmxPreRunGuest: VCPU%u: Mapped HC APIC-access page at %#RGp\n", pVCpu->idCpu, GCPhysApicBase));
     8282        Log4Func(("Mapped HC APIC-access page at %#RGp\n", GCPhysApicBase));
    93908283        rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
    93918284        AssertRCReturn(rc, rc);
     
    93978290    if (TRPMHasTrap(pVCpu))
    93988291        hmR0VmxTrpmTrapToPendingEvent(pVCpu);
    9399     uint32_t uIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
     8292    uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
    94008293
    94018294    /*
     
    94038296     * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
    94048297     */
    9405     rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, uIntrState, fStepping);
     8298    rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fIntrState, fStepping);
    94068299    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    94078300    { /* likely */ }
     
    94218314
    94228315    /*
    9423      * Load the guest state bits.
     8316     * Export the guest state bits.
    94248317     *
    94258318     * We cannot perform longjmps while loading the guest state because we do not preserve the
     
    94318324     * Hence, loading of the guest state needs to be done -after- injection of events.
    94328325     */
    9433     rcStrict = hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
     8326    rcStrict = hmR0VmxExportGuestStateOptimal(pVM, pVCpu, pMixedCtx);
    94348327    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    94358328    { /* likely */ }
     
    95208413        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
    95218414        if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)
    9522             HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);
     8415            pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT;
    95238416        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
    95248417        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
    9525         Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
    9526         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
    95278418    }
    95288419
     
    95328423    if (   !pVCpu->hm.s.vmx.fUpdatedHostMsrs
    95338424        && pVCpu->hm.s.vmx.cMsrs > 0)
    9534     {
    95358425        hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
    9536     }
    95378426
    95388427    /*
    9539      * Load the host state bits as we may've been preempted (only happens when
     8428     * Re-save the host state bits as we may've been preempted (only happens when
    95408429     * thread-context hooks are used or when hmR0VmxSelectVMRunHandler() changes pfnStartVM).
    95418430     * Note that the 64-on-32 switcher saves the (64-bit) host state into the VMCS and
     
    95438432     * See @bugref{8432}.
    95448433     */
    9545     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
    9546     {
    9547         int rc = hmR0VmxSaveHostState(pVM, pVCpu);
     8434    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
     8435    {
     8436        int rc = hmR0VmxExportHostState(pVCpu);
    95488437        AssertRC(rc);
    9549         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptSaveHostState);
    9550     }
    9551     Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
     8438        STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptExportHostState);
     8439    }
     8440    Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT));
    95528441
    95538442    /*
    9554      * Load the state shared between host and guest (FPU, debug, lazy MSRs).
     8443     * Export the state shared between host and guest (FPU, debug, lazy MSRs).
    95558444     */
    9556     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
    9557         hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
    9558     AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
     8445    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
     8446        hmR0VmxExportSharedState(pVM, pVCpu, pMixedCtx);
     8447    AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
    95598448
    95608449    /* Store status of the shared guest-host state at the time of VM-entry. */
     
    96058494        {
    96068495            bool fMsrUpdated;
    9607             int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    9608             AssertRC(rc2);
    9609             Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
    9610 
    9611             rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
     8496            hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
     8497            int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
    96128498                                             &fMsrUpdated);
    96138499            AssertRC(rc2);
    96148500            Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
    9615 
    96168501            /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
    96178502            pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
     
    96278512    {
    96288513        bool fMsrUpdated;
    9629         int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    9630         AssertRC(rc2);
    9631         Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
    9632 
    9633         rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */,
    9634                                          &fMsrUpdated);
     8514        hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS);
     8515        int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */,
     8516                                             &fMsrUpdated);
    96358517        AssertRC(rc2);
    96368518        Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
     
    96588540 * @param   pVM             The cross context VM structure.
    96598541 * @param   pVCpu           The cross context virtual CPU structure.
    9660  * @param   pMixedCtx       Pointer to the guest-CPU context. The data maybe
    9661  *                          out-of-sync. Make sure to update the required fields
    9662  *                          before using them.
    96638542 * @param   pVmxTransient   Pointer to the VMX transient structure.
    96648543 * @param   rcVMRun         Return code of VMLAUNCH/VMRESUME.
     
    96698548 *          unconditionally when it is safe to do so.
    96708549 */
    9671 static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
    9672 {
    9673     NOREF(pVM);
    9674     uint64_t uHostTsc = ASMReadTSC();
    9675 
     8550static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
     8551{
     8552    uint64_t const uHostTsc = ASMReadTSC();
    96768553    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    96778554
    96788555    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
    96798556    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */
    9680     HMVMXCPU_GST_RESET_TO(pVCpu, 0);                            /* Exits/longjmps to ring-3 requires saving the guest state. */
     8557    pVCpu->hm.s.fCtxChanged            = 0;                     /* Exits/longjmps to ring-3 requires saving the guest state. */
    96818558    pVmxTransient->fVmcsFieldsRead     = 0;                     /* Transient fields need to be read from the VMCS. */
    96828559    pVmxTransient->fVectoringPF        = false;                 /* Vectoring page-fault needs to be determined later. */
     
    97358612        if (!pVmxTransient->fVMEntryFailed)
    97368613        {
    9737             /** @todo We can optimize this by only syncing with our force-flags when
    9738              *        really needed and keeping the VMCS state as it is for most
    9739              *        VM-exits. */
    9740             /* Update the guest interruptibility-state from the VMCS. */
    9741             hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
     8614            VMMRZCallRing3Enable(pVCpu);
    97428615
    97438616            /*
    9744              * Allow longjmps to ring-3 -after- saving the guest-interruptibility state
    9745              * as it's not part of hmR0VmxSaveGuestState() and thus would trigger an assertion
    9746              * on the longjmp path to ring-3 while saving the (rest of) the guest state,
    9747              * see @bugref{6208#c63}.
     8617             * Import the guest-interruptibility state always as we need it while evaluating
     8618             * injecting events on re-entry.
     8619             *
     8620             * We don't import CR0 (when Unrestricted guest execution is unavailable) despite
     8621             * checking for real-mode while exporting the state because all bits that cause
     8622             * mode changes wrt CR0 are intercepted.
    97488623             */
    9749             VMMRZCallRing3Enable(pVCpu);
     8624            rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_HM_VMX_INT_STATE);
     8625            AssertRC(rc);
    97508626
    97518627#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
    9752             rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     8628            rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    97538629            AssertRC(rc);
    97548630#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
    9755             rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     8631            rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_RFLAGS);
    97568632            AssertRC(rc);
    97578633#endif
     
    97658641                rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]);
    97668642                AssertRC(rc);
    9767                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
     8643                ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
    97688644            }
    97698645
     
    97738649    else
    97748650    {
    9775         Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
    9776           pVmxTransient->fVMEntryFailed));
     8651        Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
    97778652    }
    97788653
     
    98168691        /* Restore any residual host-state and save any bits shared between host
    98178692           and guest into the guest-CPU state.  Re-enables interrupts! */
    9818         hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rcRun);
     8693        hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
    98198694
    98208695        /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
     
    99318806 * @param   pDbgState       The structure to initialize.
    99328807 */
    9933 DECLINLINE(void) hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)
     8808static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)
    99348809{
    99358810    pDbgState->uRipStart            = pCtx->rip;
     
    99628837 * @param   pDbgState   The debug state.
    99638838 */
    9964 DECLINLINE(void) hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
     8839static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
    99658840{
    99668841    /*
     
    100138888
    100148889
    10015 DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)
     8890static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)
    100168891{
    100178892    /*
     
    100558930 * the necessary VM-exits demanded by DBGF and DTrace.
    100568931 *
    10057  * @param   pVM             The cross context VM structure.
    100588932 * @param   pVCpu           The cross context virtual CPU structure.
    10059  * @param   pCtx            Pointer to the guest-CPU context.
    100608933 * @param   pDbgState       The debug state.
    100618934 * @param   pVmxTransient   Pointer to the VMX transient structure.  May update
    100628935 *                          fUpdateTscOffsettingAndPreemptTimer.
    100638936 */
    10064 static void hmR0VmxPreRunGuestDebugStateUpdate(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx,
    10065                                                PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient)
     8937static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient)
    100668938{
    100678939    /*
     
    100858957     * Software interrupts (INT XXh) - no idea how to trigger these...
    100868958     */
     8959    PVM pVM = pVCpu->CTX_SUFF(pVM);
    100878960    if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
    100888961        || VBOXVMM_INT_SOFTWARE_ENABLED())
     
    101299002     * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
    101309003     *
    10131      * Note! This is the reverse of waft hmR0VmxHandleExitDtraceEvents does.
     9004     * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
    101329005     *       So, when adding/changing/removing please don't forget to update it.
    101339006     *
     
    102119084        || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
    102129085    {
    10213         int rc2 = hmR0VmxSaveGuestCR0(pVCpu, pCtx);
    10214         rc2    |= hmR0VmxSaveGuestCR4(pVCpu, pCtx);
    10215         rc2    |= hmR0VmxSaveGuestApicState(pVCpu, pCtx);
    10216         AssertRC(rc2);
     9086        int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
     9087                                               | CPUMCTX_EXTRN_CR4
     9088                                               | CPUMCTX_EXTRN_APIC_TPR);
     9089        AssertRC(rc);
    102179090
    102189091#if 0 /** @todo fix me */
     
    102349107        {
    102359108            pDbgState->fClearCr0Mask = false;
    10236             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     9109            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
    102379110        }
    102389111        if (pDbgState->fClearCr4Mask)
    102399112        {
    102409113            pDbgState->fClearCr4Mask = false;
    10241             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
     9114            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
    102429115        }
    102439116    }
     
    104799352        case VMX_EXIT_MOV_CRX:
    104809353            hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    10481 /** @todo r=bird: I feel these macros aren't very descriptive and needs to be at least 30 chars longer! ;-)
    10482 * Sensible abbreviations strongly recommended here because even with 130 columns this stuff get too wide! */
    10483             if (   VMX_EXIT_QUALIFICATION_CRX_ACCESS(pVmxTransient->uExitQualification)
    10484                 == VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ)
     9354            if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
    104859355                SET_BOTH(CRX_READ);
    104869356            else
    104879357                SET_BOTH(CRX_WRITE);
    10488             uEventArg = VMX_EXIT_QUALIFICATION_CRX_REGISTER(pVmxTransient->uExitQualification);
     9358            uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQualification);
    104899359            break;
    104909360        case VMX_EXIT_MOV_DRX:
    104919361            hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    10492             if (   VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification)
    10493                 == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_READ)
     9362            if (   VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification)
     9363                == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
    104949364                SET_BOTH(DRX_READ);
    104959365            else
    104969366                SET_BOTH(DRX_WRITE);
    10497             uEventArg = VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification);
     9367            uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification);
    104989368            break;
    104999369        case VMX_EXIT_RDMSR:            SET_BOTH(RDMSR); break;
     
    105719441    {
    105729442        hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    10573         hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     9443        hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    105749444        switch (enmEvent1)
    105759445        {
     
    107599629    {
    107609630        hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    10761         hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     9631        int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     9632        AssertRC(rc);
    107629633        VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pMixedCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
    107639634    }
     
    108419712            case VMX_EXIT_XRSTORS:
    108429713            {
    10843                 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    10844                 rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    10845                 AssertRCReturn(rc2, rc2);
     9714                int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_RIP
     9715                                                        | CPUMCTX_EXTRN_CS);
     9716                AssertRCReturn(rc, rc);
    108469717                if (   pMixedCtx->rip    != pDbgState->uRipStart
    108479718                    || pMixedCtx->cs.Sel != pDbgState->uCsStart)
     
    109059776
    109069777    /* Set HMCPU indicators.  */
    10907     bool const fSavedSingleInstruction  = pVCpu->hm.s.fSingleInstruction;
    10908     pVCpu->hm.s.fSingleInstruction      = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
    10909     pVCpu->hm.s.fDebugWantRdTscExit     = false;
    10910     pVCpu->hm.s.fUsingDebugLoop         = true;
     9778    bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
     9779    pVCpu->hm.s.fSingleInstruction     = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
     9780    pVCpu->hm.s.fDebugWantRdTscExit    = false;
     9781    pVCpu->hm.s.fUsingDebugLoop        = true;
    109119782
    109129783    /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps.  */
    109139784    VMXRUNDBGSTATE DbgState;
    109149785    hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState);
    10915     hmR0VmxPreRunGuestDebugStateUpdate(pVM, pVCpu, pCtx, &DbgState, &VmxTransient);
     9786    hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
    109169787
    109179788    /*
     
    109499820         * and guest into the guest-CPU state.  Re-enables interrupts!
    109509821         */
    10951         hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rcRun);
     9822        hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
    109529823
    109539824        /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
     
    109909861        if (fStepping)
    109919862        {
    10992             int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
    10993             rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
    10994             AssertRCReturn(rc2, rc2);
     9863            int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_RIP
     9864                                                    | CPUMCTX_EXTRN_CS);
     9865            AssertRC(rc);
    109959866            if (   pCtx->rip    != DbgState.uRipStart
    109969867                || pCtx->cs.Sel != DbgState.uCsStart)
     
    109999870                break;
    110009871            }
    11001             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
     9872            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
    110029873        }
    110039874
     
    110069877         */
    110079878        if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
    11008             hmR0VmxPreRunGuestDebugStateUpdate(pVM, pVCpu, pCtx, &DbgState, &VmxTransient);
     9879            hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
    110099880    }
    110109881
     
    110149885    if (pVCpu->hm.s.fClearTrapFlag)
    110159886    {
    11016         int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
    11017         AssertRCReturn(rc2, rc2);
     9887        int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
     9888        AssertRC(rc);
    110189889        pVCpu->hm.s.fClearTrapFlag = false;
    110199890        pCtx->eflags.Bits.u1TF = 0;
     
    1119010061{
    1119110062    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    11192     Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
     10063    Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
    1119310064    HMVMX_ASSERT_PREEMPT_SAFE();
    1119410065
     
    1122310094DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
    1122410095{
    11225 # ifdef DEBUG_ramshankar
    11226 #  define VMEXIT_CALL_RET(a_CallExpr) \
     10096#ifdef DEBUG_ramshankar
     10097#define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
    1122710098       do { \
    11228             int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); \
     10099            if (a_fSave != 0) \
     10100                hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); \
    1122910101            VBOXSTRICTRC rcStrict = a_CallExpr; \
    11230             HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); \
     10102            if (a_fSave != 0) \
     10103                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
    1123110104            return rcStrict; \
    1123210105        } while (0)
    11233 # else
    11234 #  define VMEXIT_CALL_RET(a_CallExpr) return a_CallExpr
    11235 # endif
     10106#else
     10107# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
     10108#endif
    1123610109    switch (rcReason)
    1123710110    {
    11238         case VMX_EXIT_EPT_MISCONFIG:           VMEXIT_CALL_RET(hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));
    11239         case VMX_EXIT_EPT_VIOLATION:           VMEXIT_CALL_RET(hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));
    11240         case VMX_EXIT_IO_INSTR:                VMEXIT_CALL_RET(hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));
    11241         case VMX_EXIT_CPUID:                   VMEXIT_CALL_RET(hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));
    11242         case VMX_EXIT_RDTSC:                   VMEXIT_CALL_RET(hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));
    11243         case VMX_EXIT_RDTSCP:                  VMEXIT_CALL_RET(hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));
    11244         case VMX_EXIT_APIC_ACCESS:             VMEXIT_CALL_RET(hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));
    11245         case VMX_EXIT_XCPT_OR_NMI:             VMEXIT_CALL_RET(hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));
    11246         case VMX_EXIT_MOV_CRX:                 VMEXIT_CALL_RET(hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));
    11247         case VMX_EXIT_EXT_INT:                 VMEXIT_CALL_RET(hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));
    11248         case VMX_EXIT_INT_WINDOW:              VMEXIT_CALL_RET(hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));
    11249         case VMX_EXIT_MWAIT:                   VMEXIT_CALL_RET(hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient));
    11250         case VMX_EXIT_MONITOR:                 VMEXIT_CALL_RET(hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));
    11251         case VMX_EXIT_TASK_SWITCH:             VMEXIT_CALL_RET(hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));
    11252         case VMX_EXIT_PREEMPT_TIMER:           VMEXIT_CALL_RET(hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));
    11253         case VMX_EXIT_RDMSR:                   VMEXIT_CALL_RET(hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));
    11254         case VMX_EXIT_WRMSR:                   VMEXIT_CALL_RET(hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));
    11255         case VMX_EXIT_MOV_DRX:                 VMEXIT_CALL_RET(hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));
    11256         case VMX_EXIT_TPR_BELOW_THRESHOLD:     VMEXIT_CALL_RET(hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));
    11257         case VMX_EXIT_HLT:                     VMEXIT_CALL_RET(hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));
    11258         case VMX_EXIT_INVD:                    VMEXIT_CALL_RET(hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));
    11259         case VMX_EXIT_INVLPG:                  VMEXIT_CALL_RET(hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));
    11260         case VMX_EXIT_RSM:                     VMEXIT_CALL_RET(hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));
    11261         case VMX_EXIT_MTF:                     VMEXIT_CALL_RET(hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));
    11262         case VMX_EXIT_PAUSE:                   VMEXIT_CALL_RET(hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));
    11263         case VMX_EXIT_XDTR_ACCESS:             VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
    11264         case VMX_EXIT_TR_ACCESS:               VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
    11265         case VMX_EXIT_WBINVD:                  VMEXIT_CALL_RET(hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));
    11266         case VMX_EXIT_XSETBV:                  VMEXIT_CALL_RET(hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));
    11267         case VMX_EXIT_RDRAND:                  VMEXIT_CALL_RET(hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));
    11268         case VMX_EXIT_INVPCID:                 VMEXIT_CALL_RET(hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));
    11269         case VMX_EXIT_GETSEC:                  VMEXIT_CALL_RET(hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));
    11270         case VMX_EXIT_RDPMC:                   VMEXIT_CALL_RET(hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));
    11271         case VMX_EXIT_VMCALL:                  VMEXIT_CALL_RET(hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));
     10111        case VMX_EXIT_EPT_MISCONFIG:           VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));
     10112        case VMX_EXIT_EPT_VIOLATION:           VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));
     10113        case VMX_EXIT_IO_INSTR:                VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));
     10114        case VMX_EXIT_CPUID:                   VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));
     10115        case VMX_EXIT_RDTSC:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));
     10116        case VMX_EXIT_RDTSCP:                  VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));
     10117        case VMX_EXIT_APIC_ACCESS:             VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));
     10118        case VMX_EXIT_XCPT_OR_NMI:             VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));
     10119        case VMX_EXIT_MOV_CRX:                 VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));
     10120        case VMX_EXIT_EXT_INT:                 VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));
     10121        case VMX_EXIT_INT_WINDOW:              VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));
     10122        case VMX_EXIT_TPR_BELOW_THRESHOLD:     VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));
     10123        case VMX_EXIT_MWAIT:                   VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient));
     10124        case VMX_EXIT_MONITOR:                 VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));
     10125        case VMX_EXIT_TASK_SWITCH:             VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));
     10126        case VMX_EXIT_PREEMPT_TIMER:           VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));
     10127        case VMX_EXIT_RDMSR:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));
     10128        case VMX_EXIT_WRMSR:                   VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));
     10129        case VMX_EXIT_VMCALL:                  VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));
     10130        case VMX_EXIT_MOV_DRX:                 VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));
     10131        case VMX_EXIT_HLT:                     VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));
     10132        case VMX_EXIT_INVD:                    VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));
     10133        case VMX_EXIT_INVLPG:                  VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));
     10134        case VMX_EXIT_RSM:                     VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));
     10135        case VMX_EXIT_MTF:                     VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));
     10136        case VMX_EXIT_PAUSE:                   VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));
     10137        case VMX_EXIT_XDTR_ACCESS:             VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
     10138        case VMX_EXIT_TR_ACCESS:               VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
     10139        case VMX_EXIT_WBINVD:                  VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));
     10140        case VMX_EXIT_XSETBV:                  VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));
     10141        case VMX_EXIT_RDRAND:                  VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));
     10142        case VMX_EXIT_INVPCID:                 VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));
     10143        case VMX_EXIT_GETSEC:                  VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));
     10144        case VMX_EXIT_RDPMC:                   VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));
    1127210145
    1127310146        case VMX_EXIT_TRIPLE_FAULT:            return hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient);
     
    1129610169        case VMX_EXIT_XRSTORS:
    1129710170            return hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
     10171
    1129810172        case VMX_EXIT_ENCLS:
    1129910173        case VMX_EXIT_RDSEED: /* only spurious VM-exits, so undefined */
     
    1136310237    /* Advance the RIP. */
    1136410238    pMixedCtx->rip += cbInstr;
    11365     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
     10239    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
    1136610240
    1136710241    /* Update interrupt inhibition. */
     
    1138710261{
    1138810262    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    11389     rc    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    11390     rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     10263    rc    |= hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_RIP
     10264                                            | CPUMCTX_EXTRN_RFLAGS);
    1139110265    AssertRCReturn(rc, rc);
    1139210266
     
    1140110275    if (  !pVCpu->hm.s.fSingleInstruction
    1140210276        && pMixedCtx->eflags.Bits.u1TF)
    11403         hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
     10277    {
     10278        rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     10279        AssertRCReturn(rc, rc);
     10280    }
    1140410281
    1140510282    return VINF_SUCCESS;
     
    1144010317         * CR0.
    1144110318         */
    11442         uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
    11443         uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
     10319        uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
     10320        uint32_t fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
    1144410321        /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
    1144510322           See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
    1144610323        if (fUnrestrictedGuest)
    11447             uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
    11448 
    11449         uint32_t u32GuestCR0;
    11450         rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCR0);
     10324            fSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
     10325
     10326        uint32_t uGuestCR0;
     10327        rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uGuestCR0);
    1145110328        AssertRCBreak(rc);
    11452         HMVMX_CHECK_BREAK((u32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
    11453         HMVMX_CHECK_BREAK(!(u32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);
     10329        HMVMX_CHECK_BREAK((uGuestCR0 & fSetCR0) == fSetCR0, VMX_IGS_CR0_FIXED1);
     10330        HMVMX_CHECK_BREAK(!(uGuestCR0 & ~fZapCR0), VMX_IGS_CR0_FIXED0);
    1145410331        if (   !fUnrestrictedGuest
    11455             && (u32GuestCR0 & X86_CR0_PG)
    11456             && !(u32GuestCR0 & X86_CR0_PE))
     10332            &&  (uGuestCR0 & X86_CR0_PG)
     10333            && !(uGuestCR0 & X86_CR0_PE))
    1145710334        {
    1145810335            HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
     
    1146210339         * CR4.
    1146310340         */
    11464         uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
    11465         uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
    11466 
    11467         uint32_t u32GuestCR4;
    11468         rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCR4);
     10341        uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
     10342        uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
     10343
     10344        uint32_t uGuestCR4;
     10345        rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uGuestCR4);
    1146910346        AssertRCBreak(rc);
    11470         HMVMX_CHECK_BREAK((u32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
    11471         HMVMX_CHECK_BREAK(!(u32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);
     10347        HMVMX_CHECK_BREAK((uGuestCR4 & fSetCR4) == fSetCR4, VMX_IGS_CR4_FIXED1);
     10348        HMVMX_CHECK_BREAK(!(uGuestCR4 & ~fZapCR4), VMX_IGS_CR4_FIXED0);
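The fSetCR0/fZapCR0 and fSetCR4/fZapCR4 pairs encode the VMX fixed-bit constraints: a bit set in both FIXED0 and FIXED1 must be 1 in the guest control register, and a bit clear in both must be 0. A small sketch of that check in isolation (plain C++, hypothetical helper name):

    #include <cstdint>

    // Returns true if uCr satisfies the VMX fixed-bit constraints given by the
    // IA32_VMX_CRx_FIXED0/FIXED1 MSR values (uFixed0, uFixed1).
    static bool isCrValidForVmx(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
    {
        uint64_t const fSet = uFixed0 & uFixed1;   // bits that must be 1
        uint64_t const fZap = uFixed0 | uFixed1;   // bits that may be 1; everything outside must be 0
        return (uCr & fSet) == fSet
            && (uCr & ~fZap) == 0;
    }

For unrestricted guests the hunk additionally clears PE and PG out of fSetCR0, matching the exemption the comment cites from the Intel guest-register checks.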
    1147210349
    1147310350        /*
     
    1152510402        if (   fLongModeGuest
    1152610403            || (   fUnrestrictedGuest
    11527                 && !(u32GuestCR0 & X86_CR0_PE)))
     10404                && !(uGuestCR0 & X86_CR0_PE)))
    1152810405        {
    1152910406            HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
     
    1154510422        if (fLongModeGuest)
    1154610423        {
    11547             HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
    11548             HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
     10424            HMVMX_CHECK_BREAK(uGuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
     10425            HMVMX_CHECK_BREAK(uGuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
    1154910426        }
    1155010427
    1155110428        if (   !fLongModeGuest
    11552             && (u32GuestCR4 & X86_CR4_PCIDE))
     10429            && (uGuestCR4 & X86_CR4_PCIDE))
    1155310430        {
    1155410431            HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
     
    1162210499                              VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
    1162310500            HMVMX_CHECK_BREAK(   fUnrestrictedGuest
    11624                               || !(u32GuestCR0 & X86_CR0_PG)
     10501                              || !(uGuestCR0 & X86_CR0_PG)
    1162510502                              || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
    1162610503                              VMX_IGS_EFER_LMA_LME_MISMATCH);
     
    1204810925    {
    1204910926        /*
    12050          * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves and
    12051          * anything we inject is not going to cause a VM-exit directly for the event being injected.
    12052          * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
     10927         * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
     10928         * injected it ourselves and anything we inject is not going to cause a VM-exit directly
     10929         * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
    1205310930         *
    12054          * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
     10931         * [1] -- See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
     10932         * [2] -- See Intel spec. 27.5.5 "Updating Non-Register State".
    1205510933         */
    1205610934        VMXDispatchHostNmi();
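Because nothing the VMM injects can itself cause this exit, an NMI seen here must have been delivered to the host CPU while the guest was running, so it is simply re-dispatched to the host. As a sketch of the decision only, using the SDM layout of the VM-exit interruption-information field (bits 10:8 type, type 2 = NMI, bit 31 valid); this is illustrative and not the exact VBox code path:

    #include <cstdint>

    static constexpr uint32_t INT_INFO_VALID     = UINT32_C(1) << 31;
    static constexpr uint32_t INT_INFO_TYPE_MASK = UINT32_C(7) << 8;
    static constexpr uint32_t INT_INFO_TYPE_NMI  = UINT32_C(2) << 8;

    // Decide whether an exception-or-NMI VM-exit should be forwarded to the host as an NMI.
    static bool isHostNmiExit(uint32_t uExitIntInfo)
    {
        return (uExitIntInfo & INT_INFO_VALID)
            && (uExitIntInfo & INT_INFO_TYPE_MASK) == INT_INFO_TYPE_NMI;
    }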
     
    1212311001                default:
    1212411002                {
    12125                     rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    12126                     AssertRCReturn(rc, rc);
    12127 
    1212811003                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
    1212911004                    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
     
    1213311008                        Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
    1213411009
    12135                         rc  = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     11010                        rc  = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
     11011                        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1213611012                        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    1213711013                        AssertRCReturn(rc, rc);
     
    1213911015                                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
    1214011016                                               0 /* GCPtrFaultAddress */);
    12141                         AssertRCReturn(rc, rc);
    1214211017                    }
    1214311018                    else
     
    1220011075     * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
    1220111076     */
    12202     uint32_t uIntrState = 0;
    12203     int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
     11077    uint32_t fIntrState = 0;
     11078    int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState);
    1220411079    AssertRCReturn(rc, rc);
    1220511080
    12206     bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
     11081    bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
    1220711082    if (   fBlockSti
    1220811083        && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     
    1222511100{
    1222611101    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12227     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
    1222811102    return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    1222911103}
     
    1223611110{
    1223711111    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12238     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
    1223911112    return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    1224011113}
     
    1224711120{
    1224811121    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12249     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
    1225011122    Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
    1225111123
     
    1225411126     */
    1225511127    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    12256     rc    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    12257     rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    12258     rc    |= hmR0VmxSaveGuestCs(pVCpu);
     11128    rc    |= hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_RIP
     11129                                            | CPUMCTX_EXTRN_CS);
    1225911130    AssertRCReturn(rc, rc);
    1226011131
     
    1228011151            rcStrict = VERR_EM_INTERPRETER;
    1228111152        }
    12282         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
    1228311153    }
    1228411154    else
     
    1228811158         */
    1228911159        Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
    12290         int rc2 = hmR0VmxSaveGuestRegsForIemInterpreting(pVCpu);
     11160        int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1229111161        AssertRCReturn(rc2, rc2);
    1229211162
     
    1229511165
    1229611166        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    12297         HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     11167        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    1229811168
    1229911169        Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
     
    1231111181{
    1231211182    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12313     int rc  = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
     11183    int rc  = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4);
    1231411184    AssertRCReturn(rc, rc);
    1231511185
     
    1232811198{
    1232911199    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12330     int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);      /* Needed for CPL < 0 only, really. */
    12331     rc    |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
     11200    int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1233211201    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1233311202    AssertRCReturn(rc, rc);
     11203
    1233411204    VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbInstr);
    1233511205    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1233611206    {
    12337         /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
     11207        /* If we get a spurious VM-exit when offsetting is enabled,
     11208           we must reset offsetting on VM-reentry. See @bugref{6634}. */
    1233811209        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
    1233911210            pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
     11211        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_RIP
     11212                                                   | HM_CHANGED_GUEST_RFLAGS);
    1234011213    }
    1234111214    else if (rcStrict == VINF_IEM_RAISED_XCPT)
     11215    {
    1234211216        rcStrict = VINF_SUCCESS;
    12343     HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
    12344     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
     11217        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
     11218    }
    1234511219    return rcStrict;
    1234611220}
     
    1235311227{
    1235411228    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12355     int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);      /* Needed for CPL < 0 only, really. */
    12356     rc    |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
    12357     rc    |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);  /* For MSR_K8_TSC_AUX */
     11229    int rc = hmR0VmxImportGuestState(pVCpu,   IEM_CPUMCTX_EXTRN_MUST_MASK
     11230                                            | CPUMCTX_EXTRN_TSC_AUX);
    1235811231    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1235911232    AssertRCReturn(rc, rc);
     11233
    1236011234    VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr);
    1236111235    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1236211236    {
    12363         /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
     11237        /* If we get a spurious VM-exit when offsetting is enabled,
     11238           we must reset offsetting on VM-reentry. See @bugref{6634}. */
    1236411239        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
    1236511240            pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
     11241        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_RIP
     11242                                                   | HM_CHANGED_GUEST_RFLAGS);
    1236611243    }
    1236711244    else if (rcStrict == VINF_IEM_RAISED_XCPT)
     11245    {
    1236811246        rcStrict = VINF_SUCCESS;
    12369     HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
    12370     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
     11247        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
     11248    }
    1237111249    return rcStrict;
    1237211250}
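The RDTSC and RDTSCP handlers above follow the same IEM convention: the decoded-instruction helper either succeeds, in which case RIP and RFLAGS are marked dirty, or returns VINF_IEM_RAISED_XCPT, in which case the broader exception-raised mask is marked dirty and the status is folded into success. A compact sketch of that pattern, with illustrative status codes and flag values standing in for the real VBox constants:

    #include <atomic>
    #include <cstdint>
    #include <functional>

    static constexpr int VINF_SUCCESS_SK         = 0;   // stand-in for VINF_SUCCESS
    static constexpr int VINF_IEM_RAISED_XCPT_SK = 1;   // stand-in for VINF_IEM_RAISED_XCPT

    static constexpr uint64_t CHANGED_RIP_RFLAGS  = UINT64_C(0x03);  // illustrative
    static constexpr uint64_t CHANGED_XCPT_RAISED = UINT64_C(0xff);  // illustrative superset

    static int handleExitViaIem(std::atomic<uint64_t> &fCtxChanged,
                                const std::function<int()> &executeDecoded)
    {
        int rcStrict = executeDecoded();                 // e.g. IEMExecDecodedRdtsc analogue
        if (rcStrict == VINF_SUCCESS_SK)
            fCtxChanged.fetch_or(CHANGED_RIP_RFLAGS, std::memory_order_relaxed);
        else if (rcStrict == VINF_IEM_RAISED_XCPT_SK)
        {
            // IEM already updated the guest context; re-export the affected state and continue.
            fCtxChanged.fetch_or(CHANGED_XCPT_RAISED, std::memory_order_relaxed);
            rcStrict = VINF_SUCCESS_SK;
        }
        return rcStrict;
    }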
     
    1237911257{
    1238011258    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12381     int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
    12382     rc    |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     11259    int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR4
     11260                                            | CPUMCTX_EXTRN_CR0
     11261                                            | CPUMCTX_EXTRN_RFLAGS
     11262                                            | CPUMCTX_EXTRN_SS);
    1238311263    AssertRCReturn(rc, rc);
    1238411264
     
    1239511275        rc = VERR_EM_INTERPRETER;
    1239611276    }
    12397     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
    1239811277    return rc;
    1239911278}
     
    1240611285{
    1240711286    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12408     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
    1240911287
    1241011288    VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
    1241111289    if (EMAreHypercallInstructionsEnabled(pVCpu))
    1241211290    {
    12413 #if 0
    12414         int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    12415 #else
    12416         /* Aggressive state sync. for now. */
    12417         int rc  = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    12418         rc     |= hmR0VmxSaveGuestRflags(pVCpu,pMixedCtx);          /* For CPL checks in gimHvHypercall() & gimKvmHypercall() */
    12419         rc     |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);    /* For long-mode checks in gimKvmHypercall(). */
     11291        int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_RIP
     11292                                                | CPUMCTX_EXTRN_RFLAGS
     11293                                                | CPUMCTX_EXTRN_CR0
     11294                                                | CPUMCTX_EXTRN_SS
     11295                                                | CPUMCTX_EXTRN_CS
     11296                                                | CPUMCTX_EXTRN_EFER);
    1242011297        AssertRCReturn(rc, rc);
    12421 #endif
    1242211298
    1242311299        /* Perform the hypercall. */
     
    1243711313    }
    1243811314    else
    12439         Log4(("hmR0VmxExitVmcall: Hypercalls not enabled\n"));
     11315        Log4Func(("Hypercalls not enabled\n"));
    1244011316
    1244111317    /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
     
    1246011336
    1246111337    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    12462     rc    |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     11338    rc    |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
    1246311339    AssertRCReturn(rc, rc);
    1246411340
     
    1246911345        AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
    1247011346                                                    pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict)));
    12471     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
    1247211347    return rcStrict;
    1247311348}
     
    1248011355{
    1248111356    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12482     int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    12483     rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    12484     rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     11357    int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
     11358                                            | CPUMCTX_EXTRN_RFLAGS
     11359                                            | CPUMCTX_EXTRN_SS);
    1248511360    AssertRCReturn(rc, rc);
    1248611361
     
    1250511380{
    1250611381    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    12507     int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    12508     rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    12509     rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     11382    int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
     11383                                            | CPUMCTX_EXTRN_RFLAGS
     11384                                            | CPUMCTX_EXTRN_SS);
    1251011385    AssertRCReturn(rc, rc);
    1251111386
     
    1252111396        if (   rc == VINF_EM_HALT
    1252211397            && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
    12523         {
    1252411398            rc = VINF_SUCCESS;
    12525         }
    1252611399    }
    1252711400    else
     
    1254311416{
    1254411417    /*
    12545      * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
    12546      * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
    12547      * executing VMCALL in VMX root operation. If we get here, something funny is going on.
    12548      * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
     11418     * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root
     11419     * mode. In theory, we should never get this VM-exit. This can happen only if dual-monitor
     11420     * treatment of SMI and VMX is enabled, which can (only?) be done by executing VMCALL in
     11421     * VMX root operation. If we get here, something funny is going on.
     11422     *
     11423     * See Intel spec. 33.15.5 "Enabling the Dual-Monitor Treatment".
    1254911424     */
    1255011425    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1256011435{
    1256111436    /*
    12562      * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
    12563      * root operation. Only an STM (SMM transfer monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL
    12564      * in VMX root mode or receive an SMI. If we get here, something funny is going on.
    12565      * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
     11437     * This can only happen if we support dual-monitor treatment of SMI, which can be activated
     11438     * by executing VMCALL in VMX root operation. Only an STM (SMM transfer monitor) would get
     11439     * this VM-exit when we (the executive monitor) execute a VMCALL in VMX root mode or receive
     11440     * an SMI. If we get here, something funny is going on.
     11441     *
     11442     * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
     11443     * See Intel spec. 25.3 "Other Causes of VM-Exits"
    1256611444     */
    1256711445    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     
    1258911467{
    1259011468    /*
    12591      * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
    12592      * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
     11469     * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used.
     11470     * We don't make use of it as our guests don't have direct access to the host LAPIC.
    1259311471     * See Intel spec. 25.3 "Other Causes of VM-exits".
    1259411472     */
     
    1268911567
    1269011568    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    12691     rc    |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
    12692     rc    |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
     11569    rc    |= hmR0VmxImportGuestState(pVCpu,   IEM_CPUMCTX_EXTRN_MUST_MASK
     11570                                            | CPUMCTX_EXTRN_CR4);
    1269311571    AssertRCReturn(rc, rc);
    1269411572
    1269511573    VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
    12696     HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
     11574    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
     11575                                                                                : HM_CHANGED_XCPT_RAISED_MASK);
    1269711576
    1269811577    pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
     
    1271911598HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    1272011599{
    12721     int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    12722     AssertRCReturn(rc, rc);
    12723 
    12724     rc = hmR0VmxCheckVmcsCtls(pVCpu);
     11600    int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     11601    rc    |= hmR0VmxCheckVmcsCtls(pVCpu);
    1272511602    AssertRCReturn(rc, rc);
    1272611603
     
    1272911606
    1273011607#ifdef VBOX_STRICT
    12731     uint32_t       uIntrState;
     11608    uint32_t       fIntrState;
    1273211609    RTHCUINTREG    uHCReg;
    1273311610    uint64_t       u64Val;
     
    1273711614    rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
    1273811615    rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
    12739     rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
     11616    rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState);
    1274011617    AssertRCReturn(rc, rc);
    1274111618
     
    1274411621    Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE    %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
    1274511622    Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH         %#RX32\n", pVmxTransient->cbEntryInstr));
    12746     Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE    %#RX32\n", uIntrState));
     11623    Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE    %#RX32\n", fIntrState));
    1274711624
    1274811625    rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);                        AssertRC(rc);
     
    1282911706
    1283011707    /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
    12831     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
    1283211708    if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
    1283311709        return VERR_EM_INTERPRETER;
     
    1284511721
    1284611722    /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
    12847     int rc  = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    12848     rc     |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    12849     rc     |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     11723    int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
     11724                                            | CPUMCTX_EXTRN_RFLAGS
     11725                                            | CPUMCTX_EXTRN_SS);
    1285011726    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
    12851     {
    12852         rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
    12853         rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    12854     }
     11727        rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
    1285511728    AssertRCReturn(rc, rc);
    12856     Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
     11729    Log4Func(("ecx=%#RX32\n", pMixedCtx->ecx));
    1285711730
    1285811731#ifdef VBOX_STRICT
     
    1290511778
    1290611779    /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
    12907     rc  = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    12908     rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    12909     rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     11780    rc  = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
     11781                                         | CPUMCTX_EXTRN_RFLAGS
     11782                                         | CPUMCTX_EXTRN_SS);
    1291011783    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
    12911     {
    12912         rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
    12913         rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    12914     }
     11784        rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
    1291511785    AssertRCReturn(rc, rc);
    12916     Log4(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));
     11786    Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));
    1291711787
    1291811788    rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
     
    1293411804             * EMInterpretWrmsr() changes it.
    1293511805             */
    12936             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
     11806            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
    1293711807        }
    1293811808        else if (pMixedCtx->ecx == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
     
    1294511815             * the other bits as well, SCE and NXE. See @bugref{7368}.
    1294611816             */
    12947             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
     11817            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_EFER_MSR
     11818                                                       | HM_CHANGED_VMX_ENTRY_CTLS
     11819                                                       | HM_CHANGED_VMX_EXIT_CTLS);
    1294811820        }
    1294911821
     
    1295311825            switch (pMixedCtx->ecx)
    1295411826            {
    12955                 /*
    12956                  * For SYSENTER CS, EIP, ESP MSRs, we set both the flags here so we don't accidentally
    12957                  * overwrite the changed guest-CPU context value while going to ring-3, see @bugref{8745}.
    12958                  */
    12959                 case MSR_IA32_SYSENTER_CS:
    12960                     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
    12961                     HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
    12962                     break;
    12963                 case MSR_IA32_SYSENTER_EIP:
    12964                     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
    12965                     HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
    12966                     break;
    12967                 case MSR_IA32_SYSENTER_ESP:
    12968                     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
    12969                     HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
    12970                     break;
    12971                 case MSR_K8_FS_BASE:        RT_FALL_THRU();
    12972                 case MSR_K8_GS_BASE:        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);     break;
    12973                 case MSR_K6_EFER:           /* already handled above */                             break;
     11827                case MSR_IA32_SYSENTER_CS:  ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR);   break;
     11828                case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);  break;
     11829                case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);  break;
     11830                case MSR_K8_FS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS);                break;
     11831                case MSR_K8_GS_BASE:        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS);                break;
     11832                case MSR_K6_EFER:           /* Nothing to do, already handled above. */ break;
    1297411833                default:
    1297511834                {
    1297611835                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
    12977                         HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     11836                        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    1297811837                    else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
    12979                         HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);
     11838                        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
    1298011839                    break;
    1298111840                }
     
    1300311862                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
    1300411863                    {
    13005                         /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
     11864                        /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */
    1300611865                        if (pMixedCtx->ecx != MSR_K6_EFER)
    1300711866                        {
     
    1304011899{
    1304111900    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    13042 
    13043     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
     11901    /** @todo The guest has likely hit a contended spinlock. We might want to
      11902     *        poke or schedule a different guest VCPU. */
    1304411903    return VINF_EM_RAW_INTERRUPT;
    1304511904}
     
    1307811937    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    1307911938    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
     11939
    1308011940    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    1308111941    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     11942    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1308211943    AssertRCReturn(rc, rc);
    1308311944
     11945    VBOXSTRICTRC rcStrict;
     11946    PVM pVM                              = pVCpu->CTX_SUFF(pVM);
    1308411947    RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
    13085     uint32_t const uAccessType           = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
    13086     PVM pVM                              = pVCpu->CTX_SUFF(pVM);
    13087     VBOXSTRICTRC rcStrict;
    13088     rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/);
     11948    uint32_t const uAccessType           = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification);
    1308911949    switch (uAccessType)
    1309011950    {
    13091         case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:       /* MOV to CRx */
    13092         {
    13093             rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    13094             AssertRCReturn(rc, rc);
    13095 
     11951        case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:       /* MOV to CRx */
     11952        {
    1309611953            rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
    13097                                                  VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
    13098                                                  VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
    13099             AssertMsg(   rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE
     11954                                                 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
     11955                                                 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification));
     11956            AssertMsg(   rcStrict == VINF_SUCCESS
     11957                      || rcStrict == VINF_IEM_RAISED_XCPT
     11958                      || rcStrict == VINF_PGM_CHANGE_MODE
    1310011959                      || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13101             switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
     11960
     11961            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
    1310211962            {
    13103                 case 0: /* CR0 */
    13104                     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     11963                case 0:
     11964                {
     11965                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
     11966                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
    1310511967                    Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0));
    1310611968                    break;
    13107                 case 2: /* CR2 */
     11969                }
     11970
     11971                case 2:
     11972                {
     11973                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
    1310811974                    /* Nothing to do here, CR2 it's not part of the VMCS. */
    1310911975                    break;
    13110                 case 3: /* CR3 */
     11976                }
     11977
     11978                case 3:
     11979                {
    1311111980                    Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx) || pVCpu->hm.s.fUsingDebugLoop);
    13112                     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
     11981                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
     11982                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR3);
    1311311983                    Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3));
    1311411984                    break;
    13115                 case 4: /* CR4 */
    13116                     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
    13117                     Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n",
    13118                           VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
     11985                }
     11986
     11987                case 4:
     11988                {
     11989                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
     11990                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
     11991                    Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
     11992                          pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
    1311911993                    break;
    13120                 case 8: /* CR8 */
     11994                }
     11995
     11996                case 8:
     11997                {
     11998                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
    1312111999                    Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
    13122                     /* CR8 contains the APIC TPR. Was updated by IEMExecDecodedMovCRxWrite(). */
    13123                     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
     12000                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
    1312412001                    break;
     12002                }
    1312512003                default:
    13126                     AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
     12004                    AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)));
    1312712005                    break;
    1312812006            }
    13129 
    13130             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
    1313112007            break;
    1313212008        }
    1313312009
    13134         case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:        /* MOV from CRx */
    13135         {
    13136             rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    13137             AssertRCReturn(rc, rc);
    13138 
     12010        case VMX_EXIT_QUAL_CRX_ACCESS_READ:        /* MOV from CRx */
     12011        {
    1313912012            Assert(   !pVM->hm.s.fNestedPaging
    1314012013                   || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
    1314112014                   || pVCpu->hm.s.fUsingDebugLoop
    13142                    || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
    13143 
     12015                   || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3);
    1314412016            /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
    13145             Assert(   VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
     12017            Assert(   VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 8
    1314612018                   || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
    1314712019
    1314812020            rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
    13149                                                 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
    13150                                                 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
    13151             AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13152             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
    13153             Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
     12021                                                VMX_EXIT_QUAL_CRX_GENREG(uExitQualification),
     12022                                                VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification));
     12023            AssertMsg(   rcStrict == VINF_SUCCESS
     12024                      || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     12025#ifdef VBOX_WITH_STATISTICS
     12026            switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
     12027            {
     12028                case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
     12029                case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
     12030                case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
     12031                case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
     12032                case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
     12033            }
     12034#endif
     12035            Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
    1315412036                  VBOXSTRICTRC_VAL(rcStrict)));
    13155             if (VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification) == X86_GREG_xSP)
    13156                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RSP);
     12037            if (VMX_EXIT_QUAL_CRX_GENREG(uExitQualification) == X86_GREG_xSP)
     12038                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RSP);
    1315712039            break;
    1315812040        }
    1315912041
    13160         case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:        /* CLTS (Clear Task-Switch Flag in CR0) */
    13161         {
    13162             AssertRCReturn(rc, rc);
     12042        case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:        /* CLTS (Clear Task-Switch Flag in CR0) */
     12043        {
    1316312044            rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
    13164             AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13165             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     12045            AssertMsg(   rcStrict == VINF_SUCCESS
     12046                      || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     12047
     12048            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
    1316612049            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
    1316712050            Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
     
    1316912052        }
    1317012053
    13171         case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:        /* LMSW (Load Machine-Status Word into CR0) */
    13172         {
    13173             AssertRCReturn(rc, rc);
     12054        case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:        /* LMSW (Load Machine-Status Word into CR0) */
     12055        {
    1317412056            rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
    13175                                           VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
    13176             AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE,
     12057                                          VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification));
     12058            AssertMsg(   rcStrict == VINF_SUCCESS
     12059                      || rcStrict == VINF_IEM_RAISED_XCPT
     12060                      || rcStrict == VINF_PGM_CHANGE_MODE,
    1317712061                      ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    13178             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
     12062
     12063            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
    1317912064            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
    1318012065            Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
     
    1318712072    }
    1318812073
    13189     HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
     12074    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
     12075                                                                                : HM_CHANGED_XCPT_RAISED_MASK);
    1319012076    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
    1319112077    NOREF(pVM);
     
    1320612092    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    1320712093    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    13208     rc    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    13209     rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    13210     rc    |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    13211     rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     12094    rc    |= hmR0VmxImportGuestState(pVCpu,   IEM_CPUMCTX_EXTRN_MUST_MASK
     12095                                            | CPUMCTX_EXTRN_SREG_MASK
     12096                                            | CPUMCTX_EXTRN_EFER);
    1321212097    /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
    1321312098    AssertRCReturn(rc, rc);
    1321412099
     1321512100    /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
    13216     uint32_t uIOPort      = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
    13217     uint8_t  uIOWidth     = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
    13218     bool     fIOWrite     = (   VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
    13219                              == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
    13220     bool     fIOString    = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
     12101    uint32_t uIOPort      = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification);
     12102    uint8_t  uIOWidth     = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification);
     12103    bool     fIOWrite     = (   VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification)
     12104                             == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
     12105    bool     fIOString    = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification);
    1322112106    bool     fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
    1322212107    bool     fDbgStepping = pVCpu->hm.s.fSingleInstruction;
     
    1324212127    {
    1324312128        /* I/O operation lookup arrays. */
    13244         static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 };                   /* Size of the I/O accesses. */
    13245         static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff };  /* AND masks for saving the result (in AL/AX/EAX). */
    13246 
     12129        static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 };                    /* Size of the I/O accesses. */
     12130        static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff };   /* AND masks for saving result in AL/AX/EAX. */
    1324712131        uint32_t const cbValue  = s_aIOSizes[uIOWidth];
    1324812132        uint32_t const cbInstr  = pVmxTransient->cbInstr;
     
    1326312147            {
    1326412148                int rc2  = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    13265                 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
    13266                 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    1326712149                AssertRCReturn(rc2, rc2);
    1326812150                AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
    1326912151                AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
    13270                 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
    13271                 bool    fRep        = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
     12152                IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
     12153                bool const fRep           = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification);
    1327212154                if (fIOWrite)
    1327312155                    rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
     
    1328512167            }
    1328612168            else
    13287             {
    13288                 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
    13289                 int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    13290                 AssertRCReturn(rc2, rc2);
    1329112169                rcStrict = IEMExecOne(pVCpu);
    13292             }
    13293             /** @todo IEM needs to be setting these flags somehow. */
    13294             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
     12170
     12171            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
    1329512172            fUpdateRipAlready = true;
    1329612173        }
     
    1330012177             * IN/OUT - I/O instruction.
    1330112178             */
    13302             Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     12179            Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
     12180                  fIOWrite ? 'w' : 'r'));
    1330312181            uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
    13304             Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
     12182            Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification));
    1330512183            if (fIOWrite)
    1330612184            {
     
    1332812206            {
    1332912207                hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, cbInstr);
    13330                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
     12208                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
    1333112209            }
    1333212210
    1333312211            /*
    13334              * INS/OUTS with REP prefix updates RFLAGS, can be observed with triple-fault guru while booting Fedora 17 64-bit guest.
      12212             * INS/OUTS with a REP prefix updates RFLAGS, which can be observed as a triple-fault
      12213             * guru meditation while booting a Fedora 17 64-bit guest.
     12214             *
    1333512215             * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
    1333612216             */
     
    1333812218            {
    1333912219                /** @todo Single-step for INS/OUTS with REP prefix? */
    13340                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
     12220                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
    1334112221            }
    1334212222            else if (  !fDbgStepping
    1334312223                     && fGstStepping)
    1334412224            {
    13345                 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
     12225                rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     12226                AssertRCReturn(rc, rc);
    1334612227            }
    1334712228
     
    1335112232             * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
    1335212233             */
    13353             int rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
    13354             AssertRCReturn(rc2, rc2);
     12234            rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7);
     12235            AssertRCReturn(rc, rc);
    1335512236
    1335612237            /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
     
    1337712258                        ASMSetDR6(pMixedCtx->dr[6]);
    1337812259                    if (pMixedCtx->dr[7] != uDr7)
    13379                         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
     12260                        pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
    1338012261
    1338112262                    hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
     
    1342012301         * Frequent exit or something needing probing.  Get state and call EMHistoryExec.
    1342112302         */
    13422         int rc2 = hmR0VmxSaveGuestRegsForIemInterpreting(pVCpu);
     12303        int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1342312304        AssertRCReturn(rc2, rc2);
    1342412305        STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
     
    1342612307        Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
    1342712308              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
    13428               VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",
     12309              VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",
    1342912310              fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
    1343012311
    1343112312        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    13432         HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     12313        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    1343312314
    1343412315        Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
     
    1345112332    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    1345212333    AssertRCReturn(rc, rc);
    13453     if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
     12334    if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
    1345412335    {
    1345512336        rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
     
    1353412415    }
    1353512416
    13536 #if 0
    13537     /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
    13538      *   just sync the whole thing. */
    13539     int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    13540 #else
    13541     /* Aggressive state sync. for now. */
    13542     int rc  = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
    13543     rc     |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    13544     rc     |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    13545 #endif
      12417    /* IOMMMIOPhysHandler() below may call into IEM, save the necessary state. */
     12418    int rc  = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1354612419    rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    1354712420    AssertRCReturn(rc, rc);
    1354812421
     1354912422    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
    13550     uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
     12423    uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
    1355112424    VBOXSTRICTRC rcStrict2;
    1355212425    switch (uAccessType)
     
    1355612429        {
    1355712430            AssertMsg(   !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
    13558                       || VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,
     12431                      || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,
    1355912432                      ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
    1356012433
    1356112434            RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase;   /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
    1356212435            GCPhys &= PAGE_BASE_GC_MASK;
    13563             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
     12436            GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
    1356412437            PVM pVM = pVCpu->CTX_SUFF(pVM);
    13565             Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
    13566                  VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
     12438            Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
     12439                 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
    1356712440
    1356812441            rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu,
    1356912442                                           uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
    1357012443                                           CPUMCTX2CORE(pMixedCtx), GCPhys);
    13571             Log4(("ApicAccess rcStrict2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
     12444            Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
    1357212445            if (   rcStrict2 == VINF_SUCCESS
    1357312446                || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
    1357412447                || rcStrict2 == VERR_PAGE_NOT_PRESENT)
    1357512448            {
    13576                 HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
    13577                                     | HM_CHANGED_GUEST_RSP
    13578                                     | HM_CHANGED_GUEST_RFLAGS
    13579                                     | HM_CHANGED_GUEST_APIC_STATE);
     12449                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_RIP
     12450                                                           | HM_CHANGED_GUEST_RSP
     12451                                                           | HM_CHANGED_GUEST_RFLAGS
     12452                                                           | HM_CHANGED_GUEST_APIC_TPR);
    1358012453                rcStrict2 = VINF_SUCCESS;
    1358112454            }
     
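For the linear-access case above, the accessed guest-physical address is formed from the page-aligned APIC base (u64MsrApicBase) plus the page offset reported by VMX_EXIT_QUAL_APIC_ACCESS_OFFSET. A tiny stand-alone illustration, assuming the usual 4 KiB page size in place of PAGE_BASE_GC_MASK:

    #include <stdint.h>

    #define DEMO_PAGE_OFFSET_MASK  UINT64_C(0xfff)   /* 4 KiB pages assumed. */

    /* Compute the faulting guest-physical address of an APIC-page access. */
    uint64_t demoApicAccessPhysAddr(uint64_t uApicBaseMsr, uint32_t offAccess)
    {
        uint64_t const GCPhysBase = uApicBaseMsr & ~DEMO_PAGE_OFFSET_MASK;  /* strip enable/flag bits */
        return GCPhysBase + (offAccess & DEMO_PAGE_OFFSET_MASK);
    }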
    1358412457
    1358512458        default:
    13586             Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
     12459            Log4Func(("uAccessType=%#x\n", uAccessType));
    1358712460            rcStrict2 = VINF_EM_RAW_EMULATE_INSTR;
    1358812461            break;
     
    1363512508        rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    1363612509        AssertRCReturn(rc, rc);
    13637         if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
     12510        if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    1363812511            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    1363912512        else
     
    1364912522     */
    1365012523    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    13651     rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    13652     rc    |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
     12524    rc    |= hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_SREG_MASK
     12525                                            | CPUMCTX_EXTRN_DR7);
    1365312526    AssertRCReturn(rc, rc);
    13654     Log4(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
     12527    Log4Func(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
    1365512528
    1365612529    PVM pVM = pVCpu->CTX_SUFF(pVM);
    13657     if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
     12530    if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
    1365812531    {
    1365912532        rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
    13660                                  VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
    13661                                  VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
     12533                                 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification),
     12534                                 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification));
    1366212535        if (RT_SUCCESS(rc))
    13663             HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
     12536            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
    1366412537        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    1366512538    }
     
    1366712540    {
    1366812541        rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
    13669                                 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
    13670                                 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
     12542                                VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification),
     12543                                VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification));
    1367112544        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    1367212545    }
     
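Both branches above pull the debug-register number, the general-purpose register and the access direction out of the exit qualification via the VMX_EXIT_QUAL_DRX_* macros. For orientation, a rough stand-alone decoder following the MOV DR exit-qualification layout described in the Intel SDM (verify the exact bit positions against the spec before relying on them):

    #include <stdint.h>

    unsigned demoDrxRegister(uint64_t uQual) { return (unsigned)(uQual & 0x7); }         /* bits 2:0  */
    unsigned demoDrxIsRead(uint64_t uQual)   { return (unsigned)((uQual >> 4) & 0x1); }  /* bit 4: 1 = MOV from DRx */
    unsigned demoDrxGenReg(uint64_t uQual)   { return (unsigned)((uQual >> 8) & 0xf); }  /* bits 11:8 */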
     1371412587     * Get sufficient state and update the exit history entry.
    1371512588     */
    13716     RTGCPHYS GCPhys = 0;
     12589    RTGCPHYS GCPhys;
    1371712590    int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
    13718 
    13719 #if 0
    13720     rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);     /** @todo Can we do better?  */
    13721 #else
    13722     /* Aggressive state sync. for now. */
    13723     rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
    13724     rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    13725     rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    13726 #endif
     12591    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1372712592    AssertRCReturn(rc, rc);
    1372812593
     
    1374812613        {
    1374912614            /* Successfully handled MMIO operation. */
    13750             HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
    13751                                 | HM_CHANGED_GUEST_RSP
    13752                                 | HM_CHANGED_GUEST_RFLAGS
    13753                                 | HM_CHANGED_GUEST_APIC_STATE);
     12615            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_RIP
     12616                                                       | HM_CHANGED_GUEST_RSP
     12617                                                       | HM_CHANGED_GUEST_RFLAGS
     12618                                                       | HM_CHANGED_GUEST_APIC_TPR);
    1375412619            rcStrict = VINF_SUCCESS;
    1375512620        }
     
    1376112626         */
    1376212627        Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
    13763         int rc2 = hmR0VmxSaveGuestRegsForIemInterpreting(pVCpu);
     12628        int rc2 = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1376412629        AssertRCReturn(rc2, rc2);
    1376512630
     
    1376812633
    1376912634        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
    13770         HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     12635        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    1377112636
    1377212637        Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
     
    1379312658        /* In the unlikely case that the EPT violation happened as a result of delivering an event, log it. */
    1379412659        if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
    13795             Log4(("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo));
     12660            Log4Func(("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo));
    1379612661    }
    1379712662    else
     
    1380212667    }
    1380312668
    13804     RTGCPHYS GCPhys = 0;
     12669    RTGCPHYS GCPhys;
    1380512670    int rc  = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
    13806     rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    13807 #if 0
    13808     rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);     /** @todo Can we do better?  */
    13809 #else
    13810     /* Aggressive state sync. for now. */
    13811     rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
    13812     rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    13813     rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    13814 #endif
     12671    rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     12672    rc     |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1381512673    AssertRCReturn(rc, rc);
    1381612674
     
    1381912677
    1382012678    RTGCUINT uErrorCode = 0;
    13821     if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
     12679    if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
    1382212680        uErrorCode |= X86_TRAP_PF_ID;
    13823     if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
     12681    if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_DATA_WRITE)
    1382412682        uErrorCode |= X86_TRAP_PF_RW;
    13825     if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
     12683    if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
    1382612684        uErrorCode |= X86_TRAP_PF_P;
    1382712685
    1382812686    TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
    1382912687
    13830     Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
    13831           uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
     12688    Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
     12689              uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
    1383212690
    1383312691    /* Handle the pagefault trap for the nested shadow table. */
     
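The snippet above synthesises an x86 #PF error code from individual EPT-violation exit-qualification bits before handing the fault to TRPMAssertXcptPF. A self-contained sketch of that translation; the #PF error-code bits follow the architectural layout, while the qualification bit positions here are placeholders (the real VMX_EXIT_QUAL_EPT_* values differ):

    #include <stdint.h>

    #define DEMO_PF_P    0x01u   /* page was present   */
    #define DEMO_PF_RW   0x02u   /* access was a write */
    #define DEMO_PF_ID   0x10u   /* instruction fetch  */

    #define DEMO_QUAL_DATA_WRITE     0x02u   /* placeholder bit positions */
    #define DEMO_QUAL_INSTR_FETCH    0x04u
    #define DEMO_QUAL_ENTRY_PRESENT  0x08u

    uint32_t demoEptQualToPfErrCode(uint64_t uQual)
    {
        uint32_t uErr = 0;
        if (uQual & DEMO_QUAL_INSTR_FETCH)   uErr |= DEMO_PF_ID;
        if (uQual & DEMO_QUAL_DATA_WRITE)    uErr |= DEMO_PF_RW;
        if (uQual & DEMO_QUAL_ENTRY_PRESENT) uErr |= DEMO_PF_P;
        return uErr;
    }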
    1384312701        /* Successfully synced our nested page tables. */
    1384412702        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
    13845         HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
    13846                             | HM_CHANGED_GUEST_RSP
    13847                             | HM_CHANGED_GUEST_RFLAGS);
     12703        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_RIP
     12704                                                   | HM_CHANGED_GUEST_RSP
     12705                                                   | HM_CHANGED_GUEST_RFLAGS);
    1384812706        return VINF_SUCCESS;
    1384912707    }
    1385012708
    13851     Log4(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
     12709    Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
    1385212710    return rcStrict2;
    1385312711}
     
    1387112729    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
    1387212730
    13873     int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     12731    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
    1387412732    AssertRCReturn(rc, rc);
    1387512733
     
    1390112759    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
    1390212760
    13903     /** @todo Try optimize this by not saving the entire guest state unless
    13904      *        really needed. */
    13905     int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     12761    int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1390612762    AssertRCReturn(rc, rc);
    1390712763
    13908     PVM pVM = pVCpu->CTX_SUFF(pVM);
    13909     rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
     12764    rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx));
    1391012765    if (rc == VINF_EM_RAW_GUEST_TRAP)
    1391112766    {
     
    1393812793    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1393912794    AssertRCReturn(rc, rc);
    13940     Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
     12795    Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
    1394112796
    1394212797    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
     
    1395312808    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    1395412809    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    13955     Log6(("XcptDB\n"));
    1395612810
    1395712811    /*
     
    1396012814     */
    1396112815    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    13962     AssertRCReturn(rc, rc);
    1396312816
    1396412817    /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
     
    1396812821
    1396912822    rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
     12823    Log6Func(("rc=%Rrc\n", rc));
    1397012824    if (rc == VINF_EM_RAW_GUEST_TRAP)
    1397112825    {
     
    1398612840        VMMRZCallRing3Enable(pVCpu);
    1398712841
    13988         rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
     12842        rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7);
    1398912843        AssertRCReturn(rc, rc);
    1399012844
     
    1400212856         * Raise #DB in the guest.
    1400312857         *
    14004          * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use
    14005          * hmR0VmxSetPendingXcptDB() as the #DB could've been raised while executing ICEBP (INT1) and not the
    14006          * regular #DB. Thus it -may- trigger different handling in the CPU (like skipped DPL checks), see @bugref{6398}.
     12858         * It is important to reflect exactly what the VM-exit gave us (preserving the
     12859         * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
     12860         * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
     12861         * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
    1400712862         *
    14008          * Intel re-documented ICEBP/INT1 on May 2018 previously documented as part of Intel 386,
    14009          * see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
      12863         * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented as part of
      12864         * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
    1401012865         */
    1401112866        rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     
    1405112906        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    1405212907        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    14053         rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     12908        rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1405412909        AssertRCReturn(rc, rc);
    14055         Log4(("#GP Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
    14056               pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
     12910        Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
     12911                  pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
    1405712912        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    1405812913                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     
    1406412919
    1406512920    /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
    14066     rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     12921    rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1406712922    AssertRCReturn(rc, rc);
    1406812923
     
    1407612931        rc = VINF_SUCCESS;
    1407712932        Assert(cbOp == pDis->cbInstr);
    14078         Log4(("#GP Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
     12933        Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
    1407912934        switch (pDis->pCurInstr->uOpcode)
    1408012935        {
     
    1408412939                pMixedCtx->eflags.Bits.u1RF = 0;
    1408512940                pMixedCtx->rip += pDis->cbInstr;
    14086                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     12941                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1408712942                if (   !fDbgStepping
    1408812943                    && pMixedCtx->eflags.Bits.u1TF)
    14089                     hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
     12944                {
     12945                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     12946                    AssertRCReturn(rc, rc);
     12947                }
    1409012948                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
    1409112949                break;
     
    1410312961                    Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    1410412962                }
    14105                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     12963                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1410612964                if (   !fDbgStepping
    1410712965                    && pMixedCtx->eflags.Bits.u1TF)
    14108                     hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
     12966                {
     12967                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     12968                    AssertRCReturn(rc, rc);
     12969                }
    1410912970                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
    1411012971                break;
     
    1411612977                pMixedCtx->rip += pDis->cbInstr;
    1411712978                pMixedCtx->eflags.Bits.u1RF = 0;
    14118                 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     12979                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    1411912980                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    1412012981                break;
     
    1412312984            case OP_POPF:
    1412412985            {
    14125                 Log4(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
     12986                Log4Func(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
    1412612987                uint32_t cbParm;
    1412712988                uint32_t uMask;
     
    1415513016                    break;
    1415613017                }
    14157                 Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
     13018                Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
    1415813019                pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
    1415913020                                      | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
    14160                 pMixedCtx->esp              += cbParm;
    14161                 pMixedCtx->esp              &= uMask;
    14162                 pMixedCtx->rip              += pDis->cbInstr;
    14163                 HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
    14164                                     | HM_CHANGED_GUEST_RSP
    14165                                     | HM_CHANGED_GUEST_RFLAGS);
     13021                pMixedCtx->esp += cbParm;
     13022                pMixedCtx->esp &= uMask;
     13023                pMixedCtx->rip += pDis->cbInstr;
     13024                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_RIP
     13025                                                           | HM_CHANGED_GUEST_RSP
     13026                                                           | HM_CHANGED_GUEST_RFLAGS);
     1416613027                /* Generate a pending-debug exception when the guest is stepping over POPF, regardless of how
     1416713028                   POPF restores EFLAGS.TF. */
    1416813029                if (  !fDbgStepping
    1416913030                    && fGstStepping)
    14170                     hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
     13031                {
     13032                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     13033                    AssertRCReturn(rc, rc);
     13034                }
    1417113035                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
    1417213036                break;
     
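The POPF emulation above merges the value popped off the stack into EFLAGS under two masks, the POPF-writable bit set and an operand-size/IOPL-dependent mask, and unconditionally clears RF. A minimal sketch of just that merge step; the mask constant is a placeholder, not the real X86_EFL_POPF_BITS:

    #include <stdint.h>

    #define DEMO_EFL_POPF_BITS  0x00fcffffu   /* placeholder for the POPF-writable set */
    #define DEMO_EFL_RF         0x00010000u   /* resume flag (bit 16) */

    /* Keep the current flags outside the writable set, take the rest from the popped value. */
    uint32_t demoMergePopf(uint32_t fEflCur, uint32_t fEflPopped, uint32_t fMask)
    {
        uint32_t const fWritable = DEMO_EFL_POPF_BITS & fMask;
        return (fEflCur & ~(fWritable | DEMO_EFL_RF)) | (fEflPopped & fWritable);
    }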
    1420913073                    break;
    1421013074                }
    14211                 Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
    14212                 pMixedCtx->esp               -= cbParm;
    14213                 pMixedCtx->esp               &= uMask;
    14214                 pMixedCtx->rip               += pDis->cbInstr;
    14215                 pMixedCtx->eflags.Bits.u1RF   = 0;
    14216                 HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
    14217                                     | HM_CHANGED_GUEST_RSP
    14218                                     | HM_CHANGED_GUEST_RFLAGS);
     13075                Log4Func(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
     13076                pMixedCtx->esp -= cbParm;
     13077                pMixedCtx->esp &= uMask;
     13078                pMixedCtx->rip += pDis->cbInstr;
     13079                pMixedCtx->eflags.Bits.u1RF = 0;
     13080                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_RIP
     13081                                                           | HM_CHANGED_GUEST_RSP
     13082                                                           | HM_CHANGED_GUEST_RFLAGS);
    1421913083                if (  !fDbgStepping
    1422013084                    && pMixedCtx->eflags.Bits.u1TF)
    14221                     hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
     13085                {
     13086                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     13087                    AssertRCReturn(rc, rc);
     13088                }
    1422213089                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
    1422313090                break;
     
    1425813125                                              | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
    1425913126                pMixedCtx->sp                += sizeof(aIretFrame);
    14260                 HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
    14261                                     | HM_CHANGED_GUEST_SEGMENT_REGS
    14262                                     | HM_CHANGED_GUEST_RSP
    14263                                     | HM_CHANGED_GUEST_RFLAGS);
     13127                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_RIP
     13128                                                           | HM_CHANGED_GUEST_CS
     13129                                                           | HM_CHANGED_GUEST_RSP
     13130                                                           | HM_CHANGED_GUEST_RFLAGS);
    1426413131                /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
    1426513132                if (   !fDbgStepping
    1426613133                    && fGstStepping)
    14267                     hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
    14268                 Log4(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
     13134                {
     13135                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
     13136                    AssertRCReturn(rc, rc);
     13137                }
     13138                Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
    1426913139                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
    1427013140                break;
     
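The IRET branch above pops a three-word real-mode frame (IP, CS, FLAGS), merges FLAGS much like POPF does, and advances SP past the frame. A simplified stand-alone sketch with hypothetical structure and parameter names, merging FLAGS under a single combined writable mask:

    #include <stdint.h>

    typedef struct DEMOIRETCTX { uint16_t ip, cs, flags, sp; } DEMOIRETCTX;

    /* aFrame[0]=IP, aFrame[1]=CS, aFrame[2]=FLAGS, as popped from the guest stack. */
    void demoRealModeIret(DEMOIRETCTX *pCtx, const uint16_t aFrame[3], uint16_t fFlagsMask)
    {
        pCtx->ip    = aFrame[0];
        pCtx->cs    = aFrame[1];
        pCtx->flags = (uint16_t)((pCtx->flags & ~fFlagsMask) | (aFrame[2] & fFlagsMask));
        pCtx->sp    = (uint16_t)(pCtx->sp + 3 * sizeof(uint16_t));   /* discard the IRET frame */
    }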
    1429113161                {
    1429213162                    pMixedCtx->eflags.Bits.u1RF = 0;
    14293                     HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
     13163                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
    1429413164                }
    1429513165                break;
     
    1430213172                                                                    EMCODETYPE_SUPERVISOR);
    1430313173                rc = VBOXSTRICTRC_VAL(rc2);
    14304                 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
     13174                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    1430513175                /** @todo We have to set pending-debug exceptions here when the guest is
    1430613176                 *        single-stepping depending on the instruction that was interpreted. */
    14307                 Log4(("#GP rc=%Rrc\n", rc));
     13177                Log4Func(("#GP rc=%Rrc\n", rc));
    1430813178                break;
    1430913179            }
     
    1433213202#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    1433313203    AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active,
    14334               ("uVector=%#04x u32XcptBitmap=%#010RX32\n",
      13204              ("uVector=%#x u32XcptBitmap=%#RX32\n",
    1433513205               VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.u32XcptBitmap));
    1433613206#endif
     
    1434113211    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1434213212    AssertRCReturn(rc, rc);
    14343     Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
     13213    Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
    1434413214
    1434513215#ifdef DEBUG_ramshankar
    14346     rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     13216    rc |= hmR0VmxImportGuestState(pVCpu,  CPUMCTX_EXTRN_CS
     13217                                        | CPUMCTX_EXTRN_RIP);
    1434713218    uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
    1434813219    Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip));
     
    1438413255            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
    1438513256            hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
    14386             Log4(("Pending #DF due to vectoring #PF. NP\n"));
     13257            Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
    1438713258        }
    1438813259        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
     
    1439813269    }
    1439913270
    14400     rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     13271    rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    1440113272    AssertRCReturn(rc, rc);
    1440213273
    14403     Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
    14404           pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
     13274    Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
     13275              pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
    1440513276
    1440613277    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
     
    1440813279                          (RTGCPTR)pVmxTransient->uExitQualification);
    1440913280
    14410     Log4(("#PF: rc=%Rrc\n", rc));
     13281    Log4Func(("#PF: rc=%Rrc\n", rc));
    1441113282    if (rc == VINF_SUCCESS)
    1441213283    {
    14413 #if 0
    14414         /* Successfully synced shadow pages tables or emulated an MMIO instruction. */
    14415         /** @todo this isn't quite right, what if guest does lgdt with some MMIO
    14416          *        memory? We don't update the whole state here... */
    14417         HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
    14418                             | HM_CHANGED_GUEST_RSP
    14419                             | HM_CHANGED_GUEST_RFLAGS
    14420                             | HM_CHANGED_GUEST_APIC_STATE);
    14421 #else
    1442213284        /*
    1442313285         * This is typically a shadow page table sync or a MMIO instruction. But we may have
    1442413286         * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
    1442513287         */
    14426         /** @todo take advantage of CPUM changed flags instead of brute forcing. */
    14427         HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
    14428 #endif
     13288        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    1442913289        TRPMResetTrap(pVCpu);
    1443013290        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
     
    1444913309            pVCpu->hm.s.Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
    1445013310            hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
    14451             Log4(("#PF: Pending #DF due to vectoring #PF\n"));
     13311            Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
    1445213312        }
    1445313313