VirtualBox

Changeset 74468 in vbox for trunk/src/VBox/VMM/VMMAll


Ignore:
Timestamp:
Sep 26, 2018 4:10:23 AM (6 years ago)
Author:
vboxsync
Message:

VMM/IEM: Nested VMX: bugref:9180 VM-exit bits.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r74450 r74468  
    113113    /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
    114114    {
    115         /*     0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
     115        /*     0 */ RT_OFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
    116116        /*   1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    117117        /*  9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    231231    /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
    232232    {
    233         /*     0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
    234         /*     1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
    235         /*     2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
    236         /*     3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
    237         /*     4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
    238         /*     5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
     233        /*     0 */ RT_OFFSETOF(VMXVVMCS, u64RoExitQual),
     234        /*     1 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRcx),
     235        /*     2 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRsi),
     236        /*     3 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRdi),
     237        /*     4 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRip),
     238        /*     5 */ RT_OFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
    239239        /*  6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    240240        /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    12291229
    12301230/**
     1231 * Sets the VM-exit qualification VMCS field.
     1232 *
     1233 * @param   pVCpu       The cross context virtual CPU structure.
     1234 * @param   uExitQual   The VM-exit qualification field.
     1235 */
     1236DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t uExitQual)
     1237{
     1238    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1239    pVmcs->u64RoExitQual.u = uExitQual;
     1240}
     1241
     1242
     1243/**
     1244 * Sets the VM-exit instruction length VMCS field.
     1245 *
     1246 * @param   pVCpu       The cross context virtual CPU structure.
     1247 * @param   cbInstr     The VM-exit instruction length (in bytes).
     1248 */
     1249DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
     1250{
     1251    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1252    pVmcs->u32RoExitInstrLen = cbInstr;
     1253}
     1254
     1255
     1256/**
     1257 * Sets the VM-exit instruction info. VMCS field.
     1258 *
     1259 * @param   pVCpu           The cross context virtual CPU structure.
     1260 * @param   uExitInstrInfo  The VM-exit instruction info. field.
     1261 */
     1262DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
     1263{
     1264    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1265    pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
     1266}
     1267
     1268
     1269/**
    12311270 * Implements VMSucceed for VMX instruction success.
    12321271 *
     
    24752514IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
    24762515{
    2477     /* Assert that we are not called multiple times during VM-entry. */
     2516    /* We shouldn't be called multiple times during VM-entry. */
    24782517    Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
     2518
     2519    /* MTF should not be set outside VMX non-root mode. */
     2520    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_MTF));
    24792521
    24802522    /*
    24812523     * Preserve the required force-flags.
    24822524     *
    2483      * We only preserve the force-flags that would affect the execution of the
    2484      * nested-guest (or the guest).
     2525     * We cache and clear force-flags that would affect the execution of the
     2526     * nested-guest. Cached flags are then restored while returning to the guest
     2527     * if necessary.
    24852528     *
    2486      *   - VMCPU_FF_INHIBIT_INTERRUPTS need not be preserved as VM-exit explicitly
    2487      *     clears interrupt-inhibition and on VM-entry the guest-interruptibility
    2488      *     state provides the inhibition if any.
     2529     *   - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
     2530     *     interrupts until the completion of the current VMLAUNCH/VMRESUME
     2531     *     instruction. Interrupt inhibition for any nested-guest instruction
     2532     *     will be set later while loading the guest-interruptibility state.
    24892533     *
    2490      *   - VMCPU_FF_BLOCK_NMIS needs not be preserved as VM-entry does not discard
    2491      *     any NMI blocking. VM-exits caused directly by NMIs (intercepted by the
    2492      *     exception bitmap) do block subsequent NMIs.
     2534     *   - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
     2535     *     successful VM-entry need to continue blocking NMIs if it was in effect
     2536     *     during VM-entry.
    24932537     *
    24942538     *   - MTF need not be preserved as it's used only in VMX non-root mode and
    24952539     *     is supplied on VM-entry through the VM-execution controls.
    24962540     *
    2497      * The remaining FFs (e.g. timers) can stay in place so that we will be able to
    2498      * generate interrupts that should cause #VMEXITs for the nested-guest.
     2541     * The remaining FFs (e.g. timers, APIC updates) must stay in place so that
     2542     * we will be able to generate interrupts that may cause VM-exits for
     2543     * the nested-guest.
    24992544     */
    2500     uint32_t const fDiscardMask = VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_MTF | VMCPU_FF_BLOCK_NMIS;
    2501     pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & fDiscardMask;
    2502     VMCPU_FF_CLEAR(pVCpu, fDiscardMask);
     2545    pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
     2546
     2547    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS))
     2548        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS);
    25032549}
    25042550
     
    26722718    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    26732719
    2674     /*
    2675      * Activity-state: VM-exits occur before changing the activity state
    2676      * of the processor and hence we shouldn't need to change it.
    2677      */
     2720    /* Activity-state: VM-exits occur before changing the activity state, nothing further to do */
    26782721
    26792722    /* Interruptibility-state. */
     
    27292772    Assert(pVmcs);
    27302773
    2731     /*
    2732      * Save guest control, debug, segment, descriptor-table registers and some MSRs.
    2733      */
    27342774    iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
    27352775    iemVmxVmexitSaveGuestSegRegs(pVCpu);
     
    27372777    /*
    27382778     * Save guest RIP, RSP and RFLAGS.
     2779     * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
    27392780     */
    27402781    /* We don't support enclave mode yet. */
     
    27432784    pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u;  /** @todo NSTVMX: Check RFLAGS.RF handling. */
    27442785
    2745     /* Save guest non-register state. */
    27462786    iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
    27472787}
     
    27892829    {
    27902830        if (   !pMsr->u32Reserved
    2791             &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
    2792             &&  pMsr->u32Msr != MSR_IA32_SMBASE)
     2831            &&  pMsr->u32Msr != MSR_IA32_SMBASE
     2832            &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
    27932833        {
    27942834            VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
     
    28362876 * Performs a VMX abort (due to a fatal error during VM-exit).
    28372877 *
    2838  * @returns VBox status code.
     2878 * @returns Strict VBox status code.
    28392879 * @param   pVCpu       The cross context virtual CPU structure.
    28402880 * @param   enmAbort    The VMX abort reason.
    28412881 */
    2842 IEM_STATIC int iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
     2882IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
    28432883{
    28442884    /*
     
    30793119    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    30803120    const char *const pszFailure = "VMX-abort";
    3081     bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     3121    bool const fHostInLongMode   = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    30823122
    30833123    if (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
     
    31603200                &&  pMsr->u32Msr != MSR_K8_GS_BASE
    31613201                &&  pMsr->u32Msr != MSR_K6_EFER
    3162                 &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
    3163                 &&  pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
     3202                &&  pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
     3203                &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
    31643204            {
    31653205                VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
     
    31993239 * Loads the host state as part of VM-exit.
    32003240 *
    3201  * @returns VBox status code.
     3241 * @returns Strict VBox status code.
    32023242 * @param   pVCpu           The cross context virtual CPU structure.
    32033243 * @param   uExitReason     The VM-exit reason (for logging purposes).
    32043244 */
    3205 IEM_STATIC int iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
     3245IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
    32063246{
    32073247    /*
     
    32203260    }
    32213261
    3222     /*
    3223      * Load host control, debug, segment, descriptor-table registers and some MSRs.
    3224      */
    32253262    iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
    32263263    iemVmxVmexitLoadHostSegRegs(pVCpu);
     
    32443281    if (rcStrict == VINF_SUCCESS)
    32453282    {
    3246         /* Check host PDPTEs. */
     3283        /* Check host PDPTEs (only when we've fully switched page tables). */
    32473284        /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
    32483285        int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
     
    32623299    {
    32633300        Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
    3264         return rcStrict;
     3301        return VBOXSTRICTRC_VAL(rcStrict);
    32653302    }
    32663303
     
    32753312    }
    32763313
    3277     return VINF_SUCCESS;
     3314    return rcStrict;
    32783315}
    32793316
     
    32913328    Assert(pVmcs);
    32923329
    3293     pVmcs->u32RoExitReason   = uExitReason;
     3330    pVmcs->u32RoExitReason = uExitReason;
    32943331
    32953332    /** @todo NSTVMX: Update VM-exit instruction length for instruction VM-exits. */
     
    33133350    }
    33143351
    3315     int rc = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
    3316     if (RT_FAILURE(rc))
    3317         return rc;
    3318 
    3319     /** @todo NSTVMX: rest of VM-exit. */
     3352    /*
     3353     * The high bits of the VM-exit reason are only relevant when the VM-exit occurs in
     3354     * enclave mode/SMM which we don't support yet. If we ever add support for it, we can
     3355     * pass just the lower bits, till then an assert should suffice.
     3356     */
     3357    Assert(!RT_HI_U16(uExitReason));
     3358
     3359    VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
     3360    if (RT_FAILURE(rcStrict))
     3361        LogFunc(("Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
    33203362
    33213363    /* We're no longer in nested-guest execution mode. */
    33223364    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
    33233365
    3324     return VINF_SUCCESS;
     3366    return rcStrict;
    33253367}
    33263368
     
    33393381     */
    33403382    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     3383    const char *const pszFailure  = "VM-exit";
    33413384    bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
    3342     const char *const pszFailure = "VM-exit";
    33433385
    33443386    /* CR0 reserved bits. */
     
    34683510     */
    34693511    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     3512    const char *const pszFailure  = "VM-exit";
    34703513    bool const fGstInV86Mode      = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
    34713514    bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
    34723515    bool const fGstInLongMode     = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
    3473     const char *const pszFailure = "VM-exit";
    34743516
    34753517    /* Selectors. */
     
    38573899    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    38583900    const char *const pszFailure = "VM-exit";
     3901
    38593902    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
    38603903    {
     
    39023945    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    39033946    const char *const pszFailure = "VM-exit";
    3904     bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
     3947    bool const fGstInLongMode    = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
    39053948
    39063949    /* RIP. */
     
    40774120            { /* likely */ }
    40784121            else
     4122            {
     4123                /*
     4124                 * We don't support injecting NMIs when blocking-by-STI would be in effect.
     4125                 * We update the VM-exit qualification only when blocking-by-STI is set
     4126                 * without blocking-by-MovSS being set. Although in practice it does not
     4127                 * make much difference since the order of checks are implementation defined.
     4128                 */
     4129                if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
     4130                    iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
    40794131                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
     4132            }
    40804133
    40814134            if (   !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
     
    41394192        else
    41404193        {
    4141             pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
     4194            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
    41424195            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
    41434196        }
     
    41484201            || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
    41494202        {
    4150             pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
     4203            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
    41514204            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
    41524205        }
     
    41584211        if (RT_FAILURE(rc))
    41594212        {
    4160             pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
     4213            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
    41614214            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
    41624215        }
     
    41674220        else
    41684221        {
    4169             pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
     4222            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
    41704223            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
    41714224        }
     
    41774230        else
    41784231        {
    4179             pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
     4232            iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
    41804233            IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
    41814234        }
     
    42204273            else
    42214274            {
    4222                 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
     4275                iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
    42234276                VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
    42244277                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
     
    42284281    else
    42294282    {
    4230         pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
     4283        iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
    42314284        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
    42324285    }
     
    42814334IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
    42824335{
    4283     /* Check control registers, debug registers and MSRs. */
    42844336    int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
    42854337    if (RT_SUCCESS(rc))
    42864338    {
    4287         /* Check guest segment registers, LDTR, TR. */
    42884339        rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
    42894340        if (RT_SUCCESS(rc))
    42904341        {
    4291             /* Check guest GDTR and IDTR. */
    42924342            rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
    42934343            if (RT_SUCCESS(rc))
    42944344            {
    4295                 /* Check guest RIP, RSP and RFLAGS. */
    42964345                rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
    42974346                if (RT_SUCCESS(rc))
    42984347                {
    4299                     /* Check guest non-register state. */
    43004348                    rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
    43014349                    if (RT_SUCCESS(rc))
    4302                     {
    4303                         /* Check guest PDPTEs. */
    43044350                        return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
    4305                     }
    43064351                }
    43074352            }
     
    50835128    else
    50845129    {
    5085         pVmcs->u64ExitQual.u = VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR);
     5130        iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
    50865131        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
    50875132    }
     
    51005145                &&  pMsr->u32Msr != MSR_K8_GS_BASE
    51015146                &&  pMsr->u32Msr != MSR_K6_EFER
    5102                 &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
    5103                 &&  pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
     5147                &&  pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
     5148                &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
    51045149            {
    51055150                VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
     
    51145159                 * MSR in ring-0 if possible, or come up with a better, generic solution.
    51155160                 */
    5116                 pVmcs->u64ExitQual.u = idxMsr;
     5161                iemVmxVmcsSetExitQual(pVCpu, idxMsr);
    51175162                VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
    51185163                                       ? kVmxVDiag_Vmentry_MsrLoadRing3
     
    51225167            else
    51235168            {
    5124                 pVmcs->u64ExitQual.u = idxMsr;
     5169                iemVmxVmcsSetExitQual(pVCpu, idxMsr);
    51255170                IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
    51265171            }
     
    51615206        Assert(!(pVmcs->u64GuestPendingDbgXcpt.u));
    51625207
    5163         if (pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
     5208        if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
    51645209        {
    51655210            /** @todo NSTVMX: Virtual-NMIs doesn't affect NMI blocking in the normal sense.
     
    51695214            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    51705215        }
    5171         else if (   pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
    5172                  || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
    5173         {
     5216        else
     5217            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
     5218
     5219        if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
    51745220            EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
    5175         }
     5221        else
     5222            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    51765223
    51775224        /* SMI blocking is irrelevant. We don't support SMIs yet. */
     
    52005247IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
    52015248{
    5202     /*
    5203      * Load guest control, debug, segment, descriptor-table registers and some MSRs.
    5204      */
    52055249    iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
    52065250    iemVmxVmentryLoadGuestSegRegs(pVCpu);
     
    52155259    pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
    52165260
    5217     /* Load guest non-register state. */
    52185261    iemVmxVmentryLoadGuestNonRegState(pVCpu);
    52195262
     
    53645407    }
    53655408
    5366     /* Check VM-execution control fields. */
    53675409    rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
    53685410    if (RT_SUCCESS(rc))
    53695411    {
    5370         /* Check VM-exit control fields. */
    53715412        rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
    53725413        if (RT_SUCCESS(rc))
    53735414        {
    5374             /* Check VM-entry control fields. */
    53755415            rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
    53765416            if (RT_SUCCESS(rc))
    53775417            {
    5378                 /* Check host-state fields. */
    53795418                rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
    53805419                if (RT_SUCCESS(rc))
    53815420                {
    5382                     /* Save the (outer) guest force-flags as VM-exits can occur from this point on. */
     5421                    /* Save the guest force-flags as VM-exits can occur from this point on. */
    53835422                    iemVmxVmentrySaveForceFlags(pVCpu);
    53845423
    5385                     /* Check guest-state fields. */
    53865424                    rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
    53875425                    if (RT_SUCCESS(rc))
    53885426                    {
    5389                         /* Load guest-state fields. */
    53905427                        rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
    53915428                        if (RT_SUCCESS(rc))
    53925429                        {
    5393                             /* Load MSRs from the VM-entry auto-load MSR area. */
    53945430                            rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
    53955431                            if (RT_SUCCESS(rc))
     
    54205456                                pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
    54215457
    5422                                 /* Event injection. */
     5458                                /* Now that we've switched page tables, we can inject events if any. */
    54235459                                iemVmxVmentryInjectEvent(pVCpu, pszInstr);
    54245460
     
    54285464                                return VINF_SUCCESS;
    54295465                            }
    5430 
    54315466                            return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
    54325467                        }
    54335468                    }
    5434 
    54355469                    return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
    54365470                }
     
    54925526    }
    54935527
    5494     /*
    5495      * Record that we're no longer in VMX root operation, block INIT, block and disable A20M.
    5496      */
     5528    /* Record that we're no longer in VMX root operation, block INIT, block and disable A20M. */
    54975529    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
    54985530    Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette