VirtualBox

Changeset 103053 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Jan 25, 2024 9:16:55 AM
Author:
vboxsync
Message:

VMM: Nested VMX: bugref:10318 Fixed nested-guest (Hyper-V enabled Windows 10) BSODs triggered by split-lock #AC exceptions.
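
For readers skimming the change: the heart of the fix is a new helper, vmxHCIsSplitLockAcXcpt(), that classifies an #AC VM-exit as either a split-lock access or a legacy alignment-check fault before deciding how to handle it. The snippet below is a minimal stand-alone sketch of that check; the IsSplitLockAc() name and its raw-value parameters are illustrative stand-ins for the real CPUM accessors, and only the architectural facts (CR0.AM and EFLAGS.AC are both bit 18, and alignment-check #ACs are only raised at CPL 3) are assumed.

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_CR0_AM  UINT64_C(0x00040000)  /* CR0.AM, bit 18: alignment-mask enable. */
    #define X86_EFL_AC  UINT32_C(0x00040000)  /* EFLAGS.AC, bit 18: alignment-check flag. */

    /*
     * Illustrative split-lock #AC classifier (simplified restatement of the logic in
     * vmxHCIsSplitLockAcXcpt below, not the actual VMM code).  A legacy alignment-check
     * #AC requires CR0.AM=1, EFLAGS.AC=1 and CPL=3; any hardware #AC seen outside those
     * conditions must therefore have come from split-lock detection.
     */
    static bool IsSplitLockAc(uint64_t uCr0, uint32_t fEfl, uint8_t uCpl)
    {
        if (   !(uCr0 & X86_CR0_AM)   /* Alignment checking not enabled in CR0. */
            || !(fEfl & X86_EFL_AC)   /* EFLAGS.AC is clear. */
            ||  uCpl != 3)            /* Architectural #AC only occurs at CPL 3. */
            return true;              /* Split-lock triggered #AC. */
        return false;                 /* Regular alignment-check #AC. */
    }

When the check identifies a split-lock #AC, the changeset routes it to the new vmxHCHandleSplitLockAcXcpt() helper, which emulates the offending instruction with the LOCK prefix ignored (or returns VINF_EM_EMULATE_SPLIT_LOCK on SMP configurations) instead of reflecting the exception into the guest or nested-guest.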

File:
1 edited

Legend:

  (no prefix)  Unmodified
  +            Added
  -            Removed
  • trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h

--- trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h (r102852)
+++ trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h (r103053)

@@ -87,4 +87,12 @@
                                       | CPUMCTX_EXTRN_INHIBIT_INT     \
                                       | CPUMCTX_EXTRN_INHIBIT_NMI)
+
+/**
+ * Guest-CPU state required for split-lock \#AC handling VM-exits.
+ */
+#define HMVMX_CPUMCTX_XPCT_AC        (  CPUMCTX_EXTRN_CR0 \
+                                      | CPUMCTX_EXTRN_RFLAGS \
+                                      | CPUMCTX_EXTRN_SS \
+                                      | CPUMCTX_EXTRN_CS)

 /**
     
@@ -811,4 +819,22 @@

 /**
+ * Checks whether an \#AC exception generated while executing a guest (or
+ * nested-guest) was due to a split-lock memory access.
+ *
+ * @returns @c true if split-lock triggered the \#AC, @c false otherwise.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+DECL_FORCE_INLINE(bool) vmxHCIsSplitLockAcXcpt(PVMCPU pVCpu)
+{
+    HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
+    if (   !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)      /* 1. If 486-style alignment checks aren't enabled, this must be a split-lock #AC. */
+        || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) /* 2. When the EFLAGS.AC != 0 this can only be a split-lock case. */
+        ||  CPUMGetGuestCPL(pVCpu) != 3)               /* 3. #AC cannot happen in rings 0-2 except for split-lock detection. */
+        return true;
+    return false;
+}
+
+
+/**
  * Adds one or more exceptions to the exception bitmap and commits it to the current
  * VMCS.
     
@@ -1394,10 +1420,16 @@
                         VERR_VMX_VMCS_FIELD_CACHE_INVALID);

-    rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
-    AssertRC(rc);
-    AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
-                        ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
-                        VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
-                        VERR_VMX_VMCS_FIELD_CACHE_INVALID);
+    /** @todo Currently disabled for nested-guests because we run into bit differences
+     *        with for INT_WINDOW, RDTSC/P, see @bugref{10318}. Later try figure out
+     *        why and re-enable. */
+    if (!fIsNstGstVmcs)
+    {
+        rc = VMX_VMCS_READ_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
+        AssertRC(rc);
+        AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
+                            ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
+                            VCPU_2_VMXSTATE(pVCpu).u32HMError = VMX_VCI_CTRL_PROC_EXEC,
+                            VERR_VMX_VMCS_FIELD_CACHE_INVALID);
+    }

     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     
@@ -7128,4 +7160,88 @@

 /**
+ * VM-exit helper for split-lock access triggered \#AC exceptions.
+ */
+static VBOXSTRICTRC vmxHCHandleSplitLockAcXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+{
+    /*
+     * Check for debug/trace events and import state accordingly.
+     */
+    if (!pVmxTransient->fIsNestedGuest)
+        STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
+    else
+        STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatNestedExitACSplitLock);
+    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
+    if (   !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
+#ifndef IN_NEM_DARWIN
+        && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
+#endif
+        )
+    {
+        if (pVM->cCpus == 1)
+        {
+#if 0 /** @todo r=bird: This is potentially wrong.  Might have to just do a whole state sync above and mark everything changed to be safe... */
+            int rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
+                                           HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
+#else
+            int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
+                                           HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
+#endif
+            AssertRCReturn(rc, rc);
+        }
+    }
+    else
+    {
+        int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
+                                       HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
+        AssertRCReturn(rc, rc);
+
+        VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
+
+        if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
+        {
+            VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
+            if (rcStrict != VINF_SUCCESS)
+                return rcStrict;
+        }
+    }
+
+    /*
+     * Emulate the instruction.
+     *
+     * We have to ignore the LOCK prefix here as we must not retrigger the
+     * detection on the host.  This isn't all that satisfactory, though...
+     */
+    if (pVM->cCpus == 1)
+    {
+        Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
+                  pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
+
+        /** @todo For SMP configs we should do a rendezvous here. */
+        VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
+        if (rcStrict == VINF_SUCCESS)
+#if 0 /** @todo r=bird: This is potentially wrong.  Might have to just do a whole state sync above and mark everything changed to be safe... */
+            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
+                               HM_CHANGED_GUEST_RIP
+                             | HM_CHANGED_GUEST_RFLAGS
+                             | HM_CHANGED_GUEST_GPRS_MASK
+                             | HM_CHANGED_GUEST_CS
+                             | HM_CHANGED_GUEST_SS);
+#else
+            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
+#endif
+        else if (rcStrict == VINF_IEM_RAISED_XCPT)
+        {
+            ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+            rcStrict = VINF_SUCCESS;
+        }
+        return rcStrict;
+    }
+    Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
+              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
+    return VINF_EM_EMULATE_SPLIT_LOCK;
+}
+
+
+/**
  * VM-exit exception handler for \#AC (Alignment-check exception).
  *
     
@@ -7140,89 +7256,12 @@
      * Emulate such instructions.
      */
-#define VMX_HC_EXIT_XCPT_AC_INITIAL_REGS    (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS)
-    int rc = vmxHCImportGuestState<VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
+    int rc = vmxHCImportGuestState<HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
     AssertRCReturn(rc, rc);
     /** @todo detect split lock in cpu feature?   */
-    if (   /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
-           !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
-           /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
-        || CPUMGetGuestCPL(pVCpu) != 3
-           /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
-        || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
-    {
-        /*
-         * Check for debug/trace events and import state accordingly.
-         */
-        STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestACSplitLock);
-        PVMCC pVM = pVCpu->CTX_SUFF(pVM);
-        if (   !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
-#ifndef IN_NEM_DARWIN
-            && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED()
-#endif
-            )
-        {
-            if (pVM->cCpus == 1)
-            {
-#if 0 /** @todo r=bird: This is potentially wrong.  Might have to just do a whole state sync above and mark everything changed to be safe... */
-                rc = vmxHCImportGuestState<IEM_CPUMCTX_EXTRN_MUST_MASK,
-                                           VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
-#else
-                rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
-                                           VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
-#endif
-                AssertRCReturn(rc, rc);
-            }
-        }
-        else
-        {
-            rc = vmxHCImportGuestState<HMVMX_CPUMCTX_EXTRN_ALL,
-                                       VMX_HC_EXIT_XCPT_AC_INITIAL_REGS>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
-            AssertRCReturn(rc, rc);
-
-            VBOXVMM_XCPT_DF(pVCpu, &pVCpu->cpum.GstCtx);
-
-            if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
-            {
-                VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
-                if (rcStrict != VINF_SUCCESS)
-                    return rcStrict;
-            }
-        }
-
-        /*
-         * Emulate the instruction.
-         *
-         * We have to ignore the LOCK prefix here as we must not retrigger the
-         * detection on the host.  This isn't all that satisfactory, though...
-         */
-        if (pVM->cCpus == 1)
-        {
-            Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
-                      pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
-
-            /** @todo For SMP configs we should do a rendezvous here. */
-            VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
-            if (rcStrict == VINF_SUCCESS)
-#if 0 /** @todo r=bird: This is potentially wrong.  Might have to just do a whole state sync above and mark everything changed to be safe... */
-                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged,
-                                   HM_CHANGED_GUEST_RIP
-                                 | HM_CHANGED_GUEST_RFLAGS
-                                 | HM_CHANGED_GUEST_GPRS_MASK
-                                 | HM_CHANGED_GUEST_CS
-                                 | HM_CHANGED_GUEST_SS);
-#else
-                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_ALL_GUEST);
-#endif
-            else if (rcStrict == VINF_IEM_RAISED_XCPT)
-            {
-                ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
-                rcStrict = VINF_SUCCESS;
-            }
-            return rcStrict;
-        }
-        Log8Func(("cs:rip=%#04x:%08RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
-                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
-        return VINF_EM_EMULATE_SPLIT_LOCK;
-    }
+    /** @todo r=ramshankar: is cpu feature detection really necessary since we are able
+     *        to detect the split-lock \#AC condition without it? More so since the
+     *        feature isn't cleanly detectable, see @bugref{10318#c125}. */
+    if (vmxHCIsSplitLockAcXcpt(pVCpu))
+        return vmxHCHandleSplitLockAcXcpt(pVCpu, pVmxTransient);

     STAM_REL_COUNTER_INC(&VCPU_2_VMXSTATS(pVCpu).StatExitGuestAC);
     
@@ -7776,5 +7815,4 @@
     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
-    Assert(!pVmxTransient->fIsNestedGuest);
     vmxHCClearIntWindowExitVmcs(pVCpu, pVmcsInfo);

     
@@ -10129,6 +10167,31 @@

             PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-            if (CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo), pVmxTransient->uExitIntErrorCode))
+            uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
+            if (CPUMIsGuestVmxXcptInterceptSet(pCtx, uVector, pVmxTransient->uExitIntErrorCode))
             {
+                /*
+                 * Split-lock triggered #ACs should not be injected into the nested-guest
+                 * since we don't support split-lock detection for nested-guests yet.
+                 */
+                if (   uVector == X86_XCPT_AC
+                    && uExitIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
+                {
+                    int const rc = vmxHCImportGuestState<HMVMX_CPUMCTX_XPCT_AC>(pVCpu, pVmxTransient->pVmcsInfo, __FUNCTION__);
+                    AssertRCReturn(rc, rc);
+                    if (vmxHCIsSplitLockAcXcpt(pVCpu))
+                    {
+                        VBOXSTRICTRC rcStrict = vmxHCCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
+                        if (    rcStrict == VINF_SUCCESS
+                            && !VCPU_2_VMXSTATE(pVCpu).Event.fPending)
+                            return vmxHCHandleSplitLockAcXcpt(pVCpu, pVmxTransient);
+                        if (rcStrict == VINF_HM_DOUBLE_FAULT)
+                        {
+                            Assert(VCPU_2_VMXSTATE(pVCpu).Event.fPending);
+                            rcStrict = VINF_SUCCESS;
+                        }
+                        return rcStrict;
+                    }
+                }
+
                 /* Exit qualification is required for debug and page-fault exceptions. */
                 vmxHCReadToTransient<HMVMX_READ_EXIT_QUALIFICATION>(pVCpu, pVmxTransient);