VirtualBox

Changeset 66686 in vbox


Timestamp: Apr 27, 2017 12:38:17 PM (8 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 115079
Message:

VMM/IEM: Handle raising of exceptions during delivery of a previous exception or interrupt.
The code also takes into account the additional info required by HM for handling recursive exceptions.

Location: trunk
Files: 5 edited

  • trunk/include/VBox/vmm/iem.h

    r66581 r66686  
    3838 * @{
    3939 */
     40
     41/** @name IEMXCPTRAISEINFO_XXX - Extra info. on a recursive exception situation.
     42 *
     43 * This is primarily used by HM for working around a PGM limitation (see
     44 * @bugref{6607}) and special NMI/IRET handling. In the future, this may be
     45 * used for diagnostics.
     46 *
     47 * @{
     48 */
     49typedef uint32_t IEMXCPTRAISEINFO;
     50/** Pointer to an IEMXCPTRAISEINFO type. */
     51typedef IEMXCPTRAISEINFO *PIEMXCPTRAISEINFO;
     52/** No additional info. available. */
     53#define IEMXCPTRAISEINFO_NONE                    RT_BIT_32(0)
     54/** Delivery of a \#AC caused another \#AC. */
     55#define IEMXCPTRAISEINFO_AC_AC                   RT_BIT_32(1)
     56/** Delivery of a \#PF caused another \#PF. */
     57#define IEMXCPTRAISEINFO_PF_PF                   RT_BIT_32(2)
     58/** Delivery of a \#PF caused some contributory exception. */
     59#define IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT    RT_BIT_32(3)
     60/** Delivery of an external interrupt caused an exception. */
     61#define IEMXCPTRAISEINFO_EXT_INT_XCPT            RT_BIT_32(4)
     62/** Delivery of an external interrupt caused a \#PF. */
     63#define IEMXCPTRAISEINFO_EXT_INT_PF              RT_BIT_32(5)
     64/** Delivery of a software interrupt caused an exception. */
     65#define IEMXCPTRAISEINFO_SOFT_INT_XCPT           RT_BIT_32(6)
     66/** Delivery of an NMI caused a \#PF. */
     67#define IEMXCPTRAISEINFO_NMI_PF                  RT_BIT_32(7)
     68/** Can re-execute the instruction at CS:RIP. */
     69#define IEMXCPTRAISEINFO_CAN_REEXEC_INSTR        RT_BIT_32(8)
     70/** @} */
     71
     72
     73/** @name IEMXCPTRAISE_XXX - Ways to handle a recursive exception condition.
     74 * @{ */
     75typedef enum IEMXCPTRAISE
     76{
     77    /** Raise the current (second) exception. */
     78    IEMXCPTRAISE_CURRENT_XCPT = 0,
     79    /** Re-raise the previous (first) event (for HM, unused by IEM). */
     80    IEMXCPTRAISE_PREV_EVENT,
     81    /** Re-execute instruction at CS:RIP (for HM, unused by IEM). */
     82    IEMXCPTRAISE_REEXEC_INSTR,
     83    /** Raise a \#DF exception. */
     84    IEMXCPTRAISE_DOUBLE_FAULT,
     85    /** Raise a triple fault. */
     86    IEMXCPTRAISE_TRIPLE_FAULT,
     87    /** Cause a CPU hang. */
     88    IEMXCPTRAISE_CPU_HANG,
     89    /** Invalid sequence of events. */
     90    IEMXCPTRAISE_INVALID = 0x7fffffff
     91} IEMXCPTRAISE;
     92/** Pointer to a IEMXCPTRAISE type. */
     93typedef IEMXCPTRAISE *PIEMXCPTRAISE;
     94/** @} */
    4095
    4196
     
    66121/** Generated by a DRx instruction breakpoint and RF should be cleared. */
    67122#define IEM_XCPT_FLAGS_DRx_INSTR_BP     RT_BIT_32(6)
     123/** Generated by the icebp instruction. */
     124#define IEM_XCPT_FLAGS_ICEBP_INSTR      RT_BIT_32(7)
     125/** Generated by the overflow instruction. */
     126#define IEM_XCPT_FLAGS_OF_INSTR         RT_BIT_32(8)
    68127/** @}  */
    69128
     
    139198VMM_INT_DECL(bool)          IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr,
    140199                                              uint64_t *puCr2);
     200VMM_INT_DECL(IEMXCPTRAISE)  IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
     201                                                     uint8_t uCurVector, PIEMXCPTRAISEINFO pXcptRaiseInfo);
    141202
    142203/** @name Given Instruction Interpreters
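The new iem.h API above lets HM classify a second exception raised while a first event is still being delivered. Below is a minimal caller sketch, assuming only the IEMXCPTRAISE/IEMXCPTRAISEINFO types and IEMEvaluateRecursiveXcpt declared above; the chosen vectors and flags are purely illustrative and not part of this changeset.

    /* Illustrative: classify a #GP raised while a #PF was being delivered. */
    IEMXCPTRAISEINFO fRaiseInfo;
    IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT,  /* previous event: a CPU exception */
                                                     X86_XCPT_PF,                /* ... namely a #PF */
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                                     X86_XCPT_GP,                /* current exception: a #GP */
                                                     &fRaiseInfo);
    if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
    {
        /* A contributory #GP during #PF delivery escalates to #DF; fRaiseInfo
           carries IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT for HM diagnostics. */
    }
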
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r66604 r66686  
    215215/** Pointer to a selector descriptor table entry. */
    216216typedef IEMSELDESC *PIEMSELDESC;
     217
     218/**
     219 * CPU exception classes.
     220 */
     221typedef enum IEMXCPTCLASS
     222{
     223    IEMXCPTCLASS_BENIGN,
     224    IEMXCPTCLASS_CONTRIBUTORY,
     225    IEMXCPTCLASS_PAGE_FAULT
     226} IEMXCPTCLASS;
    217227
    218228
     
    32123222 */
    32133223
    3214 /* Currently used only with nested hw.virt. */
    3215 #ifdef VBOX_WITH_NESTED_HWVIRT
    3216 /**
    3217  * Initiates a CPU shutdown sequence.
     3224/**
     3225 * Gets the exception class for the specified exception vector.
     3226 * 
     3227 * @returns The class of the specified exception.
     3228 * @param   uVector       The exception vector.
     3229 */
     3230IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
     3231{
     3232    Assert(uVector <= X86_XCPT_LAST);
     3233    switch (uVector)
     3234    {
     3235        case X86_XCPT_DE:
     3236        case X86_XCPT_TS:
     3237        case X86_XCPT_NP:
     3238        case X86_XCPT_SS:
     3239        case X86_XCPT_GP:
     3240        case X86_XCPT_SX:   /* AMD only */
     3241            return IEMXCPTCLASS_CONTRIBUTORY;
     3242
     3243        case X86_XCPT_PF:
     3244        case X86_XCPT_VE:   /* Intel only */
     3245            return IEMXCPTCLASS_PAGE_FAULT;
     3246    }
     3247    return IEMXCPTCLASS_BENIGN;
     3248}
     3249
     3250
     3251/**
     3252 * Evaluates how to handle an exception caused during delivery of another event
     3253 * (exception / interrupt).
     3254 * 
     3255 * @returns How to handle the recursive exception.
     3256 * @param   pVCpu               The cross context virtual CPU structure of the
     3257 *                              calling thread.
     3258 * @param   fPrevFlags          The flags of the previous event.
     3259 * @param   uPrevVector         The vector of the previous event.
     3260 * @param   fCurFlags           The flags of the current exception.
     3261 * @param   uCurVector          The vector of the current exception.
     3262 * @param   pfXcptRaiseInfo     Where to store additional information about the
     3263 *                              exception condition. Optional.
     3264 */
     3265VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
     3266                                                    uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
     3267{
     3268    /*
      3269     * Only CPU exceptions can be raised while delivering other events; software interrupt
      3270     * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
     3271     */
     3272    AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
     3273    Assert(pVCpu);
     3274
     3275    IEMXCPTRAISE     enmRaise   = IEMXCPTRAISE_CURRENT_XCPT;
     3276    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
     3277    if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
     3278    {
     3279        IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
     3280        if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
     3281        {
     3282            IEMXCPTCLASS enmCurXcptClass  = iemGetXcptClass(uCurVector);
     3283            if (   enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
     3284                && (   enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
     3285                    || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
     3286            {
     3287                enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
     3288                if (enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT)
     3289                    fRaiseInfo = IEMXCPTRAISEINFO_PF_PF;
     3290                else
     3291                    fRaiseInfo = IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
     3292                Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
     3293                      uCurVector, IEM_GET_CTX(pVCpu)->cr2));
     3294            }
     3295            else if (   enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
     3296                     && enmCurXcptClass  == IEMXCPTCLASS_CONTRIBUTORY)
     3297            {
     3298                enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
     3299                Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
     3300            }
     3301            else if (   uPrevVector == X86_XCPT_DF
     3302                     && (   enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
     3303                         || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
     3304            {
     3305                enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
     3306                Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
     3307            }
     3308        }
     3309        else
     3310        {
     3311            if (   uPrevVector == X86_XCPT_NMI
     3312                && uCurVector  == X86_XCPT_PF)
     3313            {
     3314                fRaiseInfo = IEMXCPTRAISEINFO_NMI_PF;
     3315                Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
     3316            }
     3317            else if (   uPrevVector == X86_XCPT_AC
     3318                     && uCurVector  == X86_XCPT_AC)
     3319            {
     3320                enmRaise   = IEMXCPTRAISE_CPU_HANG;
     3321                fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
     3322                Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
     3323            }
     3324        }
     3325    }
     3326    else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
     3327    {
     3328        fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
     3329        if (uCurVector == X86_XCPT_PF)
     3330            fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
     3331    }
     3332    else
     3333        fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
     3334
     3335    if (pfXcptRaiseInfo)
     3336        *pfXcptRaiseInfo = fRaiseInfo;
     3337    return enmRaise;
     3338}
     3339
     3340
     3341/**
     3342 * Enters the CPU shutdown state initiated by a triple fault or another
     3343 * unrecoverable condition.
    32183344 *
    32193345 * @returns Strict VBox status code.
     
    32303356
    32313357    RT_NOREF_PV(pVCpu);
    3232     /** @todo Probably need a separate error code and handling for this to
    3233      *        distinguish it from the regular triple fault. */
    32343358    return VINF_EM_TRIPLE_FAULT;
     3359}
     3360
     3361
     3362#ifdef VBOX_WITH_NESTED_HWVIRT
     3363IEM_STATIC VBOXSTRICTRC iemHandleSvmNstGstEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
     3364                                                         uint32_t uErr, uint64_t uCr2)
     3365{
     3366    Assert(IEM_IS_SVM_ENABLED(pVCpu));
     3367
     3368    /*
     3369     * Handle nested-guest SVM exception and software interrupt intercepts,
     3370     * see AMD spec. 15.12 "Exception Intercepts".
     3371     *
     3372     *   - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
     3373     *   - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
     3374     *     even when they use a vector in the range 0 to 31.
     3375     *   - ICEBP should not trigger #DB intercept, but its own intercept.
      3376     *   - For #PF exceptions, the intercept is checked before CR2 is written by the exception.
     3377     */
     3378    /* Check NMI intercept */
     3379    if (   u8Vector == X86_XCPT_NMI
     3380        && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
     3381        && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
     3382    {
     3383        Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
     3384        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     3385    }
     3386
     3387    /* Check ICEBP intercept. */
     3388    if (   (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
     3389        && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
     3390    {
     3391        Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
     3392        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     3393    }
     3394
     3395    /* Check CPU exception intercepts. */
     3396    if (   (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
     3397        && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
     3398    {
     3399        Assert(u8Vector <= X86_XCPT_LAST);
     3400        uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
     3401        uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
     3402        if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
     3403            && u8Vector == X86_XCPT_PF
     3404            && !(uErr & X86_TRAP_PF_ID))
     3405        {
     3406            /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
     3407#ifdef IEM_WITH_CODE_TLB
     3408#else
     3409            uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
     3410            uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
     3411            if (   cbCurrent > 0
     3412                && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
     3413            {
     3414                Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
     3415                memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
     3416            }
     3417#endif
     3418        }
     3419        Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept. u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n",
     3420             u8Vector, uExitInfo1, uExitInfo2));
     3421        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
     3422    }
     3423
     3424    /* Check software interrupt (INTn) intercepts. */
     3425    if (   (fFlags & (  IEM_XCPT_FLAGS_T_SOFT_INT
     3426                      | IEM_XCPT_FLAGS_BP_INSTR
     3427                      | IEM_XCPT_FLAGS_ICEBP_INSTR
     3428                      | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
     3429        && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
     3430    {
     3431        uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
     3432        Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
     3433        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
     3434    }
     3435
     3436    return VINF_HM_INTERCEPT_NOT_ACTIVE;
    32353437}
    32363438#endif
     
    52535455    {
    52545456        /*
    5255          * Handle nested-guest SVM exception and software interrupt intercepts,
    5256          * see AMD spec. 15.12 "Exception Intercepts".
    5257          *
    5258          *   - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
    5259          *   - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
    5260          *     even when they use a vector in the range 0 to 31.
    5261          *   - ICEBP should not trigger #DB intercept, but its own intercept, so we catch it early in iemOp_int1.
    5262          *   - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
     5457         * If the event is being injected as part of VMRUN, it isn't subject to event
     5458         * intercepts in the nested-guest. However, secondary exceptions that occur
     5459         * during injection of any event -are- subject to exception intercepts.
     5460         * See AMD spec. 15.20 "Event Injection".
    52635461         */
    5264         /* Check NMI intercept */
    5265         if (   u8Vector == X86_XCPT_NMI
    5266             && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
    5267         {
    5268             Log(("iemRaiseXcptOrInt: NMI intercept -> #VMEXIT\n"));
    5269             IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    5270         }
    5271 
    5272         /* Check CPU exception intercepts. */
    5273         if (   IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector)
    5274             && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
    5275         {
    5276             Assert(u8Vector <= X86_XCPT_LAST);
    5277             uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
    5278             uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
    5279             if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
    5280                 && u8Vector == X86_XCPT_PF
    5281                 && !(uErr & X86_TRAP_PF_ID))
    5282             {
    5283                 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
    5284 #ifdef IEM_WITH_CODE_TLB
    5285 #else
    5286                 uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
    5287                 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
    5288                 if (   cbCurrent > 0
    5289                     && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
    5290                 {
    5291                     Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
    5292                     memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
    5293                 }
    5294 #endif
    5295             }
    5296             Log(("iemRaiseXcptOrInt: Xcpt intercept (u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n", u8Vector,
    5297                  uExitInfo1, uExitInfo2));
    5298             IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
    5299         }
    5300 
    5301         /* Check software interrupt (INTn) intercepts. */
    5302         if (   IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN)
    5303             && (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
    5304         {
    5305             uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
    5306             Log(("iemRaiseXcptOrInt: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
    5307             IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
     5462        if (!pCtx->hwvirt.svm.fInterceptEvents)
     5463            pCtx->hwvirt.svm.fInterceptEvents = 1;
     5464        else
     5465        {
     5466            /*
     5467             * Check and handle if the event being raised is intercepted.
     5468             */
     5469            VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
     5470            if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
     5471                return rcStrict0;
    53085472        }
    53095473    }
     
    53215485    {
    53225486        Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
    5323              u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
    5324 
    5325         /** @todo double and tripple faults. */
    5326         /** @todo When implementing \#DF, the SVM nested-guest \#DF intercepts needs
    5327          *        some care. See AMD spec. 15.12 "Exception Intercepts". */
     5487             u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
     5488             pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
     5489
    53285490        if (pVCpu->iem.s.cXcptRecursions >= 3)
    53295491        {
     
    53345496        }
    53355497
    5336         /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
    5337         if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
    5338         {
    5339         ....
    5340         } */
    5341     }
     5498        /*
     5499         * Evaluate the sequence of recurring events.
     5500         */
     5501        IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
     5502                                                         NULL /* pXcptRaiseInfo */);
     5503        if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
     5504        { /* likely */ }
     5505        else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
     5506        {
     5507            fFlags   = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
     5508            u8Vector = X86_XCPT_DF;
     5509            uErr     = 0;
     5510            /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
     5511            if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
     5512                IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     5513        }
     5514        else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
     5515        {
     5516            Log2(("iemRaiseXcptOrInt: raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
     5517            return iemInitiateCpuShutdown(pVCpu);
     5518        }
     5519        else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
     5520        {
     5521            /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
     5522            Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
     5523            if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
     5524                return VERR_EM_GUEST_CPU_HANG;
     5525        }
     5526        else
     5527        {
     5528            AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
     5529                             enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
     5530            return VERR_IEM_IPE_9;
     5531        }
     5532
     5533        /*
      5534         * The 'EXT' bit is set when an exception occurs during delivery of an external
     5535         * event (such as an interrupt or earlier exception), see Intel spec. 6.13
     5536         * "Error Code".
     5537         *
      5538         * For exceptions generated by software interrupts and the INTO and INT3 instructions,
      5539         * the 'EXT' bit will not be set; see the Intel instruction reference for INT n.
     5540         */
      5541        /** @todo Would a \#DB raised by INT1/ICEBP set the 'EXT' bit or not? Testcase... */
     5542        if (   (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT))
     5543            && (fFlags & IEM_XCPT_FLAGS_ERR)
     5544            && u8Vector != X86_XCPT_PF
     5545            && u8Vector != X86_XCPT_DF)
     5546        {
     5547            uErr |= X86_TRAP_ERR_EXTERNAL;
     5548        }
     5549    }
     5550
    53425551    pVCpu->iem.s.cXcptRecursions++;
    53435552    pVCpu->iem.s.uCurXcpt    = u8Vector;
     
    1522315432 * @param   puCr2           Where to store the CR2 associated with the event,
    1522415433 *                          optional.
     15434 * @remarks The caller should check the flags to determine if the error code and
     15435 *          CR2 are valid for the event.
    1522515436 */
    1522615437VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
     
    1523315444        if (pfFlags)
    1523415445            *pfFlags = pVCpu->iem.s.fCurXcpt;
    15235         /* The caller should check the flags to determine if the error code & CR2 are valid for the event. */
    1523615446        if (puErr)
    1523715447            *puErr = pVCpu->iem.s.uCurXcptErr;
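The 'EXT' error-code handling added to iemRaiseXcptOrInt above can be summed up with a small worked example; the values are hypothetical and the condition simply mirrors the new code in that hunk. A #GP raised while an external interrupt is being delivered gets X86_TRAP_ERR_EXTERNAL OR'ed into its error code, while #PF and #DF are deliberately excluded.

    /* Illustrative only -- mirrors the new 'EXT' bit condition above. */
    uint32_t fPrevXcpt = IEM_XCPT_FLAGS_T_EXT_INT;                       /* delivering an external interrupt */
    uint32_t fFlags    = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR; /* #GP pushes an error code */
    uint8_t  u8Vector  = X86_XCPT_GP;
    uint32_t uErr      = 0;
    if (   (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT))
        && (fFlags & IEM_XCPT_FLAGS_ERR)
        && u8Vector != X86_XCPT_PF
        && u8Vector != X86_XCPT_DF)
        uErr |= X86_TRAP_ERR_EXTERNAL;   /* uErr is now X86_TRAP_ERR_EXTERNAL (bit 0 set). */
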
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r66581 r66686  
    28622862 *
    28632863 * @param   u8Int       The interrupt vector number.
    2864  * @param   fIsBpInstr  Is it the breakpoint instruction.
    2865  */
    2866 IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
     2864 * @param   enmInt      The int instruction type.
     2865 */
     2866IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
    28672867{
    28682868    Assert(pVCpu->iem.s.cXcptRecursions == 0);
     
    28702870                             cbInstr,
    28712871                             u8Int,
    2872                              (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
     2872                             IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
    28732873                             0,
    28742874                             0);
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h

    r66581 r66686  
    65076507{
    65086508    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    6509     return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
     6509    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, IEMINT_INT3);
    65106510}
    65116511
     
    65186518    uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
    65196519    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    6520     return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
     6520    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, IEMINT_INTN);
    65216521}
    65226522
     
    65316531
    65326532    IEM_MC_BEGIN(2, 0);
    6533     IEM_MC_ARG_CONST(uint8_t,   u8Int,      /*=*/ X86_XCPT_OF, 0);
    6534     IEM_MC_ARG_CONST(bool,      fIsBpInstr, /*=*/ false, 1);
    6535     IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
     6533    IEM_MC_ARG_CONST(uint8_t,   u8Int,  /*=*/ X86_XCPT_OF, 0);
     6534    IEM_MC_ARG_CONST(IEMINT,    enmInt, /*=*/ IEMINT_INTO, 1);
     6535    IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, enmInt);
    65366536    IEM_MC_END();
    65376537    return VINF_SUCCESS;
     
    1059010590    IEMOP_HLP_MIN_386(); /** @todo does not generate #UD on 286, or so they say... */
    1059110591    /** @todo testcase! */
    10592     IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_ICEBP, SVM_EXIT_ICEBP, 0, 0);
    10593     return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_DB, false /*fIsBpInstr*/);
     10592    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_DB, IEMINT_INT1);
    1059410593}
    1059510594
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r66581 r66686  
    137137} IEMBRANCH;
    138138AssertCompileSize(IEMBRANCH, 4);
     139
     140
     141/**
     142 * INT instruction types.
     143 */
     144typedef enum IEMINT
     145{
     146    /** INT n instruction (opcode 0xcd imm). */
     147    IEMINT_INTN  = 0,
     148    /** Single byte INT3 instruction (opcode 0xcc). */
     149    IEMINT_INT3  = IEM_XCPT_FLAGS_BP_INSTR,
     150    /** Single byte INTO instruction (opcode 0xce). */
     151    IEMINT_INTO  = IEM_XCPT_FLAGS_OF_INSTR,
     152    /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
     153    IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
     154} IEMINT;
     155AssertCompileSize(IEMINT, 4);
    139156
    140157
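The IEMINT values above are chosen to coincide with the matching IEM_XCPT_FLAGS_* bits, which is why iemCImpl_int can simply OR the instruction type into the exception flags (IEM_XCPT_FLAGS_T_SOFT_INT | enmInt in the IEMAllCImpl.cpp.h hunk). A one-line illustration, assuming nothing beyond the definitions in this changeset:

    /* INT3: the enum value is the flag bit itself. */
    uint32_t fFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEMINT_INT3;
    Assert(fFlags == (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR));
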