
Changeset 79534 in vbox


Timestamp: Jul 5, 2019 5:59:14 AM
Author: vboxsync
Message: VMM/HMVMXR0: Nested VMX: bugref:9180 Cleanup.

File (1 edited):

Legend:

Unmodified (both old and new line numbers shown)
Added (new line number only)
Removed (old line number only)
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r79533 r79534  
    420420 */
    421421static FNVMXEXITHANDLER            hmR0VmxExitXcptOrNmiNested;
    422 //static FNVMXEXITHANDLER            hmR0VmxExitExtIntNested;
    423422static FNVMXEXITHANDLER            hmR0VmxExitTripleFaultNested;
    424423static FNVMXEXITHANDLERNSRC        hmR0VmxExitIntWindowNested;
    425424static FNVMXEXITHANDLERNSRC        hmR0VmxExitNmiWindowNested;
    426425static FNVMXEXITHANDLER            hmR0VmxExitTaskSwitchNested;
    427 //static FNVMXEXITHANDLER            hmR0VmxExitCpuid;
    428 //static FNVMXEXITHANDLER            hmR0VmxExitGetsec;
    429426static FNVMXEXITHANDLER            hmR0VmxExitHltNested;
    430 //static FNVMXEXITHANDLERNSRC        hmR0VmxExitInvd;
    431427static FNVMXEXITHANDLER            hmR0VmxExitInvlpgNested;
    432428static FNVMXEXITHANDLER            hmR0VmxExitRdpmcNested;
    433 //static FNVMXEXITHANDLER            hmR0VmxExitVmcall;
    434 //static FNVMXEXITHANDLER            hmR0VmxExitVmclear;
    435 //static FNVMXEXITHANDLER            hmR0VmxExitVmlaunch;
    436 //static FNVMXEXITHANDLER            hmR0VmxExitVmptrld;
    437 //static FNVMXEXITHANDLER            hmR0VmxExitVmptrst;
    438429static FNVMXEXITHANDLER            hmR0VmxExitVmreadVmwriteNested;
    439 //static FNVMXEXITHANDLER            hmR0VmxExitVmresume;
    440 //static FNVMXEXITHANDLER            hmR0VmxExitVmwrite;
    441 //static FNVMXEXITHANDLER            hmR0VmxExitVmxoff;
    442 //static FNVMXEXITHANDLER            hmR0VmxExitVmxon;
    443 //static FNVMXEXITHANDLER            hmR0VmxExitInvvpid;
    444430static FNVMXEXITHANDLER            hmR0VmxExitRdtscNested;
    445431static FNVMXEXITHANDLER            hmR0VmxExitMovCRxNested;
     
    456442static FNVMXEXITHANDLER            hmR0VmxExitApicWriteNested;
    457443static FNVMXEXITHANDLER            hmR0VmxExitVirtEoiNested;
    458 //static FNVMXEXITHANDLER            hmR0VmxExitEptViolation;
    459 //static FNVMXEXITHANDLER            hmR0VmxExitEptMisconfig;
    460444static FNVMXEXITHANDLER            hmR0VmxExitRdtscpNested;
    461 //static FNVMXEXITHANDLER            hmR0VmxExitPreemptTimer;
    462445static FNVMXEXITHANDLERNSRC        hmR0VmxExitWbinvdNested;
    463 //static FNVMXEXITHANDLER            hmR0VmxExitXsetbv;
    464 //static FNVMXEXITHANDLER            hmR0VmxExitErrUnexpected;
    465446static FNVMXEXITHANDLER            hmR0VmxExitInvpcidNested;
    466 //static FNVMXEXITHANDLERNSRC        hmR0VmxExitSetPendingXcptUD;
    467447static FNVMXEXITHANDLERNSRC        hmR0VmxExitErrInvalidGuestStateNested;
    468 //static FNVMXEXITHANDLERNSRC        hmR0VmxExitErrUnexpected;
    469448static FNVMXEXITHANDLER            hmR0VmxExitInstrNested;
    470449static FNVMXEXITHANDLER            hmR0VmxExitInstrWithInfoNested;
    471450/** @} */
    472451#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    473 
    474 /** @name Helpers for hardware exceptions VM-exit handlers.
    475  * @{
    476  */
    477 static VBOXSTRICTRC hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    478 static VBOXSTRICTRC hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    479 static VBOXSTRICTRC hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    480 static VBOXSTRICTRC hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    481 static VBOXSTRICTRC hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    482 static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    483 static VBOXSTRICTRC hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
    484 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst);
    485 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr);
    486 static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
    487 static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
    488 static VBOXSTRICTRC hmR0VmxExitHostNmi(PVMCPU pVCpu);
    489 /** @} */
    490452
    491453
     
    1103210994
    1103310995/**
    11034  * Setup the APIC-access page for virtualizing APIC access.
    11035  *
    11036  * This can cause a longjump to R3 due to the acquisition of the PGM lock, hence
     10996 * Map the APIC-access page for virtualizing APIC accesses.
     10997 *
     10998 * This can cause a longjump to R3 due to the acquisition of the PGM lock. Hence,
    1103710999 * this is not done as part of exporting guest state, see @bugref{8721}.
    1103811000 *
     
    1106211024    /* Update the per-VCPU cache of the APIC base MSR. */
    1106311025    pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase;
     11026    return VINF_SUCCESS;
     11027}
     11028
     11029
     11030/**
     11031 * Wrapper for dispatching host NMIs.
     11032 *
     11033 * @returns VBox status code.
     11034 * @param   pVCpu   The cross context virtual CPU structure.
     11035 */
     11036static int hmR0VmxExitHostNmi(PVMCPU pVCpu)
     11037{
     11038    VMXDispatchHostNmi();
     11039    STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
    1106411040    return VINF_SUCCESS;
    1106511041}
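
A hedged sketch of how an exit handler might invoke this wrapper; the actual call site is outside the hunks shown, and the VMX_EXIT_INT_INFO_TYPE* names are assumptions based on macros used elsewhere in this file:

    /* Sketch: if the interruption that caused the exit is an NMI that belongs
       to the host rather than the guest, dispatch it on the host instead of
       reflecting it into the guest. */
    if (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
        rc = hmR0VmxExitHostNmi(pVCpu);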
     
    1383813814
    1383913815
     13816/** @name VM-exit helpers.
     13817 * @{
     13818 */
     13819/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
     13820/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
     13821/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
     13822
    1384013823#ifdef VBOX_STRICT
    1384113824/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
     
    1388713870# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient)      do { } while (0)
    1388813871#endif
    13889 
    1389013872
    1389113873/**
     
    1413214114    return rcStrict;
    1413314115}
     14116
     14117
     14118/**
     14119 * VM-exit helper for LMSW.
     14120 */
     14121static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
     14122{
     14123    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     14124    AssertRCReturn(rc, rc);
     14125
     14126    VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
     14127    AssertMsg(   rcStrict == VINF_SUCCESS
     14128              || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     14129
     14130    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
     14131    if (rcStrict == VINF_IEM_RAISED_XCPT)
     14132    {
     14133        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     14134        rcStrict = VINF_SUCCESS;
     14135    }
     14136
     14137    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
     14138    Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     14139    return rcStrict;
     14140}
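
For reference, a minimal sketch of the architectural behavior that IEMExecDecodedLmsw() emulates, assuming the X86_CR0_* bit definitions from iprt/x86.h; the helper name is hypothetical:

    /* Sketch (not from this file): LMSW writes only CR0.PE/MP/EM/TS and
       cannot clear PE once it is set. */
    static uint64_t lmswApplyToCr0(uint64_t uCr0, uint16_t uMsw)
    {
        uint64_t const fMask   = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS;
        uint64_t       uNewCr0 = (uCr0 & ~fMask) | (uMsw & fMask);
        if (uCr0 & X86_CR0_PE)
            uNewCr0 |= X86_CR0_PE;  /* LMSW cannot be used to leave protected mode. */
        return uNewCr0;
    }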
     14141
     14142
     14143/**
     14144 * VM-exit helper for CLTS.
     14145 */
     14146static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
     14147{
     14148    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     14149    AssertRCReturn(rc, rc);
     14150
     14151    VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
     14152    AssertMsg(   rcStrict == VINF_SUCCESS
     14153              || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     14154
     14155    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
     14156    if (rcStrict == VINF_IEM_RAISED_XCPT)
     14157    {
     14158        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     14159        rcStrict = VINF_SUCCESS;
     14160    }
     14161
     14162    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
     14163    Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     14164    return rcStrict;
     14165}
     14166
     14167
     14168/**
     14169 * VM-exit helper for MOV from CRx (CRx read).
     14170 */
     14171static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
     14172{
     14173    Assert(iCrReg < 16);
     14174    Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
     14175
     14176    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     14177    AssertRCReturn(rc, rc);
     14178
     14179    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
     14180    AssertMsg(   rcStrict == VINF_SUCCESS
     14181              || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     14182
     14183    if (iGReg == X86_GREG_xSP)
     14184        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
     14185    else
     14186        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     14187#ifdef VBOX_WITH_STATISTICS
     14188    switch (iCrReg)
     14189    {
     14190        case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
     14191        case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
     14192        case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
     14193        case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
     14194        case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
     14195    }
     14196#endif
     14197    Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
     14198    return rcStrict;
     14199}
     14200
     14201
     14202/**
     14203 * VM-exit helper for MOV to CRx (CRx write).
     14204 */
     14205static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
     14206{
     14207    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
     14208    AssertRCReturn(rc, rc);
     14209
     14210    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
     14211    AssertMsg(   rcStrict == VINF_SUCCESS
     14212              || rcStrict == VINF_IEM_RAISED_XCPT
     14213              || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     14214
     14215    switch (iCrReg)
     14216    {
     14217        case 0:
     14218            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
     14219            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
     14220            Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
     14221            break;
     14222
     14223        case 2:
     14224            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
     14225            /* Nothing to do here; CR2 is not part of the VMCS. */
     14226            break;
     14227
     14228        case 3:
     14229            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
     14230            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
     14231            Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
     14232            break;
     14233
     14234        case 4:
     14235            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
     14236            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
     14237            Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
     14238                      pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
     14239            break;
     14240
     14241        case 8:
     14242            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
     14243                             HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
     14244            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
     14245            break;
     14246
     14247        default:
     14248            AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
     14249            break;
     14250    }
     14251
     14252    if (rcStrict == VINF_IEM_RAISED_XCPT)
     14253    {
     14254        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     14255        rcStrict = VINF_SUCCESS;
     14256    }
     14257    return rcStrict;
     14258}
     14259
     14260
     14261/**
     14262 * VM-exit exception handler for \#PF (Page-fault exception).
     14263 */
     14264static VBOXSTRICTRC hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     14265{
     14266    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     14267    PVM pVM = pVCpu->CTX_SUFF(pVM);
     14268    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     14269    rc    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     14270    rc    |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     14271    AssertRCReturn(rc, rc);
     14272
     14273    if (!pVM->hm.s.fNestedPaging)
     14274    { /* likely */ }
     14275    else
     14276    {
     14277#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
     14278        Assert(pVmxTransient->fIsNestedGuest || pVCpu->hm.s.fUsingDebugLoop);
     14279#endif
     14280        pVCpu->hm.s.Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */
     14281        if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
     14282        {
     14283            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
     14284                                   pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
     14285        }
     14286        else
     14287        {
     14288            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
     14289            hmR0VmxSetPendingXcptDF(pVCpu);
     14290            Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
     14291        }
     14292        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
     14293        return rc;
     14294    }
     14295
     14296    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
     14297       of differentiating between instruction emulation and event injection as the cause of a #PF. See @bugref{6607}. */
     14298    if (pVmxTransient->fVectoringPF)
     14299    {
     14300        Assert(pVCpu->hm.s.Event.fPending);
     14301        return VINF_EM_RAW_INJECT_TRPM_EVENT;
     14302    }
     14303
     14304    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     14305    rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     14306    AssertRCReturn(rc, rc);
     14307
     14308    Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel,
     14309              pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
     14310
     14311    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
     14312    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
     14313
     14314    Log4Func(("#PF: rc=%Rrc\n", rc));
     14315    if (rc == VINF_SUCCESS)
     14316    {
     14317        /*
     14318         * This is typically a shadow page table sync or a MMIO instruction. But we may have
     14319         * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
     14320         */
     14321        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     14322        TRPMResetTrap(pVCpu);
     14323        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
     14324        return rc;
     14325    }
     14326
     14327    if (rc == VINF_EM_RAW_GUEST_TRAP)
     14328    {
     14329        if (!pVmxTransient->fVectoringDoublePF)
     14330        {
     14331            /* It's a guest page fault and needs to be reflected to the guest. */
     14332            uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
     14333            TRPMResetTrap(pVCpu);
     14334            pVCpu->hm.s.Event.fPending = false;                 /* In case it's a contributory #PF. */
     14335            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
     14336                                   uGstErrorCode, pVmxTransient->uExitQual);
     14337        }
     14338        else
     14339        {
     14340            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
     14341            TRPMResetTrap(pVCpu);
     14342            pVCpu->hm.s.Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
     14343            hmR0VmxSetPendingXcptDF(pVCpu);
     14344            Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
     14345        }
     14346
     14347        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
     14348        return VINF_SUCCESS;
     14349    }
     14350
     14351    TRPMResetTrap(pVCpu);
     14352    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
     14353    return rc;
     14354}
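
An informal summary of the control flow in the #PF handler above, added here as a descriptive comment:

    /* #PF exit flow:
         nested paging enabled -> reflect the #PF to the guest (or promote to
                                  #DF if it occurred while delivering a #PF);
         vectoring #PF         -> return VINF_EM_RAW_INJECT_TRPM_EVENT to redo
                                  the original event injection;
         otherwise             -> PGMTrap0eHandler():
             VINF_SUCCESS           -> shadow page-table sync / MMIO handled;
             VINF_EM_RAW_GUEST_TRAP -> reflect #PF (or #DF) to the guest;
             anything else          -> go to ring-3 for emulation.            */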
     14355
     14356
     14357/**
     14358 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
     14359 */
     14360static VBOXSTRICTRC hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     14361{
     14362    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     14363    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
     14364
     14365    int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
     14366    AssertRCReturn(rc, rc);
     14367
     14368    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
     14369    {
     14370        /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
     14371        rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
     14372
     14373        /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
     14374         *        provides the VM-exit instruction length. If this causes problems later,
     14375         *        disassemble the instruction as is done on AMD-V. */
     14376        int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
     14377        AssertRCReturn(rc2, rc2);
     14378        return rc;
     14379    }
     14380
     14381    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     14382                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     14383    return rc;
     14384}
     14385
     14386
     14387/**
     14388 * VM-exit exception handler for \#BP (Breakpoint exception).
     14389 */
     14390static VBOXSTRICTRC hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     14391{
     14392    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     14393    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
     14394
     14395    int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     14396    AssertRCReturn(rc, rc);
     14397
     14398    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     14399    rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
     14400    if (rc == VINF_EM_RAW_GUEST_TRAP)
     14401    {
     14402        rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     14403        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     14404        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     14405        AssertRCReturn(rc, rc);
     14406
     14407        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     14408                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     14409    }
     14410
     14411    Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
     14412    return rc;
     14413}
     14414
     14415
     14416/**
     14417 * VM-exit exception handler for \#AC (alignment check exception).
     14418 */
     14419static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     14420{
     14421    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     14422
     14423    /*
     14424     * Re-inject it. We'll detect any nesting before getting here.
     14425     */
     14426    int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     14427    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     14428    AssertRCReturn(rc, rc);
     14429    Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
     14430
     14431    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     14432                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     14433    return VINF_SUCCESS;
     14434}
     14435
     14436
     14437/**
     14438 * VM-exit exception handler for \#DB (Debug exception).
     14439 */
     14440static VBOXSTRICTRC hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     14441{
     14442    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     14443    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
     14444
     14445    /*
     14446     * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
     14447     */
     14448    int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
     14449
     14450    /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
     14451    uint64_t const uDR6 = X86_DR6_INIT_VAL
     14452                        | (pVmxTransient->uExitQual & (  X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
     14453                                                       | X86_DR6_BD | X86_DR6_BS));
     14454
     14455    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     14456    rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
     14457    Log6Func(("rc=%Rrc\n", rc));
     14458    if (rc == VINF_EM_RAW_GUEST_TRAP)
     14459    {
     14460        /*
     14461         * The exception was for the guest.  Update DR6, DR7.GD and
     14462         * IA32_DEBUGCTL.LBR before forwarding it.
     14463         * See Intel spec. 27.1 "Architectural State before a VM-Exit".
     14464         */
     14465        VMMRZCallRing3Disable(pVCpu);
     14466        HM_DISABLE_PREEMPT(pVCpu);
     14467
     14468        pCtx->dr[6] &= ~X86_DR6_B_MASK;
     14469        pCtx->dr[6] |= uDR6;
     14470        if (CPUMIsGuestDebugStateActive(pVCpu))
     14471            ASMSetDR6(pCtx->dr[6]);
     14472
     14473        HM_RESTORE_PREEMPT();
     14474        VMMRZCallRing3Enable(pVCpu);
     14475
     14476        rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
     14477        AssertRCReturn(rc, rc);
     14478
     14479        /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
     14480        pCtx->dr[7] &= ~X86_DR7_GD;
     14481
     14482        /* Paranoia. */
     14483        pCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
     14484        pCtx->dr[7] |= X86_DR7_RA1_MASK;
     14485
     14486        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pCtx->dr[7]);
     14487        AssertRCReturn(rc, rc);
     14488
     14489        /*
     14490         * Raise #DB in the guest.
     14491         *
     14492         * It is important to reflect exactly what the VM-exit gave us (preserving the
     14493         * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
     14494         * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
     14495         * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
     14496         *
     14497         * Intel re-documented ICEBP/INT1 in May 2018, having previously documented it as part of
     14498         * the Intel 386; see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
     14499         */
     14500        rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     14501        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     14502        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     14503        AssertRCReturn(rc, rc);
     14504        hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     14505                               pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     14506        return VINF_SUCCESS;
     14507    }
     14508
     14509    /*
     14510     * Not a guest trap, so this must be a hypervisor-related debug event.
     14511     * Update DR6 in case someone is interested in it.
     14512     */
     14513    AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
     14514    AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
     14515    CPUMSetHyperDR6(pVCpu, uDR6);
     14516
     14517    return rc;
     14518}
     14519
     14520
     14521/**
     14522 * Hacks its way around the lovely mesa driver's backdoor accesses.
     14523 *
     14524 * @sa hmR0SvmHandleMesaDrvGp.
     14525 */
     14526static int hmR0VmxHandleMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
     14527{
     14528    LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
     14529    RT_NOREF(pCtx);
     14530
     14531    /* For now we'll just skip the instruction. */
     14532    return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
     14533}
     14534
     14535
     14536/**
     14537 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
     14538 * backdoor logging w/o checking what it is running inside.
     14539 *
     14540 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
     14541 * backdoor port and magic numbers loaded in registers.
     14542 *
     14543 * @returns true if it is, false if it isn't.
     14544 * @sa      hmR0SvmIsMesaDrvGp.
     14545 */
     14546DECLINLINE(bool) hmR0VmxIsMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
     14547{
     14548    /* 0xed:  IN eAX,dx */
     14549    uint8_t abInstr[1];
     14550    if (pVmxTransient->cbInstr != sizeof(abInstr))
     14551        return false;
     14552
     14553    /* Check that it is #GP(0). */
     14554    if (pVmxTransient->uExitIntErrorCode != 0)
     14555        return false;
     14556
     14557    /* Check magic and port. */
     14558    Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
     14559    /*Log(("hmR0VmxIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
     14560    if (pCtx->rax != UINT32_C(0x564d5868))
     14561        return false;
     14562    if (pCtx->dx != UINT32_C(0x5658))
     14563        return false;
     14564
     14565    /* Flat ring-3 CS. */
     14566    AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
     14567    Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
     14568    /*Log(("hmR0VmxIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
     14569    if (pCtx->cs.Attr.n.u2Dpl != 3)
     14570        return false;
     14571    if (pCtx->cs.u64Base != 0)
     14572        return false;
     14573
     14574    /* Check opcode. */
     14575    AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
     14576    Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
     14577    int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
     14578    /*Log(("hmR0VmxIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
     14579    if (RT_FAILURE(rc))
     14580        return false;
     14581    if (abInstr[0] != 0xed)
     14582        return false;
     14583
     14584    return true;
     14585}
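
For context, a hypothetical guest ring-3 fragment matching the checks above: magic 0x564d5868 in EAX, port 0x5658 in DX, and the single-byte opcode 0xED (IN EAX,DX). Where ring 3 has no I/O permission this raises #GP(0), which is the VM-exit recognized here:

    /* Hypothetical guest-side view of the backdoor access (GCC inline asm). */
    uint32_t uEax  = UINT32_C(0x564d5868);  /* magic, checked via pCtx->rax */
    uint16_t uPort = UINT16_C(0x5658);      /* port, checked via pCtx->dx   */
    __asm__ __volatile__("in %%dx, %%eax"   /* opcode 0xED                  */
                         : "+a" (uEax)
                         : "d" (uPort));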
     14586
     14587/**
     14588 * VM-exit exception handler for \#GP (General-protection exception).
     14589 *
     14590 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
     14591 */
     14592static VBOXSTRICTRC hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     14593{
     14594    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     14595    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
     14596
     14597    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     14598    PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14599    if (pVmcsInfo->RealMode.fRealOnV86Active)
     14600    { /* likely */ }
     14601    else
     14602    {
     14603#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     14604        Assert(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
     14605#endif
     14606        /* If the guest is not in real-mode or we have unrestricted guest execution support, reflect #GP to the guest. */
     14607        int rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     14608        rc     |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     14609        rc     |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     14610        rc     |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     14611        AssertRCReturn(rc, rc);
     14612        Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
     14613                  pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
     14614
     14615        if (    pVmxTransient->fIsNestedGuest
     14616            || !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
     14617            || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
     14618            hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
     14619                                   pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     14620        else
     14621            rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
     14622        return rc;
     14623    }
     14624
     14625    Assert(CPUMIsGuestInRealModeEx(pCtx));
     14626    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
     14627    Assert(!pVmxTransient->fIsNestedGuest);
     14628
     14629    int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
     14630    AssertRCReturn(rc, rc);
     14631
     14632    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
     14633    if (rcStrict == VINF_SUCCESS)
     14634    {
     14635        if (!CPUMIsGuestInRealModeEx(pCtx))
     14636        {
     14637            /*
     14638             * The guest is no longer in real-mode, check if we can continue executing the
     14639             * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
     14640             */
     14641            pVmcsInfo->RealMode.fRealOnV86Active = false;
     14642            if (HMCanExecuteVmxGuest(pVCpu, pCtx))
     14643            {
     14644                Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
     14645                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     14646            }
     14647            else
     14648            {
     14649                Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
     14650                rcStrict = VINF_EM_RESCHEDULE;
     14651            }
     14652        }
     14653        else
     14654            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
     14655    }
     14656    else if (rcStrict == VINF_IEM_RAISED_XCPT)
     14657    {
     14658        rcStrict = VINF_SUCCESS;
     14659        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
     14660    }
     14661    return VBOXSTRICTRC_VAL(rcStrict);
     14662}
     14663
     14664
     14665/**
     14666 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
     14667 * the exception reported in the VMX transient structure back into the VM.
     14668 *
     14669 * @remarks Requires uExitIntInfo in the VMX transient structure to be
     14670 *          up-to-date.
     14671 */
     14672static VBOXSTRICTRC hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     14673{
     14674    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
     14675#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
     14676    PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
     14677    AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfo->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
     14678              ("uVector=%#x u32XcptBitmap=%#X32\n",
     14679               VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
     14680    NOREF(pVmcsInfo);
     14681#endif
     14682
     14683    /*
     14684     * Re-inject the exception into the guest. This cannot be a double-fault condition, which
     14685     * would have been handled earlier while checking exits due to event delivery.
     14686     */
     14687    int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     14688    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     14689    AssertRCReturn(rc, rc);
     14690    Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
     14691
     14692#ifdef DEBUG_ramshankar
     14693    rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
     14694    Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n",
     14695         VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pCtx->cs.Sel, pCtx->rip));
     14696#endif
     14697
     14698    hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
     14699                           pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     14700    return VINF_SUCCESS;
     14701}
     14702/** @} */
    1413414703
    1413514704
     
    1593116500}
    1593216501
    15933 /** @} */
    15934 
    15935 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    15936 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit exception handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
    15937 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    15938 
    15939 /**
    15940  * VM-exit exception handler for \#MF (Math Fault: floating point exception).
    15941  */
    15942 static VBOXSTRICTRC hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    15943 {
    15944     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15945     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
    15946 
    15947     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
    15948     AssertRCReturn(rc, rc);
    15949 
    15950     if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
    15951     {
    15952         /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
    15953         rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
    15954 
    15955         /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
    15956          *        provides the VM-exit instruction length. If this causes problems later,
    15957          *        disassemble the instruction as is done on AMD-V. */
    15958         int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    15959         AssertRCReturn(rc2, rc2);
    15960         return rc;
    15961     }
    15962 
    15963     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
    15964                            pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    15965     return rc;
    15966 }
    15967 
    15968 
    15969 /**
    15970  * VM-exit exception handler for \#BP (Breakpoint exception).
    15971  */
    15972 static VBOXSTRICTRC hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    15973 {
    15974     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    15975     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
    15976 
    15977     int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    15978     AssertRCReturn(rc, rc);
    15979 
    15980     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    15981     rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
    15982     if (rc == VINF_EM_RAW_GUEST_TRAP)
    15983     {
    15984         rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    15985         rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    15986         rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    15987         AssertRCReturn(rc, rc);
    15988 
    15989         hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
    15990                                pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    15991     }
    15992 
    15993     Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
    15994     return rc;
    15995 }
    15996 
    15997 
    15998 /**
    15999  * VM-exit exception handler for \#AC (alignment check exception).
    16000  */
    16001 static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    16002 {
    16003     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16004 
    16005     /*
    16006      * Re-inject it. We'll detect any nesting before getting here.
    16007      */
    16008     int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    16009     rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16010     AssertRCReturn(rc, rc);
    16011     Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
    16012 
    16013     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
    16014                            pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    16015     return VINF_SUCCESS;
    16016 }
    16017 
    16018 
    16019 /**
    16020  * VM-exit exception handler for \#DB (Debug exception).
    16021  */
    16022 static VBOXSTRICTRC hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    16023 {
    16024     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16025     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    16026 
    16027     /*
    16028      * Get the DR6-like values from the Exit qualification and pass them to DBGF for processing.
    16029      */
    16030     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    16031 
    16032     /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
    16033     uint64_t const uDR6 = X86_DR6_INIT_VAL
    16034                         | (pVmxTransient->uExitQual & (  X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
    16035                                                        | X86_DR6_BD | X86_DR6_BS));
    16036 
    16037     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    16038     rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
    16039     Log6Func(("rc=%Rrc\n", rc));
    16040     if (rc == VINF_EM_RAW_GUEST_TRAP)
    16041     {
    16042         /*
    16043          * The exception was for the guest.  Update DR6, DR7.GD and
    16044          * IA32_DEBUGCTL.LBR before forwarding it.
    16045          * See Intel spec. 27.1 "Architectural State before a VM-Exit".
    16046          */
    16047         VMMRZCallRing3Disable(pVCpu);
    16048         HM_DISABLE_PREEMPT(pVCpu);
    16049 
    16050         pCtx->dr[6] &= ~X86_DR6_B_MASK;
    16051         pCtx->dr[6] |= uDR6;
    16052         if (CPUMIsGuestDebugStateActive(pVCpu))
    16053             ASMSetDR6(pCtx->dr[6]);
    16054 
    16055         HM_RESTORE_PREEMPT();
    16056         VMMRZCallRing3Enable(pVCpu);
    16057 
    16058         rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
    16059         AssertRCReturn(rc, rc);
    16060 
    16061         /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
    16062         pCtx->dr[7] &= ~X86_DR7_GD;
    16063 
    16064         /* Paranoia. */
    16065         pCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
    16066         pCtx->dr[7] |= X86_DR7_RA1_MASK;
    16067 
    16068         rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pCtx->dr[7]);
    16069         AssertRCReturn(rc, rc);
    16070 
    16071         /*
    16072          * Raise #DB in the guest.
    16073          *
    16074          * It is important to reflect exactly what the VM-exit gave us (preserving the
    16075          * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
    16076          * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
    16077          * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
    16078          *
    16079          * Intel re-documented ICEBP/INT1 in May 2018, having previously documented it as part of
    16080          * the Intel 386; see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
    16081          */
    16082         rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    16083         rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16084         rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    16085         AssertRCReturn(rc, rc);
    16086         hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
    16087                                pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    16088         return VINF_SUCCESS;
    16089     }
    16090 
    16091     /*
    16092      * Not a guest trap, so this must be a hypervisor-related debug event.
    16093      * Update DR6 in case someone is interested in it.
    16094      */
    16095     AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
    16096     AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
    16097     CPUMSetHyperDR6(pVCpu, uDR6);
    16098 
    16099     return rc;
    16100 }
    16101 
    16102 
    16103 /**
    16104  * Hacks its way around the lovely mesa driver's backdoor accesses.
    16105  *
    16106  * @sa hmR0SvmHandleMesaDrvGp.
    16107  */
    16108 static int hmR0VmxHandleMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
    16109 {
    16110     LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
    16111     RT_NOREF(pCtx);
    16112 
    16113     /* For now we'll just skip the instruction. */
    16114     return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    16115 }
    16116 
    16117 
    16118 /**
    16119  * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
    16120  * backdoor logging w/o checking what it is running inside.
    16121  *
    16122  * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
    16123  * backdoor port and magic numbers loaded in registers.
    16124  *
    16125  * @returns true if it is, false if it isn't.
    16126  * @sa      hmR0SvmIsMesaDrvGp.
    16127  */
    16128 DECLINLINE(bool) hmR0VmxIsMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
    16129 {
    16130     /* 0xed:  IN eAX,dx */
    16131     uint8_t abInstr[1];
    16132     if (pVmxTransient->cbInstr != sizeof(abInstr))
    16133         return false;
    16134 
    16135     /* Check that it is #GP(0). */
    16136     if (pVmxTransient->uExitIntErrorCode != 0)
    16137         return false;
    16138 
    16139     /* Check magic and port. */
    16140     Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
    16141     /*Log(("hmR0VmxIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
    16142     if (pCtx->rax != UINT32_C(0x564d5868))
    16143         return false;
    16144     if (pCtx->dx != UINT32_C(0x5658))
    16145         return false;
    16146 
    16147     /* Flat ring-3 CS. */
    16148     AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
    16149     Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
    16150     /*Log(("hmR0VmxIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
    16151     if (pCtx->cs.Attr.n.u2Dpl != 3)
    16152         return false;
    16153     if (pCtx->cs.u64Base != 0)
    16154         return false;
    16155 
    16156     /* Check opcode. */
    16157     AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
    16158     Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
    16159     int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
    16160     /*Log(("hmR0VmxIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
    16161     if (RT_FAILURE(rc))
    16162         return false;
    16163     if (abInstr[0] != 0xed)
    16164         return false;
    16165 
    16166     return true;
    16167 }
    16168 
    16169 
    16170 /**
    16171  * VM-exit exception handler for \#GP (General-protection exception).
    16172  *
    16173  * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
    16174  */
    16175 static VBOXSTRICTRC hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    16176 {
    16177     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16178     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
    16179 
    16180     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    16181     PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    16182     if (pVmcsInfo->RealMode.fRealOnV86Active)
    16183     { /* likely */ }
    16184     else
    16185     {
    16186 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    16187         Assert(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
    16188 #endif
    16189         /* If the guest is not in real-mode or we have unrestricted guest execution support, reflect #GP to the guest. */
    16190         int rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    16191         rc     |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    16192         rc     |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16193         rc     |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    16194         AssertRCReturn(rc, rc);
    16195         Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
    16196                   pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
    16197 
    16198         if (    pVmxTransient->fIsNestedGuest
    16199             || !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
    16200             || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
    16201             hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    16202                                    pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    16203         else
    16204             rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
    16205         return rc;
    16206     }
    16207 
    16208     Assert(CPUMIsGuestInRealModeEx(pCtx));
    16209     Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
    16210     Assert(!pVmxTransient->fIsNestedGuest);
    16211 
    16212     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    16213     AssertRCReturn(rc, rc);
    16214 
    16215     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    16216     if (rcStrict == VINF_SUCCESS)
    16217     {
    16218         if (!CPUMIsGuestInRealModeEx(pCtx))
    16219         {
    16220             /*
    16221              * The guest is no longer in real-mode, check if we can continue executing the
    16222              * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
    16223              */
    16224             pVmcsInfo->RealMode.fRealOnV86Active = false;
    16225             if (HMCanExecuteVmxGuest(pVCpu, pCtx))
    16226             {
    16227                 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
    16228                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    16229             }
    16230             else
    16231             {
    16232                 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
    16233                 rcStrict = VINF_EM_RESCHEDULE;
    16234             }
    16235         }
    16236         else
    16237             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    16238     }
    16239     else if (rcStrict == VINF_IEM_RAISED_XCPT)
    16240     {
    16241         rcStrict = VINF_SUCCESS;
    16242         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16243     }
    16244     return VBOXSTRICTRC_VAL(rcStrict);
    16245 }
    16246 
    16247 
    16248 /**
    16249  * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
    16250  * the exception reported in the VMX transient structure back into the VM.
    16251  *
    16252  * @remarks Requires uExitIntInfo in the VMX transient structure to be
    16253  *          up-to-date.
    16254  */
    16255 static VBOXSTRICTRC hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    16256 {
    16257     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16258 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    16259     PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
    16260     AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfo->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
    16261               ("uVector=%#x u32XcptBitmap=%#X32\n",
    16262                VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
    16263     NOREF(pVmcsInfo);
    16264 #endif
    16265 
    16266     /*
    16267      * Re-inject the exception into the guest. This cannot be a double-fault condition, which
    16268      * would have been handled earlier while checking exits due to event delivery.
    16269      */
    16270     int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    16271     rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    16272     AssertRCReturn(rc, rc);
    16273     Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
    16274 
    16275 #ifdef DEBUG_ramshankar
    16276     rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
    16277     Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n",
    16278          VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pCtx->cs.Sel, pCtx->rip));
    16279 #endif
    16280 
    16281     hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
    16282                            pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
    16283     return VINF_SUCCESS;
    16284 }
    16285 
    16286 
    16287 /**
    16288  * VM-exit exception handler for \#PF (Page-fault exception).
    16289  */
    16290 static VBOXSTRICTRC hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    16291 {
    16292     HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
    16293     PVM pVM = pVCpu->CTX_SUFF(pVM);
    16294     int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
    16295     rc    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    16296     rc    |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    16297     AssertRCReturn(rc, rc);
    16298 
    16299     if (!pVM->hm.s.fNestedPaging)
    16300     { /* likely */ }
    16301     else
    16302     {
    16303 #if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
    16304         Assert(pVmxTransient->fIsNestedGuest || pVCpu->hm.s.fUsingDebugLoop);
    16305 #endif
    16306         pVCpu->hm.s.Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */
    16307         if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
    16308         {
    16309             hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
    16310                                    pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
    16311         }
    16312         else
    16313         {
    16314             /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
    16315             hmR0VmxSetPendingXcptDF(pVCpu);
    16316             Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
    16317         }
    16318         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    16319         return rc;
    16320     }
    16321 
    16322     /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
    16323        of differentiating between instruction emulation and event injection as the cause of a #PF. See @bugref{6607}. */
    16324     if (pVmxTransient->fVectoringPF)
    16325     {
    16326         Assert(pVCpu->hm.s.Event.fPending);
    16327         return VINF_EM_RAW_INJECT_TRPM_EVENT;
    16328     }
    16329 
    16330     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    16331     rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
    16332     AssertRCReturn(rc, rc);
    16333 
    16334     Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel,
    16335               pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
    16336 
    16337     TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
    16338     rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
    16339 
    16340     Log4Func(("#PF: rc=%Rrc\n", rc));
    16341     if (rc == VINF_SUCCESS)
    16342     {
    16343         /*
    16344          * This is typically a shadow page table sync or a MMIO instruction. But we may have
    16345          * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
    16346          */
    16347         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    16348         TRPMResetTrap(pVCpu);
    16349         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    16350         return rc;
    16351     }
    16352 
    16353     if (rc == VINF_EM_RAW_GUEST_TRAP)
    16354     {
    16355         if (!pVmxTransient->fVectoringDoublePF)
    16356         {
    16357             /* It's a guest page fault and needs to be reflected to the guest. */
    16358             uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
    16359             TRPMResetTrap(pVCpu);
    16360             pVCpu->hm.s.Event.fPending = false;                 /* In case it's a contributory #PF. */
    16361             hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
    16362                                    uGstErrorCode, pVmxTransient->uExitQual);
    16363         }
    16364         else
    16365         {
    16366             /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
    16367             TRPMResetTrap(pVCpu);
    16368             pVCpu->hm.s.Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
    16369             hmR0VmxSetPendingXcptDF(pVCpu);
    16370             Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
    16371         }
    16372 
    16373         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    16374         return VINF_SUCCESS;
    16375     }
    16376 
    16377     TRPMResetTrap(pVCpu);
    16378     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
    16379     return rc;
    16380 }
    16381 
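The handler above reduces to a three-way decision on the status PGMTrap0eHandler() returns. The following sketch condenses that decision tree for orientation only; it is not part of this changeset, the helper name is invented, and it assumes the surrounding HMVMXR0.cpp context for the VBox types and hmR0VmxSetPendingXcptDF():

static VBOXSTRICTRC hmR0VmxXcptPFOutcomeSketch(PVMCPU pVCpu, int rc, bool fVectoringDoublePF)
{
    if (rc == VINF_SUCCESS)               /* Shadow page-table sync or MMIO access: handled, nothing to reflect. */
        return VINF_SUCCESS;
    if (rc == VINF_EM_RAW_GUEST_TRAP)     /* Genuine guest #PF. */
    {
        if (!fVectoringDoublePF)
        {
            /* Reflect the #PF to the guest (hmR0VmxSetPendingEvent() in the real code). */
        }
        else
            hmR0VmxSetPendingXcptDF(pVCpu);  /* #PF raised while delivering a #PF: escalate to #DF. */
        return VINF_SUCCESS;
    }
    return rc;                            /* Anything else goes back to the caller, typically to ring-3. */
}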
    16382 
    16383 /**
    16384  * VM-exit helper for LMSW.
    16385  */
    16386 static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
    16387 {
    16388     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    16389     AssertRCReturn(rc, rc);
    16390 
    16391     VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
    16392     AssertMsg(   rcStrict == VINF_SUCCESS
    16393               || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    16394 
    16395     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    16396     if (rcStrict == VINF_IEM_RAISED_XCPT)
    16397     {
    16398         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16399         rcStrict = VINF_SUCCESS;
    16400     }
    16401 
    16402     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
    16403     Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    16404     return rcStrict;
    16405 }
    16406 
    16407 
    16408 /**
    16409  * VM-exit helper for CLTS.
    16410  */
    16411 static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
    16412 {
    16413     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    16414     AssertRCReturn(rc, rc);
    16415 
    16416     VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
    16417     AssertMsg(   rcStrict == VINF_SUCCESS
    16418               || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    16419 
    16420     ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    16421     if (rcStrict == VINF_IEM_RAISED_XCPT)
    16422     {
    16423         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16424         rcStrict = VINF_SUCCESS;
    16425     }
    16426 
    16427     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
    16428     Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    16429     return rcStrict;
    16430 }
    16431 
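hmR0VmxExitLmsw() and hmR0VmxExitClts() above, like the two MOV-CRx helpers below, share a fixed tail: OR the touched-register mask into fCtxChanged, then fold VINF_IEM_RAISED_XCPT into VINF_SUCCESS after flagging HM_CHANGED_RAISED_XCPT_MASK. A minimal sketch of that tail factored into one helper (the function name is hypothetical; this is not a refactoring the commit performs):

static VBOXSTRICTRC hmR0VmxFoldIemStatusSketch(PVMCPU pVCpu, VBOXSTRICTRC rcStrict, uint64_t fCtxChanged)
{
    /* Record which guest-context bits the emulated instruction may have touched. */
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, fCtxChanged);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        /* IEM queued an exception as a pending event; mark the wider context dirty
           and return success so the run loop delivers it on the next VM-entry. */
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}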
    16432 
    16433 /**
    16434  * VM-exit helper for MOV from CRx (CRx read).
    16435  */
    16436 static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
    16437 {
    16438     Assert(iCrReg < 16);
    16439     Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
    16440 
    16441     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    16442     AssertRCReturn(rc, rc);
    16443 
    16444     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
    16445     AssertMsg(   rcStrict == VINF_SUCCESS
    16446               || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    16447 
    16448     if (iGReg == X86_GREG_xSP)
    16449         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
    16450     else
    16451         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
    16452 #ifdef VBOX_WITH_STATISTICS
    16453     switch (iCrReg)
    16454     {
    16455         case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
    16456         case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
    16457         case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
    16458         case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
    16459         case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
    16460     }
    16461 #endif
    16462     Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
    16463     return rcStrict;
    16464 }
    16465 
    16466 
    16467 /**
    16468  * VM-exit helper for MOV to CRx (CRx write).
    16469  */
    16470 static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
    16471 {
    16472     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
    16473     AssertRCReturn(rc, rc);
    16474 
    16475     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
    16476     AssertMsg(   rcStrict == VINF_SUCCESS
    16477               || rcStrict == VINF_IEM_RAISED_XCPT
    16478               || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    16479 
    16480     switch (iCrReg)
    16481     {
    16482         case 0:
    16483         {
    16484             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
    16485             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
    16486             Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
    16487             break;
    16488         }
    16489 
    16490         case 2:
    16491         {
    16492             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
    16493             /* Nothing to do here; CR2 is not part of the VMCS. */
    16494             break;
    16495         }
    16496 
    16497         case 3:
    16498         {
    16499             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
    16500             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
    16501             Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
    16502             break;
    16503         }
    16504 
    16505         case 4:
    16506         {
    16507             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
    16508             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
    16509             Log4Func(("CR4 write. rcStrict=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
    16510                       pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
    16511             break;
    16512         }
    16513 
    16514         case 8:
    16515         {
    16516             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
    16517                              HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
    16518             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
    16519             break;
    16520         }
    16521 
    16522         default:
    16523             AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
    16524             break;
    16525     }
    16526 
    16527     if (rcStrict == VINF_IEM_RAISED_XCPT)
    16528     {
    16529         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    16530         rcStrict = VINF_SUCCESS;
    16531     }
    16532     return rcStrict;
    16533 }
    16534 
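The four helpers above are invoked from the MOV-CRx VM-exit handler after decoding the exit qualification. A condensed dispatch sketch follows; it is illustrative only, the VMX_EXIT_QUAL_CRX_* decoder macros are quoted from memory (treat their exact names as an assumption), and the real handler additionally deals with TPR shadowing, CR0/CR4 guest/host masks and nested-guest intercepts:

static VBOXSTRICTRC hmR0VmxCrxDispatchSketch(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo,
                                             uint64_t uExitQual, uint8_t cbInstr)
{
    uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);  /* Which control register. */
    uint8_t const iGReg  = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);    /* Which general-purpose register. */
    switch (VMX_EXIT_QUAL_CRX_ACCESS(uExitQual))
    {
        case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:    /* MOV to CRx. */
            return hmR0VmxExitMovToCrX(pVCpu, pVmcsInfo, cbInstr, iGReg, iCrReg);
        case VMX_EXIT_QUAL_CRX_ACCESS_READ:     /* MOV from CRx. */
            return hmR0VmxExitMovFromCrX(pVCpu, pVmcsInfo, cbInstr, iGReg, iCrReg);
        case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
            return hmR0VmxExitClts(pVCpu, pVmcsInfo, cbInstr);
        case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:     /* Register-operand case only; the real code also
                                                   resolves a memory operand into GCPtrEffDst. */
            return hmR0VmxExitLmsw(pVCpu, pVmcsInfo, cbInstr,
                                   VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual), 0 /* GCPtrEffDst */);
        default:
            AssertMsgFailed(("Unexpected CRx access. uExitQual=%#RX64\n", uExitQual));
            return VERR_VMX_UNEXPECTED_EXIT;
    }
}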
    16535 
    16536 /**
    16537  * VM-exit helper for handling host NMIs.
    16538  */
    16539 static VBOXSTRICTRC hmR0VmxExitHostNmi(PVMCPU pVCpu)
    16540 {
    16541     VMXDispatchHostNmi();
    16542 
    16543     STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
    16544     return VINF_SUCCESS;
    16545 }
    16546 
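hmR0VmxExitHostNmi() covers the case where the NMI behind an exception/NMI VM-exit belongs to the host rather than the guest: VMXDispatchHostNmi() hands it to the host OS's NMI handler on the current CPU. A sketch of the caller-side test, with the VMX_EXIT_INT_INFO_* accessors assumed from the VBox VMX headers (illustrative, not the changeset's code):

static VBOXSTRICTRC hmR0VmxXcptOrNmiSketch(PVMCPU pVCpu, uint32_t uExitIntInfo)
{
    if (   VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
        && VMX_EXIT_INT_INFO_TYPE(uExitIntInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
        return hmR0VmxExitHostNmi(pVCpu);   /* Forward the NMI to the host. */
    /* ...otherwise it is a guest exception to be handled or reflected... */
    return VINF_SUCCESS;
}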
    1654716502
    1654816503#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    16549 /** @name VMX instruction handlers.
    16550  * @{
    16551  */
    16552 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    16553 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VMX instructions VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    16554 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    16555 
    1655616504/**
    1655716505 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
     
    1690716855    return rcStrict;
    1690816856}
    16909 
     16857#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    1691016858/** @} */
    1691116859
     16860
     16861#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1691216862/** @name Nested-guest VM-exit handlers.
    1691316863 * @{
     
    1781717767
    1781817768/** @} */
    17819 
    1782017769#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    1782117770