VirtualBox

Changeset 53176 in vbox


Ignore:
Timestamp:
Nov 2, 2014 12:34:02 AM (10 years ago)
Author:
vboxsync
Message:

HMVMXR0.cpp: Fixed bug in hmR0VmxInjectEventVmcs where cs.ValidSel wasn't updated in the real-mode case. Fixed a few single stepping issues. Some cleanups like CS:RIP formatting, unnecessary {} and else after return.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r53089 r53176  
    355355# define HMVMX_EXIT_DECL  static DECLCALLBACK(int)
    356356#endif
     357DECLINLINE(VBOXSTRICTRC)  hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
     358                                                uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart);
    357359
    358360/** @name VM-exit handlers.
     
    77407742    if (   uVector == X86_XCPT_BP
    77417743        || uVector == X86_XCPT_OF)
    7742     {
    77437744        u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    7744     }
    77457745    else
    77467746        u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    79117911                pMixedCtx->rip         = IdtEntry.offSel;
    79127912                pMixedCtx->cs.Sel      = IdtEntry.uSel;
     7913                pMixedCtx->cs.ValidSel = IdtEntry.uSel;
    79137914                pMixedCtx->cs.u64Base  = IdtEntry.uSel << cbIdtEntry;
    79147915                if (   uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
    79157916                    && uVector  == X86_XCPT_PF)
    7916                 {
    79177917                    pMixedCtx->cr2 = GCPtrFaultAddress;
    7918                 }
    79197918
    79207919                /* If any other guest-state bits are changed here, make sure to update
     
    79427941            return rc;
    79437942        }
    7944         else
    7945         {
    7946             /*
    7947              * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
    7948              * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
    7949              */
    7950             u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    7951         }
     7943
     7944        /*
     7945         * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
     7946         * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
     7947         */
     7948        u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    79527949    }
    79537950
     
    79657962    if (   VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
    79667963        && uVector == X86_XCPT_PF)
    7967     {
    79687964        pMixedCtx->cr2 = GCPtrFaultAddress;
    7969     }
    79707965
    79717966    Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
     
    84158410 *                          before using them.
    84168411 * @param   pVmxTransient   Pointer to the VMX transient structure.
    8417  */
    8418 static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     8412 * @param   fStepping       Set if called from hmR0VmxRunGuestCodeStep, makes us
     8413 *                          ignore some of the reasons for returning to ring-3.
     8414 */
     8415static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
    84198416{
    84208417    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     
    84958492     */
    84968493    pVmxTransient->uEflags = ASMIntDisableFlags();
    8497     if (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
    8498         || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
     8494    if (  (   VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
     8495           || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
     8496        && (   !fStepping /* Optimized for the non-stepping case, of course. */
     8497            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
    84998498    {
    85008499        hmR0VmxClearEventVmcs(pVCpu);
     
    88028801           to ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
    88038802        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    8804         rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
     8803        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /*fStepping*/);
    88058804        if (rc != VINF_SUCCESS)
    88068805            break;
     
    88638862    VMXTRANSIENT VmxTransient;
    88648863    VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
    8865     int          rc        = VERR_INTERNAL_ERROR_5;
     8864    VBOXSTRICTRC rcStrict  = VERR_INTERNAL_ERROR_5;
    88668865    uint32_t     cLoops    = 0;
    88678866    uint16_t     uCsStart  = pCtx->cs.Sel;
     
    88768875           to ring-3.  This bugger disables interrupts on VINF_SUCCESS! */
    88778876        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
    8878         rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
    8879         if (rc != VINF_SUCCESS)
     8877        rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, true /*fStepping*/);
     8878        if (rcStrict != VINF_SUCCESS)
    88808879            break;
    88818880
    88828881        hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
    8883         rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
     8882        rcStrict = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
    88848883        /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
    88858884
    88868885        /* Restore any residual host-state and save any bits shared between host
    88878886           and guest into the guest-CPU state.  Re-enables interrupts! */
    8888         hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
     8887        hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
    88898888
    88908889        /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
    8891         if (RT_UNLIKELY(rc != VINF_SUCCESS))
     8890        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    88928891        {
    88938892            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
    8894             hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
    8895             return rc;
    8896         }
    8897 
    8898         /* Handle the VM-exit. */
     8893            hmR0VmxReportWorldSwitchError(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict), pCtx, &VmxTransient);
     8894            return VBOXSTRICTRC_TODO(rcStrict);
     8895        }
     8896
     8897        /* Handle the VM-exit - we quit earlier on certain exits, see hmR0VmxHandleExitStep. */
    88998898        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
    89008899        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
     
    89028901        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
    89038902        HMVMX_START_EXIT_DISPATCH_PROF();
    8904 #ifdef HMVMX_USE_FUNCTION_TABLE
    8905         rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
    8906 #else
    8907         rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
    8908 #endif
     8903        rcStrict = hmR0VmxHandleExitStep(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, uCsStart, uRipStart);
    89098904        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
    8910         if (rc != VINF_SUCCESS)
     8905        if (rcStrict != VINF_SUCCESS)
    89118906            break;
    8912         else if (cLoops > pVM->hm.s.cMaxResumeLoops)
     8907        if (cLoops > pVM->hm.s.cMaxResumeLoops)
    89138908        {
    89148909            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
    8915             rc = VINF_EM_RAW_INTERRUPT;
     8910            rcStrict = VINF_EM_RAW_INTERRUPT;
    89168911            break;
    89178912        }
     
    89278922            || pCtx->cs.Sel != uCsStart)
    89288923        {
    8929             rc = VINF_EM_DBG_STEPPED;
     8924            rcStrict = VINF_EM_DBG_STEPPED;
    89308925            break;
    89318926        }
     
    89488943
    89498944    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    8950     return rc;
     8945    return VBOXSTRICTRC_TODO(rcStrict);
    89518946}
    89528947
     
    90659060    return rc;
    90669061}
    9067 #endif
     9062#endif /* !HMVMX_USE_FUNCTION_TABLE */
     9063
     9064
     9065/**
     9066 * Single stepping exit filtering.
     9067 *
     9068 * This is preprocessing the exits and deciding whether we've gotten far enough
     9069 * to return VINF_EM_DBG_STEPPED already.  If not, normal exit handling is
     9070 * performed.
     9071 *
     9072 * @returns Strict VBox status code.
     9073 * @param   pVCpu           The virtual CPU of the calling EMT.
     9074 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
     9075 *                          out-of-sync. Make sure to update the required
     9076 *                          fields before using them.
     9077 * @param   pVmxTransient   Pointer to the VMX-transient structure.
     9078 * @param   uExitReason     The exit reason.
     9079 */
     9080DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
     9081                                               uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart)
     9082{
     9083    switch (uExitReason)
     9084    {
     9085        case VMX_EXIT_XCPT_OR_NMI:
     9086        {
     9087            /* Check for NMI. */
     9088            int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     9089            AssertRCReturn(rc2, rc2);
     9090            uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
     9091            if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
     9092                return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
     9093            /* fall thru */
     9094        }
     9095
     9096        case VMX_EXIT_EPT_MISCONFIG:
     9097        case VMX_EXIT_TRIPLE_FAULT:
     9098        case VMX_EXIT_APIC_ACCESS:
     9099        case VMX_EXIT_TPR_BELOW_THRESHOLD:
     9100        case VMX_EXIT_TASK_SWITCH:
     9101
     9102        /* Instruction specific exits: */
     9103        case VMX_EXIT_IO_INSTR:
     9104        case VMX_EXIT_CPUID:
     9105        case VMX_EXIT_RDTSC:
     9106        case VMX_EXIT_RDTSCP:
     9107        case VMX_EXIT_MOV_CRX:
     9108        case VMX_EXIT_MWAIT:
     9109        case VMX_EXIT_MONITOR:
     9110        case VMX_EXIT_RDMSR:
     9111        case VMX_EXIT_WRMSR:
     9112        case VMX_EXIT_MOV_DRX:
     9113        case VMX_EXIT_HLT:
     9114        case VMX_EXIT_INVD:
     9115        case VMX_EXIT_INVLPG:
     9116        case VMX_EXIT_RSM:
     9117        case VMX_EXIT_PAUSE:
     9118        case VMX_EXIT_XDTR_ACCESS:
     9119        case VMX_EXIT_TR_ACCESS:
     9120        case VMX_EXIT_WBINVD:
     9121        case VMX_EXIT_XSETBV:
     9122        case VMX_EXIT_RDRAND:
     9123        case VMX_EXIT_INVPCID:
     9124        case VMX_EXIT_GETSEC:
     9125        case VMX_EXIT_RDPMC:
     9126        case VMX_EXIT_VMCALL:
     9127        case VMX_EXIT_VMCLEAR:
     9128        case VMX_EXIT_VMLAUNCH:
     9129        case VMX_EXIT_VMPTRLD:
     9130        case VMX_EXIT_VMPTRST:
     9131        case VMX_EXIT_VMREAD:
     9132        case VMX_EXIT_VMRESUME:
     9133        case VMX_EXIT_VMWRITE:
     9134        case VMX_EXIT_VMXOFF:
     9135        case VMX_EXIT_VMXON:
     9136        case VMX_EXIT_INVEPT:
     9137        case VMX_EXIT_INVVPID:
     9138        case VMX_EXIT_VMFUNC:
     9139        {
     9140            int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
     9141            rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     9142            AssertRCReturn(rc2, rc2);
     9143            if (   pMixedCtx->rip    != uRipStart
     9144                || pMixedCtx->cs.Sel != uCsStart)
     9145                return VINF_EM_DBG_STEPPED;
     9146            break;
     9147        }
     9148    }
     9149
     9150    /*
     9151     * Normal processing.
     9152     */
     9153#ifdef HMVMX_USE_FUNCTION_TABLE
     9154    return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
     9155#else
     9156    return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
     9157#endif
     9158}
     9159
    90689160
    90699161#ifdef DEBUG
     
    1091511007         * interpreting the instruction.
    1091611008         */
    10917         Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     11009        Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    1091811010        AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
    1091911011        if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
     
    1098311075         * IN/OUT - I/O instruction.
    1098411076         */
    10985         Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
     11077        Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
    1098611078        uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
    1098711079        Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
     
    1130611398    rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
    1130711399    AssertRCReturn(rc, rc);
    11308     Log4(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
     11400    Log4(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
    1130911401
    1131011402    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    1143311525    TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
    1143411526
    11435     Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
    11436          uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
     11527    Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
     11528          uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
    1143711529
    1143811530    /* Handle the pagefault trap for the nested shadow table. */
     
    1168811780        rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    1168911781        AssertRCReturn(rc, rc);
    11690         Log4(("#GP Gst: CS:RIP %04x:%#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
    11691              pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
     11782        Log4(("#GP Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
     11783              pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
    1169211784        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
    1169311785                               pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
     
    1171611808        rc = VINF_SUCCESS;
    1171711809        Assert(cbOp == pDis->cbInstr);
    11718         Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
     11810        Log4(("#GP Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
    1171911811        switch (pDis->pCurInstr->uOpcode)
    1172011812        {
     
    1175211844            case OP_POPF:
    1175311845            {
    11754                 Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
     11846                Log4(("POPF CS:RIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
    1175511847                uint32_t cbParm;
    1175611848                uint32_t uMask;
     
    1179111883                pMixedCtx->rip              += pDis->cbInstr;
    1179211884                HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
    11793                                       | HM_CHANGED_GUEST_RSP
    11794                                       | HM_CHANGED_GUEST_RFLAGS);
     11885                                    | HM_CHANGED_GUEST_RSP
     11886                                    | HM_CHANGED_GUEST_RFLAGS);
    1179511887                /* Generate a pending-debug exception when stepping over POPF regardless of how POPF modifies EFLAGS.TF. */
    1179611888                if (fStepping)
     
    1188311975                if (fStepping)
    1188411976                    hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
    11885                 Log4(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
     11977                Log4(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
    1188611978                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
    1188711979                break;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette