VirtualBox

Changeset 47736 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp:
Aug 14, 2013 7:49:47 PM
Author:
vboxsync
Message:

HMVMXR0.cpp: Separate run-loop for single stepping to catch exceptions and such without harming performance of the normal execution (requires RIP and CS to be saved). Simplified the UNUSABLE handling in hmR0VmxReadSegmentReg to just make sure P and a few other irrelevant bits are cleared. Always clearing the G bit would cause the limit to change on reload. If having bits set here is causing trouble, we'll fix the other code making the trouble, not work around it here.
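Background on the G-bit remark: with the granularity bit set, the CPU interprets the 20-bit descriptor limit in 4 KiB units, so stripping G while keeping a byte-granular saved limit would change the effective limit once the segment is reloaded. A minimal standalone sketch of the scaling rule (per Intel SDM vol. 3, section 3.4.5; illustration only, not VBox code):

    #include <stdint.h>

    /* Effective segment limit as the CPU computes it: with G=1 the 20-bit
     * limit field counts 4 KiB pages and the low 12 bits read as ones. */
    static uint32_t EffectiveSegLimit(uint32_t uLimitField, unsigned fGranularity)
    {
        return fGranularity ? (uLimitField << 12) | UINT32_C(0xfff) : uLimitField;
    }

For instance, a limit field of 0xFFFFF means 1 MiB - 1 with G=0 but 4 GiB - 1 with G=1, which is why the new code leaves G untouched for unusable segments.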

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r47719)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r47736)
@@ -5505,27 +5505,30 @@
 
     /*
-     * If VT-x marks the segment as unusable, the rest of the attributes are undefined with certain exceptions (some bits in
-     * CS, SS). Regardless, we have to clear the bits here and only retain the unusable bit because the unusable bit is specific
-     * to VT-x, everyone else relies on the attribute being zero and have no clue what the unusable bit is.
+     * If VT-x marks the segment as unusable, most other bits remain undefined:
+     *    - For CS the L, D and G bits have meaning.
+     *    - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
+     *    - For the remaining data segments no bits are defined.
+     *
+     * What should be important for the rest of the VBox code is that the P bit
+     * is cleared.  Some of the other VBox code recognizes the unusable bit, but
+     * AMD-V certainly doesn't, and REM doesn't really either.  So, to be on the
+     * safe side here we'll strip off P and other bits we don't care about.  If
+     * any code breaks because attr.u != 0 when Sel < 4, it should be fixed.
      *
      * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
-     *
-     * bird: This isn't quite as simple.  VT-x and VBox(!) requires the DPL for SS to be the same as CPL.  In 64-bit mode it
-     *       is possible (int/trap/xxx injects does this when switching rings) to load SS with a NULL selector and RPL=CPL.
-     *       The Attr.u = X86DESCATTR_UNUSABLE works fine as long as nobody uses ring-1 or ring-2.  VT-x updates the DPL
-     *       correctly in the attributes of SS even when the unusable bit is set, we need to preserve the DPL or we get invalid
-     *       guest state trouble.  Try bs2-cpu-hidden-regs-1.
      */
     if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
     {
         Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR);          /* TR is the only selector that can never be unusable. */
-        Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x\n", idxSel, pSelReg->Attr.u));
-
-        if (idxSel == VMX_VMCS16_GUEST_FIELD_SS)
-            pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_DPL;
-        else if (idxSel == VMX_VMCS16_GUEST_FIELD_CS)
-            pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G;
-        else
-            pSelReg->Attr.u = X86DESCATTR_UNUSABLE;
+#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
+        uint32_t fAttr = pSelReg->Attr.u;
+#endif
+        /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
+        pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
+                         | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
+        Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, fAttr, pSelReg->Attr.u));
+#ifdef DEBUG_bird
+        AssertMsg(fAttr == pSelReg->Attr.u, ("%#x: %#x != %#x\n", idxSel, fAttr, pSelReg->Attr.u));
+#endif
     }
     return VINF_SUCCESS;
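To make the new mask concrete, here is a hedged standalone sketch; the attribute bit values mirror the descriptor-attribute packing of VBox's X86DESCATTR_* constants (reproduced here for illustration, verify against x86.h in the tree):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed attribute packing: type 3:0, S 4, DPL 6:5, P 7,
     * limit 19:16 in 11:8, AVL 12, L 13, D/B 14, G 15, unusable 16. */
    #define X86DESCATTR_TYPE        UINT32_C(0x0000f)
    #define X86DESCATTR_DT          UINT32_C(0x00010)
    #define X86DESCATTR_DPL         UINT32_C(0x00060)
    #define X86DESCATTR_P           UINT32_C(0x00080)
    #define X86DESCATTR_LIMIT_HIGH  UINT32_C(0x00f00)
    #define X86DESCATTR_AVL         UINT32_C(0x01000)
    #define X86DESCATTR_L           UINT32_C(0x02000)
    #define X86DESCATTR_D           UINT32_C(0x04000)
    #define X86DESCATTR_G           UINT32_C(0x08000)
    #define X86DESCATTR_UNUSABLE    UINT32_C(0x10000)

    int main(void)
    {
        /* The mask from the changeset: keep everything except P, LIMIT_HIGH, AVL. */
        uint32_t const fKeep = X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D
                             | X86DESCATTR_G | X86DESCATTR_DPL | X86DESCATTR_TYPE
                             | X86DESCATTR_DT;
        uint32_t fAttr = UINT32_C(0x1c9b3);   /* hypothetical unusable CS value */
        printf("attr=%#x -> %#x\n", fAttr, fAttr & fKeep);
        return 0;
    }

Note the mask clears P (so the rest of VBox sees the segment as not present) while keeping G, so the limit survives a reload unchanged, exactly as the commit message argues.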
     
@@ -6155,11 +6158,4 @@
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
 
-    /* Make sure we've undo the trap flag if we tried to single step something. */
-    if (pVCpu->hm.s.fClearTrapFlag)
-    {
-        pVCpu->hm.s.fClearTrapFlag = false;
-        pMixedCtx->eflags.Bits.u1TF = 0;
-    }
-
     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
     VMMRZCallRing3Enable(pVCpu);
     
@@ -6300,5 +6296,6 @@
             hmR0VmxSetIntWindowExitVmcs(pVCpu);
     }
-    else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
+    else if (   VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
+             && !pVCpu->hm.s.fSingleInstruction)
     {
         /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
     
@@ -7223,6 +7220,7 @@
 
 
-/**
- * Runs the guest code using VT-x.
+
+/**
+ * Runs the guest code using VT-x the normal way.
  *
  * @returns VBox status code.
@@ -7231,11 +7229,9 @@
  * @param   pCtx        Pointer to the guest-CPU context.
  *
+ * @note    Mostly the same as hmR0VmxRunGuestCodeStep.
  * @remarks Called with preemption disabled.
  */
-VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    Assert(VMMRZCallRing3IsEnabled(pVCpu));
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-
+static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
     VMXTRANSIENT VmxTransient;
     VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
     
@@ -7302,4 +7298,141 @@
 
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
+    return rc;
+}
+
+
+/**
+ * Single steps guest code using VT-x.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @note    Mostly the same as hmR0VmxRunGuestCodeNormal.
+ * @remarks Called with preemption disabled.
+ */
+static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    VMXTRANSIENT VmxTransient;
+    VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
+    int          rc     = VERR_INTERNAL_ERROR_5;
+    uint32_t     cLoops = 0;
+    uint16_t     uCsStart  = pCtx->cs.Sel;
+    uint64_t     uRipStart = pCtx->rip;
+
+    for (;; cLoops++)
+    {
+        Assert(!HMR0SuspendPending());
+        AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
+                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
+                  (unsigned)RTMpCpuId(), cLoops));
+
+        /* Preparatory work for running guest code; this may return to ring-3 for some last-minute updates. */
+        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
+        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
+        if (rc != VINF_SUCCESS)
+            break;
+
+        /*
+         * No longjmps to ring-3 from this point on!!!
+         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional; better than a kernel panic.
+         * This also disables flushing of the R0-logger instance (if any).
+         */
+        VMMRZCallRing3Disable(pVCpu);
+        VMMRZCallRing3RemoveNotification(pVCpu);
+        hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
+
+        rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
+        /* The guest-CPU context is now outdated; 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
+
+        /*
+         * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
+         * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
+         */
+        hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
+        if (RT_UNLIKELY(rc != VINF_SUCCESS))        /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
+        {
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
+            hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
+            return rc;
+        }
+
+        /* Handle the VM-exit. */
+        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
+        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        HMVMX_START_EXIT_DISPATCH_PROF();
+#ifdef HMVMX_USE_FUNCTION_TABLE
+        rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
+#else
+        rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
+#endif
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        if (rc != VINF_SUCCESS)
+            break;
+        else if (cLoops > pVM->hm.s.cMaxResumeLoops)
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
+            rc = VINF_EM_RAW_INTERRUPT;
+            break;
+        }
+
+        /*
+         * Did the RIP change? If so, consider it a single step.
+         * Otherwise, make sure one of the TFs gets set.
+         */
+        int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
+        rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
+        AssertRCReturn(rc2, rc2);
+        if (   pCtx->rip    != uRipStart
+            || pCtx->cs.Sel != uCsStart)
+        {
+            rc = VINF_EM_DBG_STEPPED;
+            break;
+        }
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+    }
+
+    /*
+     * Clear the X86_EFL_TF if necessary.
+     */
+    if (pVCpu->hm.s.fClearTrapFlag)
+    {
+        int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
+        AssertRCReturn(rc2, rc2);
+        pVCpu->hm.s.fClearTrapFlag = false;
+        pCtx->eflags.Bits.u1TF = 0;
+    }
+/** @todo there seem to be issues with the resume flag when the monitor trap
+ *        flag is pending without being used. Seen early in BIOS init when
+ *        accessing the APIC page in protected mode. */
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
+    return rc;
+}
+
+
+/**
+ * Runs the guest code using VT-x.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks Called with preemption disabled.
+ */
+VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    int rc;
+    if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
+        rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
+    else
+        rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
+
     if (rc == VERR_EM_INTERPRETER)
         rc = VINF_EM_RAW_EMULATE_INSTR;
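The heart of the new hmR0VmxRunGuestCodeStep loop is the CS:RIP comparison. Below is a minimal hedged sketch of just that predicate with hypothetical names (the real loop reads CS and RIP back from the VMCS after every VM-exit, which is what the commit message means by "requires RIP and CS to be saved"):

    #include <stdbool.h>
    #include <stdint.h>

    /* Guest CS:RIP captured at loop entry (hypothetical helper type). */
    typedef struct STEPSTART
    {
        uint16_t uCsStart;   /* guest CS selector at loop entry */
        uint64_t uRipStart;  /* guest RIP at loop entry */
    } STEPSTART;

    /* An instruction counts as stepped once CS or RIP differs from the
     * captured values; comparing both also catches far transfers. */
    static bool stepCompleted(STEPSTART const *pStart, uint16_t uCs, uint64_t uRip)
    {
        return uRip != pStart->uRipStart || uCs != pStart->uCsStart;
    }

When neither changes, the loop re-arms the trap flag by flagging HM_CHANGED_GUEST_DEBUG and re-enters the guest, so even an exit that did not advance the instruction pointer cannot silently drop the step request.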
     
@@ -9665,11 +9798,4 @@
     AssertRCReturn(rc, rc);
 
-    /* If we sat the trap flag above, we have to clear it. */
-    if (pVCpu->hm.s.fClearTrapFlag)
-    {
-        pVCpu->hm.s.fClearTrapFlag = false;
-        pMixedCtx->eflags.Bits.u1TF = 0;
-    }
-
     /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
     uint64_t uDR6 = X86_DR6_INIT_VAL;