
Changeset 46580 in vbox for trunk/src/VBox/VMM


Timestamp: Jun 17, 2013 10:20:31 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 86455
Message: VMM/HMSVMR0, HMVMXR0: AMD-V bits, VT-x minor change.

Location: trunk/src/VBox/VMM/VMMR0
Files: 2 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

r46569 → r46580

In hmR0SvmHandleExit():

     Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX);

-    int      rc;
+    /*
+     * The ordering of the case labels is based on most-frequently-occurring VM-exits for most guests under
+     * normal workloads (for some definition of "normal").
+     */
     uint32_t u32ExitCode = pSvmTransient->u64ExitCode;
     switch (pSvmTransient->u64ExitCode)
     {
+        case SVM_EXIT_NPF:
+            return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);
+
         case SVM_EXIT_IOIO:
             return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient);

+        case SVM_EXIT_RDTSC:
+            return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
+
+        case SVM_EXIT_RDTSCP:
+            return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
+
         case SVM_EXIT_CPUID:
             return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient);

-        case SVM_EXIT_RDTSC:
-            return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);
-
-        case SVM_EXIT_RDTSCP:
-            return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);
-
         case SVM_EXIT_MONITOR:
             return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient);
…
         case SVM_EXIT_MWAIT:
             return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient);
+
+        case SVM_EXIT_READ_CR0:
+        case SVM_EXIT_READ_CR3:
+        case SVM_EXIT_READ_CR4:
+            return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);

         case SVM_EXIT_WRITE_CR0:
…
             return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);

-        case SVM_EXIT_READ_CR0:
-        case SVM_EXIT_READ_CR3:
-        case SVM_EXIT_READ_CR4:
-            return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);
-
-        case SVM_EXIT_MSR:
-            return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);
-
         case SVM_EXIT_INTR:
         case SVM_EXIT_FERR_FREEZE:
…
         case SVM_EXIT_INIT:
             return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
+
+        case SVM_EXIT_MSR:
+            return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);

         case SVM_EXIT_WBINVD:
…
             default:
             {
-                rc = VERR_SVM_UNEXPECTED_EXIT;
                 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit code %#x\n", u32ExitCode));
-                break;
+                return VERR_SVM_UNEXPECTED_EXIT;
             }
         }
     }
-    return rc;
+    return VERR_INTERNAL_ERROR_5;   /* Should never happen. */
 }
     
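In short, this hunk fronts the hottest exits (#NPF, I/O, RDTSC/RDTSCP), returns directly from every case instead of threading a local rc through break statements, and demotes the post-switch return to a should-never-happen guard. A minimal standalone sketch of that dispatch shape, using hypothetical exit codes and handler names rather than VirtualBox's:

/* Hypothetical sketch: frequency-ordered dispatch with direct returns. */
#include <stdint.h>
#include <stdio.h>

#define EXIT_HOT       0u    /* stand-in for a frequent exit, e.g. #NPF */
#define EXIT_COLD      1u    /* stand-in for a rare exit */
#define ERR_UNEXPECTED (-1)  /* cf. VERR_SVM_UNEXPECTED_EXIT */
#define ERR_INTERNAL   (-2)  /* cf. VERR_INTERNAL_ERROR_5 */

static int handleHot(void)  { return 0; }
static int handleCold(void) { return 0; }

static int handleExit(uint32_t uExitCode)
{
    /* Hot cases first; each case returns directly, so no local rc
     * has to stay consistent across break/fall-through paths. */
    switch (uExitCode)
    {
        case EXIT_HOT:  return handleHot();
        case EXIT_COLD: return handleCold();
        default:        return ERR_UNEXPECTED;
    }
    return ERR_INTERNAL; /* unreachable guard, mirrors the patch */
}

int main(void)
{
    printf("rc=%d\n", handleExit(EXIT_HOT));  /* dispatches the hot path */
    printf("rc=%d\n", handleExit(0xdeadu));   /* unexpected exit code */
    return 0;
}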
hmR0SvmExitNestedPF() (new):

 }

+
+/**
+ * #VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional
+ * #VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    Assert(pVM->hm.s.fNestedPaging);
+
+    /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */
+    PSVMVMCB pVmcb           = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    uint32_t u32ErrCode      = pVmcb->ctrl.u64ExitInfo1;
+    RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2;
+
+    Log4(("#NPF at CS:RIP=%04x:%#RX64 faultaddr=%RGp errcode=%#x\n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, u32ErrCode));
+
+#ifdef VBOX_HM_WITH_GUEST_PATCHING
+    /* TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions. */
+    if (   pVM->hm.s.fTRPPatchingAllowed
+        && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == 0x80
+        && (   !(u32ErrCode & X86_TRAP_PF_P)                                                             /* Not present */
+            || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD))  /* MMIO page. */
+        && !CPUMGetGuestCPL(pVCpu)
+        && !CPUMIsGuestInLongModeEx(pCtx)
+        && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
+    {
+        RTGCPHYS GCPhysApicBase = pCtx->msrApicBase;
+        GCPhysApicBase &= PAGE_BASE_GC_MASK;
+
+        if (GCPhysFaultAddr == GCPhysApicBase + 0x80)
+        {
+            /* Only attempt to patch the instruction once. */
+            PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
+            if (!pPatch)
+            {
+                return VINF_EM_HM_PATCH_TPR_INSTR;
+            }
+        }
+    }
+#endif
+
+    /*
+     * Determine the nested paging mode.
+     */
+    PGMMODE enmNestedPagingMode;
+#if HC_ARCH_BITS == 32
+    if (CPUMIsGuestInLongModeEx(pCtx))
+        enmNestedPagingMode = PGMMODE_AMD64_NX;
+    else
+#endif
+        enmNestedPagingMode = PGMGetHostMode(pVM);
+
+    /*
+     * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
+     */
+    int rc;
+    Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
+    if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
+    {
+        rc = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr, u32ErrCode);
+
+        /*
+         * If we succeed, resume guest execution.
+         * If we fail in interpreting the instruction because we couldn't get the guest physical address
+         * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
+         * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
+         * weird case. See @bugref{6043}.
+         */
+        if (   rc == VINF_SUCCESS
+            || rc == VERR_PAGE_TABLE_NOT_PRESENT
+            || rc == VERR_PAGE_NOT_PRESENT)
+        {
+            /* Successfully handled MMIO operation. */
+            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE;
+            rc = VINF_SUCCESS;
+        }
+        return rc;
+    }
+
+    TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
+    rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
+    TRPMResetTrap(pVCpu);
+
+    Log2(("#NPF: PGMR0Trap0eHandlerNestedPaging returned %Rrc\n", rc));
+
+    /*
+     * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}.
+     */
+    if (   rc == VINF_SUCCESS
+        || rc == VERR_PAGE_TABLE_NOT_PRESENT
+        || rc == VERR_PAGE_NOT_PRESENT)
+    {
+        /* We've successfully synced our shadow page tables. */
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
+        rc = VINF_SUCCESS;
+    }
+
+    return rc;
+}
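The pivotal test in the new handler is the error-code check that sends present + reserved-bit faults to the MMIO fast path (PGMR0Trap0eHandlerNPMisconfig) and everything else to the regular nested-paging handler. A self-contained sketch of that predicate using the architectural #PF error-code bits; the helper name is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRAP_PF_P    UINT32_C(0x01)  /* bit 0: page was present */
#define TRAP_PF_RSVD UINT32_C(0x08)  /* bit 3: reserved bit set in paging structures */

/* True when the #NPF error code marks an MMIO page: present AND a
 * reserved-bit violation, matching the check in hmR0SvmExitNestedPF. */
static bool isMmioNestedFault(uint32_t uErrCode)
{
    return (uErrCode & (TRAP_PF_RSVD | TRAP_PF_P)) == (TRAP_PF_RSVD | TRAP_PF_P);
}

int main(void)
{
    printf("%d\n", isMmioNestedFault(TRAP_PF_P | TRAP_PF_RSVD)); /* 1: MMIO fast path */
    printf("%d\n", isMmioNestedFault(0));                        /* 0: ordinary #NPF */
    return 0;
}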
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

r46566 → r46580

         pVCpu->hm.s.fContextUseFlags |=   HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
                                         | HM_CHANGED_VMX_GUEST_APIC_STATE;
-        return VINF_SUCCESS;
+        rc = VINF_SUCCESS;
     }
     return rc;
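The VT-x change is control flow only: the success branch now records VINF_SUCCESS and falls through to the function's existing return rc; rather than returning early. A minimal sketch of the resulting single-exit shape; the function name and status values are hypothetical:

#include <stdio.h>

/* Hypothetical stand-in for the changed VMX handler tail. */
static int updateGuestState(int fHandled, int rc)
{
    if (fHandled)
    {
        /* ... flag guest RIP/RSP/RFLAGS and APIC state for re-sync ... */
        rc = 0; /* was "return VINF_SUCCESS;"; now falls through */
    }
    return rc;  /* single common exit point */
}

int main(void)
{
    printf("%d\n", updateGuestState(1, -1)); /* handled: returns 0 */
    printf("%d\n", updateGuestState(0, -1)); /* not handled: propagates rc */
    return 0;
}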