
Changeset 74450 in vbox


Timestamp: Sep 25, 2018 5:02:48 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 125299
Message:

VMM/IEM: Nested VMX: bugref:9180 Re-arrange static functions so VM-exit can be called for VM-entry failures.
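
In outline: the VM-entry code near the bottom of this diff can now fail over to a real VM-exit, with the "entry failed" bit set in the exit reason, instead of the earlier TODO placeholders; that is why the static VM-exit helpers had to move above it in the file. A minimal sketch of the resulting pattern, using only types and names visible in the diff below; iemVmxVmentryCheckGuestState and iemVmxVmentryLoadGuestAutoMsrs are hypothetical stand-ins for the actual VM-entry steps:

    /* Sketch only, not the actual VBox code. On a failed guest-state check or
     * MSR load, VM-entry now performs a VM-exit whose exit reason carries the
     * VM-entry-failure bit; iemVmxVmexit then skips saving guest state. */
    IEM_STATIC VBOXSTRICTRC iemVmxVmentrySketch(PVMCPU pVCpu)
    {
        int rc = iemVmxVmentryCheckGuestState(pVCpu);       /* hypothetical helper */
        if (RT_FAILURE(rc))
            return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);

        rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu);         /* hypothetical helper */
        if (RT_FAILURE(rc))
            return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);

        return VINF_SUCCESS;
    }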

File: 1 edited

Legend:

Unmodified: lines carrying both an r74437 and an r74450 line number
Added: lines carrying only an r74450 line number
Removed: lines carrying only an r74437 line number
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r74437 r74450  
    2415 2415
    2416 2416 /**
     2417 * Saves the guest control registers, debug registers and some MSRs as part of
     2418 * VM-exit.
     2419 *
     2420 * @param   pVCpu       The cross context virtual CPU structure.
     2421 */
     2422IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
     2423{
     2424    /*
     2425     * Saves the guest control registers, debug registers and some MSRs.
     2426     * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
     2427     */
     2428    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     2429
     2430    /* Save control registers. */
     2431    pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
     2432    pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
     2433    pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
     2434
     2435    /* Save SYSENTER CS, ESP, EIP. */
     2436    pVmcs->u32GuestSysenterCS    = pVCpu->cpum.GstCtx.SysEnter.cs;
     2437    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
     2438    {
     2439        pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
     2440        pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
     2441    }
     2442    else
     2443    {
     2444        pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
     2445        pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
     2446    }
     2447
     2448    /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
     2449    if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
     2450    {
     2451        pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
     2452        /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
     2453    }
     2454
     2455    /* Save PAT MSR. */
     2456    if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
     2457        pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
     2458
     2459    /* Save EFER MSR. */
     2460    if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
     2461        pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
     2462
     2463    /* We don't support clearing IA32_BNDCFGS MSR yet. */
     2464    Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
     2465
     2466    /* Nothing to do for SMBASE register - We don't support SMM yet. */
     2467}
     2468
     2469
     2470/**
     2471 * Saves the guest force-flags in preparation of entering the nested-guest.
     2472 *
     2473 * @param   pVCpu       The cross context virtual CPU structure.
     2474 */
     2475IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
     2476{
     2477    /* Assert that we are not called multiple times during VM-entry. */
     2478    Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
     2479
     2480    /*
     2481     * Preserve the required force-flags.
     2482     *
     2483     * We only preserve the force-flags that would affect the execution of the
     2484     * nested-guest (or the guest).
     2485     *
     2486     *   - VMCPU_FF_INHIBIT_INTERRUPTS need not be preserved as VM-exit explicitly
     2487     *     clears interrupt-inhibition and on VM-entry the guest-interruptibility
     2488     *     state provides the inhibition if any.
     2489     *
     2490     *   - VMCPU_FF_BLOCK_NMIS need not be preserved as VM-entry does not discard
     2491     *     any NMI blocking. VM-exits caused directly by NMIs (intercepted by the
     2492     *     exception bitmap) do block subsequent NMIs.
     2493     *
     2494     *   - MTF need not be preserved as it's used only in VMX non-root mode and
     2495     *     is supplied on VM-entry through the VM-execution controls.
     2496     *
     2497     * The remaining FFs (e.g. timers) can stay in place so that we will be able to
     2498     * generate interrupts that should cause #VMEXITs for the nested-guest.
     2499     */
     2500    uint32_t const fDiscardMask = VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_MTF | VMCPU_FF_BLOCK_NMIS;
     2501    pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & fDiscardMask;
     2502    VMCPU_FF_CLEAR(pVCpu, fDiscardMask);
     2503}
     2504
     2505
     2506/**
     2507 * Restores the guest force-flags in preparation of exiting the nested-guest.
     2508 *
     2509 * @param   pVCpu       The cross context virtual CPU structure.
     2510 */
     2511IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
     2512{
     2513    if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
     2514    {
     2515        VMCPU_FF_SET(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
     2516        pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
     2517    }
     2518}
     2519
     2520
     2521/**
     2522 * Performs a VMX transition, updating PGM, IEM and CPUM.
     2523 *
     2524 * @param   pVCpu       The cross context virtual CPU structure.
     2525 */
     2526IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
     2527{
     2528    /*
     2529     * Inform PGM about paging mode changes.
     2530     * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
     2531     * see comment in iemMemPageTranslateAndCheckAccess().
     2532     */
     2533    int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
     2534# ifdef IN_RING3
     2535    Assert(rc != VINF_PGM_CHANGE_MODE);
     2536# endif
     2537    AssertRCReturn(rc, rc);
     2538
     2539    /* Inform CPUM (recompiler), can later be removed. */
     2540    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
     2541
     2542    /*
     2543     * Flush the TLB with new CR3. This is required in case the PGM mode change
     2544     * above doesn't actually change anything.
     2545     */
     2546    if (rc == VINF_SUCCESS)
     2547    {
     2548        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
     2549        AssertRCReturn(rc, rc);
     2550    }
     2551
     2552    /* Re-initialize IEM cache/state after the drastic mode switch. */
     2553    iemReInitExec(pVCpu);
     2554    return rc;
     2555}
     2556
     2557
     2558/**
     2559 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
     2560 *
     2561 * @param   pVCpu       The cross context virtual CPU structure.
     2562 */
     2563IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
     2564{
     2565    /*
     2566     * Save guest segment registers, GDTR, IDTR, LDTR, TR.
     2567     * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
     2568     */
     2569    /* CS, SS, ES, DS, FS, GS. */
     2570    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     2571    for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
     2572    {
     2573        PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
     2574        if (!pSelReg->Attr.n.u1Unusable)
     2575            iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
     2576        else
     2577        {
     2578            /*
     2579             * For unusable segments the attributes are undefined except for CS and SS.
     2580             * For the rest we don't bother preserving anything but the unusable bit.
     2581             */
     2582            switch (iSegReg)
     2583            {
     2584                case X86_SREG_CS:
     2585                    pVmcs->GuestCs          = pSelReg->Sel;
     2586                    pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
     2587                    pVmcs->u32GuestCsLimit  = pSelReg->u32Limit;
     2588                    pVmcs->u32GuestCsAttr   = pSelReg->Attr.u & (  X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
     2589                                                                 | X86DESCATTR_UNUSABLE);
     2590                    break;
     2591
     2592                case X86_SREG_SS:
     2593                    pVmcs->GuestSs        = pSelReg->Sel;
     2594                    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
     2595                        pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
     2596                    pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
     2597                    break;
     2598
     2599                case X86_SREG_DS:
     2600                    pVmcs->GuestDs        = pSelReg->Sel;
     2601                    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
     2602                        pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
     2603                    pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
     2604                    break;
     2605
     2606                case X86_SREG_ES:
     2607                    pVmcs->GuestEs        = pSelReg->Sel;
     2608                    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
     2609                        pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
     2610                    pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
     2611                    break;
     2612
     2613                case X86_SREG_FS:
     2614                    pVmcs->GuestFs          = pSelReg->Sel;
     2615                    pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
     2616                    pVmcs->u32GuestFsAttr   = X86DESCATTR_UNUSABLE;
     2617                    break;
     2618
     2619                case X86_SREG_GS:
     2620                    pVmcs->GuestGs          = pSelReg->Sel;
     2621                    pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
     2622                    pVmcs->u32GuestGsAttr   = X86DESCATTR_UNUSABLE;
     2623                    break;
     2624            }
     2625        }
     2626    }
     2627
     2628    /* Segment attribute bits 31:17 and 11:8 MBZ. */
     2629    uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT  | X86DESCATTR_DPL | X86DESCATTR_P
     2630                                  | X86DESCATTR_AVL  | X86DESCATTR_L   | X86DESCATTR_D   | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
     2631    /* LDTR. */
     2632    {
     2633        PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
     2634        pVmcs->GuestLdtr          = pSelReg->Sel;
     2635        pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
     2636        Assert(X86_IS_CANONICAL(pSelReg->u64Base));
     2637        pVmcs->u32GuestLdtrLimit  = pSelReg->u32Limit;
     2638        pVmcs->u32GuestLdtrAttr   = pSelReg->Attr.u & fValidAttrMask;
     2639    }
     2640
     2641    /* TR. */
     2642    {
     2643        PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
     2644        pVmcs->GuestTr          = pSelReg->Sel;
     2645        pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
     2646        pVmcs->u32GuestTrLimit  = pSelReg->u32Limit;
     2647        pVmcs->u32GuestTrAttr   = pSelReg->Attr.u & fValidAttrMask;
     2648    }
     2649
     2650    /* GDTR. */
     2651    pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
     2652    pVmcs->u32GuestGdtrLimit  = pVCpu->cpum.GstCtx.gdtr.cbGdt;
     2653
     2654    /* IDTR. */
     2655    pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
     2656    pVmcs->u32GuestIdtrLimit  = pVCpu->cpum.GstCtx.idtr.cbIdt;
     2657}
     2658
     2659
     2660/**
     2661 * Saves guest non-register state as part of VM-exit.
     2662 *
     2663 * @param   pVCpu           The cross context virtual CPU structure.
     2664 * @param   uExitReason     The VM-exit reason.
     2665 */
     2666IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
     2667{
     2668    /*
     2669     * Save guest non-register state.
     2670     * See Intel spec. 27.3.4 "Saving Non-Register State".
     2671     */
     2672    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     2673
     2674    /*
     2675     * Activity-state: VM-exits occur before changing the activity state
     2676     * of the processor and hence we shouldn't need to change it.
     2677     */
     2678
     2679    /* Interruptibility-state. */
     2680    pVmcs->u32GuestIntrState = 0;
     2681    if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
     2682    { /** @todo NSTVMX: Virtual-NMI blocking. */ }
     2683    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
     2684        pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
     2685
     2686    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     2687        && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
     2688    {
     2689        /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
     2690         *        currently. */
     2691        pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
     2692        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     2693    }
     2694    /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
     2695
     2696    /* Pending debug exceptions. */
     2697    if (    uExitReason != VMX_EXIT_INIT_SIGNAL
     2698        &&  uExitReason != VMX_EXIT_SMI
     2699        &&  uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
     2700        && !HMVmxIsTrapLikeVmexit(uExitReason))
     2701    {
     2702        /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
     2703         *        block-by-MovSS is in effect. */
     2704        pVmcs->u64GuestPendingDbgXcpt.u = 0;
     2705    }
     2706
     2707    /** @todo NSTVMX: Save VMX preemption timer value. */
     2708
     2709    /* PDPTEs. */
     2710    /* We don't support EPT yet. */
     2711    Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
     2712    pVmcs->u64GuestPdpte0.u = 0;
     2713    pVmcs->u64GuestPdpte1.u = 0;
     2714    pVmcs->u64GuestPdpte2.u = 0;
     2715    pVmcs->u64GuestPdpte3.u = 0;
     2716}
     2717
     2718
     2719/**
     2720 * Saves the guest-state as part of VM-exit.
     2721 *
     2722 *
     2723 * @param   pVCpu           The cross context virtual CPU structure.
     2724 * @param   uExitReason     The VM-exit reason.
     2725 */
     2726IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
     2727{
     2728    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     2729    Assert(pVmcs);
     2730
     2731    /*
     2732     * Save guest control, debug, segment, descriptor-table registers and some MSRs.
     2733     */
     2734    iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
     2735    iemVmxVmexitSaveGuestSegRegs(pVCpu);
     2736
     2737    /*
     2738     * Save guest RIP, RSP and RFLAGS.
     2739     */
     2740    /* We don't support enclave mode yet. */
     2741    pVmcs->u64GuestRip.u    = pVCpu->cpum.GstCtx.rip;
     2742    pVmcs->u64GuestRsp.u    = pVCpu->cpum.GstCtx.rsp;
     2743    pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u;  /** @todo NSTVMX: Check RFLAGS.RF handling. */
     2744
     2745    /* Save guest non-register state. */
     2746    iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
     2747}
     2748
     2749
     2750/**
     2751 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
     2752 *
     2753 * @returns VBox status code.
     2754 * @param   pVCpu           The cross context virtual CPU structure.
     2755 * @param   uExitReason     The VM-exit reason (for diagnostic purposes).
     2756 */
     2757IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
     2758{
     2759    /*
     2760     * Save guest MSRs.
     2761     * See Intel spec. 27.4 "Saving MSRs".
     2762     */
     2763    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     2764    const char *const pszFailure = "VMX-abort";
     2765
     2766    /*
     2767     * The VM-exit MSR-store area address need not be a valid guest-physical address if the
     2768     * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
     2769     * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
     2770     */
     2771    uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
     2772    if (!cMsrs)
     2773        return VINF_SUCCESS;
     2774
     2775    /*
     2776     * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
     2777     * is exceeded, including possibly raising #MC exceptions during VMX transition. Our
     2778     * implementation causes a VMX-abort followed by a triple-fault.
     2779     */
     2780    bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
     2781    if (fIsMsrCountValid)
     2782    { /* likely */ }
     2783    else
     2784        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
     2785
     2786    PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
     2787    Assert(pMsr);
     2788    for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
     2789    {
     2790        if (   !pMsr->u32Reserved
     2791            &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
     2792            &&  pMsr->u32Msr != MSR_IA32_SMBASE)
     2793        {
     2794            VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
     2795            if (rcStrict == VINF_SUCCESS)
     2796                continue;
     2797
     2798            /*
     2799             * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
     2800             * If a guest hypervisor stores MSRs that require ring-3 handling, we cause a VMX-abort,
     2801             * recording the MSR index in the auxiliary info. field, indicated further by our
     2802             * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
     2803             * if possible, or come up with a better, generic solution.
     2804             */
     2805            pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
     2806            VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
     2807                                   ? kVmxVDiag_Vmexit_MsrStoreRing3
     2808                                   : kVmxVDiag_Vmexit_MsrStore;
     2809            IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
     2810        }
     2811        else
     2812        {
     2813            pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
     2814            IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
     2815        }
     2816    }
     2817
     2818    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
     2819    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
     2820                                      pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
     2821    if (RT_SUCCESS(rc))
     2822    { /* likely */ }
     2823    else
     2824    {
     2825        AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
     2826        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
     2827    }
     2828
     2829    NOREF(uExitReason);
     2830    NOREF(pszFailure);
     2831    return VINF_SUCCESS;
     2832}
     2833
     2834
     2835/**
     2836 * Performs a VMX abort (due to a fatal error during VM-exit).
     2837 *
     2838 * @returns VBox status code.
     2839 * @param   pVCpu       The cross context virtual CPU structure.
     2840 * @param   enmAbort    The VMX abort reason.
     2841 */
     2842IEM_STATIC int iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
     2843{
     2844    /*
     2845     * Perform the VMX abort.
     2846     * See Intel spec. 27.7 "VMX Aborts".
     2847     */
     2848    LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
     2849
     2850    /* We don't support SMX yet. */
     2851    pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
     2852    if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
     2853    {
     2854        RTGCPHYS const GCPhysVmcs  = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
     2855        uint32_t const offVmxAbort = RT_OFFSETOF(VMXVVMCS, u32VmxAbortId);
     2856        PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
     2857    }
     2858
     2859    return VINF_EM_TRIPLE_FAULT;
     2860}
     2861
     2862
     2863/**
     2864 * Loads host control registers, debug registers and MSRs as part of VM-exit.
     2865 *
     2866 * @param   pVCpu   The cross context virtual CPU structure.
     2867 */
     2868IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
     2869{
     2870    /*
     2871     * Load host control registers, debug registers and MSRs.
     2872     * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
     2873     */
     2874    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     2875    bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     2876
     2877    /* CR0. */
     2878    {
     2879        /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
     2880        uint64_t const uCr0Fixed0  = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
     2881        uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ffaffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
     2882        uint64_t const uHostCr0    = pVmcs->u64HostCr0.u;
     2883        uint64_t const uGuestCr0   = pVCpu->cpum.GstCtx.cr0;
     2884        uint64_t const uValidCr0   = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
     2885        CPUMSetGuestCR0(pVCpu, uValidCr0);
     2886    }
     2887
     2888    /* CR4. */
     2889    {
     2890        /* CR4 MB1 bits are not modified. */
     2891        uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
     2892        uint64_t const uHostCr4    = pVmcs->u64HostCr4.u;
     2893        uint64_t const uGuestCr4   = pVCpu->cpum.GstCtx.cr4;
     2894        uint64_t       uValidCr4   = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
     2895        if (fHostInLongMode)
     2896            uValidCr4 |= X86_CR4_PAE;
     2897        else
     2898            uValidCr4 &= ~X86_CR4_PCIDE;
     2899        CPUMSetGuestCR4(pVCpu, uValidCr4);
     2900    }
     2901
     2902    /* CR3 (host value validated while checking host-state during VM-entry). */
     2903    pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
     2904
     2905    /* DR7. */
     2906    pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
     2907
     2908    /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
     2909
     2910    /* Save SYSENTER CS, ESP, EIP (host value validated while checking host-state during VM-entry). */
     2911    pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
     2912    pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
     2913    pVCpu->cpum.GstCtx.SysEnter.cs  = pVmcs->u32HostSysenterCs;
     2914
     2915    /* FS, GS bases are loaded later while we load host segment registers. */
     2916
     2917    /* EFER MSR (host value validated while checking host-state during VM-entry). */
     2918    if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
     2919        pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
     2920    else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
     2921    {
     2922        if (fHostInLongMode)
     2923            pVCpu->cpum.GstCtx.msrEFER |=  (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
     2924        else
     2925            pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
     2926    }
     2927
     2928    /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
     2929
     2930    /* PAT MSR (host value is validated while checking host-state during VM-entry). */
     2931    if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
     2932        pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
     2933
     2934    /* We don't support IA32_BNDCFGS MSR yet. */
     2935}
     2936
     2937
     2938/**
     2939 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
     2940 *
     2941 * @param   pVCpu   The cross context virtual CPU structure.
     2942 */
     2943IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
     2944{
     2945    /*
     2946     * Load host segment registers, GDTR, IDTR, LDTR and TR.
     2947     * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
     2948     *
     2949     * Warning! Be careful to not touch fields that are reserved by VT-x,
     2950     * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
     2951     */
     2952    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     2953    bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     2954
     2955    /* CS, SS, ES, DS, FS, GS. */
     2956    for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
     2957    {
     2958        RTSEL const HostSel  = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
     2959        bool const  fUnusable = RT_BOOL(HostSel == 0);
     2960
     2961        /* Selector. */
     2962        pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel      = HostSel;
     2963        pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
     2964        pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags   = CPUMSELREG_FLAGS_VALID;
     2965
     2966        /* Limit. */
     2967        pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
     2968
     2969        /* Base and Attributes. */
     2970        switch (iSegReg)
     2971        {
     2972            case X86_SREG_CS:
     2973            {
     2974                pVCpu->cpum.GstCtx.cs.u64Base = 0;
     2975                pVCpu->cpum.GstCtx.cs.Attr.n.u4Type        = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
     2976                pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType    = 1;
     2977                pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl         = 0;
     2978                pVCpu->cpum.GstCtx.cs.Attr.n.u1Present     = 1;
     2979                pVCpu->cpum.GstCtx.cs.Attr.n.u1Long        = fHostInLongMode;
     2980                pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig      = !fHostInLongMode;
     2981                pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
     2982                Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
     2983                Assert(!fUnusable);
     2984                break;
     2985            }
     2986
     2987            case X86_SREG_SS:
     2988            case X86_SREG_ES:
     2989            case X86_SREG_DS:
     2990            {
     2991                pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
     2992                pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type        = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
     2993                pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType    = 1;
     2994                pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl         = 0;
     2995                pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present     = 1;
     2996                pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig      = 1;
     2997                pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
     2998                pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable    = fUnusable;
     2999                break;
     3000            }
     3001
     3002            case X86_SREG_FS:
     3003            {
     3004                Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
     3005                pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
     3006                pVCpu->cpum.GstCtx.fs.Attr.n.u4Type        = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
     3007                pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType    = 1;
     3008                pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl         = 0;
     3009                pVCpu->cpum.GstCtx.fs.Attr.n.u1Present     = 1;
     3010                pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig      = 1;
     3011                pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
     3012                pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable    = fUnusable;
     3013                break;
     3014            }
     3015
     3016            case X86_SREG_GS:
     3017            {
     3018                Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
     3019                pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
     3020                pVCpu->cpum.GstCtx.gs.Attr.n.u4Type        = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
     3021                pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType    = 1;
     3022                pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl         = 0;
     3023                pVCpu->cpum.GstCtx.gs.Attr.n.u1Present     = 1;
     3024                pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig      = 1;
     3025                pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
     3026                pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable    = fUnusable;
     3027                break;
     3028            }
     3029        }
     3030    }
     3031
     3032    /* TR. */
     3033    Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
     3034    Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
     3035    pVCpu->cpum.GstCtx.tr.Sel                  = pVmcs->HostTr;
     3036    pVCpu->cpum.GstCtx.tr.ValidSel             = pVmcs->HostTr;
     3037    pVCpu->cpum.GstCtx.tr.fFlags               = CPUMSELREG_FLAGS_VALID;
     3038    pVCpu->cpum.GstCtx.tr.u32Limit             = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
     3039    pVCpu->cpum.GstCtx.tr.u64Base              = pVmcs->u64HostTrBase.u;
     3040    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type        = X86_SEL_TYPE_SYS_386_TSS_BUSY;
     3041    pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType    = 0;
     3042    pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl         = 0;
     3043    pVCpu->cpum.GstCtx.tr.Attr.n.u1Present     = 1;
     3044    pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig      = 0;
     3045    pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
     3046
     3047    /* LDTR. */
     3048    pVCpu->cpum.GstCtx.ldtr.Sel               = 0;
     3049    pVCpu->cpum.GstCtx.ldtr.ValidSel          = 0;
     3050    pVCpu->cpum.GstCtx.ldtr.fFlags            = CPUMSELREG_FLAGS_VALID;
     3051    pVCpu->cpum.GstCtx.ldtr.u32Limit          = 0;
     3052    pVCpu->cpum.GstCtx.ldtr.u64Base           = 0;
     3053    pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
     3054
     3055    /* GDTR. */
     3056    Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
     3057    pVCpu->cpum.GstCtx.gdtr.pGdt  = pVmcs->u64HostGdtrBase.u;
     3058    pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
     3059
     3060    /* IDTR. */
     3061    Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
     3062    pVCpu->cpum.GstCtx.idtr.pIdt  = pVmcs->u64HostIdtrBase.u;
     3063    pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
     3064}
     3065
     3066
     3067/**
     3068 * Checks host PDPTEs as part of VM-exit.
     3069 *
     3070 * @param   pVCpu           The cross context virtual CPU structure.
     3071 * @param   uExitReason     The VM-exit reason (for logging purposes).
     3072 */
     3073IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
     3074{
     3075    /*
     3076     * Check host PDPTEs.
     3077     * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
     3078     */
     3079    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     3080    const char *const pszFailure = "VMX-abort";
     3081    bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     3082
     3083    if (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
     3084        && !fHostInLongMode)
     3085    {
     3086        uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
     3087        X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
     3088        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
     3089        if (RT_SUCCESS(rc))
     3090        {
     3091            for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
     3092            {
     3093                if (   !(aPdptes[iPdpte].u & X86_PDPE_P)
     3094                    || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
     3095                { /* likely */ }
     3096                else
     3097                {
     3098                    VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
     3099                    IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
     3100                }
     3101            }
     3102        }
     3103        else
     3104            IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
     3105    }
     3106
     3107    NOREF(pszFailure);
     3108    NOREF(uExitReason);
     3109    return VINF_SUCCESS;
     3110}
     3111
     3112
     3113/**
     3114 * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
     3115 *
     3116 * @returns VBox status code.
     3117 * @param   pVCpu       The cross context virtual CPU structure.
     3118 * @param   uExitReason     The VM-exit reason (for logging purposes).
     3119 */
     3120IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
     3121{
     3122    /*
     3123     * Load host MSRs.
     3124     * See Intel spec. 27.6 "Loading MSRs".
     3125     */
     3126    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     3127    const char *const pszFailure = "VMX-abort";
     3128
     3129    /*
     3130     * The VM-exit MSR-load area address need not be a valid guest-physical address if the
     3131     * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
     3132     * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
     3133     */
     3134    uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
     3135    if (!cMsrs)
     3136        return VINF_SUCCESS;
     3137
     3138    /*
     3139     * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
     3140     * is exceeded, including possibly raising #MC exceptions during VMX transition. Our
     3141     * implementation causes a VMX-abort followed by a triple-fault.
     3142     */
     3143    bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
     3144    if (fIsMsrCountValid)
     3145    { /* likely */ }
     3146    else
     3147        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
     3148
     3149    RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
     3150    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
     3151                                     GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
     3152    if (RT_SUCCESS(rc))
     3153    {
     3154        PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
     3155        Assert(pMsr);
     3156        for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
     3157        {
     3158            if (   !pMsr->u32Reserved
     3159                &&  pMsr->u32Msr != MSR_K8_FS_BASE
     3160                &&  pMsr->u32Msr != MSR_K8_GS_BASE
     3161                &&  pMsr->u32Msr != MSR_K6_EFER
     3162                &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
     3163                &&  pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
     3164            {
     3165                VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
     3166                if (rcStrict == VINF_SUCCESS)
     3167                    continue;
     3168
     3169                /*
     3170                 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
     3171                 * If a guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort,
     3172                 * recording the MSR index in the auxiliary info. field, indicated further by our
     3173                 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
     3174                 * if possible, or come up with a better, generic solution.
     3175                 */
     3176                pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
     3177                VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
     3178                                       ? kVmxVDiag_Vmexit_MsrLoadRing3
     3179                                       : kVmxVDiag_Vmexit_MsrLoad;
     3180                IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
     3181            }
     3182            else
     3183                IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
     3184        }
     3185    }
     3186    else
     3187    {
     3188        AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
     3189        IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
     3190    }
     3191
     3192    NOREF(uExitReason);
     3193    NOREF(pszFailure);
     3194    return VINF_SUCCESS;
     3195}
     3196
     3197
     3198/**
     3199 * Loads the host state as part of VM-exit.
     3200 *
     3201 * @returns VBox status code.
     3202 * @param   pVCpu           The cross context virtual CPU structure.
     3203 * @param   uExitReason     The VM-exit reason (for logging purposes).
     3204 */
     3205IEM_STATIC int iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
     3206{
     3207    /*
     3208     * Load host state.
     3209     * See Intel spec. 27.5 "Loading Host State".
     3210     */
     3211    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     3212    bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
     3213
     3214    /* We cannot return from a long-mode guest to a host that is not in long mode. */
     3215    if (    CPUMIsGuestInLongMode(pVCpu)
     3216        && !fHostInLongMode)
     3217    {
     3218        Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
     3219        return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
     3220    }
     3221
     3222    /*
     3223     * Load host control, debug, segment, descriptor-table registers and some MSRs.
     3224     */
     3225    iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
     3226    iemVmxVmexitLoadHostSegRegs(pVCpu);
     3227
     3228    /*
     3229     * Load host RIP, RSP and RFLAGS.
     3230     * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
     3231     */
     3232    pVCpu->cpum.GstCtx.rip      = pVmcs->u64HostRip.u;
     3233    pVCpu->cpum.GstCtx.rsp      = pVmcs->u64HostRsp.u;
     3234    pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
     3235
     3236    /* Update non-register state. */
     3237    iemVmxVmexitRestoreForceFlags(pVCpu);
     3238
     3239    /* Clear address range monitoring. */
     3240    EMMonitorWaitClear(pVCpu);
     3241
     3242    /* Perform the VMX transition (PGM updates). */
     3243    VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
     3244    if (rcStrict == VINF_SUCCESS)
     3245    {
     3246        /* Check host PDPTEs. */
     3247        /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
     3248        int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
     3249        if (RT_FAILURE(rc))
     3250        {
     3251            Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
     3252            return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
     3253        }
     3254    }
     3255    else if (RT_SUCCESS(rcStrict))
     3256    {
     3257        Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
     3258              uExitReason));
     3259        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
     3260    }
     3261    else
     3262    {
     3263        Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
     3264        return rcStrict;
     3265    }
     3266
     3267    Assert(rcStrict == VINF_SUCCESS);
     3268
     3269    /* Load MSRs from the VM-exit auto-load MSR area. */
     3270    int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
     3271    if (RT_FAILURE(rc))
     3272    {
     3273        Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
     3274        return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
     3275    }
     3276
     3277    return VINF_SUCCESS;
     3278}
     3279
     3280
     3281/**
     3282 * VMX VM-exit handler.
     3283 *
     3284 * @returns Strict VBox status code.
     3285 * @param   pVCpu           The cross context virtual CPU structure.
     3286 * @param   uExitReason     The VM-exit reason.
     3287 */
     3288IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
     3289{
     3290    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     3291    Assert(pVmcs);
     3292
     3293    pVmcs->u32RoExitReason   = uExitReason;
     3294
     3295    /** @todo NSTVMX: Update VM-exit instruction length for instruction VM-exits. */
     3296    /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
     3297    /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
     3298     *        during injection. */
     3299
     3300    /*
     3301     * Save the guest state back into the VMCS.
     3302     * We only need to save the state when the VM-entry was successful.
     3303     */
     3304    bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
     3305    if (!fVmentryFailed)
     3306    {
     3307        iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
     3308        int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
     3309        if (RT_SUCCESS(rc))
     3310        { /* likely */ }
     3311        else
     3312            return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
     3313    }
     3314
     3315    int rc = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
     3316    if (RT_FAILURE(rc))
     3317        return rc;
     3318
     3319    /** @todo NSTVMX: rest of VM-exit. */
     3320
     3321    /* We're no longer in nested-guest execution mode. */
     3322    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
     3323
     3324    return VINF_SUCCESS;
     3325}
     3326
     3327
     3328/**
    2417 3329 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
    2418 3330 *

    4277 5189
    4278 5190 /**
    4279  * Saves the guest force-flags in prepartion of entering the nested-guest.
    4280  *
    4281  * @param   pVCpu       The cross context virtual CPU structure.
    4282  */
    4283 IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
    4284 {
    4285     /* Assert that we are not called multiple times during VM-entry. */
    4286     Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
    4287 
    4288     /*
    4289      * Preserve the required force-flags.
    4290      *
    4291      * We only preserve the force-flags that would affect the execution of the
    4292      * nested-guest (or the guest).
    4293      *
    4294      *   - VMCPU_FF_INHIBIT_INTERRUPTS need not be preserved as VM-exit explicitly
    4295      *     clears interrupt-inhibition and on VM-entry the guest-interruptibility
    4296      *     state provides the inhibition if any.
    4297      *
    4298      *   - VMCPU_FF_BLOCK_NMIS needs not be preserved as VM-entry does not discard
    4299      *     any NMI blocking. VM-exits caused directly by NMIs (intercepted by the
    4300      *     exception bitmap) do block subsequent NMIs.
    4301      *
    4302      *   - MTF need not be preserved as it's used only in VMX non-root mode and
    4303      *     is supplied on VM-entry through the VM-execution controls.
    4304      *
    4305      * The remaining FFs (e.g. timers) can stay in place so that we will be able to
    4306      * generate interrupts that should cause #VMEXITs for the nested-guest.
    4307      */
    4308     uint32_t const fDiscardMask = VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_MTF | VMCPU_FF_BLOCK_NMIS;
    4309     pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & fDiscardMask;
    4310     VMCPU_FF_CLEAR(pVCpu, fDiscardMask);
    4311 }
    4312 
    4313 
    4314 /**
    4315  * Restores the guest force-flags in prepartion of exiting the nested-guest.
    4316  *
    4317  * @param   pVCpu       The cross context virtual CPU structure.
    4318  */
    4319 IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
    4320 {
    4321     if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
    4322     {
    4323         VMCPU_FF_SET(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
    4324         pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
    4325     }
    4326 }
    4327 
    4328 
    4329 /**
    4330 5191 * Loads the guest-state as part of VM-entry.
    4331 5192 *
     
    4401 5262     NOREF(pszInstr);
    4402 5263     return VINF_SUCCESS;
    4403 }
    4404 
    4405 
    4406 /**
    4407  * Perform a VMX transition updated PGM, IEM and CPUM.
    4408  *
    4409  * @param   pVCpu       The cross context virtual CPU structure.
    4410  */
    4411 IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
    4412 {
    4413     /*
    4414      * Inform PGM about paging mode changes.
    4415      * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
    4416      * see comment in iemMemPageTranslateAndCheckAccess().
    4417      */
    4418     int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
    4419 # ifdef IN_RING3
    4420     Assert(rc != VINF_PGM_CHANGE_MODE);
    4421 # endif
    4422     AssertRCReturn(rc, rc);
    4423 
    4424     /* Inform CPUM (recompiler), can later be removed. */
    4425     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    4426 
    4427     /*
    4428      * Flush the TLB with new CR3. This is required in case the PGM mode change
    4429      * above doesn't actually change anything.
    4430      */
    4431     if (rc == VINF_SUCCESS)
    4432     {
    4433         rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
    4434         AssertRCReturn(rc, rc);
    4435     }
    4436 
    4437     /* Re-initialize IEM cache/state after the drastic mode switch. */
    4438     iemReInitExec(pVCpu);
    4439     return rc;
    4440 5264 }
    4441 5265
     
    4604 5428                                 return VINF_SUCCESS;
    4605 5429                             }
    4606                             /** @todo NSTVMX: VMExit with VMX_EXIT_ERR_MSR_LOAD and set
    4607                              *        VMX_BF_EXIT_REASON_ENTRY_FAILED. */
     5430
     5431                            return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
    4608 5432                         }
    4609 5433                     }
    4610                     /** @todo NSTVMX: VMExit with VMX_EXIT_ERR_INVALID_GUEST_STATE and set
    4611                      *        VMX_BF_EXIT_REASON_ENTRY_FAILED. */
    4612                     return VINF_SUCCESS;
     5434
     5435                    return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
    4613 5436                 }
    4614 5437
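
The two iemVmxVmexit calls just above encode the failure into the exit reason; iemVmxVmexit (new lines 3288 onward) tests it with VMX_EXIT_REASON_HAS_ENTRY_FAILED and skips saving guest state for failed entries. A plausible sketch of those helpers, assuming IPRT's RT_BIT_32/RT_BOOL macros; the authoritative definitions live in the VMX headers, not in this file:

    /* Bit 31 of the basic VM-exit reason indicates a VM-exit due to a failed
     * VM-entry; see Intel spec. 24.9.1 "Basic VM-Exit Information". Sketch only. */
    #define VMX_EXIT_REASON_ENTRY_FAILED            RT_BIT_32(31)
    #define VMX_EXIT_REASON_HAS_ENTRY_FAILED(a)     RT_BOOL((a) & VMX_EXIT_REASON_ENTRY_FAILED)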
     
    4625 5448 }
    4626 5449
    4627 
    4628 /**
    4629  * Saves the guest control registers, debug registers and some MSRs are part of
    4630  * VM-exit.
    4631  *
    4632  * @param   pVCpu       The cross context virtual CPU structure.
    4633  */
    4634 IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
    4635 {
    4636     /*
    4637      * Saves the guest control registers, debug registers and some MSRs.
    4638      * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
    4639      */
    4640     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4641 
    4642     /* Save control registers. */
    4643     pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
    4644     pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
    4645     pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
    4646 
    4647     /* Save SYSENTER CS, ESP, EIP. */
    4648     pVmcs->u32GuestSysenterCS    = pVCpu->cpum.GstCtx.SysEnter.cs;
    4649     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
    4650     {
    4651         pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
    4652         pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
    4653     }
    4654     else
    4655     {
    4656         pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
    4657         pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
    4658     }
    4659 
    4660     /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
    4661     if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
    4662     {
    4663         pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
    4664         /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
    4665     }
    4666 
    4667     /* Save PAT MSR. */
    4668     if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
    4669         pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
    4670 
    4671     /* Save EFER MSR. */
    4672     if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
    4673         pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
    4674 
    4675     /* We don't support clearing IA32_BNDCFGS MSR yet. */
    4676     Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
    4677 
    4678     /* Nothing to do for SMBASE register - We don't support SMM yet. */
    4679 }
    4680 
    4681 
    4682 /**
    4683  * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
    4684  *
    4685  * @param   pVCpu       The cross context virtual CPU structure.
    4686  */
    4687 IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
    4688 {
    4689     /*
    4690      * Save guest segment registers, GDTR, IDTR, LDTR, TR.
    4691      * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
    4692      */
    4693     /* CS, SS, ES, DS, FS, GS. */
    4694     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4695     for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
    4696     {
    4697         PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
    4698         if (!pSelReg->Attr.n.u1Unusable)
    4699             iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
    4700         else
    4701         {
    4702             /*
    4703              * For unusable segments the attributes are undefined except for CS and SS.
    4704              * For the rest we don't bother preserving anything but the unusable bit.
    4705              */
    4706             switch (iSegReg)
    4707             {
    4708                 case X86_SREG_CS:
    4709                     pVmcs->GuestCs          = pSelReg->Sel;
    4710                     pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
    4711                     pVmcs->u32GuestCsLimit  = pSelReg->u32Limit;
    4712                     pVmcs->u32GuestCsAttr   = pSelReg->Attr.u & (  X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
    4713                                                                  | X86DESCATTR_UNUSABLE);
    4714                     break;
    4715 
    4716                 case X86_SREG_SS:
    4717                     pVmcs->GuestSs        = pSelReg->Sel;
    4718                     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
    4719                         pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
    4720                     pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
    4721                     break;
    4722 
    4723                 case X86_SREG_DS:
    4724                     pVmcs->GuestDs        = pSelReg->Sel;
    4725                     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
    4726                         pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
    4727                     pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
    4728                     break;
    4729 
    4730                 case X86_SREG_ES:
    4731                     pVmcs->GuestEs        = pSelReg->Sel;
    4732                     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
    4733                         pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
    4734                     pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
    4735                     break;
    4736 
    4737                 case X86_SREG_FS:
    4738                     pVmcs->GuestFs          = pSelReg->Sel;
    4739                     pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
    4740                     pVmcs->u32GuestFsAttr   = X86DESCATTR_UNUSABLE;
    4741                     break;
    4742 
    4743                 case X86_SREG_GS:
    4744                     pVmcs->GuestGs          = pSelReg->Sel;
    4745                     pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
    4746                     pVmcs->u32GuestGsAttr   = X86DESCATTR_UNUSABLE;
    4747                     break;
    4748             }
    4749         }
    4750     }
    4751 
    4752     /* Segment attribute bits 31:7 and 11:8 MBZ. */
    4753     uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT  | X86DESCATTR_DPL | X86DESCATTR_P
    4754                                   | X86DESCATTR_AVL  | X86DESCATTR_L   | X86DESCATTR_D   | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
    4755     /* LDTR. */
    4756     {
    4757         PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
    4758         pVmcs->GuestLdtr          = pSelReg->Sel;
    4759         pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
    4760         Assert(X86_IS_CANONICAL(pSelReg->u64Base));
    4761         pVmcs->u32GuestLdtrLimit  = pSelReg->u32Limit;
    4762         pVmcs->u32GuestLdtrAttr   = pSelReg->Attr.u & fValidAttrMask;
    4763     }
    4764 
    4765     /* TR. */
    4766     {
    4767         PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
    4768         pVmcs->GuestTr          = pSelReg->Sel;
    4769         pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
    4770         pVmcs->u32GuestTrLimit  = pSelReg->u32Limit;
    4771         pVmcs->u32GuestTrAttr   = pSelReg->Attr.u & fValidAttrMask;
    4772     }
    4773 
    4774     /* GDTR. */
    4775     pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
    4776     pVmcs->u32GuestGdtrLimit  = pVCpu->cpum.GstCtx.gdtr.cbGdt;
    4777 
    4778     /* IDTR. */
    4779     pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
    4780     pVmcs->u32GuestIdtrLimit  = pVCpu->cpum.GstCtx.idtr.cbIdt;
    4781 }
    4782 
    4783 
    4784 /**
    4785  * Saves guest non-register state as part of VM-exit.
    4786  *
    4787  * @param   pVCpu           The cross context virtual CPU structure.
    4788  * @param   uExitReason     The VM-exit reason.
    4789  */
    4790 IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
    4791 {
    4792     /*
    4793      * Save guest non-register state.
    4794      * See Intel spec. 27.3.4 "Saving Non-Register State".
    4795      */
    4796     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4797 
    4798     /*
    4799      * Activity-state: VM-exits occur before changing the activity state
    4800      * of the processor and hence we shouldn't need to change it.
    4801      */
    4802 
    4803     /* Interruptibility-state. */
    4804     pVmcs->u32GuestIntrState = 0;
    4805     if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
    4806     { /** @todo NSTVMX: Virtual-NMI blocking. */ }
    4807     else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
    4808         pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
    4809 
    4810     if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    4811         && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
    4812     {
    4813         /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
    4814          *        currently. */
    4815         pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
    4816         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    4817     }
    4818     /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
    4819 
    4820     /* Pending debug exceptions. */
    4821     if (    uExitReason != VMX_EXIT_INIT_SIGNAL
    4822         &&  uExitReason != VMX_EXIT_SMI
    4823         &&  uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
    4824         && !HMVmxIsTrapLikeVmexit(uExitReason))
    4825     {
    4826         /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
    4827          *        block-by-MovSS is in effect. */
    4828         pVmcs->u64GuestPendingDbgXcpt.u = 0;
    4829     }
    4830 
    4831     /** @todo NSTVMX: Save VMX preemption timer value. */
    4832 
    4833     /* PDPTEs. */
    4834     /* We don't support EPT yet. */
    4835     Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
    4836     pVmcs->u64GuestPdpte0.u = 0;
    4837     pVmcs->u64GuestPdpte1.u = 0;
    4838     pVmcs->u64GuestPdpte2.u = 0;
    4839     pVmcs->u64GuestPdpte3.u = 0;
    4840 }
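
/*
 * A minimal standalone sketch (hypothetical helper name) of the guest
 * interruptibility-state encoding filled in above. Bit positions per Intel
 * spec. 24.4.2 "Guest Non-Register State": bit 0 blocking-by-STI, bit 1
 * blocking-by-MOV SS, bit 2 blocking-by-SMI, bit 3 blocking-by-NMI.
 */
static uint32_t vmxSketchComposeIntrState(bool fBlockSti, bool fBlockMovSs, bool fBlockNmi)
{
    uint32_t fIntrState = 0;
    if (fBlockSti)                      /* Instruction boundary following STI. */
        fIntrState |= RT_BIT_32(0);
    if (fBlockMovSs)                    /* Instruction boundary following MOV SS / POP SS. */
        fIntrState |= RT_BIT_32(1);
    if (fBlockNmi)                      /* NMI delivered but IRET not yet executed. */
        fIntrState |= RT_BIT_32(3);
    return fIntrState;
}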
    4841 
    4842 
    4843 /**
    4844  * Saves the guest-state as part of VM-exit.
    4845  *
    4847  * @param   pVCpu           The cross context virtual CPU structure.
    4848  * @param   uExitReason     The VM-exit reason.
    4849  */
    4850 IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
    4851 {
    4852     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4853     Assert(pVmcs);
    4854 
    4855     /*
    4856      * Save guest control, debug, segment, descriptor-table registers and some MSRs.
    4857      */
    4858     iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
    4859     iemVmxVmexitSaveGuestSegRegs(pVCpu);
    4860 
    4861     /*
    4862      * Save guest RIP, RSP and RFLAGS.
    4863      */
    4864     /* We don't support enclave mode yet. */
    4865     pVmcs->u64GuestRip.u    = pVCpu->cpum.GstCtx.rip;
    4866     pVmcs->u64GuestRsp.u    = pVCpu->cpum.GstCtx.rsp;
    4867     pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u;  /** @todo NSTVMX: Check RFLAGS.RF handling. */
    4868 
    4869     /* Save guest non-register state. */
    4870     iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
    4871 }
    4872 
    4873 
    4874 /**
    4875  * Saves the guest MSRs into the VM-exit auto-store MSR area as part of VM-exit.
    4876  *
    4877  * @returns VBox status code.
    4878  * @param   pVCpu           The cross context virtual CPU structure.
    4879  * @param   uExitReason     The VM-exit reason (for diagnostic purposes).
    4880  */
    4881 IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
    4882 {
    4883     /*
    4884      * Save guest MSRs.
    4885      * See Intel spec. 27.4 "Saving MSRs".
    4886      */
    4887     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4888     const char *const pszFailure = "VMX-abort";
    4889 
    4890     /*
    4891      * The VM-exit MSR-store area address need not be a valid guest-physical address if the
    4892      * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
    4893      * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
    4894      */
    4895     uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
    4896     if (!cMsrs)
    4897         return VINF_SUCCESS;
    4898 
    4899     /*
    4900      * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
    4901      * is exceeded, including possibly raising #MC exceptions during the VMX transition. Our
    4902      * implementation instead causes a VMX-abort followed by a triple-fault.
    4903      */
    4904     bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
    4905     if (fIsMsrCountValid)
    4906     { /* likely */ }
    4907     else
    4908         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
    4909 
    4910     PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
    4911     Assert(pMsr);
    4912     for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
    4913     {
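            /*
             * Entries with reserved bits set, or whose MSR index is in the x2APIC range
             * (0x800-0x8ff) or IA32_SMBASE, cannot be stored and take the VMX-abort path
             * below; see Intel spec. 27.4 "Saving MSRs".
             */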
    4914         if (   !pMsr->u32Reserved
    4915             &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
    4916             &&  pMsr->u32Msr != MSR_IA32_SMBASE)
    4917         {
    4918             VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
    4919             if (rcStrict == VINF_SUCCESS)
    4920                 continue;
    4921 
    4922             /*
    4923              * If we're in ring-0, we cannot handle returns to ring-3 at this point and still
    4924              * continue the VM-exit. If the guest hypervisor specifies an MSR whose read requires
    4925              * ring-3 handling, we cause a VMX-abort, recording the MSR index in the auxiliary
    4926              * info field and indicating the cause with our own, specific diagnostic code. Later,
    4927              * we can try to implement handling of the MSR in ring-0, or find a better, generic solution.
    4928              */
    4929             pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
    4930             VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
    4931                                    ? kVmxVDiag_Vmexit_MsrStoreRing3
    4932                                    : kVmxVDiag_Vmexit_MsrStore;
    4933             IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
    4934         }
    4935         else
    4936         {
    4937             pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
    4938             IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
    4939         }
    4940     }
    4941 
    4942     RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
    4943     int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
    4944                                       pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
    4945     if (RT_SUCCESS(rc))
    4946     { /* likely */ }
    4947     else
    4948     {
    4949         AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
    4950         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
    4951     }
    4952 
    4953     NOREF(uExitReason);
    4954     NOREF(pszFailure);
    4955     return VINF_SUCCESS;
    4956 }
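
/*
 * A standalone sketch (hypothetical type name) of the 16-byte auto-MSR area
 * entry format the function above fills in and writes back to guest memory.
 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs": bits 31:0 MSR index,
 * bits 63:32 reserved (MBZ), bits 127:64 MSR data.
 */
typedef struct VMXSKETCHAUTOMSR
{
    uint32_t u32Msr;        /* MSR index to store or load. */
    uint32_t u32Reserved;   /* Must be zero; otherwise the transition fails. */
    uint64_t u64Value;      /* MSR value; written by the CPU on a store. */
} VMXSKETCHAUTOMSR;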
    4957 
    4958 
    4959 /**
    4960  * Performs a VMX abort (due to a fatal error during VM-exit).
    4961  *
    4962  * @returns VBox status code.
    4963  * @param   pVCpu       The cross context virtual CPU structure.
    4964  * @param   enmAbort    The VMX abort reason.
    4965  */
    4966 IEM_STATIC int iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
    4967 {
    4968     /*
    4969      * Perform the VMX abort.
    4970      * See Intel spec. 27.7 "VMX Aborts".
    4971      */
    4972     LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
    4973 
    4974     /* We don't support SMX yet. */
    4975     pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
    4976     if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
    4977     {
    4978         RTGCPHYS const GCPhysVmcs  = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
    4979         uint32_t const offVmxAbort = RT_OFFSETOF(VMXVVMCS, u32VmxAbortId);
    4980         PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
    4981     }
    4982 
    4983     return VINF_EM_TRIPLE_FAULT;
    4984 }
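
/*
 * A standalone sketch (hypothetical struct) of why the abort reason above is
 * written at the offset of u32VmxAbortId: per Intel spec. 24.2 "Format of the
 * VMCS Region", the 32-bit VMX-abort indicator immediately follows the VMCS
 * revision identifier at the start of the VMCS region.
 */
typedef struct VMXSKETCHVMCSHDR
{
    uint32_t u32RevisionId;     /* Byte 0: VMCS revision id (bit 31: shadow-VMCS indicator). */
    uint32_t u32AbortId;        /* Byte 4: VMX-abort indicator; non-zero after a VMX-abort. */
    /* Implementation-specific VMCS data follows. */
} VMXSKETCHVMCSHDR;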
    4985 
    4986 
    4987 /**
    4988  * Loads host control registers, debug registers and MSRs as part of VM-exit.
    4989  *
    4990  * @param   pVCpu   The cross context virtual CPU structure.
    4991  */
    4992 IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
    4993 {
    4994     /*
    4995      * Load host control registers, debug registers and MSRs.
    4996      * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
    4997      */
    4998     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    4999     bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    5000 
    5001     /* CR0. */
    5002     {
    5003         /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
    5004         uint64_t const uCr0Fixed0  = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
    5005         uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ffaffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
    5006         uint64_t const uHostCr0    = pVmcs->u64HostCr0.u;
    5007         uint64_t const uGuestCr0   = pVCpu->cpum.GstCtx.cr0;
    5008         uint64_t const uValidCr0   = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
    5009         CPUMSetGuestCR0(pVCpu, uValidCr0);
    5010     }
    5011 
    5012     /* CR4. */
    5013     {
    5014         /* CR4 MB1 bits are not modified. */
    5015         uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
    5016         uint64_t const uHostCr4    = pVmcs->u64HostCr4.u;
    5017         uint64_t const uGuestCr4   = pVCpu->cpum.GstCtx.cr4;
    5018         uint64_t       uValidCr4   = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
    5019         if (fHostInLongMode)
    5020             uValidCr4 |= X86_CR4_PAE;
    5021         else
    5022             uValidCr4 &= ~X86_CR4_PCIDE;
    5023         CPUMSetGuestCR4(pVCpu, uValidCr4);
    5024     }
    5025 
    5026     /* CR3 (host value validated while checking host-state during VM-entry). */
    5027     pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
    5028 
    5029     /* DR7. */
    5030     pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
    5031 
    5032     /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
    5033 
    5034     /* Load SYSENTER CS, ESP, EIP (host values validated while checking host-state during VM-entry). */
    5035     pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
    5036     pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
    5037     pVCpu->cpum.GstCtx.SysEnter.cs  = pVmcs->u32HostSysenterCs;
    5038 
    5039     /* FS, GS bases are loaded later while we load host segment registers. */
    5040 
    5041     /* EFER MSR (host value validated while checking host-state during VM-entry). */
    5042     if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
    5043         pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
    5044     else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
    5045     {
    5046         if (fHostInLongMode)
    5047             pVCpu->cpum.GstCtx.msrEFER |=  (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
    5048         else
    5049             pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
    5050     }
    5051 
    5052     /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
    5053 
    5054     /* PAT MSR (host value is validated while checking host-state during VM-entry). */
    5055     if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
    5056         pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
    5057 
    5058     /* We don't support IA32_BNDCFGS MSR yet. */
    5059 }
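
/*
 * A worked standalone sketch of the masked merge used for CR0 and CR4 above:
 * bits set in the ignore-mask keep their current guest value, everything else
 * comes from the host-state area of the VMCS.
 */
static uint64_t vmxSketchMergeHostCr(uint64_t uHostCr, uint64_t uGuestCr, uint64_t fIgnMask)
{
    return (uHostCr & ~fIgnMask) | (uGuestCr & fIgnMask);
}
/* E.g. fIgnMask=0xf0, uHostCr=0xaa, uGuestCr=0x55: (0xaa & 0x0f) | (0x55 & 0xf0) = 0x5a. */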
    5060 
    5061 
    5062 /**
    5063  * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
    5064  *
    5065  * @param   pVCpu   The cross context virtual CPU structure.
    5066  */
    5067 IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
    5068 {
    5069     /*
    5070      * Load host segment registers, GDTR, IDTR, LDTR and TR.
    5071      * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
    5072      *
    5073      * Warning! Be careful to not touch fields that are reserved by VT-x,
    5074      * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
    5075      */
    5076     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    5077     bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    5078 
    5079     /* CS, SS, ES, DS, FS, GS. */
    5080     for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
    5081     {
    5082         RTSEL const HostSel  = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
    5083         bool const  fUnusable = RT_BOOL(HostSel == 0);
    5084 
    5085         /* Selector. */
    5086         pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel      = HostSel;
    5087         pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
    5088         pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags   = CPUMSELREG_FLAGS_VALID;
    5089 
    5090         /* Limit. */
    5091         pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
    5092 
    5093         /* Base and Attributes. */
    5094         switch (iSegReg)
    5095         {
    5096             case X86_SREG_CS:
    5097             {
    5098                 pVCpu->cpum.GstCtx.cs.u64Base = 0;
    5099                 pVCpu->cpum.GstCtx.cs.Attr.n.u4Type        = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
    5100                 pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType    = 1;
    5101                 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl         = 0;
    5102                 pVCpu->cpum.GstCtx.cs.Attr.n.u1Present     = 1;
    5103                 pVCpu->cpum.GstCtx.cs.Attr.n.u1Long        = fHostInLongMode;
    5104                 pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig      = !fHostInLongMode;
    5105                 pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
    5106                 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
    5107                 Assert(!fUnusable);
    5108                 break;
    5109             }
    5110 
    5111             case X86_SREG_SS:
    5112             case X86_SREG_ES:
    5113             case X86_SREG_DS:
    5114             {
    5115                 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
    5116                 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type        = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
    5117                 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType    = 1;
    5118                 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl         = 0;
    5119                 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present     = 1;
    5120                 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig      = 1;
    5121                 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
    5122                 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable    = fUnusable;
    5123                 break;
    5124             }
    5125 
    5126             case X86_SREG_FS:
    5127             {
    5128                 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
    5129                 pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
    5130                 pVCpu->cpum.GstCtx.fs.Attr.n.u4Type        = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
    5131                 pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType    = 1;
    5132                 pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl         = 0;
    5133                 pVCpu->cpum.GstCtx.fs.Attr.n.u1Present     = 1;
    5134                 pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig      = 1;
    5135                 pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
    5136                 pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable    = fUnusable;
    5137                 break;
    5138             }
    5139 
    5140             case X86_SREG_GS:
    5141             {
    5142                 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
    5143                 pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
    5144                 pVCpu->cpum.GstCtx.gs.Attr.n.u4Type        = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
    5145                 pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType    = 1;
    5146                 pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl         = 0;
    5147                 pVCpu->cpum.GstCtx.gs.Attr.n.u1Present     = 1;
    5148                 pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig      = 1;
    5149                 pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
    5150                 pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable    = fUnusable;
    5151                 break;
    5152             }
    5153         }
    5154     }
    5155 
    5156     /* TR. */
    5157     Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
    5158     Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
    5159     pVCpu->cpum.GstCtx.tr.Sel                  = pVmcs->HostTr;
    5160     pVCpu->cpum.GstCtx.tr.ValidSel             = pVmcs->HostTr;
    5161     pVCpu->cpum.GstCtx.tr.fFlags               = CPUMSELREG_FLAGS_VALID;
    5162     pVCpu->cpum.GstCtx.tr.u32Limit             = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
    5163     pVCpu->cpum.GstCtx.tr.u64Base              = pVmcs->u64HostTrBase.u;
    5164     pVCpu->cpum.GstCtx.tr.Attr.n.u4Type        = X86_SEL_TYPE_SYS_386_TSS_BUSY;
    5165     pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType    = 0;
    5166     pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl         = 0;
    5167     pVCpu->cpum.GstCtx.tr.Attr.n.u1Present     = 1;
    5168     pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig      = 0;
    5169     pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
    5170 
    5171     /* LDTR. */
    5172     pVCpu->cpum.GstCtx.ldtr.Sel               = 0;
    5173     pVCpu->cpum.GstCtx.ldtr.ValidSel          = 0;
    5174     pVCpu->cpum.GstCtx.ldtr.fFlags            = CPUMSELREG_FLAGS_VALID;
    5175     pVCpu->cpum.GstCtx.ldtr.u32Limit          = 0;
    5176     pVCpu->cpum.GstCtx.ldtr.u64Base           = 0;
    5177     pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
    5178 
    5179     /* GDTR. */
    5180     Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
    5181     pVCpu->cpum.GstCtx.gdtr.pGdt  = pVmcs->u64HostGdtrBase.u;
    5182     pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
    5183 
    5184     /* IDTR. */
    5185     Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
    5186     pVCpu->cpum.GstCtx.idtr.pIdt  = pVmcs->u64HostIdtrBase.u;
    5187     pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
    5188 }
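
/*
 * A standalone sketch of the VM-entry host-selector rule that makes the
 * fUnusable handling above safe; see Intel spec. 26.2.3 "Checks on Host
 * Segment and Descriptor-Table Registers": RPL (bits 1:0) and TI (bit 2)
 * must be zero, and CS and TR must not be null.
 */
static bool vmxSketchIsValidHostSelReg(uint16_t uSel, bool fIsCsOrTr)
{
    if (uSel & (X86_SEL_RPL | X86_SEL_LDT))  /* RPL and TI must be zero. */
        return false;
    if (fIsCsOrTr && uSel == 0)              /* CS and TR cannot be null selectors. */
        return false;
    return true;
}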
    5189 
    5190 
    5191 /**
    5192  * Checks host PDPTEs as part of VM-exit.
    5193  *
    5194  * @param   pVCpu           The cross context virtual CPU structure.
    5195  * @param   uExitReason     The VM-exit reason (for logging purposes).
    5196  */
    5197 IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
    5198 {
    5199     /*
    5200      * Check host PDPTEs.
    5201      * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
    5202      */
    5203     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    5204     const char *const pszFailure = "VMX-abort";
    5205     bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    5206 
    5207     if (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
    5208         && !fHostInLongMode)
    5209     {
    5210         uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
    5211         X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
    5212         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
    5213         if (RT_SUCCESS(rc))
    5214         {
    5215             for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
    5216             {
    5217                 if (   !(aPdptes[iPdpte].u & X86_PDPE_P)
    5218                     || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
    5219                 { /* likely */ }
    5220                 else
    5221                 {
    5222                     VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
    5223                     IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
    5224                 }
    5225             }
    5226         }
    5227         else
    5228             IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
    5229     }
    5230 
    5231     NOREF(pszFailure);
    5232     NOREF(uExitReason);
    5233     return VINF_SUCCESS;
    5234 }
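
/*
 * A standalone sketch (simplified) of the per-PDPTE test above: only present
 * PDPTEs are validated, and a present PDPTE must have all MBZ (reserved) bits
 * clear, mirroring the X86_PDPE_PAE_MBZ_MASK check in the loop.
 */
static bool vmxSketchIsPdpteValid(uint64_t uPdpte, uint64_t fMbzMask)
{
    if (!(uPdpte & X86_PDPE_P))     /* Not present: nothing to validate. */
        return true;
    return !(uPdpte & fMbzMask);    /* Present: reserved bits must be zero. */
}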
    5235 
    5236 
    5237 /**
    5238  * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
    5239  *
    5240  * @returns VBox status code.
    5241  * @param   pVCpu       The cross context virtual CPU structure.
    5242  * @param   uExitReason     The VM-exit reason (for logging purposes).
    5243  */
    5244 IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
    5245 {
    5246     /*
    5247      * Load host MSRs.
    5248      * See Intel spec. 27.6 "Loading MSRs".
    5249      */
    5250     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    5251     const char *const pszFailure = "VMX-abort";
    5252 
    5253     /*
    5254      * The VM-exit MSR-load area address need not be a valid guest-physical address if the
    5255      * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
    5256      * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
    5257      */
    5258     uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
    5259     if (!cMsrs)
    5260         return VINF_SUCCESS;
    5261 
    5262     /*
    5263      * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
    5264      * is exceeded, including possibly raising #MC exceptions during the VMX transition. Our
    5265      * implementation instead causes a VMX-abort followed by a triple-fault.
    5266      */
    5267     bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
    5268     if (fIsMsrCountValid)
    5269     { /* likely */ }
    5270     else
    5271         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
    5272 
    5273     RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
    5274     int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
    5275                                      GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
    5276     if (RT_SUCCESS(rc))
    5277     {
    5278         PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
    5279         Assert(pMsr);
    5280         for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
    5281         {
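            /*
             * Entries with reserved bits set, or whose MSR cannot be loaded this way
             * (FS/GS base, EFER, the x2APIC range, IA32_SMM_MONITOR_CTL), take the
             * VMX-abort path below; see Intel spec. 27.6 "Loading MSRs".
             */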
    5282             if (   !pMsr->u32Reserved
    5283                 &&  pMsr->u32Msr != MSR_K8_FS_BASE
    5284                 &&  pMsr->u32Msr != MSR_K8_GS_BASE
    5285                 &&  pMsr->u32Msr != MSR_K6_EFER
    5286                 &&  pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8
    5287                 &&  pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL)
    5288             {
    5289                 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
    5290                 if (rcStrict == VINF_SUCCESS)
    5291                     continue;
    5292 
    5293                 /*
    5294                  * If we're in ring-0, we cannot handle returns to ring-3 at this point and still
    5295                  * continue the VM-exit. If the guest hypervisor specifies an MSR whose write requires
    5296                  * ring-3 handling, we cause a VMX-abort, recording the MSR index in the auxiliary
    5297                  * info field and indicating the cause with our own, specific diagnostic code. Later,
    5298                  * we can try to implement handling of the MSR in ring-0, or find a better, generic solution.
    5299                  */
    5300                 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
    5301                 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
    5302                                        ? kVmxVDiag_Vmexit_MsrLoadRing3
    5303                                        : kVmxVDiag_Vmexit_MsrLoad;
    5304                 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
    5305             }
    5306             else
    5307                 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
    5308         }
    5309     }
    5310     else
    5311     {
    5312         AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
    5313         IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
    5314     }
    5315 
    5316     NOREF(uExitReason);
    5317     NOREF(pszFailure);
    5318     return VINF_SUCCESS;
    5319 }
    5320 
    5321 
    5322 /**
    5323  * Loads the host state as part of VM-exit.
    5324  *
    5325  * @returns VBox status code.
    5326  * @param   pVCpu           The cross context virtual CPU structure.
    5327  * @param   uExitReason     The VM-exit reason (for logging purposes).
    5328  */
    5329 IEM_STATIC int iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
    5330 {
    5331     /*
    5332      * Load host state.
    5333      * See Intel spec. 27.5 "Loading Host State".
    5334      */
    5335     PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    5336     bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
    5337 
    5338     /* We cannot return from a long-mode guest to a host that is not in long mode. */
    5339     if (    CPUMIsGuestInLongMode(pVCpu)
    5340         && !fHostInLongMode)
    5341     {
    5342         Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
    5343         return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
    5344     }
    5345 
    5346     /*
    5347      * Load host control, debug, segment, descriptor-table registers and some MSRs.
    5348      */
    5349     iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
    5350     iemVmxVmexitLoadHostSegRegs(pVCpu);
    5351 
    5352     /*
    5353      * Load host RIP, RSP and RFLAGS.
    5354      * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
    5355      */
    5356     pVCpu->cpum.GstCtx.rip      = pVmcs->u64HostRip.u;
    5357     pVCpu->cpum.GstCtx.rsp      = pVmcs->u64HostRsp.u;
    5358     pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
    5359 
    5360     /* Update non-register state. */
    5361     iemVmxVmexitRestoreForceFlags(pVCpu);
    5362 
    5363     /* Clear address range monitoring. */
    5364     EMMonitorWaitClear(pVCpu);
    5365 
    5366     /* Perform the VMX transition (PGM updates). */
    5367     VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
    5368     if (rcStrict == VINF_SUCCESS)
    5369     {
    5370         /* Check host PDPTEs. */
    5371         /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
    5372         int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
    5373         if (RT_FAILURE(rc))
    5374         {
    5375             Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
    5376             return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
    5377         }
    5378     }
    5379     else if (RT_SUCCESS(rcStrict))
    5380     {
    5381         Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
    5382               uExitReason));
    5383         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    5384     }
    5385     else
    5386     {
    5387         Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
    5388         return rcStrict;
    5389     }
    5390 
    5391     Assert(rcStrict == VINF_SUCCESS);
    5392 
    5393     /* Load MSRs from the VM-exit auto-load MSR area. */
    5394     int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
    5395     if (RT_FAILURE(rc))
    5396     {
    5397         Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
    5398         return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
    5399     }
    5400 
    5401     return VINF_SUCCESS;
    5402 }
    5403 
    5404 
    5405 /**
    5406  * VMX VM-exit handler.
    5407  *
    5408  * @returns Strict VBox status code.
    5409  * @param   pVCpu           The cross context virtual CPU structure.
    5410  * @param   uExitReason     The VM-exit reason.
    5411  */
    5412 IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
    5413 {
    5414     PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    5415     Assert(pVmcs);
    5416 
    5417     pVmcs->u32RoExitReason   = uExitReason;
    5418 
    5419     /** @todo NSTVMX: Update VM-exit instruction length for instruction VM-exits. */
    5420     /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
    5421     /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
    5422      *        during injection. */
    5423 
    5424     /*
    5425      * Save the guest state back into the VMCS.
    5426      * We only need to save the guest state when the VM-exit isn't caused by a failed VM-entry.
    5427      */
    5428     if (   uExitReason != VMX_EXIT_ERR_INVALID_GUEST_STATE
    5429         && uExitReason != VMX_EXIT_ERR_MSR_LOAD
    5430         && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK)
    5431     {
    5432         iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
    5433         int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
    5434         if (RT_SUCCESS(rc))
    5435         { /* likely */ }
    5436         else
    5437             return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
    5438     }
    5439 
    5440     int rc = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
    5441     if (RT_FAILURE(rc))
    5442         return rc;
    5443 
    5444     /** @todo NSTVMX: rest of VM-exit. */
    5445 
    5446     /* We're no longer in nested-guest execution mode. */
    5447     pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
    5448 
    5449     return VINF_SUCCESS;
    5450 }
    54515450
    54525451/**