VirtualBox

Changeset 81206 in vbox for trunk/src/VBox/VMM


Timestamp: Oct 10, 2019 10:58:18 AM
Author: vboxsync
Message: VMM/HMVMXR0: Nested VMX: bugref:9180 Streamline exporting state a bit when executing nested-guests. Also gather heuristics on optimizing nested-guest VMLAUNCH/VMRESUME emulation.
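The core of the change is a cheaper export path that is taken whenever only the RIP/RSP/RFLAGS/HWVIRT bits of the context-changed mask are dirty. Below is a minimal, self-contained sketch of just that mask test, using illustrative stand-in bit values rather than the real HM_CHANGED_* definitions, so the condition in the diff further down can be read in isolation.

    /*
     * Minimal, self-contained sketch (illustrative bit values only, not the real
     * HM_CHANGED_* definitions) of the fast-path test introduced in this changeset:
     * take the cheap export path only when at least one of the "minimal" bits is
     * dirty and no other guest-state bit outside the minimal mask is dirty.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_CHANGED_GUEST_RIP     UINT64_C(0x0001)
    #define SKETCH_CHANGED_GUEST_RSP     UINT64_C(0x0002)
    #define SKETCH_CHANGED_GUEST_RFLAGS  UINT64_C(0x0004)
    #define SKETCH_CHANGED_GUEST_HWVIRT  UINT64_C(0x0008)
    #define SKETCH_CHANGED_GUEST_CR0     UINT64_C(0x0010)   /* stands in for "anything else" */
    #define SKETCH_CHANGED_ALL_GUEST     UINT64_C(0x001f)

    static int useMinimalExportPath(uint64_t fCtxChanged)
    {
        uint64_t const fCtxMask     = SKETCH_CHANGED_ALL_GUEST;
        uint64_t const fMinimalMask = SKETCH_CHANGED_GUEST_RIP
                                    | SKETCH_CHANGED_GUEST_RSP
                                    | SKETCH_CHANGED_GUEST_RFLAGS
                                    | SKETCH_CHANGED_GUEST_HWVIRT;
        /* Something minimal is dirty AND nothing outside the minimal set is dirty. */
        return (fCtxChanged & fMinimalMask)
            && !(fCtxChanged & (fCtxMask & ~fMinimalMask));
    }

    int main(void)
    {
        printf("RIP only       -> %d\n", useMinimalExportPath(SKETCH_CHANGED_GUEST_RIP));      /* 1: fast path   */
        printf("RIP+RSP+RFLAGS -> %d\n", useMinimalExportPath(SKETCH_CHANGED_GUEST_RIP
                                                            | SKETCH_CHANGED_GUEST_RSP
                                                            | SKETCH_CHANGED_GUEST_RFLAGS));   /* 1: fast path   */
        printf("RIP+CR0        -> %d\n", useMinimalExportPath(SKETCH_CHANGED_GUEST_RIP
                                                            | SKETCH_CHANGED_GUEST_CR0));      /* 0: full export */
        printf("nothing dirty  -> %d\n", useMinimalExportPath(0));                             /* 0: nothing to do */
        return 0;
    }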

File (1 edited):

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r81153)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r81206)
@@ -4849 +4849 @@
  * Exports the guest APIC TPR state into the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pVmxTransient   The VMX-transient structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestApicTpr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+static void hmR0VmxExportGuestApicTpr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
@@ -4876 +4875 @@
                     uint8_t u8PendingIntr = 0;
                     int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
-                    AssertRCReturn(rc, rc);
+                    AssertRC(rc);

                     /*
@@ -4903 +4902 @@
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
     }
-    return VINF_SUCCESS;
 }

@@ -4967 +4965 @@
  * Exports the exception intercepts required for guest execution in the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pVmxTransient   The VMX-transient structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestXcptIntercepts(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+static void hmR0VmxExportGuestXcptIntercepts(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
@@ -4987 +4984 @@
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
     }
-    return VINF_SUCCESS;
 }

@@ -4994 +4990 @@
  * Exports the guest's RIP into the guest-state area in the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu   The cross context virtual CPU structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestRip(PVMCPUCC pVCpu)
+static void hmR0VmxExportGuestRip(PVMCPUCC pVCpu)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
@@ -5011 +5006 @@
         Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
     }
-    return VINF_SUCCESS;
 }

@@ -5018 +5012 @@
  * Exports the guest's RSP into the guest-state area in the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu   The cross context virtual CPU structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestRsp(PVMCPUCC pVCpu)
+static void hmR0VmxExportGuestRsp(PVMCPUCC pVCpu)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
@@ -5035 +5028 @@
         Log4Func(("rsp=%#RX64\n", pVCpu->cpum.GstCtx.rsp));
     }
-    return VINF_SUCCESS;
 }

@@ -5042 +5034 @@
  * Exports the guest's RFLAGS into the guest-state area in the VMCS.
  *
- * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pVmxTransient   The VMX-transient structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0VmxExportGuestRflags(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+static void hmR0VmxExportGuestRflags(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
 {
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
@@ -5100 +5091 @@
         Log4Func(("eflags=%#RX32\n", fEFlags.u32));
     }
-    return VINF_SUCCESS;
 }

@@ -9087 +9077 @@
     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);

-    rc = hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient);
-    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
-
-    rc = hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient);
-    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
-
-    rc  = hmR0VmxExportGuestRip(pVCpu);
-    rc |= hmR0VmxExportGuestRsp(pVCpu);
-    rc |= hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
-    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
+    hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient);
+    hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient);
+    hmR0VmxExportGuestRip(pVCpu);
+    hmR0VmxExportGuestRsp(pVCpu);
+    hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);

     rc = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);
@@ -9141 +9126 @@
         /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
         if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
-        {
-            rc = hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
-            AssertRC(rc);
-        }
+            hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
     }

@@ -9182 +9164 @@

     /*
-     * For many exits it's only RIP that changes and hence try to export it first
+     * For many exits it's only RIP/RSP/RFLAGS that changes and hence try to export it first
      * without going through a lot of change flag checks.
      */
-    VBOXSTRICTRC rcStrict;
-    uint64_t     fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
-    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
-    if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP)
-    {
-        rcStrict = hmR0VmxExportGuestRip(pVCpu);
-        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-        { /* likely */}
-        else
-            AssertMsgFailedReturn(("Failed to export guest RIP! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
+    VBOXSTRICTRC   rcStrict;
+    uint64_t const fCtxMask     = HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE;
+    uint64_t const fMinimalMask = HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT;
+    uint64_t const fCtxChanged  = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
+
+    /* If only RIP/RSP/RFLAGS/HWVIRT changed, export only those (quicker, happens more often).*/
+    if (    (fCtxChanged & fMinimalMask)
+        && !(fCtxChanged & (fCtxMask & ~fMinimalMask)))
+    {
+        hmR0VmxExportGuestRip(pVCpu);
+        hmR0VmxExportGuestRsp(pVCpu);
+        hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
+        rcStrict = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
     }
-    else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
+    /* If anything else also changed, go through the full export routine and export as required. */
+    else if (fCtxChanged & fCtxMask)
     {
         rcStrict = hmR0VmxExportGuestState(pVCpu, pVmxTransient);
@@ -9211 +9197 @@
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
     }
-    else
-        rcStrict = VINF_SUCCESS;
+    /* else: Nothing changed, nothing to load here. */

 #ifdef VBOX_STRICT
     /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
     fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
-    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
     AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)),
               ("fCtxChanged=%#RX64\n", fCtxChanged));
@@ -14842 +14826 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
     if (rcStrict == VINF_SUCCESS)
-        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
-                                                 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX);
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
     else if (rcStrict == VINF_IEM_RAISED_XCPT)
     {
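Most of the remaining churn is mechanical: export helpers that could only ever return VINF_SUCCESS are converted from static int to static void, and their callers drop the rc plumbing (the helpers assert their fallible sub-calls in place). A hedged before/after sketch of that pattern, with stand-in names and a stand-in assertion macro rather than the real VBox/IPRT APIs:

    /*
     * Before/after sketch of the int -> void conversion in this changeset;
     * names and SKETCH_ASSERT_RC are illustrative stand-ins, not VBox APIs.
     */
    #include <assert.h>
    #include <stdio.h>

    #define SKETCH_SUCCESS 0
    #define SKETCH_ASSERT_RC(rc) assert((rc) == SKETCH_SUCCESS)   /* stands in for AssertRC */

    static int readSomeValue(int *pValue)   /* hypothetical fallible sub-call */
    {
        *pValue = 42;
        return SKETCH_SUCCESS;
    }

    /* Before: the helper propagated a status it could never meaningfully fail
     * with, so every caller had to check and forward it. */
    static int exportSomethingOld(void)
    {
        int iValue;
        int rc = readSomeValue(&iValue);
        if (rc != SKETCH_SUCCESS)
            return rc;
        printf("exported %d\n", iValue);
        return SKETCH_SUCCESS;
    }

    /* After: the helper returns void and asserts the sub-call in place, letting
     * callers invoke it back to back without rc checks. */
    static void exportSomethingNew(void)
    {
        int iValue;
        int rc = readSomeValue(&iValue);
        SKETCH_ASSERT_RC(rc);
        printf("exported %d\n", iValue);
    }

    int main(void)
    {
        int rc = exportSomethingOld();
        SKETCH_ASSERT_RC(rc);
        exportSomethingNew();
        return 0;
    }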