VirtualBox

Changeset 71918 in vbox for trunk/src/VBox/VMM


Timestamp: Apr 19, 2018 11:06:54 AM (7 years ago)
Author: vboxsync
Message: VMM/HMSVMR0: Nested hw.virt: Clean up and unify hmR0SvmPostRunGuest and hmR0SvmPostRunGuestNested.
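In short, r71918 folds hmR0SvmPostRunGuestNested into hmR0SvmPostRunGuest by caching the active VMCB pointer and a nested-guest flag in the SVMTRANSIENT structure, which each run-loop now initializes once before entering guest context. A minimal, self-contained sketch of that pattern (simplified stand-in types and names, not the actual VirtualBox structures) looks like this:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the real VMCB and SVMTRANSIENT types (illustration only). */
    typedef struct VMCB
    {
        uint64_t u64ExitCode;
    } VMCB;

    typedef struct TRANSIENT
    {
        VMCB    *pVmcb;          /* Cached once per run-loop instead of re-deriving it on every exit. */
        bool     fIsNestedGuest; /* Selects the nested-guest-only handling inside shared helpers. */
        uint64_t u64ExitCode;
    } TRANSIENT;

    /* One post-run routine serves both paths and branches on the cached flag where they differ. */
    static void postRunGuest(TRANSIENT *pTransient)
    {
        pTransient->u64ExitCode = pTransient->pVmcb->u64ExitCode;
        printf("%s #VMEXIT: %#llx\n",
               pTransient->fIsNestedGuest ? "nested-guest" : "guest",
               (unsigned long long)pTransient->u64ExitCode);
    }

    int main(void)
    {
        VMCB vmcbGuest  = { 0x60 };   /* Arbitrary example exit codes. */
        VMCB vmcbNested = { 0x64 };

        TRANSIENT t = { 0 };          /* Guest run-loop: initialize the transient once up front. */
        t.pVmcb = &vmcbGuest;
        postRunGuest(&t);

        TRANSIENT tNested = { 0 };    /* Nested-guest run-loop: same routine, different cached state. */
        tNested.pVmcb = &vmcbNested;
        tNested.fIsNestedGuest = true;
        postRunGuest(&tNested);
        return 0;
    }

The diff below shows the real version, which also folds the nested-guest TSC-offset handling into the unified routine and asserts that VTPR syncing only happens for ordinary guests.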

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

Legend:

    ' ' Unmodified
    '+' Added (in r71918)
    '-' Removed (from r71915)

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (revision 71915)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (revision 71918)

@@ -178,5 +178,5 @@
  *
  * SMIs can and do happen in normal operation. We need not intercept them
- * while executing the guest or nested-guest.
+ * while executing the guest (or nested-guest).
  */
 #define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS           (  SVM_CTRL_INTERCEPT_INTR          \

@@ -269,4 +269,9 @@
     uint8_t         abAlignment0[7];
 
+    /** Pointer to the currently executing VMCB. */
+    PSVMVMCB        pVmcb;
+    /** Whether we are currently executing a nested-guest. */
+    bool            fIsNestedGuest;
+
     /** Whether the guest debug state was active at the time of \#VMEXIT. */
     bool            fWasGuestDebugStateActive;

@@ -284,5 +289,5 @@
     bool            fVectoringPF;
 } SVMTRANSIENT, *PSVMTRANSIENT;
-AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode,               sizeof(uint64_t));
-AssertCompileMemberAlignment(SVMTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t));
+AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
+AssertCompileMemberAlignment(SVMTRANSIENT, pVmcb,      sizeof(uint64_t));
 /** @}  */
 

@@ -1074,5 +1079,5 @@
 
 /**
- * Gets a pointer to the currently active guest or nested-guest VMCB.
+ * Gets a pointer to the currently active guest (or nested-guest) VMCB.
  *
  * @returns Pointer to the current context VMCB.

@@ -2667,5 +2672,5 @@
 
 /**
- * Loads the state shared between the host and guest or nested-guest into the
+ * Loads the state shared between the host and guest (or nested-guest) into the
  * VMCB.
  *

@@ -2719,7 +2724,7 @@
  * @returns VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
- *                          out-of-sync. Make sure to update the required fields
- *                          before using them.
+ * @param   pMixedCtx       Pointer to the guest-CPU or nested-guest-CPU
+ *                          context. The data may be out-of-sync. Make sure to
+ *                          update the required fields before using them.
  * @param   pVmcb           Pointer to the VM control block.
  */

@@ -2733,8 +2738,9 @@
     pMixedCtx->rax        = pVmcb->guest.u64RAX;
 
+    PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
 #ifdef VBOX_WITH_NESTED_HWVIRT
     if (!CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx))
     {
-        if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
+        if (pVmcbCtrl->IntCtrl.n.u1VGifEnable)
         {
             /*

@@ -2743,5 +2749,5 @@
              */
             Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fVGif);
-            pMixedCtx->hwvirt.fGif = pVmcb->ctrl.IntCtrl.n.u1VGif;
+            pMixedCtx->hwvirt.fGif = pVmcbCtrl->IntCtrl.n.u1VGif;
         }
     }

@@ -2749,5 +2755,5 @@
     {
         /* Sync/verify nested-guest's V_IRQ pending and our force-flag. */
-        if (!pVmcb->ctrl.IntCtrl.n.u1VIrqPending)
+        if (!pVmcbCtrl->IntCtrl.n.u1VIrqPending)
         {
             if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))

@@ -2762,5 +2768,5 @@
      * Guest interrupt shadow.
      */
-    if (pVmcb->ctrl.IntShadow.n.u1IntShadow)
+    if (pVmcbCtrl->IntShadow.n.u1IntShadow)
         EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
     else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))

@@ -2768,10 +2774,11 @@
 
     /*
-     * Guest Control registers: CR0, CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
+     * Guest control registers: CR0, CR2, CR3 (handled at the end).
+     * Accesses to other control registers are always intercepted.
      */
-    pMixedCtx->cr2        = pVmcb->guest.u64CR2;
+    pMixedCtx->cr2 = pVmcb->guest.u64CR2;
 
     /* If we're not intercepting changes to CR0 TS & MP bits, sync those bits here. */
-    if (!(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(0)))
+    if (!(pVmcbCtrl->u16InterceptWrCRx & RT_BIT(0)))
     {
         pMixedCtx->cr0 = (pMixedCtx->cr0      & ~(X86_CR0_TS | X86_CR0_MP))

@@ -2880,5 +2887,5 @@
      * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
      */
-    if (   pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging
+    if (   pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
         && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
     {

@@ -3794,5 +3801,5 @@
 
 /**
- * Injects any pending events into the guest or nested-guest.
+ * Injects any pending events into the guest (or nested-guest).
  *
  * @param   pVCpu       The cross context virtual CPU structure.

@@ -3858,5 +3865,5 @@
 
     /*
-     * Update the guest interrupt shadow in the guest or nested-guest VMCB.
+     * Update the guest interrupt shadow in the guest (or nested-guest) VMCB.
      *
      * For nested-guests: We need to update it too for the scenario where IEM executes

@@ -4177,5 +4184,5 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);    /** @todo Get new STAM counter for this? */
 
-    /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware SVM. */
+    /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware-assisted SVM. */
     Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
 

@@ -4369,10 +4376,9 @@
 
 /**
- * Prepares to run guest or nested-guest code in AMD-V and we've committed to
+ * Prepares to run guest (or nested-guest) code in AMD-V and we've committed to
  * doing so.
  *
  * This means there is no backing out to ring-3 or anywhere else at this point.
  *
- * @param   pVM             The cross context VM structure.
  * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pCtx            Pointer to the guest-CPU context.

@@ -4382,5 +4388,5 @@
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static void hmR0SvmPreRunGuestCommitted(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));

@@ -4391,7 +4397,6 @@
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);            /* Indicate the start of guest execution. */
 
-    bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
-    PSVMVMCB pVmcb = !fInNestedGuestMode ? pVCpu->hm.s.svm.pVmcb : pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-
+    PVM      pVM = pVCpu->CTX_SUFF(pVM);
+    PSVMVMCB pVmcb = pSvmTransient->pVmcb;
     hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
 

@@ -4443,5 +4448,5 @@
 
     uint8_t *pbMsrBitmap;
-    if (!fInNestedGuestMode)
+    if (!pSvmTransient->fIsNestedGuest)
         pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
     else

@@ -4523,5 +4528,4 @@
 }
 
-
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**

@@ -4549,86 +4553,10 @@
 #endif
 }
-
-
-/**
- * Performs some essential restoration of state after running nested-guest code in
- * AMD-V.
- *
- * @param   pVM             The cross context VM structure.
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pMixedCtx       Pointer to the nested-guest-CPU context. The data maybe
- *                          out-of-sync. Make sure to update the required fields
- *                          before using them.
- * @param   pSvmTransient   Pointer to the SVM transient structure.
- * @param   rcVMRun         Return code of VMRUN.
- *
- * @remarks Called with interrupts disabled.
- * @remarks No-long-jump zone!!! This function will however re-enable longjmps
- *          unconditionally when it is safe to do so.
- */
-static void hmR0SvmPostRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
-{
-    RT_NOREF(pVM);
-    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-
-    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
-    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */
-
-    /* TSC read must be done early for maximum accuracy. */
-    PSVMVMCB             pVmcbNstGst      = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    PSVMVMCBCTRL         pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pMixedCtx);
-    if (!(pVmcbNstGstCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
-    {
-        /*
-         * Undo what we did in hmR0SvmUpdateTscOffsetting() and HMSvmNstGstApplyTscOffset()
-         * but don't restore the nested-guest VMCB TSC offset here. It shall eventually be
-         * restored on #VMEXIT in HMSvmNstGstVmExitNotify().
-         */
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcbNstGstCtrl->u64TSCOffset - pVmcbNstGstCache->u64TSCOffset);
-    }
-
-    if (pSvmTransient->fRestoreTscAuxMsr)
-    {
-        uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
-        CPUMR0SetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
-        if (u64GuestTscAuxMsr != pVCpu->hm.s.u64HostTscAux)
-            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
-    }
-
-    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
-    TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
-
-    Assert(!(ASMGetFlags() & X86_EFL_IF));
-    ASMSetFlags(pSvmTransient->fEFlags);                        /* Enable interrupts. */
-    VMMRZCallRing3Enable(pVCpu);                                /* It is now safe to do longjmps to ring-3!!! */
-
-    /* Mark the VMCB-state cache as unmodified by VMM. */
-    pVmcbNstGstCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;
-
-    /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
-    if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
-    {
-        Log4(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));
-        return;
-    }
-
-    pSvmTransient->u64ExitCode  = pVmcbNstGstCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
-    HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcbNstGstCtrl->u64ExitCode);/* Update the #VMEXIT history array. */
-    pSvmTransient->fVectoringDoublePF = false;                  /* Vectoring double page-fault needs to be determined later. */
-    pSvmTransient->fVectoringPF       = false;                  /* Vectoring page-fault needs to be determined later. */
-
-    Assert(!pVCpu->hm.s.svm.fSyncVTpr);
-    hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcbNstGst);       /* Save the nested-guest state from the VMCB to the
-                                                                   guest-CPU context. */
-}
-#endif
-
-/**
- * Performs some essential restoration of state after running guest code in
- * AMD-V.
- *
- * @param   pVM             The cross context VM structure.
+#endif
+
+/**
+ * Performs some essential restoration of state after running guest (or
+ * nested-guest) code in AMD-V.
+ *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   pMixedCtx       Pointer to the guest-CPU context. The data maybe

@@ -4642,17 +4570,31 @@
 *          unconditionally when it is safe to do so.
 */
-static void hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
+static void hmR0SvmPostRunGuest(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 
+    uint64_t const uHostTsc = ASMReadTSC();                     /* Read the TSC as soon as possible. */
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
     ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */
 
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
-    pVmcb->ctrl.u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;        /* Mark the VMCB-state cache as unmodified by VMM. */
+    PSVMVMCB     pVmcb     = pSvmTransient->pVmcb;
+    PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
 
     /* TSC read must be done early for maximum accuracy. */
-    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);
+    if (!(pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
+    {
+        if (!pSvmTransient->fIsNestedGuest)
+            TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
+        else
+        {
+            /*
+             * Undo what we did in hmR0SvmUpdateTscOffsetting() and HMSvmNstGstApplyTscOffset()
+             * but don't restore the nested-guest VMCB TSC offset here. It shall eventually be
+             * restored on #VMEXIT in HMSvmNstGstVmExitNotify().
+             */
+            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pMixedCtx);
+            TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset - pVmcbNstGstCache->u64TSCOffset);
+        }
+    }
 
     if (pSvmTransient->fRestoreTscAuxMsr)

@@ -4679,29 +4621,30 @@
     }
 
-    pSvmTransient->u64ExitCode  = pVmcb->ctrl.u64ExitCode;      /* Save the #VMEXIT reason. */
-    HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcb->ctrl.u64ExitCode);     /* Update the #VMEXIT history array. */
+    pSvmTransient->u64ExitCode  = pVmcbCtrl->u64ExitCode;       /* Save the #VMEXIT reason. */
+    HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcbCtrl->u64ExitCode);      /* Update the #VMEXIT history array. */
+    pVmcbCtrl->u32VmcbCleanBits       = HMSVM_VMCB_CLEAN_ALL;   /* Mark the VMCB-state cache as unmodified by VMM. */
     pSvmTransient->fVectoringDoublePF = false;                  /* Vectoring double page-fault needs to be determined later. */
-    pSvmTransient->fVectoringPF = false;                        /* Vectoring page-fault needs to be determined later. */
+    pSvmTransient->fVectoringPF       = false;                  /* Vectoring page-fault needs to be determined later. */
 
     hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcb);             /* Save the guest state from the VMCB to the guest-CPU context. */
 
-    if (RT_LIKELY(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID))
-    {
-        if (pVCpu->hm.s.svm.fSyncVTpr)
-        {
-            /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
-            if (   pVM->hm.s.fTPRPatchingActive
-                && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
-            {
-                int rc = APICSetTpr(pVCpu, pMixedCtx->msrLSTAR & 0xff);
-                AssertRC(rc);
-                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
-            }
-            else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
-            {
-                int rc = APICSetTpr(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
-                AssertRC(rc);
-                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
-            }
+    if (   pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
+        && pVCpu->hm.s.svm.fSyncVTpr)
+    {
+        Assert(!pSvmTransient->fIsNestedGuest);
+        /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
+        if (   pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive
+            && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
+        {
+            int rc = APICSetTpr(pVCpu, pMixedCtx->msrLSTAR & 0xff);
+            AssertRC(rc);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
+        }
+        /* Sync TPR when we aren't intercepting CR8 writes. */
+        else if (pSvmTransient->u8GuestTpr != pVmcbCtrl->IntCtrl.n.u8VTPR)
+        {
+            int rc = APICSetTpr(pVCpu, pVmcbCtrl->IntCtrl.n.u8VTPR << 4);
+            AssertRC(rc);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
+        }
         }
     }

@@ -4725,5 +4668,7 @@
 
     SVMTRANSIENT SvmTransient;
+    RT_ZERO(SvmTransient);
     SvmTransient.fUpdateTscOffsetting = true;
+    SvmTransient.pVmcb = pVCpu->hm.s.svm.pVmcb;
 
     int rc = VERR_INTERNAL_ERROR_5;

@@ -4745,10 +4690,10 @@
          * This also disables flushing of the R0-logger instance (if any).
          */
-        hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
+        hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
         rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
 
         /* Restore any residual host-state and save any bits shared between host
            and guest into the guest-CPU state.  Re-enables interrupts! */
-        hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
+        hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc);
 
         if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */

@@ -4799,5 +4744,7 @@
 
     SVMTRANSIENT SvmTransient;
+    RT_ZERO(SvmTransient);
     SvmTransient.fUpdateTscOffsetting = true;
+    SvmTransient.pVmcb = pVCpu->hm.s.svm.pVmcb;
 
     uint16_t uCsStart  = pCtx->cs.Sel;

@@ -4826,5 +4773,5 @@
         VMMRZCallRing3Disable(pVCpu);
         VMMRZCallRing3RemoveNotification(pVCpu);
-        hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
+        hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
 
         rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);

@@ -4834,5 +4781,5 @@
          * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
          */
-        hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
+        hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc);
         if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
                         || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */

@@ -4905,5 +4852,8 @@
 
     SVMTRANSIENT SvmTransient;
+    RT_ZERO(SvmTransient);
     SvmTransient.fUpdateTscOffsetting = true;
+    SvmTransient.pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    SvmTransient.fIsNestedGuest = true;
 
     int rc = VERR_INTERNAL_ERROR_4;

@@ -4928,5 +4878,5 @@
          * This also disables flushing of the R0-logger instance (if any).
          */
-        hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
+        hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
 
         rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);

@@ -4934,5 +4884,5 @@
         /* Restore any residual host-state and save any bits shared between host
            and guest into the guest-CPU state.  Re-enables interrupts! */
-        hmR0SvmPostRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient, rc);
+        hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc);
 
         if (RT_LIKELY(   rc == VINF_SUCCESS