Changeset 71918 in vbox for trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
Timestamp: Apr 19, 2018 11:06:54 AM (7 years ago)
File: 1 edited
Legend: unmodified lines are shown with a leading space, added lines with a leading "+", removed lines with a leading "-".
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp    (revision 71915)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp    (revision 71918)
@@ -178,5 +178,5 @@
  *
  * SMIs can and do happen in normal operation. We need not intercept them
- * while executing the guest or nested-guest.
+ * while executing the guest (or nested-guest).
  */
 #define HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS  (   SVM_CTRL_INTERCEPT_INTR        \
@@ -269,4 +269,9 @@
     uint8_t                 abAlignment0[7];
 
+    /** Pointer to the currently executing VMCB. */
+    PSVMVMCB                pVmcb;
+    /** Whether we are currently executing a nested-guest. */
+    bool                    fIsNestedGuest;
+
     /** Whether the guest debug state was active at the time of \#VMEXIT. */
     bool                    fWasGuestDebugStateActive;
@@ -284,5 +289,5 @@
     bool                    fVectoringPF;
 } SVMTRANSIENT, *PSVMTRANSIENT;
-AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode,               sizeof(uint64_t));
-AssertCompileMemberAlignment(SVMTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t));
+AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
+AssertCompileMemberAlignment(SVMTRANSIENT, pVmcb,       sizeof(uint64_t));
 /** @} */
@@ -1074,5 +1079,5 @@
 
 /**
- * Gets a pointer to the currently active guest or nested-guest VMCB.
+ * Gets a pointer to the currently active guest (or nested-guest) VMCB.
  *
  * @returns Pointer to the current context VMCB.
@@ -2667,5 +2672,5 @@
 
 /**
- * Loads the state shared between the host and guest or nested-guest into the
+ * Loads the state shared between the host and guest (or nested-guest) into the
  * VMCB.
  *
@@ -2719,7 +2724,7 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
+ * @param   pMixedCtx   Pointer to the guest-CPU or nested-guest-CPU
+ *                      context. The data may be out-of-sync. Make sure to
+ *                      update the required fields before using them.
  * @param   pVmcb       Pointer to the VM control block.
  */
@@ -2733,8 +2738,9 @@
     pMixedCtx->rax        = pVmcb->guest.u64RAX;
 
+    PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
 #ifdef VBOX_WITH_NESTED_HWVIRT
     if (!CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx))
     {
-        if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
+        if (pVmcbCtrl->IntCtrl.n.u1VGifEnable)
         {
             /*
@@ -2743,5 +2749,5 @@
              */
             Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fVGif);
-            pMixedCtx->hwvirt.fGif = pVmcb->ctrl.IntCtrl.n.u1VGif;
+            pMixedCtx->hwvirt.fGif = pVmcbCtrl->IntCtrl.n.u1VGif;
         }
     }
@@ -2749,5 +2755,5 @@
     {
         /* Sync/verify nested-guest's V_IRQ pending and our force-flag. */
-        if (!pVmcb->ctrl.IntCtrl.n.u1VIrqPending)
+        if (!pVmcbCtrl->IntCtrl.n.u1VIrqPending)
         {
             if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
@@ -2762,5 +2768,5 @@
      * Guest interrupt shadow.
      */
-    if (pVmcb->ctrl.IntShadow.n.u1IntShadow)
+    if (pVmcbCtrl->IntShadow.n.u1IntShadow)
         EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
     else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
@@ -2768,10 +2774,11 @@
 
     /*
-     * Guest Control registers: CR0, CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted.
+     * Guest control registers: CR0, CR2, CR3 (handled at the end).
+     * Accesses to other control registers are always intercepted.
      */
-    pMixedCtx->cr2        = pVmcb->guest.u64CR2;
+    pMixedCtx->cr2 = pVmcb->guest.u64CR2;
 
     /* If we're not intercepting changes to CR0 TS & MP bits, sync those bits here. */
-    if (!(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(0)))
+    if (!(pVmcbCtrl->u16InterceptWrCRx & RT_BIT(0)))
     {
         pMixedCtx->cr0 = (pMixedCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP))
@@ -2880,5 +2887,5 @@
      * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
      */
-    if (   pVmcb->ctrl.NestedPagingCtrl.n.u1NestedPaging
+    if (   pVmcbCtrl->NestedPagingCtrl.n.u1NestedPaging
         && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
     {
@@ -3794,5 +3801,5 @@
 
 /**
- * Injects any pending events into the guest or nested-guest.
+ * Injects any pending events into the guest (or nested-guest).
  *
  * @param   pVCpu   The cross context virtual CPU structure.
@@ -3858,5 +3865,5 @@
 
     /*
-     * Update the guest interrupt shadow in the guest or nested-guest VMCB.
+     * Update the guest interrupt shadow in the guest (or nested-guest) VMCB.
      *
      * For nested-guests: We need to update it too for the scenario where IEM executes
@@ -4177,5 +4184,5 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull); /** @todo Get new STAM counter for this? */
 
-    /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware SVM. */
+    /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware-assisted SVM. */
     Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
 
@@ -4369,10 +4376,9 @@
 
 /**
- * Prepares to run guest or nested-guest code in AMD-V and we've committed to
+ * Prepares to run guest (or nested-guest) code in AMD-V and we've committed to
  * doing so.
  *
  * This means there is no backing out to ring-3 or anywhere else at this point.
  *
- * @param   pVM             The cross context VM structure.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pCtx            Pointer to the guest-CPU context.
@@ -4382,5 +4388,5 @@
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+static void hmR0SvmPreRunGuestCommitted(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
@@ -4391,7 +4397,6 @@
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);        /* Indicate the start of guest execution. */
 
-    bool const fInNestedGuestMode = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
-    PSVMVMCB pVmcb = !fInNestedGuestMode ? pVCpu->hm.s.svm.pVmcb : pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-
+    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
+    PSVMVMCB pVmcb = pSvmTransient->pVmcb;
     hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
 
@@ -4443,5 +4448,5 @@
 
     uint8_t *pbMsrBitmap;
-    if (!fInNestedGuestMode)
+    if (!pSvmTransient->fIsNestedGuest)
         pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
     else
@@ -4523,5 +4528,4 @@
 }
 
-
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
@@ -4549,86 +4553,10 @@
 #endif
 }
-
-
-/**
- * Performs some essential restoration of state after running nested-guest code in
- * AMD-V.
- *
- * @param   pVM             The cross context VM structure.
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   pMixedCtx       Pointer to the nested-guest-CPU context. The data maybe
- *                          out-of-sync. Make sure to update the required fields
- *                          before using them.
- * @param   pSvmTransient   Pointer to the SVM transient structure.
- * @param   rcVMRun         Return code of VMRUN.
- *
- * @remarks Called with interrupts disabled.
- * @remarks No-long-jump zone!!! This function will however re-enable longjmps
- *          unconditionally when it is safe to do so.
- */
-static void hmR0SvmPostRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
-{
-    RT_NOREF(pVM);
-    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-
-    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
-    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */
-
-    /* TSC read must be done early for maximum accuracy. */
-    PSVMVMCB             pVmcbNstGst      = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    PSVMVMCBCTRL         pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pMixedCtx);
-    if (!(pVmcbNstGstCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
-    {
-        /*
-         * Undo what we did in hmR0SvmUpdateTscOffsetting() and HMSvmNstGstApplyTscOffset()
-         * but don't restore the nested-guest VMCB TSC offset here. It shall eventually be
-         * restored on #VMEXIT in HMSvmNstGstVmExitNotify().
-         */
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcbNstGstCtrl->u64TSCOffset - pVmcbNstGstCache->u64TSCOffset);
-    }
-
-    if (pSvmTransient->fRestoreTscAuxMsr)
-    {
-        uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
-        CPUMR0SetGuestTscAux(pVCpu, u64GuestTscAuxMsr);
-        if (u64GuestTscAuxMsr != pVCpu->hm.s.u64HostTscAux)
-            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
-    }
-
-    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
-    TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
-    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
-
-    Assert(!(ASMGetFlags() & X86_EFL_IF));
-    ASMSetFlags(pSvmTransient->fEFlags);                        /* Enable interrupts. */
-    VMMRZCallRing3Enable(pVCpu);                                /* It is now safe to do longjmps to ring-3!!! */
-
-    /* Mark the VMCB-state cache as unmodified by VMM. */
-    pVmcbNstGstCtrl->u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;
-
-    /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */
-    if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
-    {
-        Log4(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));
-        return;
-    }
-
-    pSvmTransient->u64ExitCode  = pVmcbNstGstCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
-    HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcbNstGstCtrl->u64ExitCode);/* Update the #VMEXIT history array. */
-    pSvmTransient->fVectoringDoublePF = false;                  /* Vectoring double page-fault needs to be determined later. */
-    pSvmTransient->fVectoringPF = false;                        /* Vectoring page-fault needs to be determined later. */
-
-    Assert(!pVCpu->hm.s.svm.fSyncVTpr);
-    hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcbNstGst);       /* Save the nested-guest state from the VMCB to the
-                                                                   guest-CPU context. */
-}
-#endif
-
-/**
- * Performs some essential restoration of state after running guest code in
- * AMD-V.
- *
- * @param   pVM             The cross context VM structure.
+#endif
+
+/**
+ * Performs some essential restoration of state after running guest (or
+ * nested-guest) code in AMD-V.
+ *
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pMixedCtx       Pointer to the guest-CPU context. The data maybe
@@ -4642,17 +4570,31 @@
  *          unconditionally when it is safe to do so.
  */
-static void hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
+static void hmR0SvmPostRunGuest(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
 
+    uint64_t const uHostTsc = ASMReadTSC();                     /* Read the TSC as soon as possible. */
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
     ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */
 
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
-    pVmcb->ctrl.u32VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;        /* Mark the VMCB-state cache as unmodified by VMM. */
+    PSVMVMCB     pVmcb     = pSvmTransient->pVmcb;
+    PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
 
     /* TSC read must be done early for maximum accuracy. */
-    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);
+    if (!(pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
+    {
+        if (!pSvmTransient->fIsNestedGuest)
+            TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset);
+        else
+        {
+            /*
+             * Undo what we did in hmR0SvmUpdateTscOffsetting() and HMSvmNstGstApplyTscOffset()
+             * but don't restore the nested-guest VMCB TSC offset here. It shall eventually be
+             * restored on #VMEXIT in HMSvmNstGstVmExitNotify().
+             */
+            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu, pMixedCtx);
+            TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVmcbCtrl->u64TSCOffset - pVmcbNstGstCache->u64TSCOffset);
+        }
+    }
 
     if (pSvmTransient->fRestoreTscAuxMsr)
@@ -4679,29 +4621,30 @@
     }
 
-    pSvmTransient->u64ExitCode  = pVmcb->ctrl.u64ExitCode;      /* Save the #VMEXIT reason. */
-    HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcb->ctrl.u64ExitCode);     /* Update the #VMEXIT history array. */
+    pSvmTransient->u64ExitCode        = pVmcbCtrl->u64ExitCode; /* Save the #VMEXIT reason. */
+    HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmcbCtrl->u64ExitCode);      /* Update the #VMEXIT history array. */
+    pVmcbCtrl->u32VmcbCleanBits       = HMSVM_VMCB_CLEAN_ALL;   /* Mark the VMCB-state cache as unmodified by VMM. */
     pSvmTransient->fVectoringDoublePF = false;                  /* Vectoring double page-fault needs to be determined later. */
-    pSvmTransient->fVectoringPF = false;                        /* Vectoring page-fault needs to be determined later. */
+    pSvmTransient->fVectoringPF       = false;                  /* Vectoring page-fault needs to be determined later. */
 
     hmR0SvmSaveGuestState(pVCpu, pMixedCtx, pVmcb);             /* Save the guest state from the VMCB to the guest-CPU context. */
 
-    if (RT_LIKELY(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID))
-    {
-        if (pVCpu->hm.s.svm.fSyncVTpr)
-        {
-            /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
-            if (   pVM->hm.s.fTPRPatchingActive
-                && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
-            {
-                int rc = APICSetTpr(pVCpu, pMixedCtx->msrLSTAR & 0xff);
-                AssertRC(rc);
-                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
-            }
-            else if (pSvmTransient->u8GuestTpr != pVmcb->ctrl.IntCtrl.n.u8VTPR)
-            {
-                int rc = APICSetTpr(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
-                AssertRC(rc);
-                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
-            }
+    if (   pSvmTransient->u64ExitCode != SVM_EXIT_INVALID
+        && pVCpu->hm.s.svm.fSyncVTpr)
+    {
+        Assert(!pSvmTransient->fIsNestedGuest);
+        /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
+        if (   pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive
+            && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
+        {
+            int rc = APICSetTpr(pVCpu, pMixedCtx->msrLSTAR & 0xff);
+            AssertRC(rc);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
+        }
+        /* Sync TPR when we aren't intercepting CR8 writes. */
+        else if (pSvmTransient->u8GuestTpr != pVmcbCtrl->IntCtrl.n.u8VTPR)
+        {
+            int rc = APICSetTpr(pVCpu, pVmcbCtrl->IntCtrl.n.u8VTPR << 4);
+            AssertRC(rc);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
         }
     }
@@ -4725,5 +4668,7 @@
 
     SVMTRANSIENT SvmTransient;
+    RT_ZERO(SvmTransient);
     SvmTransient.fUpdateTscOffsetting = true;
+    SvmTransient.pVmcb = pVCpu->hm.s.svm.pVmcb;
 
     int rc = VERR_INTERNAL_ERROR_5;
@@ -4745,10 +4690,10 @@
      * This also disables flushing of the R0-logger instance (if any).
      */
-    hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
+    hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
     rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
 
     /* Restore any residual host-state and save any bits shared between host
        and guest into the guest-CPU state.  Re-enables interrupts! */
-    hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
+    hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc);
 
     if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
@@ -4799,5 +4744,7 @@
 
     SVMTRANSIENT SvmTransient;
+    RT_ZERO(SvmTransient);
     SvmTransient.fUpdateTscOffsetting = true;
+    SvmTransient.pVmcb = pVCpu->hm.s.svm.pVmcb;
 
     uint16_t uCsStart = pCtx->cs.Sel;
@@ -4826,5 +4773,5 @@
     VMMRZCallRing3Disable(pVCpu);
     VMMRZCallRing3RemoveNotification(pVCpu);
-    hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
+    hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
 
     rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
@@ -4834,5 +4781,5 @@
      * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
      */
-    hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
+    hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc);
     if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
                     || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
@@ -4905,5 +4852,8 @@
 
     SVMTRANSIENT SvmTransient;
+    RT_ZERO(SvmTransient);
     SvmTransient.fUpdateTscOffsetting = true;
+    SvmTransient.pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    SvmTransient.fIsNestedGuest = true;
 
     int rc = VERR_INTERNAL_ERROR_4;
@@ -4928,11 +4878,11 @@
      * This also disables flushing of the R0-logger instance (if any).
      */
-    hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
+    hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient);
 
     rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);
 
     /* Restore any residual host-state and save any bits shared between host
        and guest into the guest-CPU state.  Re-enables interrupts! */
-    hmR0SvmPostRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient, rc);
+    hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc);
 
     if (RT_LIKELY(   rc == VINF_SUCCESS
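For readers skimming the diff, the core of the change is that the per-run SVMTRANSIENT structure now carries the currently executing VMCB pointer and a nested-guest flag, so the shared pre-run and post-run paths no longer re-derive them, which is what allows hmR0SvmPostRunGuestNested to be folded into hmR0SvmPostRunGuest. The sketch below is a simplified, self-contained illustration of that caching pattern; the struct members and the run-loop shape mirror the changeset, but the types and values are stand-ins, not the real VirtualBox definitions.

/* Simplified illustration of the SVMTRANSIENT caching pattern from r71918.
 * All types here are stand-ins; only the shape of the logic mirrors the diff. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct VMCB { uint64_t u64ExitCode; } VMCB;     /* stand-in for SVMVMCB */

typedef struct SVMTRANSIENT
{
    VMCB *pVmcb;            /* currently executing VMCB (guest or nested-guest) */
    bool  fIsNestedGuest;   /* whether we are currently executing a nested-guest */
} SVMTRANSIENT;

/* Post-run path shared by both run loops: it reads the active VMCB and the
 * nested-guest flag from the transient data instead of re-checking the mode. */
static void postRunGuest(SVMTRANSIENT *pSvmTransient)
{
    VMCB *pVmcb = pSvmTransient->pVmcb;
    printf("post-run: exit=%#llx nested=%d\n",
           (unsigned long long)pVmcb->u64ExitCode, pSvmTransient->fIsNestedGuest);
}

int main(void)
{
    VMCB GuestVmcb  = { 0x60 };   /* e.g. an external-interrupt #VMEXIT code */
    VMCB NstGstVmcb = { 0x7f };   /* e.g. a shutdown #VMEXIT code */

    /* Normal run loop: zero the transient data, then cache the guest VMCB. */
    SVMTRANSIENT SvmTransient;
    memset(&SvmTransient, 0, sizeof(SvmTransient));      /* plays the role of RT_ZERO() */
    SvmTransient.pVmcb = &GuestVmcb;
    postRunGuest(&SvmTransient);

    /* Nested-guest run loop: same shape, different VMCB plus the flag. */
    memset(&SvmTransient, 0, sizeof(SvmTransient));
    SvmTransient.pVmcb = &NstGstVmcb;
    SvmTransient.fIsNestedGuest = true;
    postRunGuest(&SvmTransient);
    return 0;
}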