Changeset 48196 in vbox for trunk/src/VBox
- Timestamp: Aug 30, 2013, 2:51:26 PM
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r48183)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r48196)
@@ -1051 +1051 @@

 /**
- * Loads the guest control registers (CR0, CR2, CR3, CR4) into the VMCB.
+ * Loads the guest CR0 control register into the guest-state area in the VMCB.
+ * Although the guest CR0 is a separate field in the VMCB we have to consider
+ * the FPU state itself which is shared between the host and the guest.
  *
  * @returns VBox status code.
- * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVM         Pointer to the VMCPU.
  * @param   pVmcb       Pointer to the VMCB.
- * @param   pCtx        Pointer the guest-CPU context.
+ * @param   pCtx        Pointer to the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+static void hmR0SvmLoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     /*
@@ -1121 +1123 @@
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     }
+}
+
+
+/**
+ * Loads the guest control registers (CR2, CR3, CR4) into the VMCB.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVmcb       Pointer to the VMCB.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);

     /*
@@ -1324 +1342 @@

 /**
- * Loads the guest debug registers (DR6, DR7) into the VMCB and programs the
- * necessary intercepts accordingly.
+ * Loads the guest state into the VMCB and programs the necessary intercepts
+ * accordingly.
  *
  * @param   pVCpu       Pointer to the VMCPU.
@@ -1334 +1352 @@
  * @remarks Requires EFLAGS to be up-to-date in the VMCB!
  */
-DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+DECLINLINE(void) hmR0SvmLoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
@@ -1386 +1404 @@
         fInterceptDB = true;
         fInterceptMovDRx = true;
-        Log5(("hmR0SvmLoadGuestDebugRegs: Loaded hyper DRx\n"));
+        Log5(("hmR0SvmLoadSharedDebugState: Loaded hyper DRx\n"));
     }
     else
@@ -1414 +1432 @@
         Assert(!CPUMIsHyperDebugStateActive(pVCpu));
         Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
-        Log5(("hmR0SvmLoadGuestDebugRegs: Loaded guest DRx\n"));
+        Log5(("hmR0SvmLoadSharedDebugState: Loaded guest DRx\n"));
     }
     /*
@@ -1685 +1703 @@
     pVmcb->guest.u64RAX = pCtx->rax;

-    /* hmR0SvmLoadGuestDebugRegs() must be called -after- updating guest RFLAGS as the RFLAGS may need to be changed. */
-    hmR0SvmLoadGuestDebugRegs(pVCpu, pVmcb, pCtx);
-
     rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
@@ -1715 +1730 @@
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;
+}
+
+
+/**
+ * Loads the state shared between the host and guest into the
+ * VMCB.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
+        hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
+
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
+        hmR0SvmLoadSharedDebugState(pVCpu, pVmcb, pCtx);
+
+    AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#x\n",
+              pVCpu->hm.s.fContextUseFlags));
 }

@@ -1929 +1971 @@

     /* Deregister hook now that we've left HM context before re-enabling preemption. */
-    /** @todo This is bad. Deregistering here means we need to VMCLEAR always
-     *        (longjmp/exit-to-r3) in VT-x which is not efficient. */
     if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
         VMMR0ThreadCtxHooksDeregister(pVCpu);
@@ -2708 +2748 @@

     /*
+     * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
+     * We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
+     */
+    /** @todo The above assumption could be wrong. It's not documented what
+     *        should be done wrt to the VMCB Clean Bit, but we'll find out the
+     *        hard way. */
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
+
+#ifdef HMSVM_SYNC_FULL_GUEST_STATE
+    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
+#endif
+
+    /* Load the guest bits that are not shared with the host in any way since we can longjmp or get preempted. */
+    rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
+    AssertRCReturn(rc, rc);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
+
+    /*
+     * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
+     * so we can update it on the way back if the guest changed the TPR.
+     */
+    if (pVCpu->hm.s.svm.fSyncVTpr)
+    {
+        if (pVM->hm.s.fTPRPatchingActive)
+            pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
+        else
+            pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
+    }
+
+    /*
      * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
      * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
@@ -2760 +2831 @@
     hmR0SvmInjectPendingEvent(pVCpu, pCtx);

-    /*
-     * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
-     * We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
-     */
-    /** @todo The above assumption could be wrong. It's not documented what
-     *        should be done wrt to the VMCB Clean Bit, but we'll find out the
-     *        hard way. */
+    /* Load the state shared between host and guest (FPU, debug). */
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
+        hmR0VmxLoadSharedState(pVM, pVCpu, pCtx);
+    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;       /* Preemption might set this, nothing to do on AMD-V. */
+    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
+
+    /* If VMCB Clean Bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-    pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
-
-#ifdef HMSVM_SYNC_FULL_GUEST_STATE
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
-#endif
-
-    /* Load the guest state. */
-    int rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx);
-    AssertRC(rc);
-    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;       /* Preemption might set this, nothing to do on AMD-V. */
-    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
-
-    /* If VMCB Clean Bits isn't supported by the CPU, simply mark all state-bits as dirty, indicating (re)load-from-VMCB. */
     if (!(pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN))
         pVmcb->ctrl.u64VmcbCleanBits = 0;
-
-    /*
-     * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
-     * so we can update it on the way back if the guest changed the TPR.
-     */
-    if (pVCpu->hm.s.svm.fSyncVTpr)
-    {
-        if (pVM->hm.s.fTPRPatchingActive)
-            pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
-        else
-            pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
-    }

     /* Setup TSC offsetting. */
@@ -4169 +4214 @@
     pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

+    /* We're playing with the host CPU state here, make sure we don't preempt. */
+    HM_DISABLE_PREEMPT_IF_NEEDED();
+
     /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
     CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
     Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
+
+    HM_RESTORE_PREEMPT_IF_NEEDED();

     STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
@@ -4315 +4365 @@
         || DBGFBpIsHwIoArmed(pVM)))
     {
+        /* We're playing with the host CPU state here, make sure we don't preempt. */
+        HM_DISABLE_PREEMPT_IF_NEEDED();
+
         STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
         CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
@@ -4331 +4384 @@
             && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
             rcStrict = rcStrict2;
+
+        HM_RESTORE_PREEMPT_IF_NEEDED();
     }

@@ -4664 +4719 @@
 #endif

+    /* We're playing with the host CPU state here, make sure we don't preempt. */
+    HM_DISABLE_PREEMPT_IF_NEEDED();
+
     /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
     int rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
@@ -4669 +4727 @@
     {
         Assert(CPUMIsGuestFPUStateActive(pVCpu));
+        HM_RESTORE_PREEMPT_IF_NEEDED();
+
         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
         return VINF_SUCCESS;
     }
+
+    HM_RESTORE_PREEMPT_IF_NEEDED();

     /* Forward #NM to the guest. */
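The heart of this change is easier to see outside the diff: guest-only state keeps being loaded early, where a longjmp back to ring-3 or a preemption is still survivable, while state the host also uses (the FPU via CR0, the debug registers) is deferred to a new shared-state loader that runs with preemption disabled, right before the world switch. Below is a toy, self-contained C sketch of that flag-driven split. All types, names, and bit values here (VCPUSTATE, loadGuestState, loadSharedState, the flag assignments) are stand-ins invented for illustration; only the dispatch shape follows the changeset.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in dirty-state flags modelled on the HM_CHANGED_* bits in the diff;
 * the bit values are made up for illustration. */
#define HM_CHANGED_GUEST_CR0                (UINT32_C(1) << 0)
#define HM_CHANGED_GUEST_DEBUG              (UINT32_C(1) << 1)
#define HM_CHANGED_GUEST_SEGMENT_REGS       (UINT32_C(1) << 2)  /* hypothetical guest-only bit */
#define HM_CHANGED_HOST_GUEST_SHARED_STATE  (HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_DEBUG)

typedef struct { uint32_t fContextUseFlags; } VCPUSTATE;

/* Guest-only state: loaded early, while longjmps and preemption may still occur. */
static void loadGuestState(VCPUSTATE *pVCpu)
{
    pVCpu->fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
    /* The CR0 and debug bits are deliberately left set for the shared-state pass. */
}

/* Shared host/guest state (FPU via CR0, debug registers): loaded last, right
 * before the world switch, mirroring hmR0VmxLoadSharedState() in the diff. */
static void loadSharedState(VCPUSTATE *pVCpu)
{
    if (pVCpu->fContextUseFlags & HM_CHANGED_GUEST_CR0)
        pVCpu->fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;       /* would load CR0/FPU here */
    if (pVCpu->fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
        pVCpu->fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;     /* would load DR6/DR7 here */
    assert(!(pVCpu->fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE));
}

int main(void)
{
    VCPUSTATE VCpu = { HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_DEBUG | HM_CHANGED_GUEST_SEGMENT_REGS };

    loadGuestState(&VCpu);              /* preemption still enabled here */
    /* ...interrupts/preemption disabled for the world switch... */
    if (VCpu.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
        loadSharedState(&VCpu);         /* must run without being preempted */

    printf("remaining dirty flags: %#x\n", (unsigned)VCpu.fContextUseFlags);
    return 0;
}

The ordering matters: were the FPU or DRx registers loaded while preemption was still possible, a context switch could silently replace them with host values before VMRUN, which is why the changeset also wraps CPUMR0LoadGuestFPU and the guest debug-state loads in the #NM and MOV DRx exit handlers with HM_DISABLE_PREEMPT_IF_NEEDED / HM_RESTORE_PREEMPT_IF_NEEDED.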