Changeset 70177 in vbox for trunk/src/VBox
- Timestamp: Dec 16, 2017 4:07:02 AM
- svn:sync-xref-src-repo-rev: 119742
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r70116 → r70177

@@ -1854,4 +1854,5 @@
     /* First, load the guest intercepts into the guest VMCB. */
     PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    Assert(!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR));
     hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx);
 
@@ -1891,4 +1892,7 @@
               == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
     pVmcbNstGst->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VMMCALL;
+
+    /* Finally, update the VMCB clean bits. */
+    pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 
     Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS));

@@ -2261,7 +2265,9 @@
               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
 
-    Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 (HyperCR3=%#RX64) CR4=%#RX32 rc=%d\n",
-          pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pVmcbNstGst->guest.u64CR3, pCtx->cr4, rc));
+    Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 (HyperCR3=%#RX64) CR4=%#RX32 "
+          "ESP=%#RX32 EBP=%#RX32 rc=%d\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->cr0, pCtx->cr3,
+          pVmcbNstGst->guest.u64CR3, pCtx->cr4, pCtx->esp, pCtx->ebp, rc));
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
+
     return rc;
 }

@@ -3117,7 +3123,5 @@
 
 /**
- * Sets the virtual interrupt intercept control in the VMCB which
- * instructs AMD-V to cause a \#VMEXIT as soon as the guest is in a state to
- * receive interrupts.
+ * Sets the virtual interrupt intercept control in the VMCB.
  *
  * @param   pVmcb       Pointer to the VM control block.

@@ -3125,17 +3129,24 @@
 DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
 {
+    /*
+     * When AVIC isn't supported, indicate that a virtual interrupt is pending and to
+     * cause a #VMEXIT when the guest is ready to accept interrupts. At #VMEXIT, we
+     * then get the interrupt from the APIC (updating ISR at the right time) and
+     * inject the interrupt.
+     *
+     * With AVIC is supported, we could make use of the asynchronously delivery without
+     * #VMEXIT and we would be passing the AVIC page to SVM.
+     */
     if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
     {
-        pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;    /* A virtual interrupt is pending. */
-        pVmcb->ctrl.IntCtrl.n.u8VIntrVector = 0;    /* Vector not necessary as we #VMEXIT for delivering the interrupt. */
+        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 0);
+        pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
         pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
         pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
-
-        Log4(("Setting VINTR intercept\n"));
-    }
-}
-
-
-#if 0
+        Log4(("Set VINTR intercept\n"));
+    }
+}
+
+
 /**
  * Clears the virtual interrupt intercept control in the VMCB as

@@ -3149,10 +3160,11 @@
     if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
     {
+        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 1);
+        pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;
         pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
-        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
-        Log4(("Clearing VINTR intercept\n"));
-    }
-}
-#endif
+        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+        Log4(("Cleared VINTR intercept\n"));
+    }
+}
 
 

@@ -6911,10 +6923,5 @@
 
     PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-    pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;    /* No virtual interrupts pending, we'll inject the current one/NMI before reentry. */
-    pVmcb->ctrl.IntCtrl.n.u8VIntrVector = 0;
-
-    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs, it is now ready. */
-    pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
-    pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+    hmR0SvmClearVirtIntrIntercept(pVmcb);
 
     /* Deliver the pending interrupt/NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */

@@ -7459,4 +7466,12 @@
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
+
+    /*
+     * The guest should no longer receive interrupts. Until VGIF is supported,
+     * clear virtual interrupt intercepts here.
+     */
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    hmR0SvmClearVirtIntrIntercept(pVmcb);
+
     return VBOXSTRICTRC_VAL(rcStrict);
 }
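The substance of the change: hmR0SvmSetVirtIntrIntercept and hmR0SvmClearVirtIntrIntercept now form a matched pair that toggles the virtual-interrupt pending flag and the VINTR intercept together and invalidates the intercept and TPR VMCB clean bits, while the interrupt-window #VMEXIT path and the CLGI handler call the clear helper instead of open-coding the same writes. Below is a minimal standalone sketch of that pairing under simplified assumptions: VMCBSTANDIN, its field names, and the bit values are placeholders chosen for illustration, not the real VirtualBox or AMD-V definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the VMCB fields and flags touched by the changeset;
   the real definitions live in VirtualBox's SVM headers. */
#define SVM_CTRL_INTERCEPT_VINTR      UINT64_C(0x0000000000000010)
#define HMSVM_VMCB_CLEAN_INTERCEPTS   UINT32_C(0x00000001)
#define HMSVM_VMCB_CLEAN_TPR          UINT32_C(0x00000008)

typedef struct VMCBSTANDIN
{
    uint64_t u64InterceptCtrl;   /* Intercept control bits.          */
    uint32_t u32VmcbCleanBits;   /* VMCB clean bits (cache control). */
    uint8_t  u1VIrqPending;      /* Virtual interrupt pending flag.  */
} VMCBSTANDIN;

/* Mirror of the paired set/clear pattern: the pending flag and the VINTR
   intercept are always toggled together, and both the intercept and TPR
   clean bits are invalidated so the changed fields are re-read. */
static void SetVirtIntrIntercept(VMCBSTANDIN *pVmcb)
{
    if (!(pVmcb->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
    {
        assert(pVmcb->u1VIrqPending == 0);
        pVmcb->u1VIrqPending     = 1;
        pVmcb->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
        pVmcb->u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    }
}

static void ClearVirtIntrIntercept(VMCBSTANDIN *pVmcb)
{
    if (pVmcb->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
    {
        assert(pVmcb->u1VIrqPending == 1);
        pVmcb->u1VIrqPending     = 0;
        pVmcb->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
        pVmcb->u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    }
}

int main(void)
{
    VMCBSTANDIN Vmcb = { 0, UINT32_C(0xffffffff), 0 };

    SetVirtIntrIntercept(&Vmcb);     /* Request a #VMEXIT once interrupts can be taken.   */
    ClearVirtIntrIntercept(&Vmcb);   /* E.g. after the VINTR #VMEXIT or after CLGI.       */

    printf("VINTR intercept=%d, VIrqPending=%u\n",
           !!(Vmcb.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR), Vmcb.u1VIrqPending);
    return 0;
}

Keeping the flag and the intercept bit in one place is what lets the Assert in each helper catch a mismatch between the two, which the previously open-coded call sites did not check.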