Changeset 45408 in vbox
- Timestamp: Apr 8, 2013 2:01:55 PM
- svn:sync-xref-src-repo-rev: 84836
- Files: 1 edited
Legend: lines prefixed with "-" were removed, lines prefixed with "+" were added, unprefixed lines are unmodified context.
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- r45387
+++ r45408

@@ -73 +73 @@
 #define VMX_UPDATED_GUEST_SYSENTER_EIP_MSR        RT_BIT(16)
 #define VMX_UPDATED_GUEST_SYSENTER_ESP_MSR        RT_BIT(17)
-#define VMX_UPDATED_GUEST_INTR_STATE              RT_BIT(18)
-#define VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS    RT_BIT(19)
-#define VMX_UPDATED_GUEST_ACTIVITY_STATE          RT_BIT(20)
-#define VMX_UPDATED_GUEST_APIC_STATE              RT_BIT(21)
+#define VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS    RT_BIT(18)
+#define VMX_UPDATED_GUEST_ACTIVITY_STATE          RT_BIT(19)
+#define VMX_UPDATED_GUEST_APIC_STATE              RT_BIT(20)
 #define VMX_UPDATED_GUEST_ALL    (  VMX_UPDATED_GUEST_FPU \
                                   | VMX_UPDATED_GUEST_RIP \
 ...
                                   | VMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
                                   | VMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
-                                  | VMX_UPDATED_GUEST_INTR_STATE \
                                   | VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
                                   | VMX_UPDATED_GUEST_ACTIVITY_STATE \
 ...

@@ -2436 +2434 @@

 /**
+ * Loads the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
+ * into the guest-state area in the VMCS.
+ *
+ * @param pVM         Pointer to the VM.
+ * @param pVCpu       Pointer to the VMCPU.
+ * @param pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                    out-of-sync. Make sure to update the required fields
+ *                    before using them.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(void) hmR0VmxLoadGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    /*
+     * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
+     * inhibit interrupts or clear any existing interrupt-inhibition.
+     */
+    uint32_t uIntrState = 0;
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+    {
+        /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
+        AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (VMX_UPDATED_GUEST_RIP | VMX_UPDATED_GUEST_RFLAGS)),
+                  ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
+        if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
+        {
+            /*
+             * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
+             * VT-x the flag's condition to be cleared is met and thus the cleared state is correct.
+             * hmR0VmxInjectPendingInterrupt() relies on us clearing this flag here.
+             */
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+        }
+        else if (pMixedCtx->eflags.Bits.u1IF)
+            uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
+        else
+            uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
+    }
+
+    Assert(!(uIntrState & 0xfffffff0));    /* Bits 31:4 MBZ. */
+    Assert((uIntrState & 0x3) != 0x3);     /* Block-by-STI and MOV SS cannot be simultaneously set. */
+    int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
+    AssertRC(rc);
+}
+
+
+/**
  * Loads the guest's RIP into the guest-state area in the VMCS.
  *
 ...
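The interruptibility-state value the new function writes is a 32-bit VMCS guest-state field whose low bits each record one blocking reason, and the two asserts before the VMWRITE follow directly from that layout. A minimal standalone sketch (bit assignments per the Intel SDM's "Guest Non-Register State" section; all names here are local to the sketch, not the VirtualBox headers):

#include <assert.h>
#include <stdint.h>

/* Guest interruptibility-state bits; the VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_*
   constants used in the hunk above presumably map to these. */
#define INTR_STATE_BLOCK_STI     UINT32_C(0x1)   /* Blocking by STI. */
#define INTR_STATE_BLOCK_MOVSS   UINT32_C(0x2)   /* Blocking by MOV SS/POP SS. */
#define INTR_STATE_BLOCK_SMI     UINT32_C(0x4)   /* Blocking by SMI. */
#define INTR_STATE_BLOCK_NMI     UINT32_C(0x8)   /* Blocking by NMI. */

/* The two invariants hmR0VmxLoadGuestIntrState() asserts before the VMWRITE. */
static void validateIntrState(uint32_t uIntrState)
{
    assert(!(uIntrState & UINT32_C(0xfffffff0)));  /* Bits 31:4 must be zero. */
    assert((uIntrState & 0x3) != 0x3);             /* STI and MOV SS blocking are never set together. */
}

int main(void)
{
    validateIntrState(0);                          /* No inhibition. */
    validateIntrState(INTR_STATE_BLOCK_STI);       /* e.g. guest executed STI with IF previously 0. */
    validateIntrState(INTR_STATE_BLOCK_MOVSS);     /* e.g. guest executed MOV SS. */
    return 0;
}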
@@ -3599 +3643 @@
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
     }
-    return rc;
-}
-
-
-/**
- * Loads the guest-interruptibility state (or "interrupt shadow" as AMD calls
- * it) into the guest-state area in the VMCS.
- *
- * @returns VBox status code.
- * @param pVM         Pointer to the VM.
- * @param pVCpu       Pointer to the VMCPU.
- * @param pCtx        Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- * @remarks Requires RIP, RFLAGS.
- */
-DECLINLINE(int) hmR0VmxLoadGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_INTR_STATE))
-        return VINF_SUCCESS;
-
-    /*
-     * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
-     * inhibit interrupts or clear any existing interrupt-inhibition.
-     */
-    uint32_t uIntrState = 0;
-    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
-    {
-        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
-        {
-            /*
-             * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in VT-x
-             * the flag's condition to be cleared is met and thus the cleared state is correct. Additionally, this means
-             * we need not re-read the VMCS field on the VM-exit path and clear/set this flag on every VM-exit. Finally,
-             * hmR0VmxInjectPendingInterrupt() relies on us clearing this flag here.
-             */
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-            uIntrState = 0;   /* Clear interrupt inhibition. */
-        }
-        else if (pCtx->eflags.u32 & X86_EFL_IF)
-        {
-            /** @todo Pretty sure we don't need to check for Rflags.IF here.
-             *        Interrupt-shadow only matters when RIP changes. */
-            /*
-             * We don't have enough information to distinguish a block-by-STI vs. block-by-MOV SS. Intel seems to think there
-             * is a slight difference regarding MOV SS additionally blocking some debug exceptions.
-             * See Intel spec. 24.2.2 "Guest Non-Register State" table "Format of Interruptibility State".
-             */
-            uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
-        }
-    }
-    else
-        uIntrState = 0;   /* No interrupt inhibition. */
-
-    Assert((uIntrState & 0x3) != 0x3);   /* Block-by-STI and MOV SS cannot be simultaneously set. */
-    Assert((pCtx->eflags.u32 & X86_EFL_IF) || uIntrState == 0);   /* If EFLAGS.IF is not set, no interrupt inhibition. */
-    int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
-    AssertRCReturn(rc, rc);
-    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_INTR_STATE;
     return rc;
 }
 ...

@@ -4444 +4429 @@
  * @param uVector     The exception vector.
  */
-DECLINLINE(bool) hmR0VmxIsBenignXcpt(const uint8_t uVector)
+DECLINLINE(bool) hmR0VmxIsBenignXcpt(const uint32_t uVector)
 {
     switch (uVector)
 ...
@@ -4474 +4459 @@
  * @param uVector     The exception vector.
  */
-DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint8_t uVector)
+DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
 {
     switch (uVector)
 ...

@@ -4540 +4525 @@
     AssertRCReturn(rc, rc);

-    uint8_t  uIntType    = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
-    uint8_t  uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
-    uint8_t  uIdtVector  = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
+    uint32_t uIntType    = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
+    uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
+    uint32_t uIdtVector  = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);

     typedef enum
 ...

@@ -4751 +4736 @@

     RTGCUINTREG uVal = 0;
-    int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP,              &uVal);
+    int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uVal);
     AssertRCReturn(rc, rc);
     pMixedCtx->rsp = uVal;
 ...

@@ -4810 +4795 @@

 /**
- * Saves the guest's interruptibility state.
- *
- * @returns VBox status code.
+ * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
+ * from the guest-state area in the VMCS.
+ *
  * @param pVM         Pointer to the VM.
  * @param pVCpu       Pointer to the VMCPU.
 ...
  * @remarks No-long-jump zone!!!
  */
-DECLINLINE(int) hmR0VmxSaveGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
-{
-    if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_INTR_STATE)
-        return VINF_SUCCESS;
-
+DECLINLINE(void) hmR0VmxSaveGuestIntrState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
     uint32_t uIntrState = 0;
     int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
+    AssertRC(rc);
+
     if (!uIntrState)
         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
 ...
             || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
         rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
-        rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);    /* RFLAGS is needed in hmR0VmxLoadGuestIntrState(). */
-        AssertRCReturn(rc, rc);
+        rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);    /* for hmR0VmxLoadGuestIntrState(). */
+        AssertRC(rc);
         EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
         Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     }
-
-    pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_INTR_STATE;
-    return rc;
 }
 ...
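Both the load path (the hunk at 2436 above) and this save path key the shadow off the instruction pointer via EMSetInhibitInterruptsPC()/EMGetInhibitInterruptsPC(): the inhibition is only meaningful while RIP still points at the instruction that raised it. A runnable standalone model of that bookkeeping (names local to this sketch, not the VirtualBox API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t g_ripInhibit;   /* RIP recorded when inhibition began. */
static bool     g_fInhibit;     /* models VMCPU_FF_INHIBIT_INTERRUPTS. */

static void setInhibit(uint64_t rip)
{
    g_ripInhibit = rip;
    g_fInhibit   = true;
}

static bool isShadowActive(uint64_t ripNow)
{
    if (g_fInhibit && ripNow != g_ripInhibit)
        g_fInhibit = false;     /* RIP moved on: the one-instruction window is over. */
    return g_fInhibit;
}

int main(void)
{
    setInhibit(0x1000);                       /* e.g. VM-exit while an STI shadow is up at RIP 0x1000. */
    printf("%d\n", isShadowActive(0x1000));   /* 1: still within the shadow.      */
    printf("%d\n", isShadowActive(0x1001));   /* 0: the next instruction has run. */
    return 0;
}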
@@ -5309 +5290 @@
     rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    rc = hmR0VmxSaveGuestIntrState(pVM, pVCpu, pMixedCtx);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestIntrState failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

     rc = hmR0VmxSaveGuestActivityState(pVM, pVCpu, pMixedCtx);
 ...

@@ -5497 +5475 @@
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
-    Log(("hmR0VmxLongJmpToRing3: rcExit=%d\n", rcExit));

     int rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
 ...

@@ -5603 +5580 @@

     VMMRZCallRing3Disable(pVCpu);
+    Log(("hmR0VmxLongJmpToRing3\n"));
     hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
     VMMRZCallRing3Enable(pVCpu);
 ...

@@ -5687 +5665 @@
  *                    before using them.
  *
- * @remarks This must be called only after hmR0VmxSaveGuestIntrState().
+ * @remarks Must be called after hmR0VmxLoadGuestIntrState().
  */
 static int hmR0VmxInjectPendingInterrupt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 ...

@@ -5727 +5705 @@
     {
         /*
-         * When external interrupts are pending and the guest has disabled interrupts, cause a VM-exit using "interrupt-window
-         * exiting" so we can deliver the interrupt when the guest is ready to receive them. Otherwise, if the guest
-         * can receive interrupts now, convert the PDM interrupt into a TRPM event and inject it.
+         * If the guest can receive interrupts now (interrupts enabled and no interrupt inhibition is active) convert
+         * the PDM interrupt into a TRPM event and inject it.
          */
-        if (!(pMixedCtx->eflags.u32 & X86_EFL_IF))   /** @todo we can use interrupt-window exiting for block-by-STI. */
-        {
-            if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
-            {
-                pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
-                rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
-                AssertRCReturn(rc, rc);
-            }
-            /* else we will deliver interrupts whenever the guest exits next and it's in a state to receive interrupts. */
-        }
-        else if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+        if (   (pMixedCtx->eflags.u32 & X86_EFL_IF)
+            && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
         {
             uint8_t u8Interrupt = 0;
 ...
             else
             {
-                /** @todo Does this actually happen? If not turn it into an assertion.
+                /** @todo Does this actually happen? If not turn it into an assertion. */
                 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
             }
         }
+        else if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
+        {
+            /* Instruct VT-x to cause an interrupt-window exit as soon as the guest is ready to receive interrupts again. */
+            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
+            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
+            AssertRCReturn(rc, rc);
+        }
+        /* else we will deliver interrupts whenever the guest exits next and it's in a state to receive interrupts. */
     }
 ...
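The injection rework above inverts the old control flow: it first tries to deliver when the guest is interruptible, and only otherwise arms interrupt-window exiting (which the VM-exit handler in the hunk at 6722 below disarms again). The deliverability test reduces to a single predicate; a runnable sketch (EFLAGS.IF bit value per the x86 architecture; names local to this sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFL_IF  UINT32_C(0x00000200)   /* EFLAGS.IF, the interrupt-enable flag. */

/* Mirrors the new if-condition in hmR0VmxInjectPendingInterrupt(): deliver only
   when interrupts are enabled and no STI/MOV SS shadow is pending; otherwise the
   caller sets the interrupt-window exiting control and retries after a VM-exit. */
static bool canInjectExtInt(uint32_t fEflags, bool fInterruptShadow)
{
    return (fEflags & EFL_IF) && !fInterruptShadow;
}

int main(void)
{
    printf("IF=1, no shadow: %d\n", canInjectExtInt(EFL_IF, false));  /* 1: inject now                */
    printf("IF=1, shadow:    %d\n", canInjectExtInt(EFL_IF, true));   /* 0: arm interrupt-window exit */
    printf("IF=0:            %d\n", canInjectExtInt(0, false));       /* 0: arm interrupt-window exit */
    return 0;
}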
@@ -6232 +6208 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

-    rc = hmR0VmxLoadGuestIntrState(pVM, pVCpu, pCtx);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestIntrState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
     rc = hmR0VmxSetupVMRunHandler(pVM, pVCpu, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 ...

@@ -6326 +6299 @@
      * This is why this is done after all possible exits-to-ring-3 paths in this code.
      */
+    hmR0VmxLoadGuestIntrState(pVM, pVCpu, pMixedCtx);
     rc = hmR0VmxInjectPendingInterrupt(pVM, pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
 ...

@@ -6472 +6446 @@
     }

-    /* We need to update our interruptibility-state on every VM-exit and VM-entry. */
-    rc = hmR0VmxSaveGuestIntrState(pVM, pVCpu, pMixedCtx);
-    AssertRC(rc);
-    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_INTR_STATE;
-
-    /*
-     * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever we
-     * eventually get a VM-exit for any reason. This maybe expensive as PDMApicSetTPR() can longjmp to ring-3; also why we do
-     * it outside of hmR0VmxSaveGuestState() which must never cause longjmps.
-     */
-    if (   !pVmxTransient->fVMEntryFailed
-        && (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
-        && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
-    {
-        rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
-        AssertRC(rc);
-        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
-    }
+    if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
+    {
+        /* Update the guest interruptibility-state from the VMCS. */
+        hmR0VmxSaveGuestIntrState(pVM, pVCpu, pMixedCtx);
+
+        /*
+         * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
+         * we eventually get a VM-exit for any reason. This maybe expensive as PDMApicSetTPR() can longjmp to ring-3; also why
+         * we do it outside of hmR0VmxSaveGuestState() which must never cause longjmps.
+         */
+        if (   (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
+            && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
+        {
+            rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
+            AssertRC(rc);
+            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
+        }
+    }
 }
 ...
@@ -6722 +6696 @@
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = VERR_INTERNAL_ERROR_5;
-#ifdef DEBUG
-    rc = hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
-    AssertRC(rc);
-    Assert(pMixedCtx->eflags.u32 & X86_EFL_IF);
-#endif

     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
     pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
     AssertRCReturn(rc, rc);
 ...

@@ -7478 +7446 @@
     const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
-    const uint8_t uAccessType            = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
+    const uint32_t uAccessType           = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
     switch (uAccessType)
     {
 ...

@@ -8306 +8274 @@
                 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
                 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
-                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_INTR_STATE | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
+                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
                 break;
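Taken together, the hunks retire the VMX_UPDATED_GUEST_INTR_STATE/HM_CHANGED_GUEST_INTR_STATE flag machinery in favour of unconditionally writing the interrupt shadow before every VM-entry and reading it back after every successful one. A stub model of the resulting ordering (all types and functions below are stand-ins for the VirtualBox ones named above, not the real API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t g_uVmcsIntrState;   /* models VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE */

static void loadGuestIntrState(uint32_t uShadow)   /* models hmR0VmxLoadGuestIntrState() */
{
    g_uVmcsIntrState = uShadow;     /* VMWRITE: done unconditionally before every VM-entry. */
}

static uint32_t saveGuestIntrState(void)           /* models hmR0VmxSaveGuestIntrState() */
{
    return g_uVmcsIntrState;        /* VMREAD: done only after a successful VM-entry. */
}

int main(void)
{
    bool fVMEntryFailed = false;    /* models pVmxTransient->fVMEntryFailed */

    loadGuestIntrState(0x1 /* block-by-STI */);    /* 1) write the shadow into the VMCS  */
                                                   /* 2) inject pending interrupts       */
                                                   /* 3) VMLAUNCH/VMRESUME, then VM-exit */
    if (!fVMEntryFailed)                           /* 4) refresh the shadow on success   */
        printf("shadow after entry: %#x\n", (unsigned)saveGuestIntrState());
    return 0;
}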