- Timestamp: Jun 17, 2013 1:18:53 PM
- File: 1 edited
- Legend: in the diff below, unchanged context lines are prefixed with a space, added lines with "+", removed lines with "-".
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46580 → r46588)
@@ -1301 +1301 @@
     }
 
-    pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+    pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
 }

@@ -1981 +1981 @@
         pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;     /* Not necessary as we #VMEXIT for delivering the interrupt. */
         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
-        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
     }
 }

@@ -2004 +2004 @@
     if (pVCpu->hm.s.Event.fPending)                                /* First, inject any pending HM events. */
     {
         Event.u = pVCpu->hm.s.Event.u64IntrInfo;
+        Assert(Event.n.u1Valid);
         bool fInject = true;

@@ -2013 +2014 @@
         }
 
-        if (   fInject
-            && Event.n.u1Valid)
+        if (fInject)
         {
             pVCpu->hm.s.Event.fPending = false;

@@ -2027 +2027 @@
         {
             Log4(("Injecting NMI\n"));
-            Event.n.u1Valid = 1;
-            Event.n.u8Vector = X86_XCPT_NMI;
-            Event.n.u3Type = SVM_EVENT_NMI;
+
+            Event.n.u1Valid  = 1;
+            Event.n.u8Vector = X86_XCPT_NMI;
+            Event.n.u3Type   = SVM_EVENT_NMI;
 
             hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);

@@ -2039 +2040 @@
     else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
     {
-        /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
+        /* Check if there are guest external interrupts (PIC/APIC) pending and inject them, if the guest can receive them. */
         const bool fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
         if (   !fBlockInt

@@ -2048 +2049 @@
         if (RT_SUCCESS(rc))
         {
-            Log4(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
+            Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
 
             Event.n.u1Valid = 1;

@@ -2635 +2636 @@
     switch (pSvmTransient->u64ExitCode)
     {
+        /** @todo */
+        //SVM_EXIT_EXCEPTION_x:
+
         case SVM_EXIT_NPF:
             return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient);

@@ -2667 +2671 @@
             return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
 
+        case SVM_EXIT_VINTR:
+            return hmR0SvmExitVIntr(pVCpu, pCtx, pSvmTransient);
+
         case SVM_EXIT_INTR:
         case SVM_EXIT_FERR_FREEZE:

@@ -2676 +2683 @@
             return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);
 
+        case SVM_EXIT_INVLPG:
+            return hmR0SvmExitInvlpg(pVCpu, pCtx, pSvmTransient);
+
         case SVM_EXIT_WBINVD:
             return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient);

@@ -2685 +2695 @@
             return hmR0SvmExitRdpmc(pVCpu, pCtx, pSvmTransient);
 
-        case SVM_EXIT_READ_DR0:    case SVM_EXIT_READ_DR1:    case SVM_EXIT_READ_DR2:    case SVM_EXIT_READ_DR3:
-        case SVM_EXIT_READ_DR6:    case SVM_EXIT_READ_DR7:    case SVM_EXIT_READ_DR8:    case SVM_EXIT_READ_DR9:
-        case SVM_EXIT_READ_DR10:   case SVM_EXIT_READ_DR11:   case SVM_EXIT_READ_DR12:   case SVM_EXIT_READ_DR13:
-        case SVM_EXIT_READ_DR14:   case SVM_EXIT_READ_DR15:
-            return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
-
-        case SVM_EXIT_WRITE_DR0:   case SVM_EXIT_WRITE_DR1:   case SVM_EXIT_WRITE_DR2:   case SVM_EXIT_WRITE_DR3:
-        case SVM_EXIT_WRITE_DR6:   case SVM_EXIT_WRITE_DR7:   case SVM_EXIT_WRITE_DR8:   case SVM_EXIT_WRITE_DR9:
-        case SVM_EXIT_WRITE_DR10:  case SVM_EXIT_WRITE_DR11:  case SVM_EXIT_WRITE_DR12:  case SVM_EXIT_WRITE_DR13:
-        case SVM_EXIT_WRITE_DR14:  case SVM_EXIT_WRITE_DR15:
-            return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
-
         default:
         {
+            case SVM_EXIT_READ_DR0:    case SVM_EXIT_READ_DR1:    case SVM_EXIT_READ_DR2:    case SVM_EXIT_READ_DR3:
+            case SVM_EXIT_READ_DR6:    case SVM_EXIT_READ_DR7:    case SVM_EXIT_READ_DR8:    case SVM_EXIT_READ_DR9:
+            case SVM_EXIT_READ_DR10:   case SVM_EXIT_READ_DR11:   case SVM_EXIT_READ_DR12:   case SVM_EXIT_READ_DR13:
+            case SVM_EXIT_READ_DR14:   case SVM_EXIT_READ_DR15:
+                return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
+
+            case SVM_EXIT_WRITE_DR0:   case SVM_EXIT_WRITE_DR1:   case SVM_EXIT_WRITE_DR2:   case SVM_EXIT_WRITE_DR3:
+            case SVM_EXIT_WRITE_DR6:   case SVM_EXIT_WRITE_DR7:   case SVM_EXIT_WRITE_DR8:   case SVM_EXIT_WRITE_DR9:
+            case SVM_EXIT_WRITE_DR10:  case SVM_EXIT_WRITE_DR11:  case SVM_EXIT_WRITE_DR12:  case SVM_EXIT_WRITE_DR13:
+            case SVM_EXIT_WRITE_DR14:  case SVM_EXIT_WRITE_DR15:
+                return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
+
+            case SVM_EXIT_TASK_SWITCH:
+                return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
+
+            case SVM_EXIT_VMMCALL:
+                return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
+
             case SVM_EXIT_INVLPGA:
             case SVM_EXIT_RSM:
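Aside: the recurring one-liner in the hunks above depends on AMD-V's VMCB state caching, which is easy to miss. The sketch below is illustrative only (the struct, names, and bit values are assumptions based on the AMD manual's clean-bit layout, where bit 0 covers the intercept vectors and bit 3 the V_TPR/V_IRQ group); it is not VirtualBox code:

    /* Minimal sketch of the VMCB clean-bits pattern. A set bit tells the CPU
     * it may reuse its cached copy of that VMCB field group on the next
     * VMRUN; whenever the VMM writes such a group it must clear the matching
     * bit so the hardware reloads the fields from memory. */
    #include <stdint.h>

    #define VMCB_CLEAN_INTERCEPTS   UINT64_C(0x0001)  /* bit 0: intercept vectors (assumed value) */
    #define VMCB_CLEAN_TPR          UINT64_C(0x0008)  /* bit 3: V_TPR, V_IRQ, V_INTR_* fields (assumed value) */

    typedef struct VMCBCTRLSKETCH
    {
        uint32_t u32InterceptCtrl1; /* instruction/event intercept controls */
        uint64_t u64VmcbCleanBits;  /* state-caching hints for the CPU */
    } VMCBCTRLSKETCH;

    /* Arming the virtual-interrupt intercept touches both the intercept
     * vectors (clean bit 0) and the V_IRQ/TPR group (clean bit 3), so both
     * bits must be cleared. Clearing only bit 0, as the code did before this
     * changeset, lets the CPU keep using stale V_IRQ/TPR state. */
    static void vmcbArmVIntrIntercept(VMCBCTRLSKETCH *pCtrl, uint32_t fVIntrIntercept)
    {
        pCtrl->u32InterceptCtrl1 |= fVIntrIntercept;
        pCtrl->u64VmcbCleanBits  &= ~(VMCB_CLEAN_INTERCEPTS | VMCB_CLEAN_TPR);
    }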
@@ -2854 +2870 @@
 
 
+/**
+ * Emulates a simple MOV TPR (CR8) instruction, used for TPR patching on 32-bit
+ * guests. This simply looks up the patch record at EIP and does the required
+ * modification.
+ *
+ * This VMMCALL is used as a fallback mechanism when mov to/from cr8 isn't
+ * exactly like how we want it to be (e.g. not followed by shr 4 as is usually
+ * done for TPR). See hmR3ReplaceTprInstr() for the details.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+static int hmR0SvmEmulateMovTpr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    int rc;
+    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));
+
+    for (;;)
+    {
+        bool    fPending;
+        uint8_t u8Tpr;
+
+        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
+        if (!pPatch)
+            break;
+
+        switch (pPatch->enmType)
+        {
+            case HMTPRINSTR_READ:
+                rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
+                AssertRC(rc);
+
+                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
+                AssertRC(rc);
+                pCtx->rip += pPatch->cbOp;
+                break;
+
+            case HMTPRINSTR_WRITE_REG:
+            case HMTPRINSTR_WRITE_IMM:
+                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
+                {
+                    uint32_t u32Val;
+                    rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
+                    AssertRC(rc);
+                    u8Tpr = (uint8_t)u32Val;
+                }
+                else
+                    u8Tpr = (uint8_t)pPatch->uSrcOperand;
+
+                rc = PDMApicSetTPR(pVCpu, u8Tpr);
+                AssertRC(rc);
+                pCtx->rip += pPatch->cbOp;
+                break;
+
+            default:
+                AssertMsgFailedReturn(("Unexpected patch type %d\n", pPatch->enmType), VERR_SVM_UNEXPECTED_PATCH_TYPE);
+                break;
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+
 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
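As context for the "shr 4" remark in the comment above: the APIC TPR is an 8-bit register whose upper nibble is the priority class, and CR8 architecturally holds only that class. Guest code reading the memory-mapped TPR therefore usually shifts the value right by four, which is the instruction shape the patcher looks for; sequences without the shift fall back to this VMMCALL emulation. A small illustrative sketch of the relation (not part of the changeset):

    #include <stdint.h>

    /* CR8 holds TPR[7:4], the interrupt priority class. */
    static inline uint8_t ApicTprToCr8(uint8_t u8Tpr)
    {
        return u8Tpr >> 4;
    }

    /* Going back loses the low nibble, which is architecturally reserved. */
    static inline uint8_t Cr8ToApicTpr(uint8_t u8Cr8)
    {
        return (uint8_t)(u8Cr8 << 4);
    }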
@@ -3500 +3581 @@
 }
 
+
+/**
+ * #VMEXIT handler for virtual interrupt (SVM_EXIT_VINTR). Conditional #VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    pVmcb->ctrl.IntCtrl.n.u1VIrqValid  = 0;  /* No virtual interrupts pending, we'll inject the current one before reentry. */
+    pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;
+
+    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
+    pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_VINTR;
+    pVmcb->ctrl.u64VmcbCleanBits  &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+
+    /* Deliver the pending interrupt via hmR0SvmPreRunGuest()->hmR0SvmInjectEvent() and resume guest execution. */
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * #VMEXIT handler for task switches (SVM_EXIT_TASK_SWITCH). Conditional #VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    if (   !(pVmcb->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
+        && pVCpu->hm.s.Event.fPending)
+    {
+        /*
+         * AMD-V does not provide us with the original exception, but we have it in u64IntrInfo since we
+         * injected the event during VM-entry. Software interrupts and exceptions will be regenerated
+         * when the recompiler restarts the instruction.
+         */
+        SVMEVENT Event;
+        Event.u = pVCpu->hm.s.Event.u64IntrInfo;
+        if (   Event.n.u3Type == SVM_EVENT_EXCEPTION
+            || Event.n.u3Type == SVM_EVENT_SOFTWARE_INT)
+        {
+            pVCpu->hm.s.Event.fPending = false;
+        }
+        else
+            Log4(("hmR0SvmExitTaskSwitch: TS occurred during event delivery. Kept pending u8Vector=%#x\n", Event.n.u8Vector));
+    }
+
+    /** @todo Emulate task switch someday, currently just going back to ring-3 for
+     *        emulation. */
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
+    return VERR_EM_INTERPRETER;
+}
+
+
+/**
+ * #VMEXIT handler for VMMCALL (SVM_EXIT_VMMCALL). Conditional #VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    int rc = hmR0SvmEmulateMovTpr(pVM, pVCpu, pCtx);
+    if (RT_UNLIKELY(rc != VINF_SUCCESS))
+        hmR0SvmSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
+}
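The new hmR0SvmExitVIntr handler is the second half of an interrupt-window protocol whose first half is visible in the hunk around line 1981: when the guest cannot take an interrupt yet, the VMM arms a dummy virtual interrupt plus the VINTR intercept, the resulting SVM_EXIT_VINTR fires as soon as the guest becomes interruptible, and both are then disarmed so the real event can be injected on the next entry. A condensed sketch of the two halves, using an assumed structure rather than the real VMCB layout:

    #include <stdbool.h>
    #include <stdint.h>

    #define INTERCEPT_VINTR  UINT32_C(0x10)  /* assumed bit position */

    typedef struct INTWINDOWSKETCH
    {
        bool     fVIrqValid;        /* V_IRQ: a (dummy) virtual interrupt is pending */
        uint32_t u32InterceptCtrl;  /* intercept controls, VINTR bit among them */
    } INTWINDOWSKETCH;

    /* Guest not interruptible: request a #VMEXIT for when it becomes so. */
    static void IntWindowArm(INTWINDOWSKETCH *p)
    {
        p->fVIrqValid        = true;             /* pretend an interrupt is pending */
        p->u32InterceptCtrl |= INTERCEPT_VINTR;  /* exit the moment it could be taken */
    }

    /* On SVM_EXIT_VINTR: the window is open, stop watching for it. */
    static void IntWindowDisarm(INTWINDOWSKETCH *p)
    {
        p->fVIrqValid        = false;            /* drop the dummy V_IRQ */
        p->u32InterceptCtrl &= ~INTERCEPT_VINTR; /* no more interruptibility exits */
    }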