Changeset 46541 in vbox for trunk/src/VBox/VMM
- Timestamp: Jun 13, 2013, 4:47:00 PM (12 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r46530 r46541 39 39 # define HMSVM_EXITCODE_STAM_COUNTER_INC(u64ExitCode) do { } while (0) 40 40 #endif 41 42 /** If we decide to use a function table approach this can be useful to 43 * switch to a "static DECLCALLBACK(int)". */ 44 #define HMSVM_EXIT_DECL static int 41 45 42 46 /** @name Segment attribute conversion between CPU and AMD-V VMCB format. … … 1458 1462 1459 1463 1464 1460 1465 /** 1461 1466 * Saves the entire guest state from the VMCB into the … … 1481 1486 1482 1487 /* 1483 * Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted. 1488 * Guest interrupt shadow. 1489 */ 1490 if (pVmcb->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE) 1491 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip); 1492 else 1493 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1494 1495 /* 1496 * Guest Control registers: CR2, CR3 (handled at the end) - accesses to other control registers are always intercepted. 1484 1497 */ 1485 1498 pMixedCtx->cr2 = pVmcb->guest.u64CR2; … … 2163 2176 Log4(("guest.TR.u32Limit %#RX32\n", pVmcb->guest.TR.u32Limit)); 2164 2177 Log4(("guest.TR.u64Base %#RX64\n", pVmcb->guest.TR.u64Base)); 2165 4 2178 2166 2179 Log4(("guest.u8CPL %#x\n", pVmcb->guest.u8CPL)); 2167 2180 Log4(("guest.u64CR0 %#RX64\n", pVmcb->guest.u64CR0)); … … 2337 2350 hmR0SvmInjectPendingEvent(pVCpu, pCtx); 2338 2351 2339 /** @todo -XXX- TPR patching. */2340 2352 return VINF_SUCCESS; 2341 2353 } … … 2491 2503 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */ 2492 2504 2505 /* If VMRUN failed, we can bail out early. This does -not- cover SVM_EXIT_INVALID. */ 2506 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS)) 2507 { 2508 Log4(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun)); 2509 return; 2510 } 2511 2493 2512 pSvmTransient->u64ExitCode = pVmcb->ctrl.u64ExitCode; 2494 2513 hmR0SvmSaveGuestState(pVCpu, pMixedCtx); /* Save the guest state from the VMCB to the guest-CPU context. 
*/ 2495 2514 2496 if (pVCpu->hm.s.svm.fSyncVTpr) 2497 { 2498 /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */ 2499 if ( pVM->hm.s.fTPRPatchingActive 2500 && (pCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr) 2501 { 2502 int rc = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff); 2503 AssertRC(rc); 2504 } 2505 else if ((uint8_t)(pSvmTransient->u8GuestTpr >> 4) != pVmcb->ctrl.IntCtrl.n.u8VTPR) 2506 { 2507 int rc = PDMApicSetTPR(pVCpu, (pVmcb->ctrl.IntCtrl.n.u8VTPR << 4)); 2508 AssertRC(rc); 2509 } 2510 } 2511 2512 /* -XXX- premature interruption during event injection */ 2513 2515 if (RT_LIKELY(pSvmTransient->u64ExitCode != SVM_EXIT_INVALID)) 2516 { 2517 if (pVCpu->hm.s.svm.fSyncVTpr) 2518 { 2519 /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */ 2520 if ( pVM->hm.s.fTPRPatchingActive 2521 && (pCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr) 2522 { 2523 int rc = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff); 2524 AssertRC(rc); 2525 } 2526 else if ((uint8_t)(pSvmTransient->u8GuestTpr >> 4) != pVmcb->ctrl.IntCtrl.n.u8VTPR) 2527 { 2528 int rc = PDMApicSetTPR(pVCpu, (pVmcb->ctrl.IntCtrl.n.u8VTPR << 4)); 2529 AssertRC(rc); 2530 } 2531 } 2532 2533 /* -XXX- premature interruption during event injection */ 2534 } 2514 2535 } 2515 2536 … … 2613 2634 2614 2635 } 2636 2615 2637 2616 2638 #ifdef DEBUG … … 2644 2666 #endif 2645 2667 2668 2669 /** 2670 * Worker for hmR0SvmInterpretInvlpg(). 2671 * 2672 * @return VBox status code. 2673 * @param pVCpu Pointer to the VMCPU. 2674 * @param pCpu Pointer to the disassembler state. 2675 * @param pRegFrame Pointer to the register frame. 
2676 */ 2677 static int hmR0SvmInterpretInvlPgEx(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame) 2678 { 2679 DISQPVPARAMVAL Param1; 2680 RTGCPTR GCPtrPage; 2681 2682 int rc = DISQueryParamVal(pRegFrame, pCpu, &pCpu->Param1, &Param1, DISQPVWHICH_SRC); 2683 if (RT_FAILURE(rc)) 2684 return VERR_EM_INTERPRETER; 2685 2686 if ( Param1.type == DISQPV_TYPE_IMMEDIATE 2687 || Param1.type == DISQPV_TYPE_ADDRESS) 2688 { 2689 if (!(Param1.flags & (DISQPV_FLAG_32 | DISQPV_FLAG_64))) 2690 return VERR_EM_INTERPRETER; 2691 2692 GCPtrPage = Param1.val.val64; 2693 rc = EMInterpretInvlpg(pVCpu->CTX_SUFF(pVM), pVCpu, pRegFrame, GCPtrPage); 2694 } 2695 else 2696 { 2697 Log4(("hmR0SvmInterpretInvlPgEx invalid parameter type %#x\n", Param1.type)); 2698 rc = VERR_EM_INTERPRETER; 2699 } 2700 2701 return rc; 2702 } 2703 2704 2705 /** 2706 * Interprets INVLPG. 2707 * 2708 * @returns VBox status code. 2709 * @retval VINF_* Scheduling instructions. 2710 * @retval VERR_EM_INTERPRETER Something we can't cope with. 2711 * @retval VERR_* Fatal errors. 2712 * 2713 * @param pVM Pointer to the VM. 2714 * @param pRegFrame Pointer to the register frame. 2715 * 2716 * @remarks Updates the RIP if the instruction was executed successfully. 2717 */ 2718 static int hmR0SvmInterpretInvlpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame) 2719 { 2720 /* Only allow 32 & 64 bit code. */ 2721 if (CPUMGetGuestCodeBits(pVCpu) != 16) 2722 { 2723 PDISSTATE pDis = &pVCpu->hm.s.DisState; 2724 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */); 2725 if ( RT_SUCCESS(rc) 2726 && pDis->pCurInstr->uOpcode == OP_INVLPG) 2727 { 2728 rc = hmR0SvmInterpretInvlPgEx(pVCpu, pDis, pRegFrame); 2729 if (RT_SUCCESS(rc)) 2730 pRegFrame->rip += pDis->cbInstr; 2731 return rc; 2732 } 2733 } 2734 return VERR_EM_INTERPRETER; 2735 } 2736 2737 2738 /** 2739 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM. 2740 * 2741 * @param pVCpu Pointer to the VMCPU. 
2742 */ 2743 DECLINLINE(void) hmR0SvmSetPendingXcptUD(PVMCPU pVCpu) 2744 { 2745 SVMEVENT Event; 2746 Event.u = 0; 2747 Event.n.u3Type = SVM_EVENT_EXCEPTION; 2748 Event.n.u1Valid = 1; 2749 Event.n.u8Vector = X86_XCPT_UD; 2750 hmR0SvmSetPendingEvent(pVCpu, &Event); 2751 } 2752 2753 2754 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 2755 /* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #VMEXIT handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */ 2756 /* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */ 2757 2758 /** 2759 * #VMEXIT handler for external interrupts (SVM_EXIT_INTR). 2760 */ 2761 HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2762 { 2763 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2764 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt); 2765 /* 32-bit Windows hosts (4 cores) has trouble with this on Intel; causes higher interrupt latency. Assuming the 2766 same for AMD-V.*/ 2767 #if HC_ARCH_BITS == 64 && defined(VBOX_WITH_VMMR0_DISABLE_PREEMPTION) 2768 Assert(ASMIntAreEnabled()); 2769 return VINF_SUCCESS; 2770 #else 2771 return VINF_EM_RAW_INTERRUPT; 2772 #endif 2773 } 2774 2775 2776 /** 2777 * #VMEXIT handler for WBINVD (SVM_EXIT_WBINVD). Conditional #VMEXIT. 2778 */ 2779 HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2780 { 2781 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2782 pCtx->rip += 2; /* Hardcoded opcode, AMD-V doesn't give us this information. */ 2783 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd); 2784 return VINF_SUCCESS; 2785 } 2786 2787 2788 /** 2789 * #VMEXIT handler for INVD (SVM_EXIT_INVD). Unconditional #VMEXIT. 
2790 */ 2791 HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2792 { 2793 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2794 pCtx->rip += 2; /* Hardcoded opcode, AMD-V doesn't give us this information. */ 2795 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd); 2796 return VINF_SUCCESS; 2797 } 2798 2799 2800 /** 2801 * #VMEXIT handler for INVD (SVM_EXIT_CPUID). Conditional #VMEXIT. 2802 */ 2803 HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2804 { 2805 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2806 PVM pVM = pVCpu->CTX_SUFF(pVM); 2807 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2808 if (RT_LIKELY(rc == VINF_SUCCESS)) 2809 pCtx->rip += 2; /* Hardcoded opcode, AMD-V doesn't give us this information. */ 2810 else 2811 { 2812 AssertMsgFailed(("hmR0SvmExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc)); 2813 rc = VERR_EM_INTERPRETER; 2814 } 2815 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid); 2816 return rc; 2817 } 2818 2819 2820 /** 2821 * #VMEXIT handler for RDTSC (SVM_EXIT_RDTSC). Conditional #VMEXIT. 2822 */ 2823 HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2824 { 2825 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2826 PVM pVM = pVCpu->CTX_SUFF(pVM); 2827 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2828 if (RT_LIKELY(rc == VINF_SUCCESS)) 2829 pCtx->rip += 2; /* Hardcoded opcode, AMD-V doesn't give us this information. */ 2830 else 2831 { 2832 AssertMsgFailed(("hmR0SvmExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc)); 2833 rc = VERR_EM_INTERPRETER; 2834 } 2835 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc); 2836 return rc; 2837 } 2838 2839 2840 /** 2841 * #VMEXIT handler for RDTSCP (SVM_EXIT_RDTSCP). Conditional #VMEXIT. 
2842 */ 2843 HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2844 { 2845 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2846 int rc = EMInterpretRdtscp(pVM, pVCpu, pCtx); 2847 if (RT_LIKELY(rc == VINF_SUCCESS)) 2848 pCtx->rip += 3; /* Hardcoded opcode, AMD-V doesn't give us this information. */ 2849 else 2850 { 2851 AssertMsgFailed(("hmR0SvmExitRdtsc: EMInterpretRdtscp failed with %Rrc\n", rc)); 2852 rc = VERR_EM_INTERPRETER; 2853 } 2854 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp); 2855 return rc; 2856 } 2857 2858 2859 /** 2860 * #VMEXIT handler for RDPMC (SVM_EXIT_RDPMC). Conditional #VMEXIT. 2861 */ 2862 HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2863 { 2864 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2865 int rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2866 if (RT_LIKELY(rc == VINF_SUCCESS)) 2867 pCtx->rip += 2; /* Hardcoded opcode, AMD-V doesn't give us this information. */ 2868 else 2869 { 2870 AssertMsgFailed(("hmR0SvmExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc)); 2871 rc = VERR_EM_INTERPRETER; 2872 } 2873 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc); 2874 return rc; 2875 } 2876 2877 2878 /** 2879 * #VMEXIT handler for INVLPG (SVM_EXIT_INVLPG). Conditional #VMEXIT. 2880 */ 2881 HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2882 { 2883 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2884 Assert(!pVM->hm.s.fNestedPaging); 2885 2886 /** @todo With decode assist we no longer need to interpret the instruction. */ 2887 int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx)); /* Updates RIP if successful. */ 2888 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg); 2889 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER); 2890 return rc; 2891 } 2892 2893 2894 /** 2895 * #VMEXIT handler for HLT (SVM_EXIT_HLT). Conditional #VMEXIT. 
2896 */ 2897 HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2898 { 2899 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2900 pCtx->rip++; /* Hardcoded opcode, AMD-V doesn't give us this information. */ 2901 int rc = EMShouldContinueAfterHalt(pVCpu, pCtx) ? VINF_SUCCESS : VINF_EM_HALT; 2902 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 2903 return rc; 2904 } 2905 2906 2907 /** 2908 * #VMEXIT handler for MONITOR (SVM_EXIT_MONITOR). Conditional #VMEXIT. 2909 */ 2910 HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2911 { 2912 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2913 int rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2914 if (RT_LIKELY(rc == VINF_SUCCESS)) 2915 pCtx->rip += 3; /* Hardcoded opcode, AMD-V doesn't give us this information. */ 2916 else 2917 { 2918 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc)); 2919 rc = VERR_EM_INTERPRETER; 2920 } 2921 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor); 2922 return rc; 2923 } 2924 2925 2926 /** 2927 * #VMEXIT handler for MWAIT (SVM_EXIT_MWAIT_UNCOND). Conditional #VMEXIT. 2928 */ 2929 HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2930 { 2931 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2932 int rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2933 if ( rc == VINF_EM_HALT 2934 || rc == VINF_SUCCESS) 2935 { 2936 pCtx->rip += 3; /* Hardcoded opcode, AMD-V doesn't give us this information. 
*/ 2937 2938 if ( rc == VINF_EM_HALT 2939 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) 2940 { 2941 rc = VINF_SUCCESS; 2942 } 2943 } 2944 else 2945 { 2946 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMwait: EMInterpretMWait failed with %Rrc\n", rc)); 2947 rc = VERR_EM_INTERPRETER; 2948 } 2949 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER, 2950 ("hmR0SvmExitMwait: failed, invalid error code %Rrc\n", rc)); 2951 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait); 2952 return rc; 2953 } 2954 2955 2956 2957 /** 2958 * #VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN). 2959 * Conditional #VMEXIT. 2960 */ 2961 HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2962 { 2963 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 2964 return VINF_EM_RESET; 2965 } 2966
Note:
See TracChangeset
for help on using the changeset viewer.