- Timestamp:
- Jun 14, 2013 12:01:11 PM (12 years ago)
- Location:
- trunk
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/hm_svm.h
r46512 r46551 265 265 /** Nested paging: host-level page fault occurred (EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault). */ 266 266 #define SVM_EXIT_NPF 0x400 267 268 /** @} */ 269 270 271 /** @name SVMVMCB.u64ExitInfo2 267 /** AVIC: Virtual IPI delivery not completed. */ 268 #define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401 269 /** AVIC: Attempted access by guest to a vAPIC register not handled by AVIC 270 * hardware. */ 271 #define SVM_EXIT_AVIC_NOACCEL 0x402 272 273 /** The maximum possible exit value. */ 274 #define SVM_EXIT_MAX (SVM_EXIT_AVIC_NOACCEL) 275 /** @} */ 276 277 278 /** @name SVMVMCB.u64ExitInfo2 for task switches 272 279 * @{ 273 280 */ … … 280 287 /** The value of EFLAGS.RF that would be saved in the outgoing TSS if the task switch were not intercepted. */ 281 288 #define SVM_EXIT2_TASK_SWITCH_EFLAGS_RF RT_BIT_64(48) 289 /** @} */ 290 291 /** @name SVMVMCB.u64ExitInfo1 for MSR accesses 292 * @{ 293 */ 294 /** The access was a read MSR. */ 295 #define SVM_EXIT1_MSR_READ 0x0 296 /** The access was a write MSR. */ 297 #define SVM_EXIT1_MSR_WRITE 0x1 282 298 /** @} */ 283 299 -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r46541 r46551 2623 2623 * @param pSvmTransient Pointer to the SVM transient structure. 2624 2624 */ 2625 DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient) 2626 { 2627 int rc; 2625 DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 2626 { 2627 Assert(pSvmTransient->u64ExitCode > 0); 2628 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX); 2629 2630 int rc; 2628 2631 uint32_t u32ExitCode = pSvmTransient->u64ExitCode; 2629 switch (u32ExitCode) 2630 { 2631 2632 switch (pSvmTransient->u64ExitCode) 2633 { 2634 case SVM_EXIT_WRITE_CR0: 2635 case SVM_EXIT_WRITE_CR3: 2636 case SVM_EXIT_WRITE_CR4: 2637 case SVM_EXIT_WRITE_CR8: 2638 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient); 2639 2640 case SVM_EXIT_READ_CR0: 2641 case SVM_EXIT_READ_CR3: 2642 case SVM_EXIT_READ_CR4: 2643 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient); 2644 2645 case SVM_EXIT_MSR: 2646 return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient); 2647 2648 case SVM_EXIT_INTR: 2649 case SVM_EXIT_FERR_FREEZE: 2650 case SVM_EXIT_NMI: 2651 case SVM_EXIT_INIT: 2652 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient); 2653 2654 case SVM_EXIT_WBINVD: 2655 return hmR0SvmExitWbinvd(pVCpu, pCtx, pSvmTransient); 2656 2657 case SVM_EXIT_INVD: 2658 return hmR0SvmExitInvd(pVCpu, pCtx, pSvmTransient); 2659 2660 default: 2661 { 2662 case SVM_EXIT_INVLPGA: 2663 case SVM_EXIT_RSM: 2664 case SVM_EXIT_VMRUN: 2665 case SVM_EXIT_VMLOAD: 2666 case SVM_EXIT_VMSAVE: 2667 case SVM_EXIT_STGI: 2668 case SVM_EXIT_CLGI: 2669 case SVM_EXIT_SKINIT: 2670 return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient); 2671 2672 case SVM_EXIT_MWAIT_ARMED: 2673 case SVM_EXIT_PAUSE: 2674 case SVM_EXIT_IDTR_READ: 2675 case SVM_EXIT_GDTR_READ: 2676 case SVM_EXIT_LDTR_READ: 2677 case SVM_EXIT_TR_READ: 2678 case SVM_EXIT_IDTR_WRITE: 2679 case SVM_EXIT_GDTR_WRITE: 2680 case SVM_EXIT_LDTR_WRITE: 2681 case SVM_EXIT_TR_WRITE: 2682 case SVM_EXIT_CR0_SEL_WRITE: 
2683 case SVM_EXIT_READ_CR1: case SVM_EXIT_WRITE_CR1: 2684 case SVM_EXIT_READ_CR2: case SVM_EXIT_WRITE_CR2: 2685 case SVM_EXIT_READ_CR5: case SVM_EXIT_WRITE_CR5: 2686 case SVM_EXIT_READ_CR6: case SVM_EXIT_WRITE_CR6: 2687 case SVM_EXIT_READ_CR7: case SVM_EXIT_WRITE_CR7: 2688 case SVM_EXIT_READ_CR8: 2689 case SVM_EXIT_READ_CR9: case SVM_EXIT_WRITE_CR9: 2690 case SVM_EXIT_READ_CR10: case SVM_EXIT_WRITE_CR10: 2691 case SVM_EXIT_READ_CR11: case SVM_EXIT_WRITE_CR11: 2692 case SVM_EXIT_READ_CR12: case SVM_EXIT_WRITE_CR12: 2693 case SVM_EXIT_READ_CR13: case SVM_EXIT_WRITE_CR13: 2694 case SVM_EXIT_READ_CR14: case SVM_EXIT_WRITE_CR14: 2695 case SVM_EXIT_READ_CR15: case SVM_EXIT_WRITE_CR15: 2696 default: 2697 { 2698 rc = VERR_SVM_UNEXPECTED_EXIT; 2699 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit code %#x\n", u32ExitCode)); 2700 break; 2701 } 2702 } 2632 2703 } 2633 2704 return rc; 2634 2635 2705 } 2636 2706 … … 2731 2801 return rc; 2732 2802 } 2803 else 2804 Log4(("hmR0SvmInterpretInvlpg: EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode)); 2733 2805 } 2734 2806 return VERR_EM_INTERPRETER; … … 2757 2829 2758 2830 /** 2759 * #VMEXIT handler for external interrupts (SVM_EXIT_INTR). 2831 * #VMEXIT handler for external interrupts, NMIs, FPU assertion freeze and INIT 2832 * signals (SVM_EXIT_INTR, SVM_EXIT_NMI, SVM_EXIT_FERR_FREEZE, SVM_EXIT_INIT). 2760 2833 */ 2761 2834 HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) … … 2884 2957 Assert(!pVM->hm.s.fNestedPaging); 2885 2958 2886 /** @todo With decode assist we no longer need to interpret the instruction. */2959 /** @todo Decode Assist. */ 2887 2960 int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx)); /* Updates RIP if successful. 
*/ 2888 2961 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg); … 2948 3021 } 2949 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER, 2950 ("hmR0SvmExitMwait: failed, invalid error code%Rrc\n", rc));3023 ("hmR0SvmExitMwait: EMInterpretMWait failed rc=%Rrc\n", rc)); 2951 3024 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait); 2952 3025 return rc; … 2954 3027 2955 3028 2956 2957 3029 /** 2958 3030 * #VMEXIT handler for shutdown (triple-fault) (SVM_EXIT_SHUTDOWN). … 2965 3037 } 2966 3038 3039 3040 /** 3041 * #VMEXIT handler for CRx reads (SVM_EXIT_READ_CR*). Conditional #VMEXIT. 3042 */ 3043 HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 3044 { 3045 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 3046 /** @todo Decode Assist. */ 3047 int rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */); 3048 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3); 3049 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15); 3050 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0]); 3051 return rc; 3052 } 3053 3054 3055 /** 3056 * #VMEXIT handler for CRx writes (SVM_EXIT_WRITE_CR*). Conditional #VMEXIT. 3057 */ 3058 HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 3059 { 3060 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 3061 /** @todo Decode Assist. */ 3062 int rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */); 3063 if (rc == VINF_SUCCESS) 3064 { 3065 /* RIP has been updated by EMInterpretInstruction(). */ 3066 Assert((pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0) <= 15); 3067 switch (pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0) 3068 { 3069 case 0: /* CR0. */ 3070 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 3071 break; 3072 3073 case 3: /* CR3. 
*/ 3074 Assert(!pVM->hm.s.fNestedPaging); 3075 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3; 3076 break; 3077 3078 case 4: /* CR4. */ 3079 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4; 3080 break; 3081 3082 case 8: /* CR8 (TPR). */ 3083 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE; 3084 break; 3085 3086 default: 3087 AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 CRx=%#x\n", 3088 pSvmTransient->u64ExitCode, pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0)); 3089 break; 3090 } 3091 } 3092 else 3093 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3); 3094 return rc; 3095 } 3096 3097 3098 /** 3099 * #VMEXIT handler for instructions that result in a #UD exception delivered to 3100 * the guest. 3101 */ 3102 HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 3103 { 3104 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 3105 return hmR0SvmSetPendingXcptUD(pVCpu); 3106 } 3107 3108 3109 /** 3110 * #VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional #VMEXIT. 3111 */ 3112 HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 3113 { 3114 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 3115 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; 3116 3117 int rc; 3118 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE) 3119 { 3120 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr); 3121 3122 /* Handle TPR patching; intercepted LSTAR write. */ 3123 if ( pVM->hm.s.fTPRPatchingActive 3124 && pCtx->ecx == MSR_K8_LSTAR) 3125 { 3126 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr) 3127 { 3128 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */ 3129 int rc2 = PDMApicSetTPR(pVCpu, pCtx->eax & 0xff); 3130 AssertRC(rc2); 3131 } 3132 pCtx->rip += 2; /* Hardcoded opcode, AMD-V doesn't give us this information. 
*/ 3133 return VINF_SUCCESS; 3134 3135 3136 rc = EMInterpretWrmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 3137 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc)); 3138 } 3139 else 3140 { 3141 /* MSR Read access. */ 3142 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr); 3143 rc = EMInterpretRdmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 3144 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc)); 3145 } 3146 3147 /* RIP has been updated by EMInterpret[Rd|Wr]msr(). */ 3148 return rc; 3149 } 3150 3151
Note:
See TracChangeset
for help on using the changeset viewer.