Changeset 49153 in vbox for trunk/src/VBox/VMM
Timestamp: Oct 17, 2013 7:31:36 AM
svn:sync-xref-src-repo-rev: 90013
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r49134)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r49153)

@@ -2160,11 +2160,4 @@
     }
 
-    /* Make sure we've undo the trap flag if we tried to single step something. */
-    if (pVCpu->hm.s.fClearTrapFlag)
-    {
-        pCtx->eflags.Bits.u1TF = 0;
-        pVCpu->hm.s.fClearTrapFlag = false;
-    }
-
     /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
     if (rcExit != VINF_EM_RAW_INTERRUPT)

@@ -3098,13 +3091,8 @@
  * @param pCtx        Pointer to the guest-CPU context.
  */
-VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    Assert(VMMRZCallRing3IsEnabled(pVCpu));
-    HMSVM_ASSERT_PREEMPT_SAFE();
-    VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pCtx);
-
+static int hmR0SvmRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
+{
     SVMTRANSIENT SvmTransient;
     SvmTransient.fUpdateTscOffsetting = true;
     uint32_t cLoops = 0;
-    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     int rc = VERR_INTERNAL_ERROR_5;

@@ -3155,4 +3143,127 @@
 
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
+    return rc;
+}
+
+
+/**
+ * Runs the guest code using AMD-V in single step mode.
+ *
+ * @returns VBox status code.
+ * @param pVM         Pointer to the VM.
+ * @param pVCpu       Pointer to the VMCPU.
+ * @param pCtx        Pointer to the guest-CPU context.
+ * @param pVmcb       The VM control block.
+ */
+static int hmR0SvmRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
+{
+    SVMTRANSIENT SvmTransient;
+    SvmTransient.fUpdateTscOffsetting = true;
+    uint32_t cLoops = 0;
+    int rc = VERR_INTERNAL_ERROR_5;
+    uint16_t uCsStart  = pCtx->cs.Sel;
+    uint64_t uRipStart = pCtx->rip;
+
+    for (;; cLoops++)
+    {
+        Assert(!HMR0SuspendPending());
+        AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
+                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
+                  (unsigned)RTMpCpuId(), cLoops));
+
+        /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
+        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
+        rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
+        if (rc != VINF_SUCCESS)
+            break;
+
+        /*
+         * No longjmps to ring-3 from this point on!!!
+         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
+         * This also disables flushing of the R0-logger instance (if any).
+         */
+        VMMRZCallRing3Disable(pVCpu);
+        VMMRZCallRing3RemoveNotification(pVCpu);
+        hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx, &SvmTransient);
+
+        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
+
+        /*
+         * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
+         * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
+         */
+        hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
+        if (RT_UNLIKELY(   rc != VINF_SUCCESS                                         /* Check for VMRUN errors. */
+                        || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */
+        {
+            if (rc == VINF_SUCCESS)
+                rc = VERR_SVM_INVALID_GUEST_STATE;
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
+            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
+            return rc;
+        }
+
+        /* Handle the #VMEXIT. */
+        HMSVM_EXITCODE_STAM_COUNTER_INC(SvmTransient.u64ExitCode);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        rc = hmR0SvmHandleExit(pVCpu, pCtx, &SvmTransient);
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        if (rc != VINF_SUCCESS)
+            break;
+        else if (cLoops > pVM->hm.s.cMaxResumeLoops)
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
+            rc = VINF_EM_RAW_INTERRUPT;
+            break;
+        }
+
+        /*
+         * Did the RIP change, if so, consider it a single step.
+         * Otherwise, make sure one of the TFs gets set.
+         */
+        if (   pCtx->rip != uRipStart
+            || pCtx->cs.Sel != uCsStart)
+        {
+            rc = VINF_EM_DBG_STEPPED;
+            break;
+        }
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+    }
+
+    /*
+     * Clear the X86_EFL_TF if necessary.
+     */
+    if (pVCpu->hm.s.fClearTrapFlag)
+    {
+        pVCpu->hm.s.fClearTrapFlag = false;
+        pCtx->eflags.Bits.u1TF = 0;
+    }
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
+    return rc;
+}
+
+
+/**
+ * Runs the guest code using AMD-V.
+ *
+ * @returns VBox status code.
+ * @param pVM         Pointer to the VM.
+ * @param pVCpu       Pointer to the VMCPU.
+ * @param pCtx        Pointer to the guest-CPU context.
+ */
+VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(VMMRZCallRing3IsEnabled(pVCpu));
+    HMSVM_ASSERT_PREEMPT_SAFE();
+    VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pCtx);
+
+    int rc;
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
+        rc = hmR0SvmRunGuestCodeNormal(pVM, pVCpu, pCtx, pVmcb);
+    else
+        rc = hmR0SvmRunGuestCodeStep(pVM, pVCpu, pCtx, pVmcb);
+
     if (rc == VERR_EM_INTERPRETER)
         rc = VINF_EM_RAW_EMULATE_INSTR;

@@ -4894,10 +5005,4 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
 
-    /* If we set the trap flag above, we have to clear it. */
-    if (pVCpu->hm.s.fClearTrapFlag)
-    {
-        pVCpu->hm.s.fClearTrapFlag = false;
-        pCtx->eflags.Bits.u1TF = 0;
-    }
 
     /* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint). However, for both cases
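The net effect is easier to see outside the diff: the old code set the guest's trap flag (TF in EFLAGS) to force a #DB after one instruction and then had to remember to clear it in several scattered exit paths; the new code isolates stepping in hmR0SvmRunGuestCodeStep, which records the starting CS:RIP, re-enters the guest until CS:RIP moves, and clears TF exactly once on the way out. Below is a minimal, self-contained sketch of that pattern; GUESTCTX, runGuestOnce, and runGuestStep are hypothetical stand-ins for illustration, not VirtualBox APIs.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the guest-CPU context (not the real CPUMCTX). */
    typedef struct GUESTCTX
    {
        uint64_t rip;        /* guest instruction pointer */
        uint16_t csSel;      /* guest CS selector */
        uint32_t eflagsTF;   /* trap flag (X86_EFL_TF), modelled as a plain field */
    } GUESTCTX;

    /* Placeholder for one hardware-assisted run of the guest (VMRUN + #VMEXIT). */
    static int runGuestOnce(GUESTCTX *pCtx)
    {
        pCtx->rip += 1;   /* pretend the guest retired exactly one instruction */
        return 0;
    }

    /*
     * Single-step loop in the style of hmR0SvmRunGuestCodeStep: remember the
     * starting CS:RIP, keep re-entering the guest, and report a completed step
     * as soon as CS:RIP differs from where we started.
     */
    static int runGuestStep(GUESTCTX *pCtx, uint32_t cMaxLoops)
    {
        uint16_t uCsStart  = pCtx->csSel;
        uint64_t uRipStart = pCtx->rip;
        bool     fSetTF    = false;

        for (uint32_t cLoops = 0; cLoops < cMaxLoops; cLoops++)
        {
            if (!pCtx->eflagsTF)
            {
                pCtx->eflagsTF = 1;   /* force a #DB after one guest instruction */
                fSetTF = true;        /* remember that *we* set it, so we must clear it */
            }

            int rc = runGuestOnce(pCtx);
            if (rc != 0)
                return rc;

            /* Did CS:RIP move? Then the step is complete. */
            if (pCtx->rip != uRipStart || pCtx->csSel != uCsStart)
                break;
        }

        /* Undo our trap flag on every exit path, not just in the #DB handler. */
        if (fSetTF)
            pCtx->eflagsTF = 0;
        return 0;
    }

    int main(void)
    {
        GUESTCTX Ctx = { 0x1000, 0x08, 0 };
        runGuestStep(&Ctx, 32);
        printf("stepped to rip=%#llx TF=%u\n",
               (unsigned long long)Ctx.rip, (unsigned)Ctx.eflagsTF);
        return 0;
    }

Comparing CS:RIP against the recorded start, rather than assuming the first #VMEXIT completed the step, lets the loop keep going across exits that did not actually advance the guest and report VINF_EM_DBG_STEPPED only once execution has moved on; concentrating the TF cleanup at the end of the loop is what allowed the changeset to delete the two scattered fClearTrapFlag blocks.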