Changeset 47736 in vbox for trunk/src/VBox/VMM/VMMR0
Timestamp: Aug 14, 2013 7:49:47 PM
File: 1 edited
Legend:
- Unmodified context lines are prefixed with a space
- Added lines with '+'
- Removed lines with '-'
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- HMVMXR0.cpp (r47719)
+++ HMVMXR0.cpp (r47736)

@@ -5505 +5505 @@ hmR0VmxReadSegmentReg
 
     /*
-     * If VT-x marks the segment as unusable, the rest of the attributes are undefined with certain exceptions (some bits in
-     * CS, SS). Regardless, we have to clear the bits here and only retain the unusable bit because the unusable bit is specific
-     * to VT-x; everyone else relies on the attribute being zero and has no clue what the unusable bit is.
+     * If VT-x marks the segment as unusable, most other bits remain undefined:
+     *   - For CS the L, D and G bits have meaning.
+     *   - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
+     *   - For the remaining data segments no bits are defined.
+     *
+     * What is important for the rest of the VBox code is that the P bit is
+     * cleared.  Some of the other VBox code recognizes the unusable bit, but
+     * AMD-V certainly doesn't, and REM doesn't really either.  So, to be on the
+     * safe side here we'll strip off P and other bits we don't care about.  If
+     * any code breaks because attr.u != 0 when Sel < 4, it should be fixed.
      *
      * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
-     *
-     * bird: This isn't quite as simple.  VT-x and VBox(!) require the DPL for SS to be the same as the CPL.  In 64-bit mode it
-     *       is possible (int/trap/xxx injection does this when switching rings) to load SS with a NULL selector and RPL=CPL.
-     *       The Attr.u = X86DESCATTR_UNUSABLE works fine as long as nobody uses ring-1 or ring-2.  VT-x updates the DPL
-     *       correctly in the attributes of SS even when the unusable bit is set; we need to preserve the DPL or we get invalid
-     *       guest state trouble.  Try bs2-cpu-hidden-regs-1.
      */
     if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
     {
         Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR);          /* TR is the only selector that can never be unusable. */
-        Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x\n", idxSel, pSelReg->Attr.u));
-
-        if (idxSel == VMX_VMCS16_GUEST_FIELD_SS)
-            pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_DPL;
-        else if (idxSel == VMX_VMCS16_GUEST_FIELD_CS)
-            pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G;
-        else
-            pSelReg->Attr.u = X86DESCATTR_UNUSABLE;
+#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
+        uint32_t fAttr = pSelReg->Attr.u;
+#endif
+        /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL.  The latter two are really irrelevant. */
+        pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
+                         | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
+        Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, fAttr, pSelReg->Attr.u));
+#ifdef DEBUG_bird
+        AssertMsg(fAttr == pSelReg->Attr.u, ("%#x: %#x != %#x\n", idxSel, fAttr, pSelReg->Attr.u));
+#endif
     }
     return VINF_SUCCESS;

@@ -6155 +6158 @@ (ring-3 exit path)
     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
 
-    /* Make sure we've undone the trap flag if we tried to single step something. */
-    if (pVCpu->hm.s.fClearTrapFlag)
-    {
-        pVCpu->hm.s.fClearTrapFlag = false;
-        pMixedCtx->eflags.Bits.u1TF = 0;
-    }
-
     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
     VMMRZCallRing3Enable(pVCpu);

@@ -6300 +6296 @@ (pending-event injection)
         hmR0VmxSetIntWindowExitVmcs(pVCpu);
     }
-    else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
+    else if (   VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
+             && !pVCpu->hm.s.fSingleInstruction)
     {
         /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */

@@ -7223 +7220 @@
 
 /**
- * Runs the guest code using VT-x.
+ * Runs the guest code using VT-x the normal way.
  *
  * @returns VBox status code.
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
  * @param   pCtx        Pointer to the guest-CPU context.
  *
+ * @note    Mostly the same as hmR0VmxRunGuestCodeStep.
  * @remarks Called with preemption disabled.
  */
-VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    Assert(VMMRZCallRing3IsEnabled(pVCpu));
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-
+static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
     VMXTRANSIENT VmxTransient;
     VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;

@@ -7302 +7298 @@
 
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
+    return rc;
+}
+
+
+/**
+ * Single steps guest code using VT-x.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @note    Mostly the same as hmR0VmxRunGuestCodeNormal.
+ * @remarks Called with preemption disabled.
+ */
+static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    VMXTRANSIENT VmxTransient;
+    VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
+    int          rc     = VERR_INTERNAL_ERROR_5;
+    uint32_t     cLoops = 0;
+    uint16_t     uCsStart  = pCtx->cs.Sel;
+    uint64_t     uRipStart = pCtx->rip;
+
+    for (;; cLoops++)
+    {
+        Assert(!HMR0SuspendPending());
+        AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
+                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
+                   (unsigned)RTMpCpuId(), cLoops));
+
+        /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
+        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
+        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
+        if (rc != VINF_SUCCESS)
+            break;
+
+        /*
+         * No longjmps to ring-3 from this point on!!!
+         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
+         * This also disables flushing of the R0-logger instance (if any).
+         */
+        VMMRZCallRing3Disable(pVCpu);
+        VMMRZCallRing3RemoveNotification(pVCpu);
+        hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
+
+        rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
+        /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
+
+        /*
+         * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
+         * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
+         */
+        hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
+        if (RT_UNLIKELY(rc != VINF_SUCCESS))        /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
+        {
+            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
+            hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
+            return rc;
+        }
+
+        /* Handle the VM-exit. */
+        AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
+        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
+        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
+        HMVMX_START_EXIT_DISPATCH_PROF();
+#ifdef HMVMX_USE_FUNCTION_TABLE
+        rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
+#else
+        rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
+#endif
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
+        if (rc != VINF_SUCCESS)
+            break;
+        else if (cLoops > pVM->hm.s.cMaxResumeLoops)
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
+            rc = VINF_EM_RAW_INTERRUPT;
+            break;
+        }
+
+        /*
+         * Did the RIP change?  If so, consider it a single step.
+         * Otherwise, make sure one of the TFs gets set.
+         */
+        int rc2 = hmR0VmxLoadGuestRip(pVCpu, pCtx);
+        rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
+        AssertRCReturn(rc2, rc2);
+        if (   pCtx->rip    != uRipStart
+            || pCtx->cs.Sel != uCsStart)
+        {
+            rc = VINF_EM_DBG_STEPPED;
+            break;
+        }
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+    }
+
+    /*
+     * Clear the X86_EFL_TF if necessary.
+     */
+    if (pVCpu->hm.s.fClearTrapFlag)
+    {
+        int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
+        AssertRCReturn(rc2, rc2);
+        pVCpu->hm.s.fClearTrapFlag = false;
+        pCtx->eflags.Bits.u1TF = 0;
+    }
+    /** @todo there seem to be issues with the resume flag when the monitor trap
+     *        flag is pending without being used.  Seen early in BIOS init when
+     *        accessing the APIC page in prot mode. */
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
+    return rc;
+}
+
+
+/**
+ * Runs the guest code using VT-x.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks Called with preemption disabled.
+ */
+VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    int rc;
+    if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
+        rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
+    else
+        rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
+
     if (rc == VERR_EM_INTERPRETER)
         rc = VINF_EM_RAW_EMULATE_INSTR;

@@ -9665 +9798 @@ (#DB exit handler)
     AssertRCReturn(rc, rc);
 
-    /* If we set the trap flag above, we have to clear it. */
-    if (pVCpu->hm.s.fClearTrapFlag)
-    {
-        pVCpu->hm.s.fClearTrapFlag = false;
-        pMixedCtx->eflags.Bits.u1TF = 0;
-    }
-
     /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
     uint64_t uDR6 = X86_DR6_INIT_VAL;
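
Notes on the change:

The key behavioural change in hmR0VmxReadSegmentReg is the attribute mask applied to unusable segments: P (plus the irrelevant limit-high and AVL bits) is stripped, while the type, S, DPL, L, D and G bits survive. The standalone sketch below illustrates just that masking step; it is not VBox code. The X86DESCATTR_* values follow the VMX segment access-rights layout (type in bits 0-3, S in bit 4, DPL in bits 5-6, P in bit 7, AVL in bit 12, L/D/G in bits 13-15, unusable in bit 16), and demo_StripUnusableAttr is a hypothetical stand-in for the in-tree logic.

    #include <stdint.h>
    #include <stdio.h>

    /* Attribute bits in the VMX segment access-rights layout (Intel SDM). */
    #define X86DESCATTR_TYPE        UINT32_C(0x0000000f)  /* Segment type (bits 0-3). */
    #define X86DESCATTR_DT          UINT32_C(0x00000010)  /* S: code/data vs. system. */
    #define X86DESCATTR_DPL         UINT32_C(0x00000060)  /* Descriptor privilege level. */
    #define X86DESCATTR_P           UINT32_C(0x00000080)  /* Present. */
    #define X86DESCATTR_LIMIT_HIGH  UINT32_C(0x00000f00)  /* Limit bits 19:16. */
    #define X86DESCATTR_AVL         UINT32_C(0x00001000)  /* Available for software. */
    #define X86DESCATTR_L           UINT32_C(0x00002000)  /* 64-bit code segment. */
    #define X86DESCATTR_D           UINT32_C(0x00004000)  /* Default operand size. */
    #define X86DESCATTR_G           UINT32_C(0x00008000)  /* Granularity. */
    #define X86DESCATTR_UNUSABLE    UINT32_C(0x00010000)  /* VT-x: segment unusable. */

    /* Hypothetical stand-in for the masking in hmR0VmxReadSegmentReg: keep only
       the bits that stay meaningful for an unusable segment and, most
       importantly, clear P so code that ignores the unusable bit (AMD-V paths,
       REM) treats the register as not present. */
    static uint32_t demo_StripUnusableAttr(uint32_t fAttr)
    {
        if (fAttr & X86DESCATTR_UNUSABLE)
            fAttr &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
                   | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
        return fAttr;
    }

    int main(void)
    {
        /* An unusable ring-3 SS: DPL=3, P still set, type/S bits present. */
        uint32_t fAttr = X86DESCATTR_UNUSABLE | X86DESCATTR_P | (UINT32_C(3) << 5)
                       | X86DESCATTR_DT | X86DESCATTR_TYPE;
        /* Prints attr=0x100ff -> 0x1007f: only the P bit (0x80) is stripped. */
        printf("attr=%#x -> %#x\n", (unsigned)fAttr, (unsigned)demo_StripUnusableAttr(fAttr));
        return 0;
    }

With an unusable SS whose DPL the CPU left intact, the mask clears only P, so present-bit consumers see a not-present segment while the DPL needed for CPL checks survives.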
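
The new hmR0VmxRunGuestCodeStep implements single stepping by comparing CS:RIP against its value at loop entry: a VM-exit that leaves the guest at the same instruction (an interrupt-window exit, for instance) is not a completed step, so the loop re-arms the debug state (HM_CHANGED_GUEST_DEBUG) and resumes. Below is a minimal sketch of that control flow, with hypothetical demo* stubs in place of the real VT-x entry/exit plumbing and small integers standing in for the VINF_* status codes.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real loop uses hmR0VmxPreRunGuest,
       hmR0VmxRunGuest and the VM-exit handler table. */
    typedef struct DEMOCTX { uint64_t rip; uint16_t cs; } DEMOCTX;

    static int demoRunAndHandleExit(DEMOCTX *pCtx)
    {
        /* Pretend the first two exits occur before the instruction completes
           (e.g. interrupt-window exits) and only the third makes progress. */
        static int s_cExits = 0;
        if (++s_cExits >= 3)
            pCtx->rip += 2;
        return 0; /* VINF_SUCCESS */
    }

    /* Skeleton of the stepping loop: remember the starting CS:RIP and only
       report a completed step once it has actually moved. */
    static int demoRunGuestCodeStep(DEMOCTX *pCtx, uint32_t cMaxLoops)
    {
        uint16_t uCsStart  = pCtx->cs;
        uint64_t uRipStart = pCtx->rip;

        for (uint32_t cLoops = 0; cLoops < cMaxLoops; cLoops++)
        {
            int rc = demoRunAndHandleExit(pCtx);
            if (rc != 0)
                return rc;
            if (pCtx->rip != uRipStart || pCtx->cs != uCsStart)
                return 1;  /* VINF_EM_DBG_STEPPED */
            /* Otherwise re-arm single stepping (HM_CHANGED_GUEST_DEBUG) and retry. */
        }
        return 2;          /* VINF_EM_RAW_INTERRUPT after cMaxResumeLoops */
    }

    int main(void)
    {
        DEMOCTX Ctx = { 0x1000, 0x08 };
        /* Prints rc=1 rip=0x1002: two unproductive exits, then a step. */
        printf("rc=%d rip=%#llx\n", demoRunGuestCodeStep(&Ctx, 32), (unsigned long long)Ctx.rip);
        return 0;
    }

The same comparison is why the dispatcher in VMXR0RunGuestCode only takes the step path when fSingleInstruction or DBGFIsStepping(pVCpu) says so: the normal path has no per-exit CS:RIP bookkeeping to pay for.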
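
The fClearTrapFlag bookkeeping that this changeset removes from the generic ring-3 exit path and from the #DB handler now lives solely at the end of the stepping loop: if HM set EFLAGS.TF on the guest's behalf, it must clear it again before anything else reads the context. A minimal model of that set/clear pairing, assuming a simplified EFLAGS union (the real field is pCtx->eflags.Bits.u1TF in CPUMCTX):

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal model of the EFLAGS view used by the fClearTrapFlag logic;
       only TF (bit 8) is broken out here. */
    typedef union DEMOEFLAGS
    {
        uint32_t u32;
        struct { uint32_t u8Low : 8; uint32_t u1TF : 1; uint32_t uRest : 23; } Bits;
    } DEMOEFLAGS;

    int main(void)
    {
        DEMOEFLAGS Eflags = { .u32 = 0x202 };   /* IF set, TF clear. */
        int fClearTrapFlag = 0;

        /* Arming the step: if the guest did not have TF set itself, set it
           and remember that it is ours to clear (hm.s.fClearTrapFlag). */
        if (!Eflags.Bits.u1TF)
        {
            Eflags.Bits.u1TF = 1;
            fClearTrapFlag = 1;
        }

        /* ... run the guest, take the #DB / MTF exit ... */

        /* After the loop: undo our TF so the guest never observes it. */
        if (fClearTrapFlag)
        {
            fClearTrapFlag = 0;
            Eflags.Bits.u1TF = 0;
        }
        printf("eflags=%#x\n", (unsigned)Eflags.u32);   /* Back to 0x202. */
        return 0;
    }

Doing the clear once, after the loop, avoids the duplicated (and easy-to-miss) clearing the removed hunks performed in two unrelated exit paths.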