Changeset 73617 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
Timestamp: Aug 10, 2018 2:09:55 PM
File: 1 edited
Legend: unchanged lines are unprefixed; lines removed in r73617 are prefixed with "-"; lines added are prefixed with "+".
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- r73609
+++ r73617
@@ -4553,5 +4553,5 @@
 {
     Assert(pVM->hm.s.vmx.pRealModeTSS);
-    Assert(PDMVmmDevHeapIsEnabled(pVM));    /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
+    Assert(PDMVmmDevHeapIsEnabled(pVM));    /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
 
     /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
@@ -8272,7 +8272,5 @@
     if (   !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
         && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
-    {
         pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
-    }
 
     /*
@@ -13171,5 +13169,4 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
 
-    int rc;
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
@@ -13181,8 +13178,8 @@
 #endif
         /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
-        rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
-        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
-        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
-        rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
+        int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
+        rc    |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
+        rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+        rc    |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
         AssertRCReturn(rc, rc);
         Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pCtx->cs.Sel, pCtx->rip,
@@ -13196,282 +13193,37 @@
     Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
 
-    /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
-    rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
+    int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     AssertRCReturn(rc, rc);
 
-    PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
-    uint32_t cbOp     = 0;
-    PVM pVM           = pVCpu->CTX_SUFF(pVM);
-    bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
-    rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
-    if (RT_SUCCESS(rc))
-    {
-        rc = VINF_SUCCESS;
-        Assert(cbOp == pDis->cbInstr);
-        Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pCtx->cs.Sel, pCtx->rip));
-        switch (pDis->pCurInstr->uOpcode)
-        {
-            case OP_CLI:
-            {
-                pCtx->eflags.Bits.u1IF = 0;
-                pCtx->eflags.Bits.u1RF = 0;
-                pCtx->rip += pDis->cbInstr;
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
-                if (   !fDbgStepping
-                    && pCtx->eflags.Bits.u1TF)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
-                break;
-            }
-
-            case OP_STI:
-            {
-                bool fOldIF = pCtx->eflags.Bits.u1IF;
-                pCtx->eflags.Bits.u1IF = 1;
-                pCtx->eflags.Bits.u1RF = 0;
-                pCtx->rip += pDis->cbInstr;
-                if (!fOldIF)
-                {
-                    EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
-                    Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
-                }
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
-                if (   !fDbgStepping
-                    && pCtx->eflags.Bits.u1TF)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
-                break;
-            }
-
-            case OP_HLT:
-            {
-                rc = VINF_EM_HALT;
-                pCtx->rip += pDis->cbInstr;
-                pCtx->eflags.Bits.u1RF = 0;
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
-                break;
-            }
-
-            case OP_POPF:
-            {
-                Log4Func(("POPF CS:EIP %04x:%04RX64\n", pCtx->cs.Sel, pCtx->rip));
-                uint32_t cbParm;
-                uint32_t uMask;
-                bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
-                if (pDis->fPrefix & DISPREFIX_OPSIZE)
-                {
-                    cbParm = 4;
-                    uMask  = 0xffffffff;
-                }
-                else
-                {
-                    cbParm = 2;
-                    uMask  = 0xffff;
-                }
-
-                /* Get the stack pointer & pop the contents of the stack onto Eflags. */
-                RTGCPTR   GCPtrStack = 0;
-                X86EFLAGS Eflags;
-                Eflags.u32 = 0;
-                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
-                                  &GCPtrStack);
-                if (RT_SUCCESS(rc))
-                {
-                    Assert(sizeof(Eflags.u32) >= cbParm);
-                    rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm, PGMACCESSORIGIN_HM));
-                    AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
-                }
-                if (RT_FAILURE(rc))
-                {
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pCtx->rsp, uMask, pCtx->rip));
-                pCtx->eflags.u32 = (pCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
-                                 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
-                pCtx->esp += cbParm;
-                pCtx->esp &= uMask;
-                pCtx->rip += pDis->cbInstr;
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
-                /* Generate a pending-debug exception when the guest stepping over POPF regardless of how
-                   POPF restores EFLAGS.TF. */
-                if (   !fDbgStepping
-                    && fGstStepping)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
-                break;
-            }
-
-            case OP_PUSHF:
-            {
-                uint32_t cbParm;
-                uint32_t uMask;
-                if (pDis->fPrefix & DISPREFIX_OPSIZE)
-                {
-                    cbParm = 4;
-                    uMask  = 0xffffffff;
-                }
-                else
-                {
-                    cbParm = 2;
-                    uMask  = 0xffff;
-                }
-
-                /* Get the stack pointer & push the contents of eflags onto the stack. */
-                RTGCPTR GCPtrStack = 0;
-                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask,
-                                  SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
-                if (RT_FAILURE(rc))
-                {
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                X86EFLAGS Eflags = pCtx->eflags;
-                /* The RF & VM bits are cleared on image stored on stack; see Intel Instruction reference for PUSHF. */
-                Eflags.Bits.u1RF = 0;
-                Eflags.Bits.u1VM = 0;
-
-                rc = VBOXSTRICTRC_TODO(PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm, PGMACCESSORIGIN_HM));
-                if (RT_UNLIKELY(rc != VINF_SUCCESS))
-                {
-                    AssertMsgFailed(("%Rrc\n", rc)); /** @todo allow strict return codes here */
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                Log4Func(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
-                pCtx->esp -= cbParm;
-                pCtx->esp &= uMask;
-                pCtx->rip += pDis->cbInstr;
-                pCtx->eflags.Bits.u1RF = 0;
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
-                if (   !fDbgStepping
-                    && pCtx->eflags.Bits.u1TF)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
-                break;
-            }
-
-            case OP_IRET:
-            {
-                /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
-                 *        instruction reference. */
-                RTGCPTR  GCPtrStack   = 0;
-                uint32_t uMask        = 0xffff;
-                bool     fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
-                uint16_t aIretFrame[3];
-                if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
-                {
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
-                                  &GCPtrStack);
-                if (RT_SUCCESS(rc))
-                {
-                    rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame),
-                                                       PGMACCESSORIGIN_HM));
-                    AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
-                }
-                if (RT_FAILURE(rc))
-                {
-                    rc = VERR_EM_INTERPRETER;
-                    break;
-                }
-                pCtx->eip         = 0;
-                pCtx->ip          = aIretFrame[0];
-                pCtx->cs.Sel      = aIretFrame[1];
-                pCtx->cs.ValidSel = aIretFrame[1];
-                pCtx->cs.u64Base  = (uint64_t)pCtx->cs.Sel << 4;
-                pCtx->eflags.u32  = (pCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
-                                  | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
-                pCtx->sp         += sizeof(aIretFrame);
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
-                                                         | HM_CHANGED_GUEST_CS);
-                /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
-                if (   !fDbgStepping
-                    && fGstStepping)
-                {
-                    rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                    AssertRCReturn(rc, rc);
-                }
-                Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pCtx->cs.Sel, pCtx->ip));
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
-                break;
-            }
-
-            case OP_INT:
-            {
-                uint16_t uVector = pDis->Param1.uValue & 0xff;
-                hmR0VmxSetPendingIntN(pVCpu, uVector, pDis->cbInstr);
-                /* INT clears EFLAGS.TF, we must not set any pending debug exceptions here. */
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
-                break;
-            }
-
-            case OP_INTO:
-            {
-                if (pCtx->eflags.Bits.u1OF)
-                {
-                    hmR0VmxSetPendingXcptOF(pVCpu, pDis->cbInstr);
-                    /* INTO clears EFLAGS.TF, we must not set any pending debug exceptions here. */
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
-                }
-                else
-                {
-                    pCtx->eflags.Bits.u1RF = 0;
-                    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
-                }
-                break;
-            }
-
-            default:
-            {
-                pCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
-                VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pCtx), 0 /* pvFault */,
-                                                                    EMCODETYPE_SUPERVISOR);
-                rc = VBOXSTRICTRC_VAL(rc2);
-                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
-                /** @todo We have to set pending-debug exceptions here when the guest is
-                 *        single-stepping depending on the instruction that was interpreted. */
-
-                /*
-                 * HACK ALERT! Detect mode change and go to ring-3 to properly exit this
-                 * real mode emulation stuff.
-                 */
-                if (   rc == VINF_SUCCESS
-                    && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
-                {
-                    Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
-                    /** @todo Exit fRealOnV86Active here w/o dropping back to ring-3. */
-                    rc = VINF_EM_RESCHEDULE;
-                }
-
-                Log4Func(("#GP rc=%Rrc\n", rc));
-                break;
-            }
-        }
-    }
-    else
-        rc = VERR_EM_INTERPRETER;
-
-    AssertMsg(   rc == VINF_SUCCESS
-              || rc == VERR_EM_INTERPRETER
-              || rc == VINF_EM_HALT
-              || rc == VINF_EM_RESCHEDULE
-              , ("#GP Unexpected rc=%Rrc\n", rc));
-    return rc;
+    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        if (!CPUMIsGuestInRealModeEx(pCtx))
+        {
+            /*
+             * The guest is no longer in real-mode, check if we can continue executing the
+             * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
+             */
+            if (HMVmxCanExecuteGuest(pVCpu, pCtx))
+            {
+                Log4Func(("Mode changed but guest still suitable for executing using VT-x\n"));
+                pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
+                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+            }
+            else
+            {
+                Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
+                rcStrict = VINF_EM_RESCHEDULE;
+            }
+        }
+        else
+            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+    }
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        rcStrict = VINF_SUCCESS;
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+    }
+    return VBOXSTRICTRC_VAL(rcStrict);
 }
 
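The substance of the large final hunk is easier to see outside diff form: the hand-rolled real-mode instruction interpreter (the OP_CLI/OP_STI/OP_HLT/OP_POPF/OP_PUSHF/OP_IRET/OP_INT/OP_INTO switch) is dropped in favour of a single IEMExecOne() call followed by a mode-change check. Below is a minimal sketch of the new control flow, not a verbatim copy of the handler: the helper name and the reduced signature are hypothetical, while every other identifier is taken from the added lines above.

/* Hypothetical helper condensing the new real-on-V86 #GP path; the bodies
   mirror the "+" lines of the last hunk above. */
static VBOXSTRICTRC hmR0VmxRealModeXcptGPSketch(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /* IEM needs the complete guest state to emulate the faulting instruction. */
    int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
    AssertRCReturn(rc, rc);

    /* Emulate exactly one instruction; this replaces the old opcode switch. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VINF_SUCCESS)
    {
        if (!CPUMIsGuestInRealModeEx(pCtx))
        {
            /* The emulated instruction left real mode (e.g. it set CR0.PE). */
            if (HMVmxCanExecuteGuest(pVCpu, pCtx))
            {
                /* VT-x can still run the guest: clear the real-on-V86 state
                   and mark the whole context dirty so it gets re-exported. */
                pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
            }
            else
                rcStrict = VINF_EM_RESCHEDULE; /* Let ring-3 pick another engine. */
        }
        else
            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
    }
    else if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        /* IEM already queued an exception for the guest; report success. */
        rcStrict = VINF_SUCCESS;
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
    }
    return rcStrict;
}

Note that the old VINF_EM_HALT and VERR_EM_INTERPRETER outcomes disappear: IEM emulates HLT and the previously unhandled opcodes itself, and the single-step (EFLAGS.TF) bookkeeping that the removed cases did by hand via hmR0VmxSetPendingDebugXcptVmcs() is presumably handled inside IEM as well.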