Changeset 72634 in vbox for trunk/src/VBox/VMM/VMMR3/EMRaw.cpp
- Timestamp: Jun 20, 2018 4:08:42 PM (6 years ago)
- File: 1 edited
Legend:
- Unmodified: shown with a leading space
- Added: prefixed with "+"
- Removed: prefixed with "-"
trunk/src/VBox/VMM/VMMR3/EMRaw.cpp
--- trunk/src/VBox/VMM/VMMR3/EMRaw.cpp (r72488)
+++ trunk/src/VBox/VMM/VMMR3/EMRaw.cpp (r72634)
…
 *   Internal Functions                                                                                                           *
 *********************************************************************************************************************************/
-static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+static int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, int rc);
+static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu);
 DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
 static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu);
-static int emR3RawPatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret);
+static int emR3RawPatchTrap(PVM pVM, PVMCPU pVCpu, int gcret);
 static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu);
 static int emR3RawExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
 static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu);
+static int emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, int rc);

 #define EMHANDLERC_WITH_PATM
…
 {
     int rc;
-    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
     Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER);
-    Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags));
+    Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags));

     /*
…
     CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
     rc = VMMR3ResumeHyper(pVM, pVCpu);
-    Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Rrc\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags, rc));
+    Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags, rc));
     rc = CPUMRawLeave(pVCpu, rc);
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
…
      */
     rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
-    rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
-    rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
+    rc = emR3RawHandleRC(pVM, pVCpu, rc);
+    rc = emR3RawUpdateForceFlag(pVM, pVCpu, rc);
     return rc;
 }
…
            || pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
     int rc;
-    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
     bool fGuest = pVCpu->em.s.enmState != EMSTATE_DEBUG_HYPER;
 #ifndef DEBUG_sander
…
         || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
     {
-        rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
+        rc = emR3RawForcedActions(pVM, pVCpu);
         VBOXVMM_EM_FF_RAW_RET(pVCpu, rc);
         if (rc != VINF_SUCCESS)
…
      */
     rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
-    rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
-    rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
+    rc = emR3RawHandleRC(pVM, pVCpu, rc);
+    rc = emR3RawUpdateForceFlag(pVM, pVCpu, rc);
     return rc;
 }
…
 #endif
 {
-    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
     int rc;

…
      * to allow us execute the code in REM.
      */
-    if (PATMIsPatchGCAddr(pVM, pCtx->eip))
-    {
-        Log(("emR3RawExecuteInstruction: In patch block. eip=%RRv\n", (RTRCPTR)pCtx->eip));
+    if (PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
+    {
+        Log(("emR3RawExecuteInstruction: In patch block. eip=%RRv\n", (RTRCPTR)pVCpu->cpum.GstCtx.eip));

         RTGCPTR uNewEip;
-        rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &uNewEip);
+        rc = PATMR3HandleTrap(pVM, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.eip, &uNewEip);
         switch (rc)
         {
…
             case VINF_SUCCESS:
                 Log(("emR3RawExecuteInstruction: Executing instruction starting at new address %RGv IF=%d VMIF=%x\n",
-                     uNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
-                pCtx->eip = uNewEip;
-                Assert(pCtx->eip);
-
-                if (pCtx->eflags.Bits.u1IF)
+                     uNewEip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
+                pVCpu->cpum.GstCtx.eip = uNewEip;
+                Assert(pVCpu->cpum.GstCtx.eip);
+
+                if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
                 {
                     /*
…
             case VINF_PATCH_EMULATE_INSTR:
                 Log(("emR3RawExecuteInstruction: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
-                     uNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
-                pCtx->eip = uNewEip;
+                     uNewEip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
+                pVCpu->cpum.GstCtx.eip = uNewEip;
                 return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHIR");
…
             case VERR_PATCH_DISABLED:
                 Log(("emR3RawExecuteInstruction: Disabled patch -> new eip %RGv IF=%d VMIF=%x\n",
-                     uNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
-                pCtx->eip = uNewEip;
-                if (pCtx->eflags.Bits.u1IF)
+                     uNewEip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
+                pVCpu->cpum.GstCtx.eip = uNewEip;
+                if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
                 {
                     /*
…
 #define VBOX_WITH_FIRST_IEM_STEP_B
 #if defined(VBOX_WITH_FIRST_IEM_STEP_B) || !defined(VBOX_WITH_REM)
-    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
+    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp));
     STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
     rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
…
     STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
 # ifndef VBOX_WITH_FIRST_IEM_STEP_B
-    Log(("EMINS[rem]: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
+    Log(("EMINS[rem]: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp));
 //# elif defined(DEBUG_bird)
 //    AssertFailed();
…
 static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
 {
-    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
-
     /*
      * Get the trap info.
…
     uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
     if (    uCpl == 0
-        &&  PATMIsPatchGCAddr(pVM, pCtx->eip))
-    {
-        LogFlow(("emR3RawGuestTrap: trap %#x in patch code; eip=%08x\n", u8TrapNo, pCtx->eip));
-        return emR3RawPatchTrap(pVM, pVCpu, pCtx, rc);
+        &&  PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
+    {
+        LogFlow(("emR3RawGuestTrap: trap %#x in patch code; eip=%08x\n", u8TrapNo, pVCpu->cpum.GstCtx.eip));
+        return emR3RawPatchTrap(pVM, pVCpu, rc);
     }
 #endif
…
                               ? TRPM_TRAP_HAS_ERRORCODE
                               : TRPM_TRAP_NO_ERRORCODE;
-        rc = TRPMForwardTrap(pVCpu, CPUMCTX2CORE(pCtx), u8TrapNo, uErrorCode, enmError, TRPM_TRAP, -1);
+        rc = TRPMForwardTrap(pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), u8TrapNo, uErrorCode, enmError, TRPM_TRAP, -1);
         if (rc == VINF_SUCCESS /* Don't use RT_SUCCESS */)
         {
…
      */
     /** @todo move this up before the dispatching? */
-    if (    (pCtx->ss.Sel & X86_SEL_RPL) <= 1
-        &&  !pCtx->eflags.Bits.u1VM)
-    {
-        Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
-        CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
+    if (    (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) <= 1
+        &&  !pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
+    {
+        Assert(!PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip));
+        CSAMR3CheckCodeEx(pVM, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.eip);
     }
…
          */
         DISCPUSTATE cpu;
-        rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap (#UD): ");
+        rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.rip, &cpu, "Guest Trap (#UD): ");
         if (    RT_SUCCESS(rc)
             && (cpu.pCurInstr->uOpcode == OP_MONITOR || cpu.pCurInstr->uOpcode == OP_MWAIT))
…
             AssertRC(rc);

-            rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, &cpu, CPUMCTX2CORE(pCtx), 0, EMCODETYPE_SUPERVISOR));
+            rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, &cpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx),
+                                                                    0, EMCODETYPE_SUPERVISOR));
             if (RT_SUCCESS(rc))
                 return rc;
…
      * I/O access. We can easily handle those in RC. */
     DISCPUSTATE cpu;
-    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap: ");
+    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.rip, &cpu, "Guest Trap: ");
     if (    RT_SUCCESS(rc)
         &&  (cpu.pCurInstr->fOpType & DISOPTYPE_PORTIO))
…
     int rc2 = PGMGstGetPage(pVCpu, uCR2, &fFlags, &GCPhys);
     Log(("emR3RawGuestTrap: cs:eip=%04x:%08x: trap=%02x err=%08x cr2=%08x cr0=%08x%s: Phys=%RGp fFlags=%08llx %s %s %s%s rc2=%d\n",
-         pCtx->cs.Sel, pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0,
+         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pVCpu->cpum.GstCtx.cr0,
          (enmType == TRPM_SOFTWARE_INT) ? " software" : "", GCPhys, fFlags,
          fFlags & X86_PTE_P ? "P " : "NP", fFlags & X86_PTE_US ? "U" : "S",
…
      */
     if (u8TrapNo == 14 /* #PG */)
-        pCtx->cr2 = uCR2;
+        pVCpu->cpum.GstCtx.cr2 = uCR2;

     return VINF_EM_RESCHEDULE_REM;
…
     int rc;
     DISCPUSTATE Cpu;
-    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

     /*
      * sysenter, syscall & callgate
      */
-    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "RSWITCH: ");
+    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.rip, &Cpu, "RSWITCH: ");
     if (RT_SUCCESS(rc))
     {
         if (Cpu.pCurInstr->uOpcode == OP_SYSENTER)
         {
-            if (pCtx->SysEnter.cs != 0)
+            if (pVCpu->cpum.GstCtx.SysEnter.cs != 0)
             {
-                rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
+                rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pVCpu->cpum.GstCtx.eip),
                                         CPUMGetGuestCodeBits(pVCpu) == 32 ? PATMFL_CODE32 : 0);
                 if (RT_SUCCESS(rc))
…
  * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest CPU context.
  * @param   gcret       GC return code.
  */
-static int emR3RawPatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
+static int emR3RawPatchTrap(PVM pVM, PVMCPU pVCpu, int gcret)
 {
     uint8_t u8TrapNo;
…
     RTGCUINTPTR uCR2;

-    Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));
+    Assert(PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip));

     if (gcret == VINF_PATM_PATCH_INT3)
…

     DISCPUSTATE Cpu;
-    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->eip, &Cpu, "Patch code: ");
+    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.eip, &Cpu, "Patch code: ");
     if (    RT_SUCCESS(rc)
         &&  Cpu.pCurInstr->uOpcode == OP_IRET)
…

         /* Iret crashes are bad as we have already changed the flags on the stack */
-        rc  = PGMPhysSimpleReadGCPtr(pVCpu, &eip, pCtx->esp, 4);
-        rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selCS, pCtx->esp+4, 4);
-        rc |= PGMPhysSimpleReadGCPtr(pVCpu, &uEFlags, pCtx->esp+8, 4);
+        rc  = PGMPhysSimpleReadGCPtr(pVCpu, &eip, pVCpu->cpum.GstCtx.esp, 4);
+        rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selCS, pVCpu->cpum.GstCtx.esp+4, 4);
+        rc |= PGMPhysSimpleReadGCPtr(pVCpu, &uEFlags, pVCpu->cpum.GstCtx.esp+8, 4);
         if (rc == VINF_SUCCESS)
         {
…
             uint32_t selSS, esp;

-            rc |= PGMPhysSimpleReadGCPtr(pVCpu, &esp, pCtx->esp + 12, 4);
-            rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selSS, pCtx->esp + 16, 4);
+            rc |= PGMPhysSimpleReadGCPtr(pVCpu, &esp, pVCpu->cpum.GstCtx.esp + 12, 4);
+            rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selSS, pVCpu->cpum.GstCtx.esp + 16, 4);

             if (uEFlags & X86_EFL_VM)
             {
                 uint32_t selDS, selES, selFS, selGS;
-                rc  = PGMPhysSimpleReadGCPtr(pVCpu, &selES, pCtx->esp + 20, 4);
-                rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selDS, pCtx->esp + 24, 4);
-                rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selFS, pCtx->esp + 28, 4);
-                rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selGS, pCtx->esp + 32, 4);
+                rc  = PGMPhysSimpleReadGCPtr(pVCpu, &selES, pVCpu->cpum.GstCtx.esp + 20, 4);
+                rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selDS, pVCpu->cpum.GstCtx.esp + 24, 4);
+                rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selFS, pVCpu->cpum.GstCtx.esp + 28, 4);
+                rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selGS, pVCpu->cpum.GstCtx.esp + 32, 4);
                 if (rc == VINF_SUCCESS)
                 {
…
 #endif /* LOG_ENABLED */
     Log(("emR3RawPatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
-         pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));
+         pVCpu->cpum.GstCtx.eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pVCpu->cpum.GstCtx.cr0));

     RTGCPTR uNewEip;
-    rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &uNewEip);
+    rc = PATMR3HandleTrap(pVM, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.eip, &uNewEip);
     switch (rc)
     {
…
                 Log(("emR3RawPatchTrap: Virtual IF flag disabled!!\n"));

-            pCtx->eip = uNewEip;
-            AssertRelease(pCtx->eip);
-
-            if (pCtx->eflags.Bits.u1IF)
+            pVCpu->cpum.GstCtx.eip = uNewEip;
+            AssertRelease(pVCpu->cpum.GstCtx.eip);
+
+            if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
             {
                 /* Windows XP lets irets fault intentionally and then takes action based on the opcode; an
…
                  */
                 if (    u8TrapNo == X86_XCPT_GP
-                    &&  PATMIsInt3Patch(pVM, pCtx->eip, NULL, NULL))
+                    &&  PATMIsInt3Patch(pVM, pVCpu->cpum.GstCtx.eip, NULL, NULL))
                 {
                     /** @todo move to PATMR3HandleTrap */
-                    Log(("Possible Windows XP iret fault at %08RX32\n", pCtx->eip));
-                    PATMR3RemovePatch(pVM, pCtx->eip);
+                    Log(("Possible Windows XP iret fault at %08RX32\n", pVCpu->cpum.GstCtx.eip));
+                    PATMR3RemovePatch(pVM, pVCpu->cpum.GstCtx.eip);
                 }
…
         case VINF_PATCH_EMULATE_INSTR:
             Log(("emR3RawPatchTrap: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
-                 uNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
-            pCtx->eip = uNewEip;
-            AssertRelease(pCtx->eip);
+                 uNewEip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
+            pVCpu->cpum.GstCtx.eip = uNewEip;
+            AssertRelease(pVCpu->cpum.GstCtx.eip);
             return emR3RawExecuteInstruction(pVM, pVCpu, "PATCHEMUL: ");
…
             if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
                 Log(("emR3RawPatchTrap: Virtual IF flag disabled!!\n"));
-            pCtx->eip = uNewEip;
-            AssertRelease(pCtx->eip);
-
-            if (pCtx->eflags.Bits.u1IF)
+            pVCpu->cpum.GstCtx.eip = uNewEip;
+            AssertRelease(pVCpu->cpum.GstCtx.eip);
+
+            if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
             {
                 /*
…
 static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
 {
-    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
-
-    Assert(!pCtx->eflags.Bits.u1VM);
+    Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);

     if (PATMIsEnabled(pVM))
…
          * Check if in patch code.
          */
-        if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
+        if (PATMR3IsInsidePatchJump(pVM, pVCpu->cpum.GstCtx.eip, NULL))
         {
 #ifdef LOG_ENABLED
             DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", "PRIV");
 #endif
-            AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08x\n", pCtx->eip));
+            AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08x\n", pVCpu->cpum.GstCtx.eip));
             return VERR_EM_RAW_PATCH_CONFLICT;
         }
-        if (   (pCtx->ss.Sel & X86_SEL_RPL) == 0
-            && !pCtx->eflags.Bits.u1VM
-            && !PATMIsPatchGCAddr(pVM, pCtx->eip))
-        {
-            int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
+        if (   (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) == 0
+            && !pVCpu->cpum.GstCtx.eflags.Bits.u1VM
+            && !PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
+        {
+            int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pVCpu->cpum.GstCtx.eip),
                                         CPUMGetGuestCodeBits(pVCpu) == 32 ? PATMFL_CODE32 : 0);
             if (RT_SUCCESS(rc))
…
 #ifdef LOG_ENABLED
-    if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
+    if (!PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
     {
         DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", "PRIV");
…
     int rc;

-    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "PRIV: ");
+    rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.rip, &Cpu, "PRIV: ");
     if (RT_SUCCESS(rc))
     {
…
             case OP_CLI:
                 STAM_COUNTER_INC(&pStats->StatCli);
-                emR3RecordCli(pVM, pVCpu, pCtx->rip);
+                emR3RecordCli(pVM, pVCpu, pVCpu->cpum.GstCtx.rip);
                 break;
             case OP_STI:
…
         }
 #endif /* VBOX_WITH_STATISTICS */
-        if (    (pCtx->ss.Sel & X86_SEL_RPL) == 0
-            &&  !pCtx->eflags.Bits.u1VM
+        if (    (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) == 0
+            &&  !pVCpu->cpum.GstCtx.eflags.Bits.u1VM
             &&  CPUMGetGuestCodeBits(pVCpu) == 32)
         {
…
             {
                 case OP_CLI:
-                    pCtx->eflags.u32 &= ~X86_EFL_IF;
+                    pVCpu->cpum.GstCtx.eflags.u32 &= ~X86_EFL_IF;
                     Assert(Cpu.cbInstr == 1);
-                    pCtx->rip += Cpu.cbInstr;
+                    pVCpu->cpum.GstCtx.rip += Cpu.cbInstr;
                     STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
                     return VINF_EM_RESCHEDULE_REM; /* must go to the recompiler now! */

                 case OP_STI:
-                    pCtx->eflags.u32 |= X86_EFL_IF;
-                    EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + Cpu.cbInstr);
+                    pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_IF;
+                    EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip + Cpu.cbInstr);
                     Assert(Cpu.cbInstr == 1);
-                    pCtx->rip += Cpu.cbInstr;
+                    pVCpu->cpum.GstCtx.rip += Cpu.cbInstr;
                     STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
                     return VINF_SUCCESS;

                 case OP_HLT:
-                    if (PATMIsPatchGCAddr(pVM, pCtx->eip))
+                    if (PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
                     {
                         PATMTRANSSTATE enmState;
-                        RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);
+                        RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pVCpu->cpum.GstCtx.eip, &enmState);

                         if (enmState == PATMTRANS_OVERWRITTEN)
…
                             Assert(rc == VERR_PATCH_DISABLED);
                             /* Conflict detected, patch disabled */
-                            Log(("emR3RawPrivileged: detected conflict -> disabled patch at %08RX32\n", pCtx->eip));
+                            Log(("emR3RawPrivileged: detected conflict -> disabled patch at %08RX32\n", pVCpu->cpum.GstCtx.eip));

                             enmState = PATMTRANS_SAFE;
…
                         /* The translation had better be successful. Otherwise we can't recover. */
-                        AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %08RX32\n", pCtx->eip));
+                        AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %08RX32\n", pVCpu->cpum.GstCtx.eip));
                         if (enmState != PATMTRANS_OVERWRITTEN)
-                            pCtx->eip = pOrgInstrGC;
+                            pVCpu->cpum.GstCtx.eip = pOrgInstrGC;
                     }
                     /* no break; we could just return VINF_EM_HALT here */
…
                 case OP_MOV_DR:
 #ifdef LOG_ENABLED
-                    if (PATMIsPatchGCAddr(pVM, pCtx->eip))
+                    if (PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
                     {
                         DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", "PRIV");
…
 #endif

-                    rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0, EMCODETYPE_SUPERVISOR));
+                    rc = VBOXSTRICTRC_TODO(EMInterpretInstructionDisasState(pVCpu, &Cpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx),
+                                                                            0, EMCODETYPE_SUPERVISOR));
                     if (RT_SUCCESS(rc))
                     {
…
                          * us to go to the recompiler.
                          */
-                        if (    PATMIsPatchGCAddr(pVM, pCtx->rip)
-                            &&  (pCtx->cr0 & (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE)) != (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE))
+                        if (    PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.rip)
+                            &&  (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE)) != (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE))
                         {
                             PATMTRANSSTATE enmState;
-                            RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->rip, &enmState);
-
-                            Log(("Force recompiler switch due to cr0 (%RGp) update rip=%RGv -> %RGv (enmState=%d)\n", pCtx->cr0, pCtx->rip, pOrgInstrGC, enmState));
+                            RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pVCpu->cpum.GstCtx.rip, &enmState);
+
+                            Log(("Force recompiler switch due to cr0 (%RGp) update rip=%RGv -> %RGv (enmState=%d)\n", pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.rip, pOrgInstrGC, enmState));
                             if (enmState == PATMTRANS_OVERWRITTEN)
                             {
…
                                 Assert(rc == VERR_PATCH_DISABLED);
                                 /* Conflict detected, patch disabled */
-                                Log(("emR3RawPrivileged: detected conflict -> disabled patch at %RGv\n", (RTGCPTR)pCtx->rip));
+                                Log(("emR3RawPrivileged: detected conflict -> disabled patch at %RGv\n", (RTGCPTR)pVCpu->cpum.GstCtx.rip));
                                 enmState = PATMTRANS_SAFE;
                             }
                             /* The translation had better be successful. Otherwise we can't recover. */
-                            AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %RGv\n", (RTGCPTR)pCtx->rip));
+                            AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %RGv\n", (RTGCPTR)pVCpu->cpum.GstCtx.rip));
                             if (enmState != PATMTRANS_OVERWRITTEN)
-                                pCtx->rip = pOrgInstrGC;
+                                pVCpu->cpum.GstCtx.rip = pOrgInstrGC;
                         }
…
     }

-    if (PATMIsPatchGCAddr(pVM, pCtx->eip))
-        return emR3RawPatchTrap(pVM, pVCpu, pCtx, VINF_PATM_PATCH_TRAP_GP);
+    if (PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
+        return emR3RawPatchTrap(pVM, pVCpu, VINF_PATM_PATCH_TRAP_GP);

     return emR3RawExecuteInstruction(pVM, pVCpu, "PRIV");
…
  * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest CPU context.
  * @param   rc          The result code.
  */
-int emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
+static int emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, int rc)
 {
-    if (PATMIsPatchGCAddr(pVM, pCtx->eip)) /** @todo check cs selector base/type */
+    if (PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip)) /** @todo check cs selector base/type */
     {
         /* ignore reschedule attempts. */
…
 VMMR3_INT_DECL(int) EMR3CheckRawForcedActions(PVM pVM, PVMCPU pVCpu)
 {
-    int rc = emR3RawForcedActions(pVM, pVCpu, pVCpu->em.s.pCtx);
+    int rc = emR3RawForcedActions(pVM, pVCpu);
     VBOXVMM_EM_FF_RAW_RET(pVCpu, rc);
     return rc;
…
  * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest CPU context.
  */
-static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu)
 {
     /*
…
         && CSAMIsEnabled(pVM))
     {
-        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+        int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
         if (RT_FAILURE(rc))
             return rc;
…
     {
         Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
-        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+        int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
         if (RT_FAILURE(rc))
             return rc == VERR_PGM_NO_HYPERVISOR_ADDRESS ? VINF_EM_RESCHEDULE_REM : rc;
…
         /* Prefetch pages for EIP and ESP. */
         /** @todo This is rather expensive. Should investigate if it really helps at all. */
-        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
+        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pVCpu->cpum.GstCtx.rip));
         if (rc == VINF_SUCCESS)
-            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
+            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_SS, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pVCpu->cpum.GstCtx.rsp));
         if (rc != VINF_SUCCESS)
         {
…
                 return rc;
             }
-            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+            rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
             if (RT_FAILURE(rc))
                 return rc;
…

     int rc = VERR_IPE_UNINITIALIZED_STATUS;
-    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
-    LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pCtx->cs.Sel, pCtx->eip));
+    LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip));
     pVCpu->em.s.fForceRAW = false;
     *pfFFDone = false;
…
 #ifdef VBOX_STRICT
-        Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) == 3 || (pCtx->ss.Sel & X86_SEL_RPL) == 0
-               || (EMIsRawRing1Enabled(pVM) && (pCtx->ss.Sel & X86_SEL_RPL) == 1));
-        AssertMsg(   (pCtx->eflags.u32 & X86_EFL_IF)
-                  || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
-                  ("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
+        Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1VM || (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) == 3 || (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) == 0
+               || (EMIsRawRing1Enabled(pVM) && (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) == 1));
+        AssertMsg(   (pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF)
+                  || PATMShouldUseRawMode(pVM, (RTGCPTR)pVCpu->cpum.GstCtx.eip),
+                  ("Tried to execute code with IF at EIP=%08x!\n", pVCpu->cpum.GstCtx.eip));
         if (   !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
             && PGMMapHasConflicts(pVM))
…
             || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
         {
-            rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
+            rc = emR3RawForcedActions(pVM, pVCpu);
             VBOXVMM_EM_FF_RAW_RET(pVCpu, rc);
             if (rc != VINF_SUCCESS)
…
          * Scan code before executing it. Don't bother with user mode or V86 code
          */
-        if (    (pCtx->ss.Sel & X86_SEL_RPL) <= 1
-            &&  !pCtx->eflags.Bits.u1VM
-            &&  !PATMIsPatchGCAddr(pVM, pCtx->eip))
+        if (    (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) <= 1
+            &&  !pVCpu->cpum.GstCtx.eflags.Bits.u1VM
+            &&  !PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip))
         {
             STAM_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWEntry, b);
-            CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
+            CSAMR3CheckCodeEx(pVM, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.eip);
             STAM_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWEntry, b);
             if (    VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
                 ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
             {
-                rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
+                rc = emR3RawForcedActions(pVM, pVCpu);
                 VBOXVMM_EM_FF_RAW_RET(pVCpu, rc);
                 if (rc != VINF_SUCCESS)
…
          */
         PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
-        if (pCtx->eflags.Bits.u1VM)
-            Log(("RV86: %04x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
-        else if ((pCtx->ss.Sel & X86_SEL_RPL) == 1)
+        if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
+            Log(("RV86: %04x:%08x IF=%d VMFlags=%x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pGCState->uVMFlags));
+        else if ((pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) == 1)
             Log(("RR0: %x:%08x ESP=%x:%08x EFL=%x IF=%d/%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n",
-                 pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, CPUMRawGetEFlags(pVCpu), !!(pGCState->uVMFlags & X86_EFL_IF), pCtx->eflags.Bits.u1IF,
-                 pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL), CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip)));
+                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, CPUMRawGetEFlags(pVCpu), !!(pGCState->uVMFlags & X86_EFL_IF), pVCpu->cpum.GstCtx.eflags.Bits.u1IF,
+                 pGCState->uVMFlags, pGCState->fPIF, (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL), CSAMIsPageScanned(pVM, (RTGCPTR)pVCpu->cpum.GstCtx.eip)));
 # ifdef VBOX_WITH_RAW_RING1
-        else if ((pCtx->ss.Sel & X86_SEL_RPL) == 2)
-            Log(("RR1: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x CPL=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, (pCtx->ss.Sel & X86_SEL_RPL)));
+        else if ((pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) == 2)
+            Log(("RR1: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x CPL=%x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pGCState->uVMFlags, (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL)));
 # endif
-        else if ((pCtx->ss.Sel & X86_SEL_RPL) == 3)
-            Log(("RR3: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
+        else if ((pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) == 3)
+            Log(("RR3: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pGCState->uVMFlags));
 #endif /* LOG_ENABLED */
…
         {
             STAM_PROFILE_START(&pVCpu->em.s.StatRAWExec, c);
-            VBOXVMM_EM_RAW_RUN_PRE(pVCpu, pCtx);
+            VBOXVMM_EM_RAW_RUN_PRE(pVCpu, &pVCpu->cpum.GstCtx);
             rc = VMMR3RawRunGC(pVM, pVCpu);
-            VBOXVMM_EM_RAW_RUN_RET(pVCpu, pCtx, rc);
+            VBOXVMM_EM_RAW_RUN_RET(pVCpu, &pVCpu->cpum.GstCtx, rc);
             STAM_PROFILE_STOP(&pVCpu->em.s.StatRAWExec, c);
         }
…

         LogFlow(("RR%u-E: %08x ESP=%08x EFL=%x IF=%d/%d VMFlags=%x PIF=%d\n",
-                 (pCtx->ss.Sel & X86_SEL_RPL), pCtx->eip, pCtx->esp, CPUMRawGetEFlags(pVCpu),
-                 !!(pGCState->uVMFlags & X86_EFL_IF), pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF));
+                 (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL), pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, CPUMRawGetEFlags(pVCpu),
+                 !!(pGCState->uVMFlags & X86_EFL_IF), pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF));
         LogFlow(("VMMR3RawRunGC returned %Rrc\n", rc));
…

             default:
-                if (PATMIsPatchGCAddr(pVM, pCtx->eip) && !(pCtx->eflags.u32 & X86_EFL_TF))
+                if (PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip) && !(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF))
                     LogIt(0, LOG_GROUP_PATM, ("Patch code interrupted at %RRv for reason %Rrc\n", (RTRCPTR)CPUMGetGuestEIP(pVCpu), rc));
                 break;
…
                 break;
         }
-        rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
+        rc = emR3RawHandleRC(pVM, pVCpu, rc);
         if (rc != VINF_SUCCESS)
         {
-            rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
+            rc = emR3RawUpdateForceFlag(pVM, pVCpu, rc);
             if (rc != VINF_SUCCESS)
             {
…
             ||  VMCPU_FF_IS_PENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
         {
-            Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) != (EMIsRawRing1Enabled(pVM) ? 2U : 1U));
+            Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1VM || (pVCpu->cpum.GstCtx.ss.Sel & X86_SEL_RPL) != (EMIsRawRing1Enabled(pVM) ? 2U : 1U));

             STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWTotal, a);
…
             &&  rc != VINF_EM_RESCHEDULE_RAW)
         {
-            rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
+            rc = emR3RawUpdateForceFlag(pVM, pVCpu, rc);
             if (rc != VINF_SUCCESS)
             {
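
The change is mechanical throughout the file: instead of caching a PCPUMCTX pointer fetched from pVCpu->em.s.pCtx (and threading it along as an extra parameter), each emR3Raw* helper now reaches the guest register context directly through pVCpu->cpum.GstCtx. Below is a minimal compilable sketch of that before/after access pattern; the struct layouts, field set and function names are simplified stand-ins for illustration only, not the real VirtualBox headers.

#include <stdio.h>

/* Simplified stand-ins for the real VirtualBox types: only the members this
 * sketch touches are modelled; the real CPUMCTX/VMCPU are far larger. */
typedef struct CPUMCTX { unsigned int eip; } CPUMCTX;

typedef struct VMCPU
{
    struct { CPUMCTX GstCtx; } cpum;               /* guest context embedded in the VCPU */
    struct { struct { CPUMCTX *pCtx; } s; } em;    /* older code cached a pointer to it here */
} VMCPU, *PVMCPU;

/* Before r72634: helpers fetched (or were handed) a context pointer. */
static unsigned int getEipOld(PVMCPU pVCpu)
{
    CPUMCTX *pCtx = pVCpu->em.s.pCtx;   /* extra indirection, extra parameter to thread around */
    return pCtx->eip;
}

/* After r72634: the guest context is addressed directly off pVCpu, so the
 * pCtx locals and parameters disappear from the emR3Raw* signatures. */
static unsigned int getEipNew(PVMCPU pVCpu)
{
    return pVCpu->cpum.GstCtx.eip;
}

int main(void)
{
    VMCPU VCpu = { 0 };
    VCpu.em.s.pCtx = &VCpu.cpum.GstCtx;   /* in this sketch the cached pointer aliases the embedded context */
    VCpu.cpum.GstCtx.eip = 0x1000;
    printf("old=%#x new=%#x\n", getEipOld(&VCpu), getEipNew(&VCpu));
    return 0;
}

In the sketch the cached pointer simply aliases the embedded context, so both helpers read the same storage; dropping it removes a level of indirection and one parameter from every signature, which is what most of the hunks above amount to.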