Changeset 41737 in vbox for trunk/src/VBox/VMM
- Timestamp: Jun 15, 2012 1:01:49 AM (13 years ago)
- Location: trunk/src/VBox/VMM
- Files: 15 edited
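All of the hunks below make the same mechanical change: call sites that read the decoded instruction's opcode through pCurInstr->opcode now read pCurInstr->uOpcode. As a rough orientation, a minimal call-site sketch follows; emHandleDecodedInstr is a hypothetical helper invented for illustration, while PDISCPUSTATE, OP_MOV, VINF_SUCCESS and VERR_EM_INTERPRETER are names taken from the diffs below.

    /* Illustrative sketch only -- not part of the changeset.
     * Shows the call-site pattern renamed by r41737. */
    static int emHandleDecodedInstr(PDISCPUSTATE pDis)
    {
        /* before r41737: switch (pDis->pCurInstr->opcode)  */
        /* after  r41737: switch (pDis->pCurInstr->uOpcode) */
        switch (pDis->pCurInstr->uOpcode)
        {
            case OP_MOV:
                return VINF_SUCCESS;        /* instruction recognised and handled */
            default:
                return VERR_EM_INTERPRETER; /* defer everything else to the interpreter */
        }
    }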
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
(r41736 → r41737)

-751:   switch (pDis->pCurInstr->opcode)
+751:   switch (pDis->pCurInstr->uOpcode)

-795:   Log(("Unknown opcode %d\n", pDis->pCurInstr->opcode));
+795:   Log(("Unknown opcode %d\n", pDis->pCurInstr->uOpcode));

-2659:  if (pDis->pCurInstr->opcode == OP_LIDT)
+2659:  if (pDis->pCurInstr->uOpcode == OP_LIDT)

-3095:  && pDis->pCurInstr->opcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */
+3095:  && pDis->pCurInstr->uOpcode != OP_RDTSC) /* rdtsc requires emulation in ring 3 as well */

-3108:  && pDis->pCurInstr->opcode != OP_CMPXCHG
+3108:  && pDis->pCurInstr->uOpcode != OP_CMPXCHG
-3109:  && pDis->pCurInstr->opcode != OP_CMPXCHG8B
+3109:  && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B
-3110:  && pDis->pCurInstr->opcode != OP_XADD
+3110:  && pDis->pCurInstr->uOpcode != OP_XADD
-3111:  && pDis->pCurInstr->opcode != OP_OR
+3111:  && pDis->pCurInstr->uOpcode != OP_OR
-3112:  && pDis->pCurInstr->opcode != OP_AND
+3112:  && pDis->pCurInstr->uOpcode != OP_AND
-3113:  && pDis->pCurInstr->opcode != OP_XOR
+3113:  && pDis->pCurInstr->uOpcode != OP_XOR
-3114:  && pDis->pCurInstr->opcode != OP_BTR
+3114:  && pDis->pCurInstr->uOpcode != OP_BTR

-3120:  && pDis->pCurInstr->opcode != OP_STOSWD
+3120:  && pDis->pCurInstr->uOpcode != OP_STOSWD

-3123:  && pDis->pCurInstr->opcode != OP_OR
+3123:  && pDis->pCurInstr->uOpcode != OP_OR
-3124:  && pDis->pCurInstr->opcode != OP_AND
+3124:  && pDis->pCurInstr->uOpcode != OP_AND
-3125:  && pDis->pCurInstr->opcode != OP_XOR
+3125:  && pDis->pCurInstr->uOpcode != OP_XOR
-3126:  && pDis->pCurInstr->opcode != OP_BTR
+3126:  && pDis->pCurInstr->uOpcode != OP_BTR
-3127:  && pDis->pCurInstr->opcode != OP_CMPXCHG
+3127:  && pDis->pCurInstr->uOpcode != OP_CMPXCHG
-3128:  && pDis->pCurInstr->opcode != OP_CMPXCHG8B
+3128:  && pDis->pCurInstr->uOpcode != OP_CMPXCHG8B

-3146:  uint32_t uOpCode = pDis->pCurInstr->opcode;
+3146:  uint32_t uOpCode = pDis->pCurInstr->uOpcode;

-3167:  switch (pDis->pCurInstr->opcode)
+3167:  switch (pDis->pCurInstr->uOpcode)

-3223:  switch (pDis->pCurInstr->opcode)
+3223:  switch (pDis->pCurInstr->uOpcode)

-3331:  Log3(("emInterpretInstructionCPU: opcode=%d\n", pDis->pCurInstr->opcode));
+3331:  Log3(("emInterpretInstructionCPU: opcode=%d\n", pDis->pCurInstr->uOpcode));
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
(r41735 → r41737)

-551:   if (pCpu->pCurInstr->opcode == OP_MOVSX)
+551:   if (pCpu->pCurInstr->uOpcode == OP_MOVSX)

-1179:  if (pCpu->pCurInstr->opcode == OP_XOR)
+1179:  if (pCpu->pCurInstr->uOpcode == OP_XOR)
-1181:  else if (pCpu->pCurInstr->opcode == OP_OR)
+1181:  else if (pCpu->pCurInstr->uOpcode == OP_OR)
-1183:  else if (pCpu->pCurInstr->opcode == OP_AND)
+1183:  else if (pCpu->pCurInstr->uOpcode == OP_AND)

-1530:  switch (pDis->pCurInstr->opcode)
+1530:  switch (pDis->pCurInstr->uOpcode)

-2146:  if (pCpu->pCurInstr->opcode == OP_INSB)
+2146:  if (pCpu->pCurInstr->uOpcode == OP_INSB)

-2315:  if (pCpu->pCurInstr->opcode == OP_OUTSB)
+2315:  if (pCpu->pCurInstr->uOpcode == OP_OUTSB)
trunk/src/VBox/VMM/VMMAll/PATMAll.cpp
(r40280 → r41737)

-383:   if (pCpu->pCurInstr->opcode == OP_SYSENTER)
+383:   if (pCpu->pCurInstr->uOpcode == OP_SYSENTER)

-411:   if (pCpu->pCurInstr->opcode == OP_SYSEXIT)
+411:   if (pCpu->pCurInstr->uOpcode == OP_SYSEXIT)

-431:   if (pCpu->pCurInstr->opcode == OP_SYSCALL)
+431:   if (pCpu->pCurInstr->uOpcode == OP_SYSCALL)

-436:   if (pCpu->pCurInstr->opcode == OP_SYSRET)
+436:   if (pCpu->pCurInstr->uOpcode == OP_SYSRET)
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
(r41736 → r41737)

-723:   if ( pDis->pCurInstr->opcode == OP_BTR
+723:   if ( pDis->pCurInstr->uOpcode == OP_BTR

-763:   LogFlow(("Reused instr %RGv %d at %RGv param1.fUse=%llx param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->opcode, pvFault, pDis->param1.fUse, pDis->param1.base.reg_gen));
+763:   LogFlow(("Reused instr %RGv %d at %RGv param1.fUse=%llx param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->uOpcode, pvFault, pDis->param1.fUse, pDis->param1.base.reg_gen));

-769:   switch (pDis->pCurInstr->opcode)
+769:   switch (pDis->pCurInstr->uOpcode)

-992:   pRegFrame->cs, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->opcode));
+992:   pRegFrame->cs, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->uOpcode));

-1168:  && pDis->pCurInstr->opcode == OP_MOV
+1168:  && pDis->pCurInstr->uOpcode == OP_MOV

-1198:  if ( pDis->pCurInstr->opcode == OP_STOSWD
+1198:  if ( pDis->pCurInstr->uOpcode == OP_STOSWD

-1240:  pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->opcode, pDis->fPrefix));
+1240:  pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->uOpcode, pDis->fPrefix));
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
(r41736 → r41737)

-2986:  Assert(RT_FAILURE(rc) || pDis->pCurInstr->opcode == OP_INVLPG);
+2986:  Assert(RT_FAILURE(rc) || pDis->pCurInstr->uOpcode == OP_INVLPG);
-2987:  if (RT_SUCCESS(rc) && pDis->pCurInstr->opcode == OP_INVLPG)
+2987:  if (RT_SUCCESS(rc) && pDis->pCurInstr->uOpcode == OP_INVLPG)
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
(r41735 → r41737)

-3556:  switch (pDis->pCurInstr->opcode)
+3556:  switch (pDis->pCurInstr->uOpcode)
trunk/src/VBox/VMM/VMMR3/CSAM.cpp
(r41732 → r41737)

-799:   switch (pCpu->pCurInstr->opcode)
+799:   switch (pCpu->pCurInstr->uOpcode)

-823:   switch (pCpu->pCurInstr->opcode)
+823:   switch (pCpu->pCurInstr->uOpcode)

-899:   switch(pCpu->pCurInstr->opcode)
+899:   switch(pCpu->pCurInstr->uOpcode)

-949:   if (pCpu->pCurInstr->opcode == OP_IRET)
+949:   if (pCpu->pCurInstr->uOpcode == OP_IRET)

-961:   switch(pCpu->pCurInstr->opcode)
+961:   switch(pCpu->pCurInstr->uOpcode)

-1086:  switch (cpu.pCurInstr->opcode)
+1086:  switch (cpu.pCurInstr->uOpcode)

-1316:  && cpu.pCurInstr->opcode == OP_RETN
+1316:  && cpu.pCurInstr->uOpcode == OP_RETN

-1330:  || (cpu.pCurInstr->opcode == OP_CALL && cpu.param1.fUse == DISUSE_DISPLACEMENT32)) /* simple indirect call (call dword ptr [address]) */
+1330:  || (cpu.pCurInstr->uOpcode == OP_CALL && cpu.param1.fUse == DISUSE_DISPLACEMENT32)) /* simple indirect call (call dword ptr [address]) */

-1333:  if ( cpu.pCurInstr->opcode == OP_CALL
+1333:  if ( cpu.pCurInstr->uOpcode == OP_CALL

-1381:  if (cpu.pCurInstr->opcode == OP_CALL)
+1381:  if (cpu.pCurInstr->uOpcode == OP_CALL)

-1390:  if (cpu.pCurInstr->opcode == OP_JMP)
+1390:  if (cpu.pCurInstr->uOpcode == OP_JMP)

-1400:  if ( cpu.pCurInstr->opcode == OP_JMP
+1400:  if ( cpu.pCurInstr->uOpcode == OP_JMP

-1460:  if (cpu.pCurInstr->opcode == OP_JMP)
+1460:  if (cpu.pCurInstr->uOpcode == OP_JMP)

-2568:  && cpu.pCurInstr->opcode == OP_PUSH
+2568:  && cpu.pCurInstr->uOpcode == OP_PUSH
trunk/src/VBox/VMM/VMMR3/EMHwaccm.cpp
(r41734 → r41737)

-203:   switch (Cpu.pCurInstr->opcode)
+203:   switch (Cpu.pCurInstr->uOpcode)

-311:   switch (Cpu.pCurInstr->opcode)
+311:   switch (Cpu.pCurInstr->uOpcode)

-330:   switch (Cpu.pCurInstr->opcode)
+330:   switch (Cpu.pCurInstr->uOpcode)
trunk/src/VBox/VMM/VMMR3/EMRaw.cpp
(r41734 → r41737)

-432:   switch (Cpu.pCurInstr->opcode)
+432:   switch (Cpu.pCurInstr->uOpcode)

-451:   switch (Cpu.pCurInstr->opcode)
+451:   switch (Cpu.pCurInstr->uOpcode)

-598:   && (cpu.pCurInstr->opcode == OP_MONITOR || cpu.pCurInstr->opcode == OP_MWAIT))
+598:   && (cpu.pCurInstr->uOpcode == OP_MONITOR || cpu.pCurInstr->uOpcode == OP_MWAIT))

-681:   if (Cpu.pCurInstr->opcode == OP_SYSENTER)
+681:   if (Cpu.pCurInstr->uOpcode == OP_SYSENTER)

-696:   switch (Cpu.pCurInstr->opcode)
+696:   switch (Cpu.pCurInstr->uOpcode)

-778:   && Cpu.pCurInstr->opcode == OP_IRET)
+778:   && Cpu.pCurInstr->uOpcode == OP_IRET)

-967:   switch (Cpu.pCurInstr->opcode)
+967:   switch (Cpu.pCurInstr->uOpcode)

-1037:  Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->opcode));
+1037:  Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->uOpcode));

-1046:  switch (Cpu.pCurInstr->opcode)
+1046:  switch (Cpu.pCurInstr->uOpcode)

-1101:  if ( Cpu.pCurInstr->opcode == OP_MOV_CR
+1101:  if ( Cpu.pCurInstr->uOpcode == OP_MOV_CR
trunk/src/VBox/VMM/VMMR3/HWACCM.cpp
(r41727 → r41737)

-1822:  && pDis->pCurInstr->opcode == OP_MOV
+1822:  && pDis->pCurInstr->uOpcode == OP_MOV

-1873:  && pDis->pCurInstr->opcode == OP_SHR
+1873:  && pDis->pCurInstr->uOpcode == OP_SHR

-1988:  && pDis->pCurInstr->opcode == OP_MOV
+1988:  && pDis->pCurInstr->uOpcode == OP_MOV
trunk/src/VBox/VMM/VMMR3/PATM.cpp
(r41736 → r41737)

-1398:  && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
+1398:  && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)

-1403:  || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
+1403:  || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))

-1413:  if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
+1413:  if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->uOpcode == OP_JMP)

-1427:  if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
+1427:  if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->uOpcode == OP_PUSHF)

-1435:  if (pCpu->pCurInstr->opcode == OP_RETF)
+1435:  if (pCpu->pCurInstr->uOpcode == OP_RETF)

-1441:  else if ( pCpu->pCurInstr->opcode == OP_INT3
+1441:  else if ( pCpu->pCurInstr->uOpcode == OP_INT3
-1442:  || pCpu->pCurInstr->opcode == OP_INT
+1442:  || pCpu->pCurInstr->uOpcode == OP_INT
-1443:  || pCpu->pCurInstr->opcode == OP_INTO)
+1443:  || pCpu->pCurInstr->uOpcode == OP_INTO)

-1458:  switch (pCpu->pCurInstr->opcode)
+1458:  switch (pCpu->pCurInstr->uOpcode)

-1480:  if (pCpu->pCurInstr->opcode == OP_POPF)
+1480:  if (pCpu->pCurInstr->uOpcode == OP_POPF)

-1557:  && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
+1557:  && (pCpu->pCurInstr->uOpcode == OP_JMP || pCpu->pCurInstr->uOpcode == OP_CALL)

-1562:  || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
+1562:  || (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))

-1571:  if (pCpu->pCurInstr->opcode == OP_RETF)
+1571:  if (pCpu->pCurInstr->uOpcode == OP_RETF)

-1577:  if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
+1577:  if (pCpu->pCurInstr->uOpcode == OP_INT3 || pCpu->pCurInstr->uOpcode == OP_INT || pCpu->pCurInstr->uOpcode == OP_INTO)

-1585:  if (pCpu->pCurInstr->optype & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
+1585:  if (pCpu->pCurInstr->optype & DISOPTYPE_PRIVILEGED && pCpu->pCurInstr->uOpcode != OP_STI)

-1602:  switch (pCpu->pCurInstr->opcode)
+1602:  switch (pCpu->pCurInstr->uOpcode)

-1697:  && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
+1697:  && (pCpu->pCurInstr->uOpcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))

-1707:  if (pCpu->pCurInstr->opcode == OP_CALL)
+1707:  if (pCpu->pCurInstr->uOpcode == OP_CALL)

-1715:  rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));
+1715:  rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->uOpcode, !!(pCpu->fPrefix & DISPREFIX_OPSIZE));

-1723:  switch (pCpu->pCurInstr->opcode)
+1723:  switch (pCpu->pCurInstr->uOpcode)

-2123:  if (pCpu->pCurInstr->opcode == OP_INT3)
+2123:  if (pCpu->pCurInstr->uOpcode == OP_INT3)

-2135:  if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.cb != 4 /* only near calls */)
+2135:  if (!disret || cpu.pCurInstr->uOpcode != OP_CALL || cpu.param1.cb != 4 /* only near calls */)

-2141:  if ( pCpu->pCurInstr->opcode == OP_ILLUD2
+2141:  if ( pCpu->pCurInstr->uOpcode == OP_ILLUD2

-2148:  if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
+2148:  if ( (pCpu->pCurInstr->uOpcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
-2149:  || pCpu->pCurInstr->opcode == OP_INT
+2149:  || pCpu->pCurInstr->uOpcode == OP_INT
-2150:  || pCpu->pCurInstr->opcode == OP_IRET
+2150:  || pCpu->pCurInstr->uOpcode == OP_IRET
-2151:  || pCpu->pCurInstr->opcode == OP_RETN
+2151:  || pCpu->pCurInstr->uOpcode == OP_RETN
-2152:  || pCpu->pCurInstr->opcode == OP_RETF
+2152:  || pCpu->pCurInstr->uOpcode == OP_RETF

-2158:  if (pCpu->pCurInstr->opcode == OP_ILLUD2)
+2158:  if (pCpu->pCurInstr->uOpcode == OP_ILLUD2)

-2236:  && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
+2236:  && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */

-2268:  if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
+2268:  if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
-2270:  if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
+2270:  if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;

-2276:  if (cpu.pCurInstr->opcode == OP_JMP)
+2276:  if (cpu.pCurInstr->uOpcode == OP_JMP)

-2413:  switch(cpunext.pCurInstr->opcode)
+2413:  switch(cpunext.pCurInstr->uOpcode)

-2434:  if (cpunext.pCurInstr->opcode != OP_CLI)
+2434:  if (cpunext.pCurInstr->uOpcode != OP_CLI)

-2457:  && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
+2457:  && cpu.pCurInstr->uOpcode != OP_CALL /* complete functions are replaced; don't bother here. */

-2501:  if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
+2501:  if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
-2503:  if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
+2503:  if (cpu.pCurInstr->uOpcode == OP_CALL) pPatch->pTempInfo->nrCalls--;

-2980:  if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
+2980:  if (disret && cpuPush.pCurInstr->uOpcode == OP_PUSH)

-2988:  && cpuJmp.pCurInstr->opcode == OP_JMP
+2988:  && cpuJmp.pCurInstr->uOpcode == OP_JMP

-3572:  Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
+3572:  Assert((pCpu->pCurInstr->uOpcode == OP_CALL || pCpu->pCurInstr->uOpcode == OP_JMP) && pCpu->cbInstr == SIZEOF_NEARJUMP32);
-3574:  if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)
+3574:  if ((pCpu->pCurInstr->uOpcode != OP_CALL && pCpu->pCurInstr->uOpcode != OP_JMP) || pCpu->cbInstr != SIZEOF_NEARJUMP32)

-3604:  if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
+3604:  if (disret == false || cpu.pCurInstr->uOpcode != OP_JMP)

-3896:  switch (pCpu->pCurInstr->opcode)
+3896:  switch (pCpu->pCurInstr->uOpcode)

-4253:  pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
+4253:  pPatchRec->patch.opcode = cpu.pCurInstr->uOpcode;

-4256:  Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
+4256:  Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->uOpcode == OP_CLI);

-4319:  switch (cpu.pCurInstr->opcode)
+4319:  switch (cpu.pCurInstr->uOpcode)

-4339:  switch (cpu.pCurInstr->opcode)
+4339:  switch (cpu.pCurInstr->uOpcode)

-4383:  rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, cbInstr, pPatchRec);
+4383:  rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->uOpcode, cbInstr, pPatchRec);

-5200:  && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & DISOPTYPE_COND_CONTROLFLOW))
+5200:  && (cpu.pCurInstr->uOpcode == OP_JMP || (cpu.pCurInstr->optype & DISOPTYPE_COND_CONTROLFLOW))

-6282:  && ( Cpu.pCurInstr->opcode == OP_PUSHF
+6282:  && ( Cpu.pCurInstr->uOpcode == OP_PUSHF
-6283:  || Cpu.pCurInstr->opcode == OP_PUSH
+6283:  || Cpu.pCurInstr->uOpcode == OP_PUSH
-6284:  || Cpu.pCurInstr->opcode == OP_CALL)
+6284:  || Cpu.pCurInstr->uOpcode == OP_CALL)

-6291:  if (Cpu.pCurInstr->opcode == OP_PUSH)
+6291:  if (Cpu.pCurInstr->uOpcode == OP_PUSH)

-6411:  if (disret && cpu.pCurInstr->opcode == OP_RETN)
+6411:  if (disret && cpu.pCurInstr->uOpcode == OP_RETN)

-6450:  if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
+6450:  if (disret && (cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_INT3))

-6457:  Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
+6457:  Assert(cpu.pCurInstr->uOpcode == OP_SYSEXIT || cpu.pCurInstr->uOpcode == OP_HLT || cpu.pCurInstr->uOpcode == OP_IRET);
trunk/src/VBox/VMM/VMMR3/PATMGuest.cpp
(r35348 → r41737)

-217:   switch (pCpu->pCurInstr->opcode)
+217:   switch (pCpu->pCurInstr->uOpcode)

-243:   AssertMsgFailed(("PATMInstallGuestSpecificPatch: unknown opcode %d\n", pCpu->pCurInstr->opcode));
+243:   AssertMsgFailed(("PATMInstallGuestSpecificPatch: unknown opcode %d\n", pCpu->pCurInstr->uOpcode));
trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp
(r41734 → r41737)

-861:   Assert(pCpu->pCurInstr->opcode == OP_RETN);
+861:   Assert(pCpu->pCurInstr->uOpcode == OP_RETN);

-1338:  if (pCpu->pCurInstr->opcode == OP_STR)
+1338:  if (pCpu->pCurInstr->uOpcode == OP_STR)

-1383:  if (pCpu->pCurInstr->opcode == OP_STR)
+1383:  if (pCpu->pCurInstr->uOpcode == OP_STR)

-1428:  switch (pCpu->pCurInstr->opcode)
+1428:  switch (pCpu->pCurInstr->uOpcode)
trunk/src/VBox/VMM/VMMRC/IOMRC.cpp
(r40442 → r41737)

-69:    switch (pCpu->pCurInstr->opcode)
+69:    switch (pCpu->pCurInstr->uOpcode)

-89:    AssertMsgFailed(("Unknown I/O port access opcode %d.\n", pCpu->pCurInstr->opcode));
+89:    AssertMsgFailed(("Unknown I/O port access opcode %d.\n", pCpu->pCurInstr->uOpcode));
trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp
(r41734 → r41737)

-523:   if ( Cpu.pCurInstr->opcode == OP_ILLUD2
+523:   if ( Cpu.pCurInstr->uOpcode == OP_ILLUD2

-546:   Log(("TRPMGCTrap06Handler: pc=%08x op=%d\n", pRegFrame->eip, Cpu.pCurInstr->opcode));
+546:   Log(("TRPMGCTrap06Handler: pc=%08x op=%d\n", pRegFrame->eip, Cpu.pCurInstr->uOpcode));

-558:   else if (Cpu.pCurInstr->opcode == OP_MONITOR)
+558:   else if (Cpu.pCurInstr->uOpcode == OP_MONITOR)

-734:   switch (pCpu->pCurInstr->opcode)
+734:   switch (pCpu->pCurInstr->uOpcode)

-832:   switch (pCpu->pCurInstr->opcode)
+832:   switch (pCpu->pCurInstr->uOpcode)

-987:   if (Cpu.pCurInstr->opcode == OP_RDTSC)
+987:   if (Cpu.pCurInstr->uOpcode == OP_RDTSC)