Changeset 36840 in vbox for trunk/src/VBox/VMM
- Timestamp: Apr 25, 2011 7:47:38 PM (14 years ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 4 edited
Legend: lines prefixed with '+' were added in r36840, lines prefixed with '-' were removed, and unprefixed lines are unmodified context.
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r36838 → r36840:

@@ -614 +614 @@
     RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
     pIemCpu->GCPhysOpcodes = GCPhys;
-    if (offPrevOpcodes < cbOldOpcodes)
+    if (   offPrevOpcodes < cbOldOpcodes
+        && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
     {
         uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;

@@ -660 +661 @@
      */
     PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+    uint8_t  cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
     uint32_t cbToTryRead;
     RTGCPTR  GCPtrNext;

@@ -669 +671 @@
             return iemRaiseGeneralProtectionFault0(pIemCpu);
         cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
-        Assert(cbToTryRead >= cbMin); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
+        Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
     }
     else

@@ -679 +681 @@
             return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
         cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
-        if (cbToTryRead < cbMin)
+        if (cbToTryRead < cbMin - cbLeft)
             return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
         GCPtrNext = pCtx->csHid.u64Base + GCPtrNext32;

@@ -694 +696 @@
         return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
     GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
+    //Log(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
     /** @todo Check reserved bits and such stuff. PGM is better at doing
      *        that, so do it when implementing the guest virtual address

@@ -706 +709 @@
     if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
         cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
+    Assert(cbToTryRead >= cbMin - cbLeft);
     if (!pIemCpu->fByPassHandlers)
         rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);

@@ -713 +717 @@
         return rc;
     pIemCpu->cbOpcode += cbToTryRead;
+    //Log(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));

     return VINF_SUCCESS;

@@ -4173 +4178 @@
     pIemCpu->fNoRem = !LogIsEnabled(); /* logging triggers the no-rem/rem verification stuff */

+#if 0
+    // Auto enable; DSL.
+    if (   pIemCpu->fNoRem
+        && pOrgCtx->cs  == 0x10
+        && (   pOrgCtx->rip == 0x00100fc7
+            || pOrgCtx->rip == 0x00100ffc
+            || pOrgCtx->rip == 0x00100ffe
+           )
+       )
+    {
+        RTLogFlags(NULL, "enabled");
+        pIemCpu->fNoRem = false;
+    }
+#endif
+
     /*
      * Switch state.
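The prefetch hunks above tighten the opcode-buffer bookkeeping: previously fetched bytes are only reused when the whole opcode buffer still fits inside the page GCPhys points into, and the later assertions subtract the cbLeft bytes already sitting in the buffer. A minimal standalone sketch of the new reuse condition (hypothetical names, 4 KiB pages assumed; not the actual IEM code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SKETCH_PAGE_SIZE 4096u  /* assumption: 4 KiB guest pages */

    /* Reuse bytes fetched for the previous instruction only when (a) the new
     * physical address lands inside them and (b) the full opcode buffer fits
     * in the remainder of the page, so a later fetch never crosses the page. */
    bool canReuseOldOpcodes(uint64_t GCPhys, uint64_t GCPhysOldOpcodes,
                            uint8_t cbOldOpcodes, size_t cbOpcodeBuf)
    {
        uint64_t offPrevOpcodes = GCPhys - GCPhysOldOpcodes;
        return offPrevOpcodes < cbOldOpcodes
            && SKETCH_PAGE_SIZE - (GCPhys & (SKETCH_PAGE_SIZE - 1)) > cbOpcodeBuf;
    }

The rest of this file's diff continues below with the verification-mode changes.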
@@ -4503 +4523 @@
              && pEvtRec->u.RamWrite.cb != 4) )
         {
-            RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
-            RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
-            RTAssertMsg2Add("REM: %.*Rhxs\n"
-                            "IEM: %.*Rhxs\n",
-                            pEvtRec->u.RamWrite.cb, abBuf,
-                            pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
-            iemVerifyAssertAddRecordDump(pEvtRec);
-            iemOpStubMsg2(pIemCpu);
-            RTAssertPanic();
+            /* fend off ROMs */
+            if (   pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000c0000) > UINT32_C(0x8000)
+                && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000e0000) > UINT32_C(0x20000)
+                && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
+            {
+                RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
+                RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
+                RTAssertMsg2Add("REM: %.*Rhxs\n"
+                                "IEM: %.*Rhxs\n",
+                                pEvtRec->u.RamWrite.cb, abBuf,
+                                pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
+                iemVerifyAssertAddRecordDump(pEvtRec);
+                iemOpStubMsg2(pIemCpu);
+                RTAssertPanic();
+            }
         }
     }

@@ -4574 +4600 @@
         } while (0)

+# define CHECK_SEL(a_Sel) \
+    do \
+    { \
+        CHECK_FIELD(a_Sel); \
+        if (   pOrgCtx->a_Sel##Hid.Attr.u != pDebugCtx->a_Sel##Hid.Attr.u \
+            && (pOrgCtx->a_Sel##Hid.Attr.u | X86_SEL_TYPE_ACCESSED) != pDebugCtx->a_Sel##Hid.Attr.u) \
+        { \
+            RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
+            cDiffs++; \
+        } \
+        CHECK_FIELD(a_Sel##Hid.u64Base); \
+        CHECK_FIELD(a_Sel##Hid.u32Limit); \
+    } while (0)
+
     if (memcmp(&pOrgCtx->fpu, &pDebugCtx->fpu, sizeof(pDebugCtx->fpu)))
     {

@@ -4589 +4629 @@
         fFlagsMask &= ~(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
     if (pIemCpu->fShiftOfHack)
-        fFlagsMask &= ~(X86_EFL_OF);
+        fFlagsMask &= ~(X86_EFL_OF | X86_EFL_AF);
     if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
     {

@@ -4631 +4671 @@
         CHECK_FIELD(r12);
         CHECK_FIELD(r13);
-        CHECK_FIELD(cs);
-        CHECK_FIELD(csHid.u64Base);
-        CHECK_FIELD(csHid.u32Limit);
-        CHECK_FIELD(csHid.Attr.u);
-        CHECK_FIELD(ss);
-        CHECK_FIELD(ssHid.u64Base);
-        CHECK_FIELD(ssHid.u32Limit);
-        CHECK_FIELD(ssHid.Attr.u);
-        CHECK_FIELD(ds);
-        CHECK_FIELD(dsHid.u64Base);
-        CHECK_FIELD(dsHid.u32Limit);
-        CHECK_FIELD(dsHid.Attr.u);
-        CHECK_FIELD(es);
-        CHECK_FIELD(esHid.u64Base);
-        CHECK_FIELD(esHid.u32Limit);
-        CHECK_FIELD(esHid.Attr.u);
-        CHECK_FIELD(fs);
-        CHECK_FIELD(fsHid.u64Base);
-        CHECK_FIELD(fsHid.u32Limit);
-        CHECK_FIELD(fsHid.Attr.u);
-        CHECK_FIELD(gs);
-        CHECK_FIELD(gsHid.u64Base);
-        CHECK_FIELD(gsHid.u32Limit);
-        CHECK_FIELD(gsHid.Attr.u);
+        CHECK_SEL(cs);
+        CHECK_SEL(ss);
+        CHECK_SEL(ds);
+        CHECK_SEL(es);
+        CHECK_SEL(fs);
+        CHECK_SEL(gs);
         CHECK_FIELD(cr0);
         CHECK_FIELD(cr2);
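The verifier now raises the "Memory at %RGv differs" assertion only for addresses outside three ROM-ish windows, using the classic unsigned-subtraction range test: addr - base > size is false exactly when base <= addr <= base + size, and wrap-around takes care of addr < base. A small self-contained illustration of that idiom with the same constants (the range labels are my reading of them, not from the changeset):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* (addr - base) > size is false exactly when base <= addr <= base + size;
     * for addr < base the unsigned subtraction wraps and the test stays true. */
    bool isOutsideRange(uint64_t addr, uint64_t base, uint64_t size)
    {
        return addr - base > size;
    }

    int main(void)
    {
        /* Same windows as the patch: presumably the VGA BIOS, the upper BIOS
         * area, and the flash mapping just below 4 GiB. */
        uint64_t aTest[] = { 0x000b8000, 0x000c1234, 0x000f0000, 0xfffff000, 0x00200000 };
        for (unsigned i = 0; i < sizeof(aTest) / sizeof(aTest[0]); i++)
        {
            bool fAssertWorthy = isOutsideRange(aTest[i], 0x000c0000, 0x8000)
                              && isOutsideRange(aTest[i], 0x000e0000, 0x20000)
                              && isOutsideRange(aTest[i], 0xfffc0000, 0x40000);
            printf("%#010llx -> %s\n", (unsigned long long)aTest[i],
                   fAssertWorthy ? "compare/assert" : "skipped (ROM-ish)");
        }
        return 0;
    }

Writes landing in one of those windows are now skipped by the verifier instead of tripping the panic.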
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm
r36815 → r36840:

@@ -618 +618 @@
 IEMIMPL_SHIFT_OP rcl,  (X86_EFL_OF | X86_EFL_CF), 0
 IEMIMPL_SHIFT_OP rcr,  (X86_EFL_OF | X86_EFL_CF), 0
+%ifndef IEM_VERIFICATION_MODE
 IEMIMPL_SHIFT_OP shl,  (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
 IEMIMPL_SHIFT_OP shr,  (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
 IEMIMPL_SHIFT_OP sar,  (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
+%else
+IEMIMPL_SHIFT_OP shl,  (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF | X86_EFL_AF), 0
+IEMIMPL_SHIFT_OP shr,  (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF | X86_EFL_AF), 0
+IEMIMPL_SHIFT_OP sar,  (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF | X86_EFL_AF), 0
+%endif
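This change only affects IEM_VERIFICATION_MODE builds: the SHL/SHR/SAR helpers then list AF among the modified flags and zero it, since AF (and, for counts other than 1, OF) is architecturally undefined after shifts and the recompiler that IEM is checked against may compute it differently. The C side of the same idea is the fFlagsMask handling shown earlier; a minimal sketch of that masking, with hypothetical names (EFLAGS bit values per the x86 layout):

    #include <stdbool.h>
    #include <stdint.h>

    /* x86 EFLAGS bits (subset). */
    #define MY_EFL_CF  UINT32_C(0x0001)
    #define MY_EFL_PF  UINT32_C(0x0004)
    #define MY_EFL_AF  UINT32_C(0x0010)
    #define MY_EFL_ZF  UINT32_C(0x0040)
    #define MY_EFL_SF  UINT32_C(0x0080)
    #define MY_EFL_OF  UINT32_C(0x0800)

    /* Compare two EFLAGS values while ignoring flags left architecturally
     * undefined by the instruction just executed; for the shift hack in this
     * changeset that is OF and AF. */
    bool eflagsMatch(uint32_t fIem, uint32_t fRem, bool fAfterShift)
    {
        uint32_t fMask = UINT32_C(0xffffffff);
        if (fAfterShift)
            fMask &= ~(MY_EFL_OF | MY_EFL_AF);
        return (fIem & fMask) == (fRem & fMask);
    }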
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r36838 → r36840:

@@ -1992 +1992 @@
     uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
     uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
-    return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, 0, uNewCr0);
+    return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
+}
+
+
+/**
+ * Implements 'CLTS'.
+ */
+IEM_CIMPL_DEF_0(iemOpCImpl_clts)
+{
+    if (pIemCpu->uCpl != 0)
+        return iemRaiseGeneralProtectionFault0(pIemCpu);
+
+    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+    uint64_t uNewCr0 = pCtx->cr0;
+    uNewCr0 &= ~X86_CR0_TS;
+    return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
 }

@@ -2197 +2212 @@

+/**
+ * Implements 'CPUID'.
+ */
+IEM_CIMPL_DEF_0(iemOpCImpl_cpuid)
+{
+    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
+
+    CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
+    pCtx->rax &= UINT32_C(0xffffffff);
+    pCtx->rbx &= UINT32_C(0xffffffff);
+    pCtx->rcx &= UINT32_C(0xffffffff);
+    pCtx->rdx &= UINT32_C(0xffffffff);
+
+    iemRegAddToRip(pIemCpu, cbInstr);
+    return VINF_SUCCESS;
+}
+
+
 /*
  * Instantiate the various string operation combinations.
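The new iemOpCImpl_cpuid forwards the leaf in EAX to CPUM and then truncates RAX/RBX/RCX/RDX to 32 bits, matching how CPUID zero-extends its 32-bit results into the 64-bit registers. For comparison against real hardware, the instruction can be exercised from user mode with the GCC/Clang <cpuid.h> helper; this is only an illustration, not part of the changeset:

    #include <cpuid.h>     /* GCC/Clang wrapper around the CPUID instruction */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Leaf 0: maximum standard leaf in EAX, vendor string in EBX/EDX/ECX. */
        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 1;

        char szVendor[13];
        memcpy(szVendor + 0, &ebx, 4);
        memcpy(szVendor + 4, &edx, 4);
        memcpy(szVendor + 8, &ecx, 4);
        szVendor[12] = '\0';

        printf("max std leaf=%u vendor=%s\n", eax, szVendor);
        return 0;
    }

Leaf 0 returns the vendor string in EBX, EDX, ECX order, which is why the sketch assembles it that way.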
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
r36838 → r36840:

@@ -880 +880 @@
 /** Opcode 0x0f 0x04. */
 FNIEMOP_STUB(iemOp_syscall);
+
+
 /** Opcode 0x0f 0x05. */
-FNIEMOP_STUB(iemOp_clts);
+FNIEMOP_DEF(iemOp_clts)
+{
+    IEMOP_MNEMONIC("clts");
+    IEMOP_HLP_NO_LOCK_PREFIX();
+    return IEM_MC_DEFER_TO_CIMPL_0(iemOpCImpl_clts);
+}
+
+
 /** Opcode 0x0f 0x06. */
 FNIEMOP_STUB(iemOp_sysret);

@@ -2401 +2410 @@
 FNIEMOP_DEF(iemOp_push_fs)
 {
+    IEMOP_MNEMONIC("push fs");
     IEMOP_HLP_NO_LOCK_PREFIX();
     return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);

@@ -2409 +2419 @@
 FNIEMOP_DEF(iemOp_pop_fs)
 {
+    IEMOP_MNEMONIC("pop fs");
     IEMOP_HLP_NO_LOCK_PREFIX();
     return IEM_MC_DEFER_TO_CIMPL_2(iemOpCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);

@@ -2415 +2426 @@

 /** Opcode 0x0f 0xa2. */
-FNIEMOP_STUB(iemOp_cpuid);
+FNIEMOP_DEF(iemOp_cpuid)
+{
+    IEMOP_MNEMONIC("cpuid");
+    IEMOP_HLP_NO_LOCK_PREFIX();
+    return IEM_MC_DEFER_TO_CIMPL_0(iemOpCImpl_cpuid);
+}
+
+
 /** Opcode 0x0f 0xa3. */
 FNIEMOP_STUB(iemOp_bt_Ev_Gv);

@@ -2427 +2445 @@
 FNIEMOP_DEF(iemOp_push_gs)
 {
+    IEMOP_MNEMONIC("push gs");
     IEMOP_HLP_NO_LOCK_PREFIX();
     return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);

@@ -2435 +2454 @@
 FNIEMOP_DEF(iemOp_pop_gs)
 {
+    IEMOP_MNEMONIC("pop gs");
     IEMOP_HLP_NO_LOCK_PREFIX();
     return IEM_MC_DEFER_TO_CIMPL_2(iemOpCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);

@@ -6369 +6389 @@
 /** Opcode 0x9b. */
 FNIEMOP_STUB(iemOp_wait);
+/** @todo Do WAIT next. */


@@ -6453 +6474 @@
      * Get the offset and fend of lock prefixes.
      */
+    IEMOP_MNEMONIC("mov rAX,Ov");
     RTGCPTR GCPtrMemOff;
     IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

@@ -7581 +7603 @@
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
     }
+#ifdef IEM_VERIFICATION_MODE
+    pIemCpu->fShiftOfHack = true;
+#endif

     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))

@@ -7593 +7618 @@
         IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
         IEM_MC_REF_EFLAGS(pEFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShift > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
         IEM_MC_ADVANCE_RIP();

@@ -7615 +7637 @@
         IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
         IEM_MC_FETCH_EFLAGS(EFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShift > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

@@ -7646 +7665 @@
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
     }
+#ifdef IEM_VERIFICATION_MODE
+    pIemCpu->fShiftOfHack = true;
+#endif

     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))

@@ -7651 +7673 @@
         /* register */
         uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShift > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEMOP_HLP_NO_LOCK_PREFIX();
         switch (pIemCpu->enmEffOpSize)

@@ -7711 +7730 @@
                 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                 uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
-#ifdef IEM_VERIFICATION_MODE
-                if (cShift > 1) pIemCpu->fShiftOfHack = true;
-#endif
                 IEM_MC_ASSIGN(cShiftArg, cShift);
                 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);

@@ -7734 +7750 @@
                 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                 uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
-#ifdef IEM_VERIFICATION_MODE
-                if (cShift > 1) pIemCpu->fShiftOfHack = true;
-#endif
                 IEM_MC_ASSIGN(cShiftArg, cShift);
                 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);

@@ -7757 +7770 @@
                 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
                 uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
-#ifdef IEM_VERIFICATION_MODE
-                if (cShift > 1) pIemCpu->fShiftOfHack = true;
-#endif
                 IEM_MC_ASSIGN(cShiftArg, cShift);
                 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);

@@ -8011 +8021 @@
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
     }
+#ifdef IEM_VERIFICATION_MODE
+    pIemCpu->fShiftOfHack = true;
+#endif

     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))

@@ -8068 +8081 @@
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
     }
+#ifdef IEM_VERIFICATION_MODE
+    pIemCpu->fShiftOfHack = true;
+#endif

     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))

@@ -8197 +8213 @@
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
     }
+#ifdef IEM_VERIFICATION_MODE
+    pIemCpu->fShiftOfHack = true;
+#endif

     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))

@@ -8209 +8228 @@
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
         IEM_MC_REF_EFLAGS(pEFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShiftArg > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
         IEM_MC_ADVANCE_RIP();

@@ -8230 +8246 @@
         IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
         IEM_MC_FETCH_EFLAGS(EFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShiftArg > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

@@ -8261 +8274 @@
         IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
     }
+#ifdef IEM_VERIFICATION_MODE
+    pIemCpu->fShiftOfHack = true;
+#endif

     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))

@@ -8276 +8292 @@
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
         IEM_MC_REF_EFLAGS(pEFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShiftArg > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
         IEM_MC_ADVANCE_RIP();

@@ -8292 +8305 @@
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
         IEM_MC_REF_EFLAGS(pEFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShiftArg > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
         IEM_MC_ADVANCE_RIP();

@@ -8308 +8318 @@
         IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
         IEM_MC_REF_EFLAGS(pEFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShiftArg > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
         IEM_MC_ADVANCE_RIP();

@@ -8336 +8343 @@
         IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
         IEM_MC_FETCH_EFLAGS(EFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShiftArg > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

@@ -8358 +8362 @@
         IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
         IEM_MC_FETCH_EFLAGS(EFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShiftArg > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

@@ -8380 +8381 @@
         IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
         IEM_MC_FETCH_EFLAGS(EFlags);
-#ifdef IEM_VERIFICATION_MODE
-        if (cShiftArg > 1) pIemCpu->fShiftOfHack = true;
-#endif
         IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

@@ -8457 +8455 @@
 /** Opcode 0xdb. */
 FNIEMOP_STUB(iemOp_EscF3);
+/** @todo Do FINIT next. */
+
 /** Opcode 0xdc. */
 FNIEMOP_STUB(iemOp_EscF4);

@@ -8465 +8465 @@
 /** Opcode 0xdf. */
 FNIEMOP_STUB(iemOp_EscF7);
+/** @todo Do FNSTSW next. */
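Most of this file's changes are decoder wiring: iemOp_clts and iemOp_cpuid stop being stubs, record their mnemonic, reject a LOCK prefix and defer to the new C implementations, while the shift decoders now set fShiftOfHack once, before the operand forms branch apart. A deliberately simplified, hypothetical sketch of that decode-then-defer pattern (none of these names are the real IEM macros or functions):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct SKETCHCPU
    {
        uint64_t rip;
        uint64_t cr0;
        unsigned uCpl;
    } SKETCHCPU;

    /* "C implementation" level: does the work and advances RIP itself. */
    int sketchCImplClts(SKETCHCPU *pCpu, uint8_t cbInstr)
    {
        if (pCpu->uCpl != 0)
            return -1;                      /* would raise #GP(0) */
        pCpu->cr0 &= ~UINT64_C(0x8);        /* clear CR0.TS (bit 3) */
        pCpu->rip += cbInstr;
        return 0;
    }

    /* "Decoder" level: one entry per opcode, deferring to the implementation. */
    int sketchDecodeTwoByte(SKETCHCPU *pCpu, uint8_t bOpcode)
    {
        switch (bOpcode)
        {
            case 0x06:  /* CLTS is 0F 06 in the two-byte map */
                return sketchCImplClts(pCpu, 2 /* 0F 06 */);
            default:
                return -2;                  /* not covered by this sketch */
        }
    }

    int main(void)
    {
        SKETCHCPU Cpu = { .rip = 0x1000, .cr0 = UINT64_C(0x8000003b), .uCpl = 0 };
        int rc = sketchDecodeTwoByte(&Cpu, 0x06);
        printf("rc=%d cr0=%#llx rip=%#llx\n", rc,
               (unsigned long long)Cpu.cr0, (unsigned long long)Cpu.rip);
        return 0;
    }

Splitting the decoder from the C-level implementation keeps per-opcode decoders tiny and lets one implementation serve several encodings, which appears to be the point of the IEM_MC_DEFER_TO_CIMPL_* macros used above.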