Changeset 100731 in vbox for trunk/src/VBox/VMM
Timestamp: Jul 28, 2023 10:22:22 PM
Location:  trunk/src/VBox/VMM
Files:     9 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r100694 r100731 799 799 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */ 800 800 #elif 1 801 pVCpu->iem.s.pbInstrBuf = NULL; 801 pVCpu->iem.s.pbInstrBuf = NULL; 802 pVCpu->iem.s.cbInstrBufTotal = 0; 802 803 RT_NOREF(cbInstr); 803 804 #else … … 1003 1004 if (cbDst <= cbMaxRead) 1004 1005 { 1006 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */ 1007 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf; 1008 1005 1009 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst; 1006 1010 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK; 1007 1011 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys; 1008 1012 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3; 1009 pVCpu->iem.s.fTbCrossedPage |= offPg == 0;1010 1013 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst); 1011 1014 return; … … 1092 1095 /* Update the state and probably return. */ 1093 1096 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK); 1097 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; 1098 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf; 1099 1094 1100 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr); 1095 1101 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead; 1096 1102 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr; 1097 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; 1103 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */ 1098 1104 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys; 1099 1105 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK; 1100 1106 pVCpu->iem.s.pbInstrBuf = NULL; 1101 pVCpu->iem.s.fTbCrossedPage |= offPg == 0;1102 1107 if (cbToRead == cbDst) 1103 1108 return; … … 4069 4074 4070 4075 /* Flush the prefetch buffer. */ 4071 #ifdef IEM_WITH_CODE_TLB 4072 pVCpu->iem.s.pbInstrBuf = NULL; 4073 #else 4074 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu); 4075 #endif 4076 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu)); 4076 4077 4077 4078 /* -
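
Note on the IEMAll.cpp hunks above: the opcode fetcher now flags a page transition not only when the fetch starts at page offset zero but also when it follows a branch, and it remembers the previous physical instruction buffer in GCPhysInstrBufPrev. A minimal, self-contained sketch of that bookkeeping (TINYDECODERSTATE and recordPageLoad are stand-ins invented for illustration, not the real IEMCPU layout):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the per-VCPU decoder fields touched by the hunks above. */
    typedef struct TINYDECODERSTATE
    {
        uint64_t GCPhysInstrBuf;      /* physical base of the current code page */
        uint64_t GCPhysInstrBufPrev;  /* physical base of the previous code page */
        uint8_t  fTbBranched;         /* non-zero if the last instruction branched */
        bool     fTbCrossedPage;      /* set when the fetcher had to load a new page */
    } TINYDECODERSTATE;

    /* Called when the fetcher (re)loads its instruction buffer for a page: a load
       at page offset zero is a natural page crossing, and a load right after a
       branch also counts, so the compiler later emits TLB-loading checkers. */
    static void recordPageLoad(TINYDECODERSTATE *pState, uint64_t GCPhysNewPage, uint32_t offPg)
    {
        pState->fTbCrossedPage    |= offPg == 0 || pState->fTbBranched != 0;
        pState->GCPhysInstrBufPrev = pState->GCPhysInstrBuf;
        pState->GCPhysInstrBuf     = GCPhysNewPage;
    }
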
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
r100714 r100731 792 792 IEMOP_HLP_NO_64BIT(); 793 793 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 794 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_END_TB/*?*/, 794 /** @todo eliminate END_TB here */ 795 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_END_TB, 795 796 iemCImpl_pop_Sreg, X86_SREG_CS, pVCpu->iem.s.enmEffOpSize); 796 797 } … … 5018 5019 */ 5019 5020 uint8_t const iSegReg = IEM_GET_MODRM_REG_8(bRm); 5020 if ( 5021 if (iSegReg > X86_SREG_GS) 5021 5022 IEMOP_RAISE_INVALID_OPCODE_RET(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */ 5022 5023 … … 5159 5160 */ 5160 5161 uint8_t const iSegReg = IEM_GET_MODRM_REG_8(bRm); 5162 /** @todo r=bird: What does 8086 do here wrt CS? */ 5161 5163 if ( iSegReg == X86_SREG_CS 5162 5164 || iSegReg > X86_SREG_GS) … … 5682 5684 uint16_t u16Sel; IEM_OPCODE_GET_NEXT_U16(&u16Sel); 5683 5685 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 5684 IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, 5686 IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_FAR 5687 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, 5685 5688 iemCImpl_callf, u16Sel, off32Seg, pVCpu->iem.s.enmEffOpSize); 5686 5689 } … … 7207 7210 { 7208 7211 case IEMMODE_16BIT: 7209 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_retn_iw_16, u16Imm);7212 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_iw_16, u16Imm); 7210 7213 case IEMMODE_32BIT: 7211 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_retn_iw_32, u16Imm);7214 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_iw_32, u16Imm); 7212 7215 case IEMMODE_64BIT: 7213 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_retn_iw_64, u16Imm);7216 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_iw_64, u16Imm); 7214 7217 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7215 7218 } … … 7228 7231 { 7229 7232 case IEMMODE_16BIT: 7230 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_retn_16);7233 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_16); 7231 7234 case IEMMODE_32BIT: 7232 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_retn_32);7235 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_32); 7233 7236 case IEMMODE_64BIT: 7234 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_retn_64);7237 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_retn_64); 7235 7238 IEM_NOT_REACHED_DEFAULT_CASE_RET(); 7236 7239 } … … 7516 7519 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); 7517 7520 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 7518 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, u16Imm); 7521 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE, 7522 iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, u16Imm); 7519 7523 } 7520 7524 … … 7527 7531 IEMOP_MNEMONIC(retf, "retf"); 7528 7532 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 7529 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR, iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, 0); 7533 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE, 7534 iemCImpl_retf, pVCpu->iem.s.enmEffOpSize, 0); 7530 7535 } 7531 7536 … … 7538 7543 IEMOP_MNEMONIC(int3, 
"int3"); 7539 7544 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 7540 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS, 7545 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 7546 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS, 7541 7547 iemCImpl_int, X86_XCPT_BP, IEMINT_INT3); 7542 7548 } … … 7551 7557 uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int); 7552 7558 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 7553 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS, 7559 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 7560 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS, 7554 7561 iemCImpl_int, u8Int, IEMINT_INTN); 7555 7562 } … … 7563 7570 IEMOP_MNEMONIC(into, "into"); 7564 7571 IEMOP_HLP_NO_64BIT(); 7565 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_BRANCH_COND | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS, 7572 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_CONDITIONAL 7573 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS, 7566 7574 iemCImpl_int, X86_XCPT_OF, IEMINT_INTO); 7567 7575 } … … 7575 7583 IEMOP_MNEMONIC(iret, "iret"); 7576 7584 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 7577 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, 7585 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 7586 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, 7578 7587 iemCImpl_iret, pVCpu->iem.s.enmEffOpSize); 7579 7588 } … … 11484 11493 { 11485 11494 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); 11486 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_ UNCOND, iemCImpl_call_rel_16, (int16_t)u16Imm);11495 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_RELATIVE, iemCImpl_call_rel_16, (int16_t)u16Imm); 11487 11496 } 11488 11497 … … 11490 11499 { 11491 11500 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); 11492 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_ UNCOND, iemCImpl_call_rel_32, (int32_t)u32Imm);11501 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_RELATIVE, iemCImpl_call_rel_32, (int32_t)u32Imm); 11493 11502 } 11494 11503 … … 11496 11505 { 11497 11506 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); 11498 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_ UNCOND, iemCImpl_call_rel_64, u64Imm);11507 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_RELATIVE, iemCImpl_call_rel_64, u64Imm); 11499 11508 } 11500 11509 … … 11555 11564 uint16_t u16Sel; IEM_OPCODE_GET_NEXT_U16(&u16Sel); 11556 11565 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 11557 IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, 11566 IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_FAR 11567 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, 11558 11568 iemCImpl_FarJmp, u16Sel, off32Seg, pVCpu->iem.s.enmEffOpSize); 11559 11569 } … … 11641 11651 IEMOP_HLP_MIN_386(); 11642 11652 /** @todo testcase! 
*/ 11643 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS, 11653 IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 11654 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS, 11644 11655 iemCImpl_int, X86_XCPT_DB, IEMINT_INT1); 11645 11656 } … … 12640 12651 IEM_MC_ARG(uint16_t, u16Target, 0); 12641 12652 IEM_MC_FETCH_GREG_U16(u16Target, IEM_GET_MODRM_RM(pVCpu, bRm)); 12642 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_call_16, u16Target);12653 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_16, u16Target); 12643 12654 IEM_MC_END(); 12644 12655 break; … … 12649 12660 IEM_MC_ARG(uint32_t, u32Target, 0); 12650 12661 IEM_MC_FETCH_GREG_U32(u32Target, IEM_GET_MODRM_RM(pVCpu, bRm)); 12651 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_call_32, u32Target);12662 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_32, u32Target); 12652 12663 IEM_MC_END(); 12653 12664 break; … … 12658 12669 IEM_MC_ARG(uint64_t, u64Target, 0); 12659 12670 IEM_MC_FETCH_GREG_U64(u64Target, IEM_GET_MODRM_RM(pVCpu, bRm)); 12660 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_call_64, u64Target);12671 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_64, u64Target); 12661 12672 IEM_MC_END(); 12662 12673 break; … … 12677 12688 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 12678 12689 IEM_MC_FETCH_MEM_U16(u16Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 12679 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_call_16, u16Target);12690 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_16, u16Target); 12680 12691 IEM_MC_END(); 12681 12692 break; … … 12688 12699 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 12689 12700 IEM_MC_FETCH_MEM_U32(u32Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 12690 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_call_32, u32Target);12701 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_32, u32Target); 12691 12702 IEM_MC_END(); 12692 12703 break; … … 12699 12710 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 12700 12711 IEM_MC_FETCH_MEM_U64(u64Target, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 12701 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIR , iemCImpl_call_64, u64Target);12712 IEM_MC_CALL_CIMPL_1(IEM_CIMPL_F_BRANCH_INDIRECT, iemCImpl_call_64, u64Target); 12702 12713 IEM_MC_END(); 12703 12714 break; … … 12735 12746 IEM_MC_FETCH_MEM_U16(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 12736 12747 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 2); \ 12737 IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, \ 12748 IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR \ 12749 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, \ 12738 12750 a_fnCImpl, u16Sel, offSeg, enmEffOpSize); \ 12739 12751 IEM_MC_END(); \ … … 12750 12762 IEM_MC_FETCH_MEM_U32(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 12751 12763 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 4); \ 12752 IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, \ 12764 IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR \ 12765 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, \ 12753 12766 a_fnCImpl, u16Sel, offSeg, enmEffOpSize); \ 12754 12767 IEM_MC_END(); \ … … 12766 12779 IEM_MC_FETCH_MEM_U64(offSeg, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 12767 12780 
IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc, 8); \ 12768 IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_MODE /* no gates */, \12781 IEM_MC_CALL_CIMPL_3(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE /* no gates */, \ 12769 12782 a_fnCImpl, u16Sel, offSeg, enmEffOpSize); \ 12770 12783 IEM_MC_END(); \ -
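
For orientation, the one-byte opcode map changes above replace the old IEM_CIMPL_F_BRANCH_UNCOND/COND/INDIR trio with a finer taxonomy: direct vs. indirect vs. relative, plus separate FAR and CONDITIONAL bits (see the IEMMc.h hunk near the end of this changeset). A small illustrative classifier using the new bit values (the helper and its output strings are made up for this sketch; the flag values are copied from the changeset):

    #include <stdint.h>
    #include <stdio.h>

    /* New branch-type flags, values copied from the IEMMc.h hunk in this changeset. */
    #define IEM_CIMPL_F_BRANCH_DIRECT       (UINT32_C(1) << 0)
    #define IEM_CIMPL_F_BRANCH_INDIRECT     (UINT32_C(1) << 1)
    #define IEM_CIMPL_F_BRANCH_RELATIVE     (UINT32_C(1) << 2)
    #define IEM_CIMPL_F_BRANCH_CONDITIONAL  (UINT32_C(1) << 3)
    #define IEM_CIMPL_F_BRANCH_FAR          (UINT32_C(1) << 4)

    /* Illustrative classifier matching how the instructions above are now tagged. */
    static const char *describeBranch(uint32_t fFlags)
    {
        if (fFlags & IEM_CIMPL_F_BRANCH_FAR)
            return fFlags & IEM_CIMPL_F_BRANCH_INDIRECT
                 ? "far indirect (retf, int3, iret, pop CS)"
                 : "far direct (callf/jmpf with immediate selector)";
        if (fFlags & IEM_CIMPL_F_BRANCH_RELATIVE)
            return "near relative (call rel16/rel32/rel64)";
        return "near indirect (call r/m, retn)";
    }

    int main(void)
    {
        printf("%s\n", describeBranch(IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_FAR));
        printf("%s\n", describeBranch(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR));
        printf("%s\n", describeBranch(IEM_CIMPL_F_BRANCH_RELATIVE));
        return 0;
    }
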
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
r100714 r100731 1414 1414 IEMOP_HLP_VMX_INSTR("vmlaunch", kVmxVDiag_Vmentry); 1415 1415 IEMOP_HLP_DONE_DECODING(); 1416 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, 1416 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 1417 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB, 1417 1418 iemCImpl_vmlaunch); 1418 1419 } … … 1434 1435 IEMOP_HLP_VMX_INSTR("vmresume", kVmxVDiag_Vmentry); 1435 1436 IEMOP_HLP_DONE_DECODING(); 1436 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, 1437 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 1438 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB, 1437 1439 iemCImpl_vmresume); 1438 1440 } … … 1576 1578 IEMOP_MNEMONIC(vmrun, "vmrun"); 1577 1579 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo check prefix effect on the SVM instructions. ASSUMING no lock for now. */ 1578 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT, 1580 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 1581 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB, 1579 1582 iemCImpl_vmrun); 1580 1583 } … … 1964 1967 IEMOP_MNEMONIC(syscall, "syscall"); /** @todo 286 LOADALL */ 1965 1968 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 1966 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_END_TB, 1969 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 1970 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_END_TB, 1967 1971 iemCImpl_syscall); 1968 1972 } … … 1983 1987 IEMOP_MNEMONIC(sysret, "sysret"); /** @todo 386 LOADALL */ 1984 1988 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 1985 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_END_TB, 1989 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 1990 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_END_TB, 1986 1991 iemCImpl_sysret, pVCpu->iem.s.enmEffOpSize); 1987 1992 } … … 5176 5181 IEMOP_MNEMONIC0(FIXED, SYSENTER, sysenter, DISOPTYPE_CONTROLFLOW | DISOPTYPE_UNCOND_CONTROLFLOW, 0); 5177 5182 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 5178 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB, 5183 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 5184 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB, 5179 5185 iemCImpl_sysenter); 5180 5186 } … … 5185 5191 IEMOP_MNEMONIC0(FIXED, SYSEXIT, sysexit, DISOPTYPE_CONTROLFLOW | DISOPTYPE_UNCOND_CONTROLFLOW, 0); 5186 5192 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 5187 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB, 5193 IEM_MC_DEFER_TO_CIMPL_1_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 5194 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB, 5188 5195 iemCImpl_sysexit, pVCpu->iem.s.enmEffOpSize); 5189 5196 } … … 9572 9579 IEMOP_HLP_MIN_386(); /* 386SL and later. 
*/ 9573 9580 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 9574 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_INDIR | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB, 9581 IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR 9582 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_END_TB, 9575 9583 iemCImpl_rsm); 9576 9584 } -
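
The VMX/SVM entry instructions and the fast system-call instructions above now carry IEM_CIMPL_F_END_TB explicitly, and the kdCImplFlags table in IEMAllThreadedPython.py (further down) marks several flags as TB-terminating. A hypothetical predicate capturing that table (bit positions copied from the IEMMc.h hunk; the helper itself does not exist in the sources):

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit positions copied from the IEMMc.h hunk in this changeset. */
    #define IEM_CIMPL_F_BRANCH_FAR  (UINT32_C(1) << 4)
    #define IEM_CIMPL_F_MODE        (UINT32_C(1) << 5)
    #define IEM_CIMPL_F_VMEXIT      (UINT32_C(1) << 8)
    #define IEM_CIMPL_F_REP         (UINT32_C(1) << 10)
    #define IEM_CIMPL_F_END_TB      (UINT32_C(1) << 11)

    /* Returns true when an instruction with these IEM_CIMPL_F_* flags must
       terminate the translation block, i.e. the flags kdCImplFlags marks True
       (IEM_CIMPL_F_XCPT is covered because it includes FAR, MODE and VMEXIT). */
    static bool cimplFlagsEndTb(uint32_t fCImplFlags)
    {
        return (fCImplFlags & (  IEM_CIMPL_F_BRANCH_FAR
                               | IEM_CIMPL_F_MODE
                               | IEM_CIMPL_F_VMEXIT
                               | IEM_CIMPL_F_REP
                               | IEM_CIMPL_F_END_TB)) != 0;
    }
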
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedFunctionsBltIn.cpp
r100694 r100731 126 126 */ 127 127 #define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \ 128 Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges < RT_ELEMENTS((a_pTb)->aRanges)); \128 Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \ 129 129 Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \ 130 130 /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \ … … 174 174 * 175 175 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if 176 * it is an inter-page branch .176 * it is an inter-page branch and also check the page offset. 177 177 * 178 178 * This may long jump if we're raising a \#PF, \#GP or similar trouble. 179 179 */ 180 #define BODY_LOAD_TLB_ FOR_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \180 #define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \ 181 181 /* Is RIP within the current code page? */ \ 182 182 Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \ … … 184 184 uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \ 185 185 if (off < pVCpu->iem.s.cbInstrBufTotal) \ 186 { \ 186 187 Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \ 188 Assert(pVCpu->iem.s.pbInstrBuf); \ 189 RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \ 190 | pTb->aRanges[(a_idxRange)].offPhysPage; \ 191 if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \ 192 { /* we're good */ } \ 193 else if (pTb->aRanges[(a_idxRange)].offPhysPage != off) \ 194 { \ 195 Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \ 196 (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \ 197 pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \ 198 RT_NOREF(a_cbInstr); \ 199 return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? 
*/ \ 200 } \ 201 else \ 202 { \ 203 Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \ 204 (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \ 205 pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \ 206 RT_NOREF(a_cbInstr); \ 207 return iemThreadeFuncWorkerObsoleteTb(pVCpu); \ 208 } \ 209 } \ 187 210 else \ 188 211 { \ … … 192 215 pVCpu->iem.s.offInstrNextByte = 0; \ 193 216 iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \ 217 Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \ 194 218 \ 195 RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \ 196 if (RT_LIKELY( pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \ 197 && pVCpu->iem.s.pbInstrBuf)) \ 219 RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \ 220 | pTb->aRanges[(a_idxRange)].offPhysPage; \ 221 uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \ 222 if ( GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \ 223 && pVCpu->iem.s.pbInstrBuf) \ 198 224 { /* likely */ } \ 225 else if ( pTb->aRanges[(a_idxRange)].offPhysPage != offNew \ 226 && pVCpu->iem.s.pbInstrBuf) \ 227 { \ 228 Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \ 229 (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \ 230 pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \ 231 RT_NOREF(a_cbInstr); \ 232 return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \ 233 } \ 199 234 else \ 200 235 { \ 201 Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching ; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \236 Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \ 202 237 (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \ 203 pVCpu->iem.s.GCPhysInstrBuf , GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \238 pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \ 204 239 RT_NOREF(a_cbInstr); \ 205 240 return iemThreadeFuncWorkerObsoleteTb(pVCpu); \ … … 208 243 } while(0) 209 244 245 /** 246 * Macro that implements PC check after a conditional branch. 247 */ 248 #define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \ 249 /* Is RIP within the current code page? 
*/ \ 250 Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \ 251 uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \ 252 uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \ 253 Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \ 254 RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \ 255 | pTb->aRanges[(a_idxRange)].offPhysPage; \ 256 if ( GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \ 257 && off < pVCpu->iem.s.cbInstrBufTotal) \ 258 { /* we're good */ } \ 259 else \ 260 { \ 261 Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \ 262 (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \ 263 pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \ 264 RT_NOREF(a_cbInstr); \ 265 return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \ 266 } \ 267 } while(0) 268 210 269 211 270 /** … … 255 314 } 256 315 316 317 /* 318 * Post-branching checkers. 319 */ 320 321 /** 322 * Built-in function for checking CS.LIM, checking the PC and checking opcodes 323 * after conditional branching within the same page. 324 * 325 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes 326 */ 327 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes, 328 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)) 329 { 330 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3; 331 uint32_t const cbInstr = (uint32_t)uParam0; 332 uint32_t const idxRange = (uint32_t)uParam1; 333 uint32_t const offRange = (uint32_t)uParam2; 334 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes)); 335 BODY_CHECK_CS_LIM(cbInstr); 336 BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr); 337 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr); 338 //LogFunc(("okay\n")); 339 return VINF_SUCCESS; 340 } 341 342 343 /** 344 * Built-in function for checking the PC and checking opcodes after conditional 345 * branching within the same page. 346 * 347 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes 348 */ 349 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckPcAndOpcodes, 350 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)) 351 { 352 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3; 353 uint32_t const cbInstr = (uint32_t)uParam0; 354 uint32_t const idxRange = (uint32_t)uParam1; 355 uint32_t const offRange = (uint32_t)uParam2; 356 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes)); 357 BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr); 358 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr); 359 //LogFunc(("okay\n")); 360 return VINF_SUCCESS; 361 } 362 363 364 /** 365 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when 366 * transitioning to a different code page. 367 * 368 * The code page transition can either be natural over onto the next page (with 369 * the instruction starting at page offset zero) or by means of branching. 
370 * 371 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb 372 */ 373 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb, 374 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)) 375 { 376 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3; 377 uint32_t const cbInstr = (uint32_t)uParam0; 378 uint32_t const idxRange = (uint32_t)uParam1; 379 uint32_t const offRange = (uint32_t)uParam2; 380 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes)); 381 BODY_CHECK_CS_LIM(cbInstr); 382 BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr); 383 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr); 384 //LogFunc(("okay\n")); 385 return VINF_SUCCESS; 386 } 387 388 389 /** 390 * Built-in function for loading TLB and checking opcodes when transitioning to 391 * a different code page. 392 * 393 * The code page transition can either be natural over onto the next page (with 394 * the instruction starting at page offset zero) or by means of branching. 395 * 396 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb 397 */ 398 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb, 399 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)) 400 { 401 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3; 402 uint32_t const cbInstr = (uint32_t)uParam0; 403 uint32_t const idxRange = (uint32_t)uParam1; 404 uint32_t const offRange = (uint32_t)uParam2; 405 //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes)); 406 BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr); 407 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr); 408 //LogFunc(("okay\n")); 409 return VINF_SUCCESS; 410 } 411 412 413 414 /* 415 * Natural page crossing checkers. 
416 */ 257 417 258 418 /** … … 311 471 /** 312 472 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when 313 * transitioning to a different code page.314 *315 * The code page transition can either be natural over onto the next page (with316 * the instruction starting at page offset zero) or by means of branching.317 *318 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb319 */320 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,321 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))322 {323 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;324 uint32_t const cbInstr = (uint32_t)uParam0;325 uint32_t const idxRange = (uint32_t)uParam1;326 uint32_t const offRange = (uint32_t)uParam2;327 BODY_CHECK_CS_LIM(cbInstr);328 BODY_LOAD_TLB_FOR_BRANCH(pTb, idxRange, cbInstr);329 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);330 return VINF_SUCCESS;331 }332 333 334 /**335 * Built-in function for loading TLB and checking opcodes when transitioning to336 * a different code page.337 *338 * The code page transition can either be natural over onto the next page (with339 * the instruction starting at page offset zero) or by means of branching.340 *341 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb342 */343 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,344 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))345 {346 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3;347 uint32_t const cbInstr = (uint32_t)uParam0;348 uint32_t const idxRange = (uint32_t)uParam1;349 uint32_t const offRange = (uint32_t)uParam2;350 BODY_LOAD_TLB_FOR_BRANCH(pTb, idxRange, cbInstr);351 BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);352 return VINF_SUCCESS;353 }354 355 356 /**357 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when358 473 * advancing naturally to a different code page. 359 474 * … … 402 517 } 403 518 519 520 /** 521 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when 522 * advancing naturally to a different code page with first instr at byte 0. 523 * 524 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb 525 */ 526 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb, 527 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)) 528 { 529 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3; 530 uint32_t const cbInstr = (uint32_t)uParam0; 531 uint32_t const idxRange = (uint32_t)uParam1; 532 Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2); 533 BODY_CHECK_CS_LIM(cbInstr); 534 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr); 535 Assert(pVCpu->iem.s.offCurInstrStart == 0); 536 BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr); 537 return VINF_SUCCESS; 538 } 539 540 541 /** 542 * Built-in function for loading TLB and checking opcodes when advancing 543 * naturally to a different code page with first instr at byte 0. 
544 * 545 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb 546 */ 547 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb, 548 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)) 549 { 550 PCIEMTB const pTb = pVCpu->iem.s.pCurTbR3; 551 uint32_t const cbInstr = (uint32_t)uParam0; 552 uint32_t const idxRange = (uint32_t)uParam1; 553 Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2); 554 BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr); 555 Assert(pVCpu->iem.s.offCurInstrStart == 0); 556 BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr); 557 return VINF_SUCCESS; 558 } 559 -
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedPython.py
r100701 r100731 197 197 198 198 ## IEM_CIMPL_F_XXX flags that we know. 199 ## The value indicates whether it terminates the TB or not. The goal is to 200 ## improve the recompiler so all but END_TB will be False. 199 201 kdCImplFlags = { 200 'IEM_CIMPL_F_MODE': True, 201 'IEM_CIMPL_F_BRANCH_UNCOND': False, 202 'IEM_CIMPL_F_BRANCH_COND': False, 203 'IEM_CIMPL_F_BRANCH_INDIR': True, 204 'IEM_CIMPL_F_RFLAGS': False, 205 'IEM_CIMPL_F_STATUS_FLAGS': False, 206 'IEM_CIMPL_F_VMEXIT': False, 207 'IEM_CIMPL_F_FPU': False, 208 'IEM_CIMPL_F_REP': False, 209 'IEM_CIMPL_F_END_TB': False, 210 'IEM_CIMPL_F_XCPT': True, 202 'IEM_CIMPL_F_MODE': True, 203 'IEM_CIMPL_F_BRANCH_DIRECT': False, 204 'IEM_CIMPL_F_BRANCH_INDIRECT': False, 205 'IEM_CIMPL_F_BRANCH_RELATIVE': False, 206 'IEM_CIMPL_F_BRANCH_FAR': True, 207 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False, 208 'IEM_CIMPL_F_RFLAGS': False, 209 'IEM_CIMPL_F_STATUS_FLAGS': False, 210 'IEM_CIMPL_F_VMEXIT': True, 211 'IEM_CIMPL_F_FPU': False, 212 'IEM_CIMPL_F_REP': True, 213 'IEM_CIMPL_F_END_TB': True, 214 'IEM_CIMPL_F_XCPT': True, 211 215 }; 212 216 … … 506 510 507 511 508 def analyzeCodeOperation(self, aoStmts ):512 def analyzeCodeOperation(self, aoStmts, fSeenConditional = False): 509 513 """ 510 514 Analyzes the code looking clues as to additional side-effects. … … 513 517 collecting these in self.dsCImplFlags. 514 518 """ 515 fSeenConditional = False;516 519 for oStmt in aoStmts: 517 520 # Pick up hints from CIMPL calls and deferals. … … 526 529 self.raiseProblem('Unknown CIMPL flag value: %s' % (sFlag,)); 527 530 528 # Check for conditional so we can categorize any branches correctly.529 if ( oStmt.sName.startswith('IEM_MC_IF_')530 or oStmt.sName == 'IEM_MC_ENDIF'):531 fSeenConditional = True;532 533 531 # Set IEM_IMPL_C_F_BRANCH if we see any branching MCs. 534 532 elif oStmt.sName.startswith('IEM_MC_SET_RIP'): 535 533 assert not fSeenConditional; 536 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIR '] = True;534 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True; 537 535 elif oStmt.sName.startswith('IEM_MC_REL_JMP'): 536 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True; 538 537 if fSeenConditional: 539 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_COND'] = True; 540 else: 541 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_UNCOND'] = True; 538 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True; 542 539 543 540 # Process branches of conditionals recursively. 544 541 if isinstance(oStmt, iai.McStmtCond): 545 self.analyzeCodeOperation(oStmt.aoIfBranch );542 self.analyzeCodeOperation(oStmt.aoIfBranch, True); 546 543 if oStmt.aoElseBranch: 547 self.analyzeCodeOperation(oStmt.aoElseBranch );544 self.analyzeCodeOperation(oStmt.aoElseBranch, True); 548 545 549 546 return True; … … 914 911 aoStmts.append(iai.McCppGeneric('IEM_MC2_END_EMIT_CALLS(' + sCImplFlags + ');', 915 912 cchIndent = cchIndent)); # For closing the scope. 913 914 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags 915 # indicates we should do so. 916 asEndTbFlags = []; 917 asTbBranchedFlags = []; 918 for sFlag in self.dsCImplFlags: 919 if self.kdCImplFlags[sFlag] is True: 920 asEndTbFlags.append(sFlag); 921 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'): 922 asTbBranchedFlags.append(sFlag); 923 if asTbBranchedFlags: 924 aoStmts.extend([ 925 iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);' 926 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),), 927 cchIndent = cchIndent), # Using the inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s). 
928 #iai.McCppGeneric('pVCpu->iem.s.fTbBranched = %s;' 929 # % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),), 930 # cchIndent = cchIndent), 931 #iai.McCppGeneric('pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf;', cchIndent = cchIndent), 932 #iai.McCppGeneric('pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc;', cchIndent = cchIndent), 933 ]); 934 if asEndTbFlags: 935 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),), 936 cchIndent = cchIndent)); 937 916 938 return aoStmts; 917 939 … … 1096 1118 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,)); 1097 1119 aoDecoderStmts = []; 1098 1099 # Take a very simple approach to problematic instructions for now.1100 if cDepth == 0:1101 dsCImplFlags = {};1102 for oVar in self.aoVariations:1103 dsCImplFlags.update(oVar.dsCImplFlags);1104 if ( 'IEM_CIMPL_F_BRANCH_UNCOND' in dsCImplFlags1105 or 'IEM_CIMPL_F_BRANCH_COND' in dsCImplFlags1106 or 'IEM_CIMPL_F_BRANCH_INDIR' in dsCImplFlags1107 or 'IEM_CIMPL_F_MODE' in dsCImplFlags1108 or 'IEM_CIMPL_F_REP' in dsCImplFlags):1109 aoDecoderStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true;'));1110 1120 1111 1121 for oStmt in aoStmts: … … 1282 1292 ' kIemThreadedFunc_CheckCsLim,', 1283 1293 ' kIemThreadedFunc_CheckCsLimAndOpcodes,', 1294 ' kIemThreadedFunc_CheckOpcodes,', 1295 ' kIemThreadedFunc_CheckCsLimAndPcAndOpcodes,', 1296 ' kIemThreadedFunc_CheckPcAndOpcodes,', 1284 1297 ' kIemThreadedFunc_CheckCsLimAndOpcodesAcrossPageLoadingTlb,', 1298 ' kIemThreadedFunc_CheckOpcodesAcrossPageLoadingTlb,', 1285 1299 ' kIemThreadedFunc_CheckCsLimAndOpcodesLoadingTlb,', 1300 ' kIemThreadedFunc_CheckOpcodesLoadingTlb,', 1286 1301 ' kIemThreadedFunc_CheckCsLimAndOpcodesOnNextPageLoadingTlb,', 1287 ' kIemThreadedFunc_CheckOpcodes,',1288 ' kIemThreadedFunc_CheckOpcodesAcrossPageLoadingTlb,',1289 ' kIemThreadedFunc_CheckOpcodesLoadingTlb,',1290 1302 ' kIemThreadedFunc_CheckOpcodesOnNextPageLoadingTlb,', 1303 ' kIemThreadedFunc_CheckCsLimAndOpcodesOnNewPageLoadingTlb,', 1304 ' kIemThreadedFunc_CheckOpcodesOnNewPageLoadingTlb,', 1291 1305 ]; 1292 1306 iThreadedFunction = 1; … … 1449 1463 + ' iemThreadedFunc_BltIn_CheckCsLim,\n' 1450 1464 + ' iemThreadedFunc_BltIn_CheckCsLimAndOpcodes,\n' 1465 + ' iemThreadedFunc_BltIn_CheckOpcodes,\n' 1466 + ' iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes,\n' 1467 + ' iemThreadedFunc_BltIn_CheckPcAndOpcodes,\n' 1451 1468 + ' iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb,\n' 1469 + ' iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb,\n' 1452 1470 + ' iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,\n' 1471 + ' iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,\n' 1453 1472 + ' iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb,\n' 1454 + ' iemThreadedFunc_BltIn_CheckOpcodes,\n'1455 + ' iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb,\n'1456 + ' iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,\n'1457 1473 + ' iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb,\n' 1474 + ' iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb,\n' 1475 + ' iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb,\n' 1458 1476 ); 1459 1477 iThreadedFunction = 1; … … 1490 1508 + ' "BltIn_CheckCsLim",\n' 1491 1509 + ' "BltIn_CheckCsLimAndOpcodes",\n' 1510 + ' "BltIn_CheckOpcodes",\n' 1511 + ' "BltIn_CheckCsLimAndPcAndOpcodes",\n' 1512 + ' "BltIn_CheckPcAndOpcodes",\n' 1492 1513 + ' 
"BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb",\n' 1514 + ' "BltIn_CheckOpcodesAcrossPageLoadingTlb",\n' 1493 1515 + ' "BltIn_CheckCsLimAndOpcodesLoadingTlb",\n' 1516 + ' "BltIn_CheckOpcodesLoadingTlb",\n' 1494 1517 + ' "BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb",\n' 1495 + ' "BltIn_CheckOpcodes",\n'1496 + ' "BltIn_CheckOpcodesAcrossPageLoadingTlb",\n'1497 + ' "BltIn_CheckOpcodesLoadingTlb",\n'1498 1518 + ' "BltIn_CheckOpcodesOnNextPageLoadingTlb",\n' 1519 + ' "BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb",\n' 1520 + ' "BltIn_CheckOpcodesOnNewPageLoadingTlb",\n' 1499 1521 ); 1500 1522 iThreadedFunction = 1; -
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedRecompiler.cpp
r100701 r100731 6 6 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events. [same as IEM] 7 7 * - Flow (LogFlow) : 8 * - Level 2 (Log2) : 9 * - Level 3 (Log3) : More detailed e nter/exit IEMstate info. [same as IEM]8 * - Level 2 (Log2) : Basic instruction execution state info. [same as IEM] 9 * - Level 3 (Log3) : More detailed execution state info. [same as IEM] 10 10 * - Level 4 (Log4) : Decoding mnemonics w/ EIP. [same as IEM] 11 11 * - Level 5 (Log5) : Decoding details. [same as IEM] … … 112 112 113 113 /********************************************************************************************************************************* 114 * Structures and Typedefs *115 *********************************************************************************************************************************/116 117 118 119 /*********************************************************************************************************************************120 114 * Internal Functions * 121 115 *********************************************************************************************************************************/ … … 130 124 131 125 126 /* 127 * Override IEM_MC_CALC_RM_EFF_ADDR to use iemOpHlpCalcRmEffAddrJmpEx and produce uEffAddrInfo. 128 */ 132 129 #undef IEM_MC_CALC_RM_EFF_ADDR 133 130 #ifndef IEM_WITH_SETJMP … … 141 138 #endif 142 139 140 /* 141 * Override the IEM_MC_REL_JMP_S*_AND_FINISH macros to check for zero byte jumps. 142 */ 143 #undef IEM_MC_REL_JMP_S8_AND_FINISH 144 #define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) do { \ 145 Assert(pVCpu->iem.s.fTbBranched != 0); \ 146 if ((a_i8) == 0) \ 147 pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \ 148 return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize); \ 149 } while (0) 150 151 #undef IEM_MC_REL_JMP_S16_AND_FINISH 152 #define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) do { \ 153 Assert(pVCpu->iem.s.fTbBranched != 0); \ 154 if ((a_i16) == 0) \ 155 pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \ 156 return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16)); \ 157 } while (0) 158 159 #undef IEM_MC_REL_JMP_S32_AND_FINISH 160 #define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) do { \ 161 Assert(pVCpu->iem.s.fTbBranched != 0); \ 162 if ((a_i32) == 0) \ 163 pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \ 164 return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize); \ 165 } while (0) 166 167 168 /* 169 * Emit call macros. 170 */ 143 171 #define IEM_MC2_BEGIN_EMIT_CALLS() \ 144 172 { \ … … 169 197 \ 170 198 do { } while (0) 171 172 199 #define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \ 173 200 IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \ … … 224 251 pCall->auParams[2] = a_uArg2; \ 225 252 } while (0) 226 227 253 #define IEM_MC2_END_EMIT_CALLS(a_fCImplFlags) \ 228 254 Assert(pTb->cInstructions <= pTb->Thrd.cCalls); \ … … 572 598 573 599 600 /** 601 * Helper for indicating that we've branched. 602 */ 603 DECL_FORCE_INLINE(void) iemThreadedSetBranched(PVMCPUCC pVCpu, uint8_t fTbBranched) 604 { 605 pVCpu->iem.s.fTbBranched = fTbBranched; 606 pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf; 607 pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc; 608 } 609 610 574 611 /* 575 612 * Include the "annotated" IEMAllInstructions*.cpp.h files. 
… … 748 785 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTbThreadedInstr, pTb->cInstructions); 749 786 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTbThreadedCalls, pTb->Thrd.cCalls); 750 Log12(("TB added: %p %RGp LB %#x fl=%#x idxHash=%#x\n", pTb, pTb->GCPhysPc, pTb->cbOpcodes, pTb->fFlags, idxHash)); 787 if (LogIs12Enabled()) 788 { 789 Log12(("TB added: %p %RGp LB %#x fl=%#x idxHash=%#x cRanges=%u cInstr=%u cCalls=%u\n", 790 pTb, pTb->GCPhysPc, pTb->cbOpcodes, pTb->fFlags, idxHash, pTb->cRanges, pTb->cInstructions, pTb->Thrd.cCalls)); 791 for (uint8_t idxRange = 0; idxRange < pTb->cRanges; idxRange++) 792 Log12((" range#%u: offPg=%#05x offOp=%#04x LB %#04x pg#%u=%RGp\n", idxRange, pTb->aRanges[idxRange].offPhysPage, 793 pTb->aRanges[idxRange].offOpcodes, pTb->aRanges[idxRange].cbOpcodes, pTb->aRanges[idxRange].idxPhysPage, 794 pTb->aRanges[idxRange].idxPhysPage == 0 795 ? pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK 796 : pTb->aGCPhysPages[pTb->aRanges[idxRange].idxPhysPage - 1])); 797 } 751 798 RT_NOREF(pVM); 752 799 } … … 861 908 pVCpu->iem.s.fEndTb = false; 862 909 pVCpu->iem.s.fTbCheckOpcodes = false; 863 pVCpu->iem.s.fTbBranched = false;910 pVCpu->iem.s.fTbBranched = IEMBRANCHED_F_NO; 864 911 pVCpu->iem.s.fTbCrossedPage = false; 865 912 } … … 1019 1066 * Case 1: We've branched (RIP changed). 1020 1067 * 1021 * Sub-case 1a: Same page, no TLB load , so fTbCrossedPage is false.1068 * Sub-case 1a: Same page, no TLB load (fTbCrossedPage is false). 1022 1069 * Req: 1 extra range, no extra phys. 1023 1070 * 1024 * Sub-case 1b: Different page, so TLB load necessary and fTbCrossedPage is true. 1071 * Sub-case 1b: Different page but no page boundrary crossing, so TLB load 1072 * necessary (fTbCrossedPage is true). 1025 1073 * Req: 1 extra range, probably 1 extra phys page entry. 1026 1074 * 1027 * Sub-case 1c: Different page, so TLB load necessary and fTbCrossedPage is true,1075 * Sub-case 1c: Different page, so TLB load necessary (fTbCrossedPage is true), 1028 1076 * but in addition we cross into the following page and require 1029 1077 * another TLB load. … … 1031 1079 * 1032 1080 * Sub-case 1d: Same page, so no initial TLB load necessary, but we cross into 1033 * the following page and thus fTbCrossedPage is true.1081 * the following page (thus fTbCrossedPage is true). 1034 1082 * Req: 2 extra ranges, probably 1 extra phys page entry. 1035 1083 * 1084 * Note! The setting fTbCrossedPage is done by the iemOpcodeFetchBytesJmp, but 1085 * it may trigger "spuriously" from the CPU point of view because of 1086 * physical page changes that'll invalid the physical TLB and trigger a 1087 * call to the function. In theory this be a big deal, just a bit 1088 * performance loss as we'll pick the LoadingTlb variants. 1089 * 1036 1090 * Note! We do not currently optimize branching to the next instruction (sorry 1037 * 32-bit PIC code). We could maybe do that in the branching code that sets (or not) fTbBranched. 1038 */ 1039 if (pVCpu->iem.s.fTbBranched) 1040 { 1041 AssertFailed(); /** @todo enable including branches in TBs and debug this code. */ 1091 * 32-bit PIC code). We could maybe do that in the branching code that 1092 * sets (or not) fTbBranched. 1093 */ 1094 /** @todo Optimize 'jmp .next_instr' and 'call .next_instr'. Seen the jmp 1095 * variant in win 3.1 code and the call variant in 32-bit linux PIC 1096 * code. 
This'll require filtering out far jmps and calls, as they 1097 * load CS which should technically be considered indirect since the 1098 * GDT/LDT entry's base address can be modified independently from 1099 * the code. */ 1100 if (pVCpu->iem.s.fTbBranched != 0) 1101 { 1042 1102 if ( !pVCpu->iem.s.fTbCrossedPage /* 1a */ 1043 1103 || pVCpu->iem.s.offCurInstrStart >= 0 /* 1b */ ) … … 1047 1107 Assert(pVCpu->iem.s.offCurInstrStart + cbInstr <= GUEST_PAGE_SIZE); 1048 1108 1049 /* Check that we've got a free range. */ 1050 idxRange += 1; 1051 if (idxRange < RT_ELEMENTS(pTb->aRanges)) 1052 { /* likely */ } 1053 else 1054 return false; 1055 pCall->idxRange = idxRange; 1056 pCall->auParams[1] = idxRange; 1057 pCall->auParams[2] = 0; 1058 1059 /* Check that we've got a free page slot. */ 1060 AssertCompile(RT_ELEMENTS(pTb->aGCPhysPages) == 2); 1061 RTGCPHYS const GCPhysNew = pVCpu->iem.s.GCPhysInstrBuf & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 1062 if ((pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) == GCPhysNew) 1063 pTb->aRanges[idxRange].idxPhysPage = 0; 1064 else if ( pTb->aGCPhysPages[0] == NIL_RTGCPHYS 1065 || pTb->aGCPhysPages[0] == GCPhysNew) 1109 if (!(pVCpu->iem.s.fTbBranched & IEMBRANCHED_F_ZERO)) 1066 1110 { 1067 pTb->aGCPhysPages[0] = GCPhysNew; 1068 pTb->aRanges[idxRange].idxPhysPage = 1; 1069 } 1070 else if ( pTb->aGCPhysPages[1] == NIL_RTGCPHYS 1071 || pTb->aGCPhysPages[1] == GCPhysNew) 1072 { 1073 pTb->aGCPhysPages[1] = GCPhysNew; 1074 pTb->aRanges[idxRange].idxPhysPage = 2; 1111 /* Check that we've got a free range. */ 1112 idxRange += 1; 1113 if (idxRange < RT_ELEMENTS(pTb->aRanges)) 1114 { /* likely */ } 1115 else 1116 { 1117 Log8(("%04x:%08RX64: out of ranges after branch\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 1118 return false; 1119 } 1120 pCall->idxRange = idxRange; 1121 pCall->auParams[1] = idxRange; 1122 pCall->auParams[2] = 0; 1123 1124 /* Check that we've got a free page slot. */ 1125 AssertCompile(RT_ELEMENTS(pTb->aGCPhysPages) == 2); 1126 RTGCPHYS const GCPhysNew = pVCpu->iem.s.GCPhysInstrBuf & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; 1127 if ((pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) == GCPhysNew) 1128 pTb->aRanges[idxRange].idxPhysPage = 0; 1129 else if ( pTb->aGCPhysPages[0] == NIL_RTGCPHYS 1130 || pTb->aGCPhysPages[0] == GCPhysNew) 1131 { 1132 pTb->aGCPhysPages[0] = GCPhysNew; 1133 pTb->aRanges[idxRange].idxPhysPage = 1; 1134 } 1135 else if ( pTb->aGCPhysPages[1] == NIL_RTGCPHYS 1136 || pTb->aGCPhysPages[1] == GCPhysNew) 1137 { 1138 pTb->aGCPhysPages[1] = GCPhysNew; 1139 pTb->aRanges[idxRange].idxPhysPage = 2; 1140 } 1141 else 1142 { 1143 Log8(("%04x:%08RX64: out of aGCPhysPages entires after branch\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 1144 return false; 1145 } 1146 1147 /* Finish setting up the new range. */ 1148 pTb->aRanges[idxRange].offPhysPage = pVCpu->iem.s.offCurInstrStart; 1149 pTb->aRanges[idxRange].offOpcodes = offOpcode; 1150 pTb->aRanges[idxRange].cbOpcodes = cbInstr; 1151 pTb->aRanges[idxRange].u2Unused = 0; 1152 pTb->cRanges++; 1075 1153 } 1076 1154 else 1077 return false; 1078 1079 /* Finish setting up the new range. 
*/ 1080 pTb->aRanges[idxRange].offPhysPage = pVCpu->iem.s.offCurInstrStart; 1081 pTb->aRanges[idxRange].offOpcodes = offOpcode; 1082 pTb->aRanges[idxRange].cbOpcodes = cbInstr; 1083 pTb->aRanges[idxRange].u2Unused = 0; 1084 pTb->cRanges++; 1155 { 1156 Log8(("%04x:%08RX64: zero byte jump\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 1157 pTb->aRanges[idxRange].cbOpcodes += cbInstr; 1158 } 1085 1159 1086 1160 /* Determin which function we need to load & check. … … 1088 1162 fTbCrossedPage to avoid unnecessary TLB work for intra 1089 1163 page branching */ 1090 if (pVCpu->iem.s.fTbCrossedPage) 1164 if ( (pVCpu->iem.s.fTbBranched & (IEMBRANCHED_F_INDIRECT | IEMBRANCHED_F_FAR)) /* Far is basically indirect. */ 1165 || pVCpu->iem.s.fTbCrossedPage) 1091 1166 pCall->enmFunction = pTb->fFlags & IEMTB_F_CS_LIM_CHECKS 1092 1167 ? kIemThreadedFunc_CheckCsLimAndOpcodesLoadingTlb 1093 1168 : kIemThreadedFunc_CheckOpcodesLoadingTlb; 1169 else if (pVCpu->iem.s.fTbBranched & (IEMBRANCHED_F_CONDITIONAL | /* paranoia: */ IEMBRANCHED_F_DIRECT)) 1170 pCall->enmFunction = pTb->fFlags & IEMTB_F_CS_LIM_CHECKS 1171 ? kIemThreadedFunc_CheckCsLimAndPcAndOpcodes 1172 : kIemThreadedFunc_CheckPcAndOpcodes; 1094 1173 else 1174 { 1175 Assert(pVCpu->iem.s.fTbBranched & IEMBRANCHED_F_RELATIVE); 1095 1176 pCall->enmFunction = pTb->fFlags & IEMTB_F_CS_LIM_CHECKS 1096 1177 ? kIemThreadedFunc_CheckCsLimAndOpcodes 1097 1178 : kIemThreadedFunc_CheckOpcodes; 1179 } 1098 1180 } 1099 1181 else … … 1126 1208 1127 1209 #else 1210 Log8(("%04x:%08RX64: complicated post-branch condition, ending TB.\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 1128 1211 return false; 1129 1212 #endif … … 1150 1233 { /* likely */ } 1151 1234 else 1235 { 1236 Log8(("%04x:%08RX64: out of ranges while crossing page\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 1152 1237 return false; 1238 } 1153 1239 1154 1240 /* Check that we've got a free page slot. */ … … 1170 1256 } 1171 1257 else 1258 { 1259 Log8(("%04x:%08RX64: out of aGCPhysPages entires while crossing page\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 1172 1260 return false; 1261 } 1173 1262 1174 1263 if (((pTb->aRanges[idxRange - 1].offPhysPage + pTb->aRanges[idxRange - 1].cbOpcodes) & GUEST_PAGE_OFFSET_MASK) == 0) … … 1188 1277 /* Determin which function we need to load & check. */ 1189 1278 pCall->enmFunction = pTb->fFlags & IEMTB_F_CS_LIM_CHECKS 1190 ? kIemThreadedFunc_CheckCsLimAndOpcodes LoadingTlb1191 : kIemThreadedFunc_CheckOpcodes LoadingTlb;1279 ? kIemThreadedFunc_CheckCsLimAndOpcodesOnNewPageLoadingTlb 1280 : kIemThreadedFunc_CheckOpcodesOnNewPageLoadingTlb; 1192 1281 } 1193 1282 else … … 1246 1335 * Clear state. 1247 1336 */ 1248 pVCpu->iem.s.fTbBranched = false;1337 pVCpu->iem.s.fTbBranched = IEMBRANCHED_F_NO; 1249 1338 pVCpu->iem.s.fTbCrossedPage = false; 1250 1339 pVCpu->iem.s.fTbCheckOpcodes = false; … … 1376 1465 { 1377 1466 /* Check the opcodes in the first page before starting execution. 
*/ 1378 uint32_t const cbLeadOpcodes = RT_MIN(pTb->cbOpcodes, pVCpu->iem.s.cbInstrBufTotal - pVCpu->iem.s.offInstrNextByte);1379 if (memcmp(pTb->pabOpcodes, &pVCpu->iem.s.pbInstrBuf[pVCpu->iem.s.offInstrNextByte], cbLeadOpcodes) == 0)1380 Assert( pTb->cbOpcodes == cbLeadOpcodes1381 || cbLeadOpcodes == (GUEST_PAGE_SIZE - (pTb->GCPhysPc & GUEST_PAGE_OFFSET_MASK)));1467 Assert(!(pVCpu->iem.s.GCPhysInstrBuf & (RTGCPHYS)GUEST_PAGE_OFFSET_MASK)); 1468 Assert(pTb->aRanges[0].cbOpcodes <= pVCpu->iem.s.cbInstrBufTotal - pVCpu->iem.s.offInstrNextByte); 1469 if (memcmp(pTb->pabOpcodes, &pVCpu->iem.s.pbInstrBuf[pTb->aRanges[0].offPhysPage], pTb->aRanges[0].cbOpcodes) == 0) 1470 { /* likely */ } 1382 1471 else 1383 1472 { -
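
Within the recompiler changes above, the post-branch checker is now picked from the branch type rather than only from fTbCrossedPage: indirect and far branches (and any page crossing) need a TLB-loading checker, direct and conditional branches only need a PC check, and plain relative branches keep the cheap opcode check. A reduced sketch of that selection (the enum and helper are invented for illustration; the CS.LIM variants are folded away):

    #include <stdbool.h>
    #include <stdint.h>

    /* IEMBRANCHED_F_* values copied from the IEMInternal.h hunk below. */
    #define IEMBRANCHED_F_DIRECT       UINT8_C(0x01)
    #define IEMBRANCHED_F_INDIRECT     UINT8_C(0x02)
    #define IEMBRANCHED_F_RELATIVE     UINT8_C(0x04)
    #define IEMBRANCHED_F_CONDITIONAL  UINT8_C(0x08)
    #define IEMBRANCHED_F_FAR          UINT8_C(0x10)

    typedef enum TINYCHECKER
    {
        kCheckOpcodes,           /* plain relative branch within the same page      */
        kCheckPcAndOpcodes,      /* direct/conditional branch: verify the PC too    */
        kCheckOpcodesLoadingTlb  /* indirect/far branch or page crossed: reload TLB */
    } TINYCHECKER;

    /* Far branches are treated like indirect ones because a CS reload can move
       the code segment base independently of the target offset. */
    static TINYCHECKER pickPostBranchChecker(uint8_t fTbBranched, bool fTbCrossedPage)
    {
        if ((fTbBranched & (IEMBRANCHED_F_INDIRECT | IEMBRANCHED_F_FAR)) || fTbCrossedPage)
            return kCheckOpcodesLoadingTlb;
        if (fTbBranched & (IEMBRANCHED_F_CONDITIONAL | IEMBRANCHED_F_DIRECT))
            return kCheckPcAndOpcodes;
        return kCheckOpcodes;
    }
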
trunk/src/VBox/VMM/include/IEMInternal.h
r100695 r100731 81 81 * Linux, but it should be quite a bit faster for normal code. 82 82 */ 83 #if (defined( IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \83 #if (defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \ 84 84 || defined(DOXYGEN_RUNNING) 85 85 # define IEM_WITH_THROW_CATCH … … 843 843 typedef IEMTB const *PCIEMTB; 844 844 845 /** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched). 846 * 847 * These flags parallels IEM_CIMPL_F_BRANCH_XXX. 848 * 849 * @{ */ 850 /** Value if no branching happened recently. */ 851 #define IEMBRANCHED_F_NO UINT8_C(0x00) 852 /** Flag set if direct branch, clear if absolute or indirect. */ 853 #define IEMBRANCHED_F_DIRECT UINT8_C(0x01) 854 /** Flag set if indirect branch, clear if direct or relative. */ 855 #define IEMBRANCHED_F_INDIRECT UINT8_C(0x02) 856 /** Flag set if relative branch, clear if absolute or indirect. */ 857 #define IEMBRANCHED_F_RELATIVE UINT8_C(0x04) 858 /** Flag set if conditional branch, clear if unconditional. */ 859 #define IEMBRANCHED_F_CONDITIONAL UINT8_C(0x08) 860 /** Flag set if it's a far branch. */ 861 #define IEMBRANCHED_F_FAR UINT8_C(0x10) 862 /** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero bytes relative jump. */ 863 #define IEMBRANCHED_F_ZERO UINT8_C(0x20) 864 /** @} */ 865 845 866 846 867 /** … … 1139 1160 * This is set by a previous instruction if it modified memory or similar. */ 1140 1161 bool fTbCheckOpcodes; 1141 /** Whether we just branched and need to start a new opcode range and emit code 1142 * to do a TLB load and check them again. */ 1143 bool fTbBranched; 1162 /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */ 1163 uint8_t fTbBranched; 1144 1164 /** Set when GCPhysInstrBuf is updated because of a page crossing. */ 1145 1165 bool fTbCrossedPage; … … 1148 1168 /** Spaced reserved for recompiler data / alignment. */ 1149 1169 bool afRecompilerStuff1[4]; 1170 /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */ 1171 RTGCPHYS GCPhysInstrBufPrev; 1172 /** Copy of IEMCPU::GCPhysInstrBuf after decoding a branch instruction. 1173 * This is used together with fTbBranched and GCVirtTbBranchSrcBuf to determin 1174 * whether a branch instruction jumps to a new page or stays within the 1175 * current one. */ 1176 RTGCPHYS GCPhysTbBranchSrcBuf; 1177 /** Copy of IEMCPU::uInstrBufPc after decoding a branch instruction. */ 1178 uint64_t GCVirtTbBranchSrcBuf; 1179 /* Alignment. */ 1180 uint64_t au64RecompilerStuff2[5]; 1150 1181 /** Threaded TB statistics: Number of instructions per TB. */ 1151 1182 STAMPROFILE StatTbThreadedInstr; … … 4386 4417 4387 4418 /** 4388 * Macro for calling iemCImplRaiseInvalidOpcode() .4389 * 4390 * This enables us to add/remove arguments and force different levels of4391 * inlining as we wish.4419 * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs. 4420 * 4421 * This is for things that will _always_ decode to an \#UD, taking the 4422 * recompiler into consideration and everything. 4392 4423 * 4393 4424 * @return Strict VBox status code. 4394 4425 */ 4395 4426 #define IEMOP_RAISE_INVALID_OPCODE_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode) 4427 4428 /** 4429 * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs. 4430 * 4431 * Using this macro means you've got _buggy_ _code_ and are doing things that 4432 * belongs exclusively in IEMAllCImpl.cpp during decoding. 
4433 * 4434 * @return Strict VBox status code. 4435 * @see IEMOP_RAISE_INVALID_OPCODE_RET 4436 */ 4437 #define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode) 4438 4396 4439 /** @} */ 4397 4440 … … 4899 4942 4900 4943 void iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb); 4944 4901 4945 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckMode, 4902 4946 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4903 4947 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLim, 4904 4948 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4949 4905 4950 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodes, 4906 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));4907 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb,4908 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));4909 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,4910 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));4911 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb,4912 4951 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4913 4952 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodes, 4914 4953 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4915 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb, 4954 4955 /* Branching: */ 4956 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes, 4957 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4958 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckPcAndOpcodes, 4959 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4960 4961 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb, 4916 4962 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4917 4963 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb, 4918 4964 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4965 4966 /* Natural page crossing: */ 4967 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb, 4968 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4969 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb, 4970 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4971 4972 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb, 4973 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4919 4974 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb, 4920 4975 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4921 4976 4977 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb, 4978 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4979 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb, 4980 (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)); 4981 4982 4922 4983 4923 4984 extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256]; -
-
trunk/src/VBox/VMM/include/IEMMc.h
r100701 r100731
1240 1240 *
1241 1241 * @{ */
1242 /** Flag set if direct branch, clear if absolute or indirect. */
1243 #define IEM_CIMPL_F_BRANCH_DIRECT RT_BIT_32(0)
1244 /** Flag set if indirect branch, clear if direct or relative.
1245 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
1246 * as well as for return instructions (RET, IRET, RETF). */
1247 #define IEM_CIMPL_F_BRANCH_INDIRECT RT_BIT_32(1)
1248 /** Flag set if relative branch, clear if absolute or indirect. */
1249 #define IEM_CIMPL_F_BRANCH_RELATIVE RT_BIT_32(2)
1250 /** Flag set if conditional branch, clear if unconditional. */
1251 #define IEM_CIMPL_F_BRANCH_CONDITIONAL RT_BIT_32(3)
1252 /** Flag set if it's a far branch (changes CS). */
1253 #define IEM_CIMPL_F_BRANCH_FAR RT_BIT_32(4)
1254 /** Convenience: Testing any kind of branch. */
1255 #define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)
1256
1242 1257 /** Execution flags may change (IEMCPU::fExec). */
1243 #define IEM_CIMPL_F_MODE RT_BIT_32(0)
1244 /** Unconditional direct branches (changes RIP, maybe CS). */
1245 #define IEM_CIMPL_F_BRANCH_UNCOND RT_BIT_32(1)
1246 /** Conditional direct branch (may change RIP, maybe CS). */
1247 #define IEM_CIMPL_F_BRANCH_COND RT_BIT_32(2)
1248 /** Indirect unconditional branch (changes RIP, maybe CS).
1249 *
1250 * This is used for all system control transfers (SYSCALL, SYSRET, INT, ++) as
1251 * well as for return instructions (RET, IRET, RETF).
1252 *
1253 * Since the INTO instruction is currently the only indirect branch instruction
1254 * that is conditional (depends on the overflow flag), that instruction will
1255 * have both IEM_CIMPL_F_BRANCH_INDIR and IEM_CIMPL_F_BRANCH_COND set. All
1256 * other branch instructions will have exactly one of the branch flags set. */
1257 #define IEM_CIMPL_F_BRANCH_INDIR RT_BIT_32(3)
1258 #define IEM_CIMPL_F_MODE RT_BIT_32(5)
1258 1259 /** May change significant portions of RFLAGS. */
1259 #define IEM_CIMPL_F_RFLAGS RT_BIT_32(4)
1260 #define IEM_CIMPL_F_RFLAGS RT_BIT_32(6)
1260 1261 /** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
1261 #define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(5)
1262 #define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(7)
1262 1263 /** May trigger a VM exit. */
1263 #define IEM_CIMPL_F_VMEXIT RT_BIT_32(6)
1264 #define IEM_CIMPL_F_VMEXIT RT_BIT_32(8)
1264 1265 /** May modify FPU state. */
1265 #define IEM_CIMPL_F_FPU RT_BIT_32(7)
1266 #define IEM_CIMPL_F_FPU RT_BIT_32(9)
1266 1267 /** REP prefixed instruction which may yield before updating PC. */
1267 #define IEM_CIMPL_F_REP RT_BIT_32(8)
1268 #define IEM_CIMPL_F_REP RT_BIT_32(10)
1268 1269 /** Force end of TB after the instruction. */
1269 #define IEM_CIMPL_F_END_TB RT_BIT_32(9)
1270 #define IEM_CIMPL_F_END_TB RT_BIT_32(11)
1270 1271 /** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
1271 #define IEM_CIMPL_F_XCPT (IEM_CIMPL_F_MODE | IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
1272 /** Convenience: Testing any kind of branch. */
1273 #define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_UNCOND | IEM_CIMPL_F_BRANCH_COND | IEM_CIMPL_F_BRANCH_INDIR)
1272 #define IEM_CIMPL_F_XCPT \
1273 (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
1274 1274 /** @} */
1275 1275
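The renumbering above moves the branch-related bits into the low five positions, which makes them line up one-for-one with the IEMBRANCHED_F_XXX values added to IEMInternal.h. Whether the recompiler actually copies the bits straight across is not shown in this changeset; the following stand-alone sketch (constants copied from the two diffs, helper name invented) merely documents the parallel layout with compile-time checks:

    // Compile-time illustration that the renumbered IEM_CIMPL_F_BRANCH_XXX bits
    // parallel the IEMBRANCHED_F_XXX values, so the low five bits of the CIMPL
    // flags could in principle be forwarded into IEMCPU::fTbBranched unchanged.
    #include <cstdint>

    constexpr uint32_t kCimplBranchDirect      = UINT32_C(1) << 0;   // RT_BIT_32(0)
    constexpr uint32_t kCimplBranchIndirect    = UINT32_C(1) << 1;   // RT_BIT_32(1)
    constexpr uint32_t kCimplBranchRelative    = UINT32_C(1) << 2;   // RT_BIT_32(2)
    constexpr uint32_t kCimplBranchConditional = UINT32_C(1) << 3;   // RT_BIT_32(3)
    constexpr uint32_t kCimplBranchFar         = UINT32_C(1) << 4;   // RT_BIT_32(4)

    constexpr uint8_t  kBranchedDirect      = 0x01;
    constexpr uint8_t  kBranchedIndirect    = 0x02;
    constexpr uint8_t  kBranchedRelative    = 0x04;
    constexpr uint8_t  kBranchedConditional = 0x08;
    constexpr uint8_t  kBranchedFar         = 0x10;

    static_assert(kCimplBranchDirect      == kBranchedDirect,      "parallel layout");
    static_assert(kCimplBranchIndirect    == kBranchedIndirect,    "parallel layout");
    static_assert(kCimplBranchRelative    == kBranchedRelative,    "parallel layout");
    static_assert(kCimplBranchConditional == kBranchedConditional, "parallel layout");
    static_assert(kCimplBranchFar         == kBranchedFar,         "parallel layout");

    // Hypothetical conversion helper (not in the VBox sources).
    constexpr uint8_t BranchedFromCimpl(uint32_t fCimpl)
    {
        return static_cast<uint8_t>(fCimpl & (  kCimplBranchDirect | kCimplBranchIndirect
                                              | kCimplBranchRelative | kCimplBranchConditional
                                              | kCimplBranchFar));
    }

    // A far indirect branch, e.g. the retf encoding changed in this commit.
    static_assert(BranchedFromCimpl(kCimplBranchIndirect | kCimplBranchFar)
                  == (kBranchedIndirect | kBranchedFar), "retf maps to far+indirect");

    int main() { return 0; }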
-
trunk/src/VBox/VMM/include/IEMOpHlp.h
r100714 r100731
296 296 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
297 297 /** This instruction raises an \#UD in real and V8086 mode or when not using a
298 * 64-bit code segment when in long mode (applicable to all VMX instructions
299 * except VMCALL).
300 */
301 #define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
298 * 64-bit code segment when in long mode (applicable to all VMX instructions
299 * except VMCALL).
300 *
301 * @todo r=bird: This is not recompiler friendly. The scenario with
302 * 16-bit/32-bit code running in long mode doesn't fit at all.
303 */
304 # define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
302 305 do \
303 306 { \
… …
318 321 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
319 322 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
320 IEMOP_RAISE_INVALID_OPCODE_RET(); \
323 IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET(); /** @todo This doesn't work. */ \
321 324 } \
322 325 } \
… …
340 343 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
341 344 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
342 IEMOP_RAISE_INVALID_OPCODE_RET(); \
345 IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET(); /** @todo This doesn't work. */ \
343 346 } \
344 347 } while (0)
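The @todo notes flag the underlying problem: checks like "not in VMX root operation" depend on guest state that can change after the instruction has been decoded, so a decision frozen into a cached translation can go stale. A small self-contained C++ model of that hazard (all names invented here, none of this is IEM code) looks like this:

    // Why state-dependent #UD checks are awkward for a code cache: the verdict
    // reached at decode time can be invalidated by a later guest state change,
    // so it must either be re-evaluated at execution time or the cached
    // translation must be thrown away.  Illustrative model only.
    #include <cstdio>

    struct GuestState
    {
        bool fInVmxRootMode;    // mutable at runtime (VMXON/VMXOFF)
    };

    struct CachedDecode
    {
        bool fRaisesUd;         // decision frozen when the instruction was decoded
    };

    // Decode-time decision: fine for static #UDs, wrong for state-dependent ones.
    static CachedDecode DecodeVmxInstr(const GuestState &State)
    {
        return CachedDecode{ !State.fInVmxRootMode };
    }

    // Execution-time decision: what a runtime-style check has to do instead.
    static bool ExecRaisesUd(const GuestState &State)
    {
        return !State.fInVmxRootMode;
    }

    int main()
    {
        GuestState State{ false };
        CachedDecode Cached = DecodeVmxInstr(State);   // decoded before VMXON
        State.fInVmxRootMode = true;                   // guest then executes VMXON
        std::printf("cached #UD=%d, live #UD=%d\n",    // 1 vs 0: the cached verdict is stale
                    Cached.fRaisesUd, ExecRaisesUd(State));
        return 0;
    }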