- Timestamp: Jun 23, 2023 2:15:10 PM (18 months ago)
- Location: trunk/src/VBox/VMM
- Files: 7 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r100222 → r100266)

    The per-instruction decoder reset now also clears the new opcode-buffer cursor when the code TLB and the opcode buffer are used together, and the matching #else/#endif get explanatory comments (around lines 288-294 and 401-414):

         pVCpu->iem.s.offInstrNextByte = 0;
         pVCpu->iem.s.offCurInstrStart = 0;
    +# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    +    pVCpu->iem.s.offOpcode        = 0;
    +# endif
     # ifdef VBOX_STRICT
         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
        ...
     # endif
         }
    +# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    +    pVCpu->iem.s.offOpcode = 0;
    +# endif
    -#else
    +#else  /* !IEM_WITH_CODE_TLB */
         pVCpu->iem.s.cbOpcode  = 0;
         pVCpu->iem.s.offOpcode = 0;
    -#endif
    +#endif /* !IEM_WITH_CODE_TLB */
         pVCpu->iem.s.offModRm  = 0;
         Assert(pVCpu->iem.s.cActiveMappings == 0);

    The bare #else around line 1093 likewise gains a /* !IEM_WITH_CODE_TLB */ comment.

    The entire iemOpHlpCalcRmEffAddrJmpEx() helper, together with its #ifdef IEM_WITH_SETJMP wrapper (roughly old lines 9744-10062), is removed from this file. This is the extended ModR/M effective-address calculator for the recompilers (IEM_MC_CALC_RM_EFF_ADDR), which also returns the 32-bit displacement and SIB byte through *puInfo and may longjmp on internal error; it is moved essentially unchanged to IEMAllThreadedRecompiler.cpp (see below).
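As background for the offOpcode resets above: with IEM_WITH_CODE_TLB_AND_OPCODE_BUF the decoder keeps fetching through the code TLB but additionally mirrors the fetched bytes into abOpcode, so the write cursor has to be cleared per instruction just as in the non-TLB build. A minimal sketch of that reset pattern, using made-up field and function names rather than the real IEM declarations:

```c
#include <stdint.h>

#define IEM_WITH_CODE_TLB                  /* fetch through the code TLB */
#define IEM_WITH_CODE_TLB_AND_OPCODE_BUF   /* ...but also capture the bytes */

typedef struct DECODERSTATE
{
    uint32_t offInstrNextByte;  /* read cursor into the mapped code bytes */
#ifndef IEM_WITH_CODE_TLB
    uint8_t  cbOpcode;          /* bytes prefetched in the non-TLB build */
#endif
#if !defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_CODE_TLB_AND_OPCODE_BUF)
    uint8_t  offOpcode;         /* write cursor into abOpcode */
#endif
    uint8_t  abOpcode[15];      /* captured bytes of the current instruction */
} DECODERSTATE;

/* Per-instruction reset, mirroring the hunks above: the opcode write cursor
   is cleared in the non-TLB build and, with the new define, in the TLB build
   as well; cbOpcode only exists without the code TLB. */
static void decoderResetForNextInstruction(DECODERSTATE *pState)
{
#ifdef IEM_WITH_CODE_TLB
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    pState->offOpcode = 0;
# endif
#else
    pState->cbOpcode  = 0;
    pState->offOpcode = 0;
#endif
}
```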
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h (r100072 → r100266)

    iemOp_Grp9 now fetches the ModR/M byte with the plain byte fetcher, since the dedicated RM fetcher is retired (see IEMInline.h and tstIEMCheckMc.cpp below):

     FNIEMOP_DEF(iemOp_Grp9)
     {
    -    uint8_t bRm; IEM_OPCODE_GET_NEXT_RM(&bRm);
    +    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
         if (IEM_IS_MODRM_REG_MODE(bRm))
             /* register, register */
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedPython.py (r100231 → r100266)

    The threaded-call emitter now prepends an IEM_MC2_PRE_EMIT_CALLS() statement (a hook for various stuff, currently an opcode-length assertion, see the recompiler below) before the generated call:

         sCode += ', ' + ' | '.join(asFrags);
         sCode += ');';
    -    aoStmts = [ iai.McCppGeneric(sCode, cchIndent = cchIndent), ];
    +    aoStmts = [
    +        iai.McCppGeneric('IEM_MC2_PRE_EMIT_CALLS();', cchIndent = cchIndent), # Serves as a hook for various stuff.
    +        iai.McCppGeneric(sCode, cchIndent = cchIndent),
    +    ];

         # For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedRecompiler.cpp (r100231 → r100266)

    The file now defines IEM_WITH_CODE_TLB_AND_OPCODE_BUF before including the IEM headers ("A bit hackish, but its all in IEMInline.h."), so the inline opcode fetchers compiled into this translation unit also capture the bytes they fetch.

    In the IEMTB structure the speculative uCsBase/uCsLimit/CS fields (together with the long todo comment arguing they are unnecessary) and the cbPC member are dropped; only x86.fAttr remains of the x86 block, and three new members describe the opcode copy:

        /** Number of bytes of opcodes stored in pabOpcodes. */
        uint16_t    cbOpcodes;
        /** The max storage available in the pabOpcodes block. */
        uint16_t    cbOpcodesAllocated;
        /** Pointer to the opcode bytes this block was recompiled from. */
        uint8_t    *pabOpcodes;

    A new IEM_MC2_PRE_EMIT_CALLS() macro (emitted by the Python generator, see above) asserts that the decoder's opcode-buffer cursor matches the instruction length before the threaded calls are emitted:

        #define IEM_MC2_PRE_EMIT_CALLS() do { \
                AssertMsg(pVCpu->iem.s.offOpcode == IEM_GET_INSTR_LEN(pVCpu), \
                          ("%u vs %u (%04x:%08RX64)\n", pVCpu->iem.s.offOpcode, IEM_GET_INSTR_LEN(pVCpu), \
                           pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); \
            } while (0)

    The iemOpHlpCalcRmEffAddrJmpEx() helper removed from IEMAll.cpp is added here with an unchanged body and signature,

        RTGCPTR iemOpHlpCalcRmEffAddrJmpEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, uint64_t *puInfo) IEM_NOEXCEPT_MAY_LONGJMP

    plus a new @note saying it must be defined in a source file with a matching IEM_WITH_CODE_TLB_AND_OPCODE_BUF define until that define is made the default or the capture is implemented differently.

    iemThreadedTbAlloc() now also allocates the opcode-byte storage (16 bytes per call entry, 128 entries, "This will be reallocated later.") and initialises cbOpcodes/cbOpcodesAllocated/pabOpcodes, freeing paCalls again if the second allocation fails; the dropped uCsBase/uCsLimit/CS/cbPC initialisation goes away.

    iemThreadedTbFree() unlinks the block from its hash-table bucket (walking the singly linked pNext list) before freeing it, frees pabOpcodes alongside paCalls, and the state-flag check is simplified to AssertCompile(IEMTB_F_STATE_OBSOLETE == IEMTB_F_STATE_MASK).

    iemThreadedTbLookup() fixes a misplaced #endif so that "return pTb" on a hit is no longer guarded by VBOX_WITH_STATISTICS, and the "TB added" log line now reports cbOpcodes instead of the removed cbPC.

    The compile-time decoder init/re-init resets offOpcode under the new define, and the compile loop copies each instruction's bytes from pVCpu->iem.s.abOpcode into pTb->pabOpcodes (both after a successfully emitted call and for the final instruction that ends the block), asserting that cbOpcodes stays within cbOpcodesAllocated.

    iemThreadedTbExec() gains an obsolescence check: before executing, the stored opcode bytes are memcmp'd against the bytes currently mapped at the guest PC, and on mismatch the TB is logged as obsolete, freed via iemThreadedTbFree(), and VINF_SUCCESS is returned so the caller recompiles.
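The pabOpcodes copy is what makes that last check possible: the block remembers exactly which bytes it was recompiled from, so overwritten or self-modifying code can be detected with a plain byte compare before execution. A stand-alone sketch of the idea, assuming a simplified TB layout (the names below are illustrative, not the real IEMTB/IEMCPU fields):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct MINITB
{
    uint8_t  *pabOpcodes;   /* bytes the block was recompiled from */
    uint16_t  cbOpcodes;    /* number of valid bytes in pabOpcodes */
} MINITB;

/* Returns true if the translation block still matches guest memory and may be
   executed; false if the code it was built from has changed and the block
   should be freed and recompiled.  pbInstrBuf/cbInstrBuf describe the
   currently mapped code page and offPc the instruction offset within it. */
static bool tbStillMatchesGuestCode(const MINITB *pTb, const uint8_t *pbInstrBuf,
                                    size_t cbInstrBuf, size_t offPc)
{
    if (offPc >= cbInstrBuf)
        return false;
    size_t const cbAvail   = cbInstrBuf - offPc;
    size_t const cbCompare = pTb->cbOpcodes < cbAvail ? pTb->cbOpcodes : cbAvail;
    /* Like the changeset, only the bytes that fit in the current mapping are
       compared; a block spilling into the next page would need a second check. */
    return memcmp(pTb->pabOpcodes, &pbInstrBuf[offPc], cbCompare) == 0;
}
```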
trunk/src/VBox/VMM/include/IEMInline.h (r100096 → r100266)

    The bogus-value initialisation of the decoder state (offCurInstrStart = INT16_MAX, uInstrBufPc = 0xc0ffc0ffcff0c0ff, ...) now also sets offOpcode = 127 in the code-TLB build when IEM_WITH_CODE_TLB_AND_OPCODE_BUF is defined.

    The setjmp opcode fetchers (both byte fetchers - the one that reads the first byte of an instruction asserts offOpcode == 0, the regular next-byte fetcher asserts the buffer is not yet full - plus the U16, U32 and U64 variants) are restructured for the code-TLB build: instead of returning straight from the fast path, they store the fetched value in a local (bRet/u16Ret/u32Ret/u64Ret), fall back to the corresponding ...SlowJmp helper in an else branch, and then, under IEM_WITH_CODE_TLB_AND_OPCODE_BUF, append the bytes to pVCpu->iem.s.abOpcode and advance offOpcode by 1/2/4/8 (via an unaligned store or RT_LO_U8/RT_HI_U8 and RT_BYTE1..RT_BYTE8, depending on IEM_USE_UNALIGNED_DATA_ACCESS) before returning. The non-TLB paths are unchanged apart from the slow-path call moving inside the #else block. The byte fetcher, for example, becomes:

        # ifdef IEM_WITH_CODE_TLB
            uint8_t         bRet;
            uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
            uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
            if (RT_LIKELY(   pbBuf != NULL
                          && offBuf < pVCpu->iem.s.cbInstrBuf))
            {
                pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
                bRet = pbBuf[offBuf];
            }
            else
                bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
        #  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
            Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
            pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
        #  endif
            return bRet;

    The iemOpcodeGetNextRm() and iemOpcodeGetNextRmJmp() helpers and the IEM_OPCODE_GET_NEXT_RM macro are removed entirely; their only remaining user was switched to IEM_OPCODE_GET_NEXT_U8 in IEMAllInstructionsTwoByte0f.cpp.h above.
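All of these fetcher changes follow one shape: fast path into a local, slow path otherwise, then mirror the bytes into the side buffer. A condensed, self-contained sketch of that shape for a byte fetch, with the surrounding types simplified and the slow path stubbed out (names are illustrative, not the real IEM ones):

```c
#include <assert.h>
#include <stdint.h>

typedef struct FETCHSTATE
{
    const uint8_t *pbInstrBuf;       /* currently mapped code bytes (TLB fast path) */
    uint32_t       cbInstrBuf;       /* size of that mapping */
    uint32_t       offInstrNextByte; /* read cursor */
    uint8_t        offOpcode;        /* write cursor into abOpcode */
    uint8_t        abOpcode[15];     /* side copy of the instruction bytes */
} FETCHSTATE;

/* Stand-in for the real slow path that refills the mapping / raises a fault;
   here it simply returns a NOP so the sketch compiles on its own. */
static uint8_t fetchU8Slow(FETCHSTATE *pState)
{
    (void)pState;
    return 0x90;
}

/* Fetch the next opcode byte: fast path from the mapped buffer, slow path
   otherwise, and in either case mirror the byte into abOpcode so a recompiler
   can later copy it into the translation block. */
static uint8_t fetchU8(FETCHSTATE *pState)
{
    uint8_t        bRet;
    uint32_t const offBuf = pState->offInstrNextByte;
    if (pState->pbInstrBuf != NULL && offBuf < pState->cbInstrBuf)
    {
        pState->offInstrNextByte = offBuf + 1;
        bRet = pState->pbInstrBuf[offBuf];
    }
    else
        bRet = fetchU8Slow(pState);

    /* The opcode-buffer mirroring added by the changeset. */
    assert(pState->offOpcode < sizeof(pState->abOpcode));
    pState->abOpcode[pState->offOpcode++] = bRet;
    return bRet;
}
```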
trunk/src/VBox/VMM/include/IEMInternal.h (r100231 → r100266)

    In the code-TLB layout of the decoder state a new byte is inserted right after offModRm (0x34):

        # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
            /** The current offset into abOpcode. */
            uint8_t     offOpcode;      /* 0x35 */
        # else
            uint8_t     bUnused;        /* 0x35 */
        # endif

    The offset comments of the following fields (enmEffOpSize through fEvexStuff) are bumped by one in the TLB layout (now 0x36..0x3d), and the one-byte abAlignment2a padding is kept only in the non-TLB build, so uFpuOpcode stays at 0x3e.
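Since inserting a byte shifts every later field, the offset comments are easy to let rot. A common way to keep them honest is a compile-time check; below is a small sketch of that technique on an illustrative struct (the real code has AssertCompileMemberOffset-style checks for IEMCPU, this is not a copy of them):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative layout echoing the IEMCPU decoder block: a byte-sized cursor
   squeezed in front of a run of byte-sized mode/prefix fields. */
typedef struct DEMOCPU
{
    uint8_t  offModRm;       /* 0x00 */
    uint8_t  offOpcode;      /* 0x01  (the newly inserted field) */
    uint8_t  enmEffOpSize;   /* 0x02 */
    uint8_t  enmDefAddrMode; /* 0x03 */
    uint8_t  enmEffAddrMode; /* 0x04 */
    uint8_t  enmDefOpSize;   /* 0x05 */
    uint8_t  idxPrefix;      /* 0x06 */
    uint8_t  uVex3rdReg;     /* 0x07 */
    uint8_t  uVexLength;     /* 0x08 */
    uint8_t  fEvexStuff;     /* 0x09 */
    uint16_t uFpuOpcode;     /* 0x0a */
} DEMOCPU;

/* Compile-time checks that the offset comments match reality; if a field is
   added or removed these fire immediately instead of silently going stale. */
static_assert(offsetof(DEMOCPU, offOpcode)  == 0x01, "offOpcode moved");
static_assert(offsetof(DEMOCPU, fEvexStuff) == 0x09, "fEvexStuff moved");
static_assert(offsetof(DEMOCPU, uFpuOpcode) == 0x0a, "uFpuOpcode moved");
```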
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp (r100072 → r100266)

    The IEM_OPCODE_GET_NEXT_RM stub is dropped, matching the removal of the real macro from IEMInline.h:

    -#define IEM_OPCODE_GET_NEXT_RM(a_pu8)   do { *(a_pu8) = g_bRandom; CHK_PTYPE(uint8_t *, a_pu8); } while (0)
     #define IEM_OPCODE_GET_NEXT_U8(a_pu8)   do { *(a_pu8) = g_bRandom; CHK_PTYPE(uint8_t *, a_pu8); } while (0)
     #define IEM_OPCODE_GET_NEXT_S8(a_pi8)   do { *(a_pi8) = g_bRandom; CHK_PTYPE(int8_t *, a_pi8); } while (0)