Changeset 99299 in vbox
Timestamp: Apr 6, 2023 12:06:25 AM
Location:  trunk/src/VBox/VMM
Files:     4 edited
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py (r99296 → r99299)

@@ -4575,6 +4575,4 @@
 
         #
-        # Complete and discard the current block.
-        #
         # HACK ALERT! For blocks originating from macro expansion the start and
         # end line will be the same, but the line has multiple
@@ -4601,4 +4599,36 @@
         asLines = [sLine + '\n' for sLine in sRawLine.split('\n')];
 
+        #
+        # Strip anything following the IEM_MC_END(); statement in the final line,
+        # so that we don't carry on any trailing 'break' after macro expansions
+        # like for iemOp_movsb_Xb_Yb.
+        #
+        while asLines[-1].strip() == '':
+            asLines.pop();
+        sFinal = asLines[-1];
+        offFinalEnd = sFinal.find('IEM_MC_END');
+        if offFinalEnd < 0: self.raiseError('bogus IEM_MC_END: Not in final line: %s' % (sFinal,));
+        offFinalEnd += len('IEM_MC_END');
+
+        while sFinal[offFinalEnd].isspace():
+            offFinalEnd += 1;
+        if sFinal[offFinalEnd] != '(': self.raiseError('bogus IEM_MC_END: Expected "(" at %s: %s' % (offFinalEnd, sFinal,));
+        offFinalEnd += 1;
+
+        while sFinal[offFinalEnd].isspace():
+            offFinalEnd += 1;
+        if sFinal[offFinalEnd] != ')': self.raiseError('bogus IEM_MC_END: Expected ")" at %s: %s' % (offFinalEnd, sFinal,));
+        offFinalEnd += 1;
+
+        while sFinal[offFinalEnd].isspace():
+            offFinalEnd += 1;
+        if sFinal[offFinalEnd] != ';': self.raiseError('bogus IEM_MC_END: Expected ";" at %s: %s' % (offFinalEnd, sFinal,));
+        offFinalEnd += 1;
+
+        asLines[-1] = sFinal[: offFinalEnd];
+
+        #
+        # Complete and discard the current block.
+        #
         self.oCurMcBlock.complete(self.iLine, offEndStatementInLine, asLines);
         self.oCurMcBlock = None;
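For illustration, here is a standalone Python sketch of the trimming the new hunk performs (the function name and the sample input are invented for this example; the scan logic and error messages mirror the hunk above):

    def trim_after_mc_end(s_line):
        """Keep everything up to and including the ');' that closes IEM_MC_END,
        dropping e.g. a trailing 'break;' picked up from macro expansion."""
        off = s_line.find('IEM_MC_END')
        if off < 0:
            raise ValueError('bogus IEM_MC_END: Not in final line: %s' % (s_line,))
        off += len('IEM_MC_END')
        for chExpected in '();':
            while s_line[off].isspace():
                off += 1
            if s_line[off] != chExpected:
                raise ValueError('bogus IEM_MC_END: Expected "%s" at %s: %s' % (chExpected, off, s_line,))
            off += 1
        return s_line[:off]

    print(trim_after_mc_end('        IEM_MC_END(); break;'))   # -> '        IEM_MC_END();'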
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedPython.py (r99298 → r99299)

@@ -132,4 +132,6 @@
     ## @note Effective operand size is generally handled in the decoder, at present
     ##       we only do variations on addressing and memory accessing.
+    ## @todo Blocks without addressing should have 64-bit and 32-bit PC update
+    ##       variations to reduce code size (see iemRegAddToRip).
     ## @{
     ksVariation_Default = ''; ##< No variations.
@@ -370,5 +372,5 @@
                 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
             ];
-        # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED ...
+        # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED and maybe *_LM64/_NOT64 ...
         elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):
@@ -377,4 +379,8 @@
                 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
             oNewStmt.sName += '_THREADED';
+            if self.sVariation in (self.ksVariation_Addr64, self.ksVariation_Addr64_32):
+                oNewStmt.sName += '_LM64';
+            elif self.sVariation != self.ksVariation_Default:
+                oNewStmt.sName += '_NOT64';
 
         # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
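To make the new name mangling concrete, a minimal sketch of the suffix selection (only ksVariation_Default = '' is given above; the other two key values are assumed placeholders for this example):

    KS_VARIATION_DEFAULT   = ''            # ksVariation_Default above
    KS_VARIATION_ADDR64    = '_Addr64'     # assumed value, for illustration only
    KS_VARIATION_ADDR64_32 = '_Addr6432'   # assumed value, for illustration only

    def thread_fn_suffix(s_name, s_variation):
        # Mirrors the hunk above: 64-bit addressing variations get _LM64,
        # every other non-default variation gets _NOT64.
        s_name += '_THREADED'
        if s_variation in (KS_VARIATION_ADDR64, KS_VARIATION_ADDR64_32):
            s_name += '_LM64'
        elif s_variation != KS_VARIATION_DEFAULT:
            s_name += '_NOT64'
        return s_name

    print(thread_fn_suffix('IEM_MC_ADVANCE_RIP_AND_FINISH', KS_VARIATION_ADDR64))
    # -> IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_LM64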
trunk/src/VBox/VMM/VMMAll/IEMThreadedFunctions.cpp (r99298 → r99299)

@@ -80,60 +80,82 @@
 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED(a_cbInstr) \
     return iemRegAddToRipAndFinishingClearingRF(pVCpu, a_cbInstr)
 #undef IEM_MC_ADVANCE_RIP_AND_FINISH
 
+/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
+ * and only used when we're in 64-bit code. */
+#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_LM64(a_cbInstr) \
+    return iemRegAddToRip64AndFinishingClearingRF(pVCpu, a_cbInstr)
+#undef IEM_MC_ADVANCE_RIP_AND_FINISH
+
+/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
+ * and never used in 64-bit code. */
+#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_NOT64(a_cbInstr) \
+    return iemRegAddToEip32AndFinishingClearingRF(pVCpu, a_cbInstr)
+#undef IEM_MC_ADVANCE_RIP_AND_FINISH
+
 /** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as param. */
 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED(a_i8, a_cbInstr, a_enmEffOpSize) \
     return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize)
 #undef IEM_MC_REL_JMP_S8_AND_FINISH
 
 /** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as param. */
 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED(a_i16, a_cbInstr) \
     return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))
 #undef IEM_MC_REL_JMP_S16_AND_FINISH
 
 /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as param. */
 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED(a_i32, a_cbInstr, a_enmEffOpSize) \
     return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_enmEffOpSize)
 #undef IEM_MC_REL_JMP_S32_AND_FINISH
 
 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
-# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16(a_GCPtrEff, a_bRm, a_u16Disp) \
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16(a_GCPtrEff, a_bRm, a_u16Disp) \
     (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp)
+#undef IEM_MC_CALC_RM_EFF_ADDR
 
 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
-# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
     (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)
+#undef IEM_MC_CALC_RM_EFF_ADDR
 
 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
-# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
     (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)
+#undef IEM_MC_CALC_RM_EFF_ADDR
 
 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
-# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
     (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)
+#undef IEM_MC_CALC_RM_EFF_ADDR
 
 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
-# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR6432(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR6432(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
     (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)
+#undef IEM_MC_CALC_RM_EFF_ADDR
 
 /** Variant of IEM_MC_CALL_CIMPL_1 with explicit instruction length parameter. */
-# define IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_pfnCImpl, a0) \
+#define IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_pfnCImpl, a0) \
     return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
+#undef IEM_MC_CALL_CIMPL_1
 
 /** Variant of IEM_MC_CALL_CIMPL_2 with explicit instruction length parameter. */
-# define IEM_MC_CALL_CIMPL_2_THREADED(a_cbInstr, a_pfnCImpl, a0, a1) \
+#define IEM_MC_CALL_CIMPL_2_THREADED(a_cbInstr, a_pfnCImpl, a0, a1) \
     return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
+#undef IEM_MC_CALL_CIMPL_2
 
 /** Variant of IEM_MC_CALL_CIMPL_3 with explicit instruction length parameter. */
-# define IEM_MC_CALL_CIMPL_3_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2) \
+#define IEM_MC_CALL_CIMPL_3_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2) \
     return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
+#undef IEM_MC_CALL_CIMPL_3
 
 /** Variant of IEM_MC_CALL_CIMPL_4 with explicit instruction length parameter. */
-# define IEM_MC_CALL_CIMPL_4_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2, a3) \
+#define IEM_MC_CALL_CIMPL_4_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2, a3) \
     return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
+#undef IEM_MC_CALL_CIMPL_4
 
 /** Variant of IEM_MC_CALL_CIMPL_5 with explicit instruction length parameter. */
-# define IEM_MC_CALL_CIMPL_5_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2, a3, a4) \
+#define IEM_MC_CALL_CIMPL_5_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2, a3, a4) \
     return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
+#undef IEM_MC_CALL_CIMPL_5
 
 /** Variant of IEM_MC_FETCH_GREG_U8 with extended (20) register index. */
@@ -160,40 +182,50 @@
 #define IEM_MC_FETCH_GREG_U8_SX_U32_THREADED(a_u32Dst, a_iGRegEx) \
     (a_u32Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
+#undef IEM_MC_FETCH_GREG_U8_SX_U32
 
 /** Variant of IEM_MC_FETCH_GREG_U8_SX_U64 with extended (20) register index. */
 #define IEM_MC_FETCH_GREG_U8_SX_U64_THREADED(a_u64Dst, a_iGRegEx) \
     (a_u64Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
+#undef IEM_MC_FETCH_GREG_U8_SX_U64
 
 /** Variant of IEM_MC_STORE_GREG_U8 with extended (20) register index. */
 #define IEM_MC_STORE_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
     *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
+#undef IEM_MC_STORE_GREG_U8
 
 /** Variant of IEM_MC_STORE_GREG_U8 with extended (20) register index. */
 #define IEM_MC_STORE_GREG_U8_CONST_THREADED(a_iGRegEx, a_u8Value) \
     *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
+#undef IEM_MC_STORE_GREG_U8
 
 /** Variant of IEM_MC_REF_GREG_U8 with extended (20) register index. */
 #define IEM_MC_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx) \
     (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
+#undef IEM_MC_REF_GREG_U8
 
 /** Variant of IEM_MC_ADD_GREG_U8 with extended (20) register index. */
 #define IEM_MC_ADD_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
     *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) += (a_u8Value)
+#undef IEM_MC_ADD_GREG_U8
 
 /** Variant of IEM_MC_SUB_GREG_U8 with extended (20) register index. */
 #define IEM_MC_SUB_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
     *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) -= (a_u8Value)
+#undef IEM_MC_SUB_GREG_U8
 
 /** Variant of IEM_MC_ADD_GREG_U8_TO_LOCAL with extended (20) register index. */
 #define IEM_MC_ADD_GREG_U8_TO_LOCAL_THREADED(a_u8Value, a_iGRegEx) \
     do { (a_u8Value) += iemGRegFetchU8Ex(pVCpu, (a_iGRegEx)); } while (0)
+#undef IEM_MC_ADD_GREG_U8_TO_LOCAL
 
 /** Variant of IEM_MC_AND_GREG_U8 with extended (20) register index. */
 #define IEM_MC_AND_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
     *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) &= (a_u8Value)
+#undef IEM_MC_AND_GREG_U8
 
 /** Variant of IEM_MC_OR_GREG_U8 with extended (20) register index. */
 #define IEM_MC_OR_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
     *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) |= (a_u8Value)
+#undef IEM_MC_OR_GREG_U8
 
 /**
@@ -278,4 +310,7 @@
     /* Get the register (or SIB) value. */
     uint32_t u32EffAddr;
+#ifdef _MSC_VER
+    u32EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
+#endif
     switch (bRm & X86_MODRM_RM_MASK)
     {
@@ -372,6 +407,10 @@
     {
         /* Get the register (or SIB) value. */
+#ifdef _MSC_VER
+        u64EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
+#endif
         switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */
         {
+            default:
             case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
             case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
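As a hedged summary of the dispatch this sets up, the three RIP-advance macro variants now resolve to different inline helpers (the macro and helper names are taken from the hunks above; the mapping itself is illustrative Python, not generated code):

    # Which helper each generated IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED*
    # variant calls; see IEMInline.h below for the helper bodies.
    ADVANCE_RIP_HELPERS = {
        'IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED':       'iemRegAddToRipAndFinishingClearingRF',    # mode checked at runtime
        'IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_LM64':  'iemRegAddToRip64AndFinishingClearingRF',  # 64-bit code only
        'IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_NOT64': 'iemRegAddToEip32AndFinishingClearingRF',  # 16/32-bit code only
    }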
trunk/src/VBox/VMM/include/IEMInline.h (r99298 → r99299)

@@ -1771,4 +1771,23 @@
 
 /**
+ * Updates the EIP/IP to point to the next instruction - only for 32-bit and
+ * 16-bit code.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             The number of bytes to add.
+ */
+DECL_FORCE_INLINE(void) iemRegAddToEip32(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
+{
+    /* See comment in iemRegAddToRip. */
+    uint32_t const uEipPrev = pVCpu->cpum.GstCtx.eip;
+    uint32_t const uEipNext = uEipPrev + cbInstr;
+    if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
+        pVCpu->cpum.GstCtx.rip = (uint32_t)uEipNext;
+    else
+        pVCpu->cpum.GstCtx.rip = (uint16_t)uEipNext;
+}
+
+
+/**
  * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
  * following EFLAGS bits are set:
@@ -1871,7 +1890,39 @@
  * @param   cbInstr             The number of bytes to add.
  */
-DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
 {
     iemRegAddToRip(pVCpu, cbInstr);
+    return iemRegFinishClearingRF(pVCpu);
+}
+
+
+/**
+ * Updates the RIP to point to the next instruction and clears EFLAGS.RF
+ * and CPUMCTX_INHIBIT_SHADOW.
+ *
+ * Only called from 64-bit code.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             The number of bytes to add.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
+{
+    pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
+    return iemRegFinishClearingRF(pVCpu);
+}
+
+
+/**
+ * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
+ * CPUMCTX_INHIBIT_SHADOW.
+ *
+ * This is never used in 64-bit code.
+ *
+ * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
+ * @param   cbInstr             The number of bytes to add.
+ */
+DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
+{
+    iemRegAddToEip32(pVCpu, cbInstr);
     return iemRegFinishClearingRF(pVCpu);
 }
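A rough Python model of the new iemRegAddToEip32 behavior (IEMTARGETCPU_386 here is a placeholder ordinal; the real helper writes the zero-extended result into the 64-bit rip field):

    IEMTARGETCPU_386 = 3  # placeholder value; only the >= comparison matters here

    def iem_reg_add_to_eip32(eip, cb_instr, target_cpu):
        # 386 and later have a 32-bit EIP; earlier CPUs only a 16-bit IP,
        # so the incremented value wraps at the corresponding width.
        next_eip = (eip + cb_instr) & 0xFFFFFFFF
        return next_eip if target_cpu >= IEMTARGETCPU_386 else next_eip & 0xFFFF

    assert iem_reg_add_to_eip32(0xFFFE, 3, IEMTARGETCPU_386) == 0x10001      # 32-bit EIP carries on
    assert iem_reg_add_to_eip32(0xFFFE, 3, IEMTARGETCPU_386 - 1) == 0x0001   # 16-bit IP wraps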