Changeset 98969 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp:
- Mar 15, 2023 12:24:47 AM (2 years ago)
- svn:sync-xref-src-repo-rev:
- 156318
- Location:
- trunk/src/VBox/VMM/VMMAll
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
r98918 r98969 11244 11244 * lines of useless output. */ 11245 11245 #if defined(LOG_ENABLED) 11246 if ((LogIs3Enabled() || LogIs4Enabled()) && (-(int8_t)IEM_GET_INSTR_LEN(pVCpu) == i8Imm))11246 if ((LogIs3Enabled() || LogIs4Enabled()) && -(int8_t)IEM_GET_INSTR_LEN(pVCpu) == i8Imm) 11247 11247 switch (pVCpu->iem.s.enmEffAddrMode) 11248 11248 { -
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py
r98916 r98969 1766 1766 return ''.join([oStmt.renderCode(cchIndent) for oStmt in aoStmts]); 1767 1767 1768 @staticmethod 1769 def findStmtByNames(aoStmts, dNames): 1770 """ 1771 Returns first statement with any of the given names in from the list. 1772 1773 Note! The names are passed as a dictionary for quick lookup, the value 1774 does not matter. 1775 """ 1776 for oStmt in aoStmts: 1777 if oStmt.sName in dNames: 1778 return oStmt; 1779 if isinstance(oStmt, McStmtCond): 1780 oHit = McStmt.findStmtByNames(oStmt.aoIfBranch, dNames); 1781 if not oHit: 1782 oHit = McStmt.findStmtByNames(oStmt.aoElseBranch, dNames); 1783 if oHit: 1784 return oHit; 1785 return None; 1786 1768 1787 def isCppStmt(self): 1769 1788 """ Checks if this is a C++ statement. """ … … 1789 1808 1790 1809 class McStmtVar(McStmt): 1791 """ IEM_MC_LOCAL_VAR *"""1810 """ IEM_MC_LOCAL_VAR, IEM_MC_LOCAL_CONST """ 1792 1811 def __init__(self, sName, asParams, sType, sVarName, sConstValue = None): 1793 1812 McStmt.__init__(self, sName, asParams); … … 1797 1816 1798 1817 class McStmtArg(McStmtVar): 1799 """ IEM_MC_ARG *"""1818 """ IEM_MC_ARG, IEM_MC_ARG_CONST, IEM_MC_ARG_LOCAL_REF """ 1800 1819 def __init__(self, sName, asParams, sType, sVarName, iArg, sConstValue = None, sRef = None, sRefType = 'none'): 1801 1820 McStmtVar.__init__(self, sName, asParams, sType, sVarName, sConstValue); … … 2374 2393 self.raiseDecodeError(sRawCode, off, 'Mixed up else/#ifdef or something confusing us.'); 2375 2394 2376 2377 2395 return aoStmts; 2378 2379 2396 2380 2397 def decode(self): -
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedPython.py
r98951 r98969 112 112 self.offNewParam = 1024 ##< The bit offset in iNewParam. 113 113 114 class ThreadedFunction(object): 115 """ 116 A threaded function. 117 """ 118 119 def __init__(self, oMcBlock): 120 self.oMcBlock = oMcBlock # type: IEMAllInstructionsPython.McBlock 121 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*). 122 self.dVariables = {} # type: dict(str,McStmtVar) 123 ## 124 self.aoParamRefs = [] # type: list(ThreadedParamRef) 125 self.dParamRefs = {} # type: dict(str,list(ThreadedParamRef)) 126 self.cMinParams = 0; ##< Minimum number of parameters to the threaded function. 114 115 class ThreadedFunctionVariation(object): 116 """ Threaded function variation. """ 117 118 ## @name Variations. 119 ## These variations will match translation block selection/distinctions as well. 120 ## @note Effective operand size is generally handled in the decoder, at present 121 ## we only do variations on addressing and memory accessing. 122 ## @{ 123 ksVariation_Default = ''; ##< No variations. 124 ksVariation_Addr16 = '_Addr16'; ##< 16-bit addressing mode. 125 ksVariation_Addr32 = '_Addr32'; ##< 32-bit addressing mode. 126 ksVariation_Addr32Flat = '_Addr32Flat'; ##< 32-bit addressing mode with CS, DS, ES and SS flat and 4GB wide. 127 ksVariation_Addr64 = '_Addr64'; ##< 64-bit addressing mode. 128 ksVariation_Addr64_32 = '_Addr6432'; ##< 32-bit addressing in 64-bit mode. 129 kasVariations_EffAddr = ( 130 ksVariation_Addr16, ksVariation_Addr32, ksVariation_Addr32Flat, ksVariation_Addr64, ksVariation_Addr64_32 131 ); 132 ## @} 133 134 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default): 135 self.oParent = oThreadedFunction # type: ThreadedFunction 136 ##< ksVariation_Xxxx. 137 self.sVariation = sVariation 138 139 ## Threaded function parameter references. 140 self.aoParamRefs = [] # type: list(ThreadedParamRef) 141 ## Unique parameter references. 
142 self.dParamRefs = {} # type: dict(str,list(ThreadedParamRef)) 143 ## Minimum number of parameters to the threaded function. 144 self.cMinParams = 0; 127 145 128 146 ## List/tree of statements for the threaded function. 129 147 self.aoStmtsForThreadedFunction = [] # type list(McStmt) 130 148 131 @staticmethod 132 def dummyInstance(): 133 """ Gets a dummy instance. """ 134 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999, 'nil', 999999999)); 149 def getIndexName(self): 150 sName = self.oParent.oMcBlock.sFunction; 151 if sName.startswith('iemOp_'): 152 sName = sName[len('iemOp_'):]; 153 if self.oParent.oMcBlock.iInFunction == 0: 154 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, ); 155 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, ); 156 157 def getFunctionName(self): 158 sName = self.oParent.oMcBlock.sFunction; 159 if sName.startswith('iemOp_'): 160 sName = sName[len('iemOp_'):]; 161 if self.oParent.oMcBlock.iInFunction == 0: 162 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, ); 163 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, ); 164 165 # 166 # Analysis and code morphing. 167 # 135 168 136 169 def raiseProblem(self, sMessage): 137 170 """ Raises a problem. 
""" 138 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, )); 139 140 def getIndexName(self): 141 sName = self.oMcBlock.sFunction; 142 if sName.startswith('iemOp_'): 143 sName = sName[len('iemOp_'):]; 144 if self.oMcBlock.iInFunction == 0: 145 return 'kIemThreadedFunc_%s' % ( sName, ); 146 return 'kIemThreadedFunc_%s_%s' % ( sName, self.oMcBlock.iInFunction, ); 147 148 def getFunctionName(self): 149 sName = self.oMcBlock.sFunction; 150 if sName.startswith('iemOp_'): 151 sName = sName[len('iemOp_'):]; 152 if self.oMcBlock.iInFunction == 0: 153 return 'iemThreadedFunc_%s' % ( sName, ); 154 return 'iemThreadedFunc_%s_%s' % ( sName, self.oMcBlock.iInFunction, ); 171 self.oParent.raiseProblem(sMessage); 155 172 156 173 def analyzeReferenceToType(self, sRef): … … 183 200 if sRef.startswith('i64'): 184 201 return 'int64_t'; 185 if sRef in ('iReg', 'i SegReg', 'iSrcReg', 'iDstReg'):202 if sRef in ('iReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg'): 186 203 return 'uint8_t'; 187 204 elif ch0 == 'p': … … 196 213 elif ch0 == 'G' and sRef.startswith('GCPtr'): 197 214 return 'uint64_t'; 215 elif ch0 == 'e': 216 if sRef == 'enmEffOpSize': 217 return 'IEMMODE'; 198 218 elif sRef == 'cShift': ## @todo risky 199 219 return 'uint8_t'; 220 200 221 self.raiseProblem('Unknown reference: %s' % (sRef,)); 201 222 return None; # Shut up pylint 2.16.2. … … 244 265 + oCurRef.sNewName \ 245 266 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ]; 267 268 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ... 
269 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR': 270 assert self.sVariation != self.ksVariation_Default; 271 oNewStmt.sName = 'IEM_MC_CALC_RM_EFF_ADDR_THREADED' + self.sVariation.upper(); 272 assert len(oNewStmt.asParams) == 3; 273 if self.sVariation == self.ksVariation_Addr16: 274 oNewStmt.asParams = [ 275 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName, 276 ]; 277 elif self.sVariation in (self.ksVariation_Addr32, self.ksVariation_Addr32Flat): 278 oNewStmt.asParams = [ 279 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['bSib'][0].sNewName, 280 self.dParamRefs['u32Disp'][0].sNewName, 281 ]; 282 else: 283 oNewStmt.asParams = [ 284 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, self.dParamRefs['bSib'][0].sNewName, 285 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName, 286 ]; 287 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED ... 288 elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 289 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'): 290 oNewStmt.sName += '_THREADED'; 291 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName); 292 # ... and IEM_MC_CALL_CIMPL_[0-5] into *_THREADED ... 293 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_'): 294 oNewStmt.sName += '_THREADED'; 295 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName); 246 296 247 297 # Process branches of conditionals recursively. 
… … 295 345 dBySize = {} # type: dict(str,str) 296 346 for sStdRef, aoRefs in self.dParamRefs.items(): 297 cBits = g_kdTypeInfo[aoRefs[0].sType][0]; 298 assert(cBits <= 64); 347 if aoRefs[0].sType[0] != 'P': 348 cBits = g_kdTypeInfo[aoRefs[0].sType][0]; 349 assert(cBits <= 64); 350 else: 351 cBits = 64; 352 299 353 if cBits not in dBySize: 300 354 dBySize[cBits] = [sStdRef,] … … 323 377 324 378 # Currently there are a few that requires 4 parameters, list these so we can figure out why: 325 if self.cMinParams >= 3:379 if self.cMinParams >= 4: 326 380 print('debug: cMinParams=%s cRawParams=%s - %s:%d' 327 % (self.cMinParams, len(self.dParamRefs), self.o McBlock.sSrcFile, self.oMcBlock.iBeginLine,));381 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,)); 328 382 329 383 return True; … … 338 392 for oStmt in aoStmts: 339 393 # Some statements we can skip alltogether. 340 if isinstance(oStmt, (iai.McStmtVar, iai.McCppPreProc)):394 if isinstance(oStmt, iai.McCppPreProc): 341 395 continue; 342 396 if oStmt.isCppStmt() and oStmt.fDecode: 343 397 continue; 344 398 399 if isinstance(oStmt, iai.McStmtVar): 400 if oStmt.sConstValue is None: 401 continue; 402 aiSkipParams = { 0: True, 1: True, 3: True }; 403 else: 404 aiSkipParams = {}; 405 345 406 # Several statements have implicit parameters. 346 407 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 347 408 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 348 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5' ):409 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5', ): 349 410 self.aoParamRefs.append(ThreadedParamRef('cbInstr', 'uint4_t', oStmt)); 350 411 351 # We can skip the rest for statements w/o parameters. 
352 if not oStmt.asParams: 353 continue; 412 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR': 413 ## @todo figure out how to do this in the input part... 414 if self.sVariation == self.ksVariation_Addr16: 415 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt)); 416 self.aoParamRefs.append(ThreadedParamRef('u16Disp', 'uint16_t', oStmt)); 417 elif self.sVariation in (self.ksVariation_Addr32, self.ksVariation_Addr32Flat): 418 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt)); 419 self.aoParamRefs.append(ThreadedParamRef('bSib', 'uint8_t', oStmt)); 420 self.aoParamRefs.append(ThreadedParamRef('u32Disp', 'uint32_t', oStmt)); 421 else: 422 assert self.sVariation in (self.ksVariation_Addr64, self.ksVariation_Addr64_32); 423 self.aoParamRefs.append(ThreadedParamRef('bRmEx', 'uint8_t', oStmt)); 424 self.aoParamRefs.append(ThreadedParamRef('bSib', 'uint8_t', oStmt)); 425 self.aoParamRefs.append(ThreadedParamRef('u32Disp', 'uint32_t', oStmt)); 426 self.aoParamRefs.append(ThreadedParamRef('cbInstr', 'uint4_t', oStmt)); 354 427 355 428 # Inspect the target of calls to see if we need to pass down a 356 429 # function pointer or function table pointer for it to work. 357 aiSkipParams = {};358 430 if isinstance(oStmt, iai.McStmtCall): 359 431 if oStmt.sFn[0] == 'p': … … 367 439 # Check all the parameters for bogus references. 368 440 for iParam, sParam in enumerate(oStmt.asParams): 369 if iParam not in aiSkipParams and sParam not in self. dVariables:441 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables: 370 442 # The parameter may contain a C expression, so we have to try 371 443 # extract the relevant bits, i.e. variables and fields while … … 409 481 410 482 # We can skip known variables. 411 elif sRef in self. 
dVariables:483 elif sRef in self.oParent.dVariables: 412 484 pass; 413 485 … … 429 501 elif ( sRef.startswith('IEM_OP_PRF_') 430 502 or sRef.startswith('IEM_ACCESS_') 503 or sRef.startswith('IEMINT_') 431 504 or sRef.startswith('X86_GREG_') 432 505 or sRef.startswith('X86_SREG_') … … 434 507 or sRef.startswith('X86_FSW_') 435 508 or sRef.startswith('X86_FCW_') 509 or sRef.startswith('X86_XCPT_') 510 or sRef.startswith('IEMMODE_') 436 511 or sRef.startswith('g_') 437 512 or sRef in ( 'int8_t', 'int16_t', 'int32_t', … … 439 514 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C', 440 515 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX', 441 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT' ) ): 516 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX', 517 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN', 518 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT', 519 'NIL_RTGCPTR' ) ): 442 520 pass; 443 521 … … 472 550 return True; 473 551 552 def analyzeVariation(self, aoStmts): 553 """ 554 2nd part of the analysis, done on each variation. 555 556 The variations may differ in parameter requirements and will end up with 557 slightly different MC sequences. Thus this is done on each individually. 558 559 Returns dummy True - raises exception on trouble. 560 """ 561 # Now scan the code for variables and field references that needs to 562 # be passed to the threaded function because they are related to the 563 # instruction decoding. 564 self.analyzeFindThreadedParamRefs(aoStmts); 565 self.analyzeConsolidateThreadedParamRefs(); 566 567 # Morph the statement stream for the block into what we'll be using in the threaded function. 568 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts); 569 if iParamRef != len(self.aoParamRefs): 570 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),)); 571 572 return True; 573 574 575 class ThreadedFunction(object): 576 """ 577 A threaded function. 
578 """ 579 580 def __init__(self, oMcBlock): 581 self.oMcBlock = oMcBlock # type: IEMAllInstructionsPython.McBlock 582 ## Variations for this block. There is at least one. 583 self.aoVariations = [] # type: list(ThreadedFunctionVariation) 584 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*). 585 self.dVariables = {} # type: dict(str,McStmtVar) 586 587 @staticmethod 588 def dummyInstance(): 589 """ Gets a dummy instance. """ 590 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999, 'nil', 999999999)); 591 592 def raiseProblem(self, sMessage): 593 """ Raises a problem. """ 594 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, )); 595 474 596 def analyzeFindVariablesAndCallArgs(self, aoStmts): 475 597 """ Scans the statements for MC variables and call arguments. """ … … 493 615 """ 494 616 Analyzes the code, identifying the number of parameters it requires and such. 495 May raise exceptions if we cannot grok the code. 617 618 Returns dummy True - raises exception on trouble. 496 619 """ 497 620 … … 502 625 self.analyzeFindVariablesAndCallArgs(aoStmts); 503 626 504 # Now scan the code for variables and field references that needs to505 # be passed to the threaded function because they are related to the506 # instruction decoding.507 self.analyzeFindThreadedParamRefs(aoStmts);508 self.analyzeConsolidateThreadedParamRefs();509 510 # Morph the statement stream for the block into what we'll be using in the threaded function. 511 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);512 if iParamRef != len(self.aoParamRefs):513 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));627 # Create variations if needed. 
628 if iai.McStmt.findStmtByNames(aoStmts, {'IEM_MC_CALC_RM_EFF_ADDR' : True,}): 629 self.aoVariations = [ThreadedFunctionVariation(self, sVar) 630 for sVar in ThreadedFunctionVariation.kasVariations_EffAddr]; 631 else: 632 self.aoVariations = [ThreadedFunctionVariation(self),]; 633 634 # Continue the analysis on each variation. 635 for oVariation in self.aoVariations: 636 oVariation.analyzeVariation(aoStmts); 514 637 515 638 return True; … … 546 669 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles); 547 670 548 # Wrap MC blocks into threaded functions and analyze these.671 # Create threaded functions for the MC blocks. 549 672 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks]; 673 674 # Analyze the threaded functions. 550 675 dRawParamCounts = {}; 551 676 dMinParamCounts = {}; 552 677 for oThreadedFunction in self.aoThreadedFuncs: 553 678 oThreadedFunction.analyze(); 554 dRawParamCounts[len(oThreadedFunction.dParamRefs)] = dRawParamCounts.get(len(oThreadedFunction.dParamRefs), 0) + 1; 555 dMinParamCounts[oThreadedFunction.cMinParams] = dMinParamCounts.get(oThreadedFunction.cMinParams, 0) + 1; 679 for oVariation in oThreadedFunction.aoVariations: 680 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1; 681 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1; 556 682 print('debug: param count distribution, raw and optimized:', file = sys.stderr); 557 683 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()): … … 626 752 ' kIemThreadedFunc_Invalid = 0,', 627 753 ]; 628 for oFunc in self.aoThreadedFuncs: 629 asLines.append(' ' + oFunc.getIndexName() + ','); 754 for oThreadedFunction in self.aoThreadedFuncs: 755 for oVariation in oThreadedFunction.aoVariations: 756 asLines.append(' ' + oVariation.getIndexName() + ','); 630 757 asLines += [ 631 758 ' kIemThreadedFunc_End', … … 678 
805 for oThreadedFunction in self.aoThreadedFuncs: 679 806 oMcBlock = oThreadedFunction.oMcBlock; 680 # Function header 681 oOut.write( '\n' 682 + '\n' 683 + '/**\n' 684 + ' * %s at line %s offset %s in %s%s\n' 685 % (oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine, os.path.split(oMcBlock.sSrcFile)[1], 686 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '') 687 + ' */\n' 688 + 'static IEM_DECL_IMPL_DEF(VBOXSTRICTRC, ' + oThreadedFunction.getFunctionName() + ',\n' 689 + ' ' + sParamList 690 + '{\n'); 691 692 aasVars = []; 693 for aoRefs in oThreadedFunction.dParamRefs.values(): 694 oRef = aoRefs[0]; 695 cBits = g_kdTypeInfo[oRef.sType][0]; 696 697 sTypeDecl = oRef.sType + ' const'; 698 699 if cBits == 64: 700 assert oRef.offNewParam == 0; 701 if oRef.sType == 'uint64_t': 702 sUnpack = 'uParam%s;' % (oRef.iNewParam,); 807 for oVariation in oThreadedFunction.aoVariations: 808 # Function header 809 oOut.write( '\n' 810 + '\n' 811 + '/**\n' 812 + ' * %s at line %s offset %s in %s%s\n' 813 % (oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine, 814 os.path.split(oMcBlock.sSrcFile)[1], 815 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '') 816 + ' */\n' 817 + 'static IEM_DECL_IMPL_DEF(VBOXSTRICTRC, ' + oVariation.getFunctionName() + ',\n' 818 + ' ' + sParamList 819 + '{\n'); 820 821 aasVars = []; 822 for aoRefs in oVariation.dParamRefs.values(): 823 oRef = aoRefs[0]; 824 if oRef.sType[0] != 'P': 825 cBits = g_kdTypeInfo[oRef.sType][0]; 826 sType = g_kdTypeInfo[oRef.sType][2]; 703 827 else: 704 sUnpack = '(%s)uParam%s;' % (oRef.sType, oRef.iNewParam,); 705 elif oRef.offNewParam == 0: 706 sUnpack = '(%s)(uParam%s & %s);' % (oRef.sType, oRef.iNewParam, self.ksBitsToIntMask[cBits]); 707 else: 708 sUnpack = '(%s)((uParam%s >> %s) & %s);' \ 709 % (oRef.sType, oRef.iNewParam, oRef.offNewParam, self.ksBitsToIntMask[cBits]); 710 711 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if 
len(aoRefs) != 1 else '',); 712 713 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam), sTypeDecl, oRef.sNewName, sUnpack, sComment ]); 714 acchVars = [0, 0, 0, 0, 0]; 715 for asVar in aasVars: 716 for iCol, sStr in enumerate(asVar): 717 acchVars[iCol] = max(acchVars[iCol], len(sStr)); 718 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]); 719 for asVar in sorted(aasVars): 720 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],)); 721 722 # RT_NOREF for unused parameters. 723 if oThreadedFunction.cMinParams < g_kcThreadedParams: 724 oOut.write(' RT_NOREF(' 725 + ', '.join(['uParam%u' % (i,) for i in range(oThreadedFunction.cMinParams, g_kcThreadedParams)]) 726 + ');\n'); 727 728 # Now for the actual statements. 729 oOut.write(iai.McStmt.renderCodeForList(oThreadedFunction.aoStmtsForThreadedFunction, cchIndent = 4)); 730 731 oOut.write('}\n'); 828 cBits = 64; 829 sType = oRef.sType; 830 831 sTypeDecl = sType + ' const'; 832 833 if cBits == 64: 834 assert oRef.offNewParam == 0; 835 if sType == 'uint64_t': 836 sUnpack = 'uParam%s;' % (oRef.iNewParam,); 837 else: 838 sUnpack = '(%s)uParam%s;' % (sType, oRef.iNewParam,); 839 elif oRef.offNewParam == 0: 840 sUnpack = '(%s)(uParam%s & %s);' % (sType, oRef.iNewParam, self.ksBitsToIntMask[cBits]); 841 else: 842 sUnpack = '(%s)((uParam%s >> %s) & %s);' \ 843 % (sType, oRef.iNewParam, oRef.offNewParam, self.ksBitsToIntMask[cBits]); 844 845 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',); 846 847 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam), sTypeDecl, oRef.sNewName, sUnpack, sComment ]); 848 acchVars = [0, 0, 0, 0, 0]; 849 for asVar in aasVars: 850 for iCol, sStr in enumerate(asVar): 851 acchVars[iCol] = max(acchVars[iCol], len(sStr)); 852 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]); 853 for asVar in sorted(aasVars): 854 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], 
asVar[4],)); 855 856 # RT_NOREF for unused parameters. 857 if oVariation.cMinParams < g_kcThreadedParams: 858 oOut.write(' RT_NOREF(' 859 + ', '.join(['uParam%u' % (i,) for i in range(oVariation.cMinParams, g_kcThreadedParams)]) 860 + ');\n'); 861 862 # Now for the actual statements. 863 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4)); 864 865 oOut.write('}\n'); 866 732 867 733 868 # … … 742 877 + '{\n' 743 878 + ' /*Invalid*/ NULL, \n'); 744 for iThreadedFunction, oThreadedFunction in enumerate(self.aoThreadedFuncs): 745 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction + 1, oThreadedFunction.getFunctionName(),)); 879 iThreadedFunction = 0; 880 for oThreadedFunction in self.aoThreadedFuncs: 881 for oVariation in oThreadedFunction.aoVariations: 882 iThreadedFunction += 1; 883 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, oVariation.getFunctionName(),)); 746 884 oOut.write('};\n'); 747 885 -
trunk/src/VBox/VMM/VMMAll/IEMThreadedFunctions.cpp
r98961 r98969 34 34 #endif 35 35 #define VMCPU_INCL_CPUM_GST_CTX 36 #define IEM_WITH_OPAQUE_DECODER_STATE 36 37 #include <VBox/vmm/iem.h> 37 38 #include <VBox/vmm/cpum.h> … … 76 77 *********************************************************************************************************************************/ 77 78 79 /** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param. */ 80 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED(a_cbInstr) \ 81 return iemRegAddToRipAndFinishingClearingRF(pVCpu, a_cbInstr) 82 #undef IEM_MC_ADVANCE_RIP_AND_FINISH 83 84 /** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as param. */ 85 #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED(a_i8, a_cbInstr) \ 86 return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), pVCpu->iem.s.enmEffOpSize) 87 #undef IEM_MC_REL_JMP_S8_AND_FINISH 88 89 /** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as param. */ 90 #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED(a_i16, a_cbInstr) \ 91 return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16)) 92 #undef IEM_MC_REL_JMP_S16_AND_FINISH 93 94 /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as param. */ 95 #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED(a_i32, a_cbInstr) \ 96 return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), pVCpu->iem.s.enmEffOpSize) 97 #undef IEM_MC_REL_JMP_S32_AND_FINISH 98 99 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */ 100 # define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16(a_GCPtrEff, a_bRm, a_u16Disp) \ 101 (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp) 102 103 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. 
*/ 104 # define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \ 105 (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp) 106 107 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */ 108 # define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \ 109 (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp) 110 111 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */ 112 # define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \ 113 (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) 114 115 /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */ 116 # define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR6432(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \ 117 (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) 118 119 /** 120 * Calculates the effective address of a ModR/M memory operand, 16-bit 121 * addressing variant. 122 * 123 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16. 124 * 125 * @returns The effective address. 126 * @param pVCpu The cross context virtual CPU structure of the calling thread. 127 * @param bRm The ModRM byte. 128 * @param u16Disp The displacement byte/word, if any. 129 * RIP relative addressing. 130 * @param pGCPtrEff Where to return the effective address. 131 */ 132 static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr16(PVMCPUCC pVCpu, uint8_t bRm, uint16_t u16Disp) RT_NOEXCEPT 133 { 134 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: bRm=%#x\n", bRm)); 135 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 136 137 /* Handle the disp16 form with no registers first. 
*/ 138 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6) 139 { 140 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16Disp)); 141 return u16Disp; 142 } 143 144 /* Get the displacment. */ 145 /** @todo we can eliminate this step by making u16Disp have this value 146 * already! */ 147 uint16_t u16EffAddr; 148 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) 149 { 150 case 0: u16EffAddr = 0; break; 151 case 1: u16EffAddr = (int16_t)(int8_t)u16Disp; break; 152 case 2: u16EffAddr = u16Disp; break; 153 default: AssertFailedStmt(u16EffAddr = 0); 154 } 155 156 /* Add the base and index registers to the disp. */ 157 switch (bRm & X86_MODRM_RM_MASK) 158 { 159 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break; 160 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break; 161 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; break; 162 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; break; 163 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break; 164 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break; 165 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; break; 166 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break; 167 } 168 169 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16EffAddr)); 170 return u16EffAddr; 171 } 172 173 174 /** 175 * Calculates the effective address of a ModR/M memory operand, 32-bit 176 * addressing variant. 177 * 178 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32 and 179 * IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT. 180 * 181 * @returns The effective address. 182 * @param pVCpu The cross context virtual CPU structure of the calling thread. 183 * @param bRm The ModRM byte. 184 * @param bSib The SIB byte, if any. 185 * @param u32Disp The displacement byte/dword, if any. 
186 */ 187 static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr32(PVMCPUCC pVCpu, uint8_t bRm, uint8_t bSib, uint32_t u32Disp) RT_NOEXCEPT 188 { 189 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: bRm=%#x\n", bRm)); 190 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT); 191 192 /* Handle the disp32 form with no registers first. */ 193 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5) 194 { 195 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32Disp)); 196 return u32Disp; 197 } 198 199 /* Get the register (or SIB) value. */ 200 uint32_t u32EffAddr; 201 switch (bRm & X86_MODRM_RM_MASK) 202 { 203 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break; 204 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break; 205 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break; 206 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break; 207 case 4: /* SIB */ 208 { 209 /* Get the index and scale it. */ 210 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) 211 { 212 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break; 213 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break; 214 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break; 215 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break; 216 case 4: u32EffAddr = 0; /*none */ break; 217 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break; 218 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break; 219 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break; 220 } 221 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK; 222 223 /* add base */ 224 switch (bSib & X86_SIB_BASE_MASK) 225 { 226 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break; 227 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break; 228 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break; 229 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break; 230 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; break; 231 case 5: 232 if ((bRm & X86_MODRM_MOD_MASK) != 0) 233 u32EffAddr += pVCpu->cpum.GstCtx.ebp; 234 else 235 u32EffAddr += u32Disp; 236 break; 237 
case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break; 238 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break; 239 } 240 break; 241 } 242 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break; 243 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break; 244 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break; 245 } 246 247 /* Get and add the displacement. */ 248 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) 249 { 250 case 0: break; 251 case 1: u32EffAddr += (int8_t)u32Disp; break; 252 case 2: u32EffAddr += u32Disp; break; 253 default: AssertFailed(); 254 } 255 256 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32EffAddr)); 257 return u32EffAddr; 258 } 259 260 261 /** 262 * Calculates the effective address of a ModR/M memory operand. 263 * 264 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64. 265 * 266 * @returns The effective address. 267 * @param pVCpu The cross context virtual CPU structure of the calling thread. 268 * @param bRmEx The ModRM byte but with bit 3 set to REX.B and 269 * bit 4 to REX.X. The two bits are part of the 270 * REG sub-field, which isn't needed in this 271 * function. 272 * @param bSib The SIB byte, if any. 273 * @param u32Disp The displacement byte/word/dword, if any. 274 * @param cbInstr The size of the fully decoded instruction. Used 275 * for RIP relative addressing. 276 * @todo combine cbInstr and cbImm! 277 */ 278 static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr64(PVMCPUCC pVCpu, uint8_t bRmEx, uint8_t bSib, 279 uint32_t u32Disp, uint8_t cbInstr) RT_NOEXCEPT 280 { 281 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: bRmEx=%#x\n", bRmEx)); 282 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT); 283 284 uint64_t u64EffAddr; 285 286 /* Handle the rip+disp32 form with no registers first. 
*/ 287 if ((bRmEx & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5) 288 { 289 u64EffAddr = (int32_t)u32Disp; 290 u64EffAddr += pVCpu->cpum.GstCtx.rip + cbInstr; 291 } 292 else 293 { 294 /* Get the register (or SIB) value. */ 295 switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */ 296 { 297 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break; 298 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break; 299 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break; 300 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break; 301 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break; 302 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break; 303 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break; 304 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break; 305 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break; 306 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break; 307 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break; 308 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break; 309 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break; 310 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break; 311 /* SIB */ 312 case 4: 313 case 12: 314 { 315 /* Get the index and scale it. 
*/ 316 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | ((bRmEx & 0x10) >> 1)) /* bRmEx[bit 4] = REX.X */ 317 { 318 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break; 319 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break; 320 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break; 321 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break; 322 case 4: u64EffAddr = 0; /*none */ break; 323 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break; 324 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break; 325 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break; 326 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break; 327 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break; 328 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break; 329 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break; 330 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break; 331 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break; 332 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break; 333 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break; 334 } 335 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK; 336 337 /* add base */ 338 switch ((bSib & X86_SIB_BASE_MASK) | (bRmEx & 0x8)) /* bRmEx[bit 3] = REX.B */ 339 { 340 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break; 341 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break; 342 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break; 343 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break; 344 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; break; 345 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break; 346 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break; 347 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break; 348 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break; 349 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break; 350 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break; 351 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break; 352 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break; 353 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; 
break; 354 /* complicated encodings */ 355 case 5: 356 if ((bRmEx & X86_MODRM_MOD_MASK) != 0) 357 u64EffAddr += pVCpu->cpum.GstCtx.rbp; 358 else 359 u64EffAddr += (int32_t)u32Disp; 360 break; 361 case 13: 362 if ((bRmEx & X86_MODRM_MOD_MASK) != 0) 363 u64EffAddr += pVCpu->cpum.GstCtx.r13; 364 else 365 u64EffAddr += (int32_t)u32Disp; 366 break; 367 } 368 break; 369 } 370 } 371 372 /* Get and add the displacement. */ 373 switch ((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) 374 { 375 case 0: break; 376 case 1: u64EffAddr += (int8_t)u32Disp; break; 377 case 2: u64EffAddr += (int32_t)u32Disp; break; 378 default: AssertFailed(); 379 } 380 } 381 382 Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: EffAddr=%#010RGv\n", u64EffAddr)); 383 return u64EffAddr; 384 } 385 78 386 79 387 … … 82 390 */ 83 391 #include "IEMThreadedFunctions.cpp.h" 392
Note: See TracChangeset for help on using the changeset viewer.