Changeset 98969 in vbox

Timestamp: Mar 15, 2023 12:24:47 AM (21 months ago)
Location:  trunk/src/VBox/VMM
Files:     6 edited
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
r98918 → r98969

         *        lines of useless output. */
 #if defined(LOG_ENABLED)
-    if ((LogIs3Enabled() || LogIs4Enabled()) && (-(int8_t)IEM_GET_INSTR_LEN(pVCpu) == i8Imm))
+    if ((LogIs3Enabled() || LogIs4Enabled()) && -(int8_t)IEM_GET_INSTR_LEN(pVCpu) == i8Imm)
         switch (pVCpu->iem.s.enmEffAddrMode)
         {
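A throwaway illustration (not VBox code) of why this is a pure style change: `==` binds tighter than `&&` in C, so the dropped inner parentheses were redundant and both spellings parse identically.

    #include <stdint.h>

    /* Both the old and the new spelling of the condition evaluate the same. */
    static int BothSpellingsAgree(int fLogging, uint8_t cbInstr, int8_t i8Imm)
    {
        int const fOld = fLogging && (-(int8_t)cbInstr == i8Imm);
        int const fNew = fLogging && -(int8_t)cbInstr == i8Imm;
        return fOld == fNew; /* always 1 */
    }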
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py
r98916 → r98969

         return ''.join([oStmt.renderCode(cchIndent) for oStmt in aoStmts]);

+    @staticmethod
+    def findStmtByNames(aoStmts, dNames):
+        """
+        Returns the first statement with any of the given names from the list.
+
+        Note! The names are passed as a dictionary for quick lookup, the value
+              does not matter.
+        """
+        for oStmt in aoStmts:
+            if oStmt.sName in dNames:
+                return oStmt;
+            if isinstance(oStmt, McStmtCond):
+                oHit = McStmt.findStmtByNames(oStmt.aoIfBranch, dNames);
+                if not oHit:
+                    oHit = McStmt.findStmtByNames(oStmt.aoElseBranch, dNames);
+                if oHit:
+                    return oHit;
+        return None;
+
     def isCppStmt(self):
         """ Checks if this is a C++ statement. """
…
 class McStmtVar(McStmt):
-    """ IEM_MC_LOCAL_VAR* """
+    """ IEM_MC_LOCAL_VAR, IEM_MC_LOCAL_CONST """
     def __init__(self, sName, asParams, sType, sVarName, sConstValue = None):
         McStmt.__init__(self, sName, asParams);
…
 class McStmtArg(McStmtVar):
-    """ IEM_MC_ARG* """
+    """ IEM_MC_ARG, IEM_MC_ARG_CONST, IEM_MC_ARG_LOCAL_REF """
     def __init__(self, sName, asParams, sType, sVarName, iArg, sConstValue = None, sRef = None, sRefType = 'none'):
         McStmtVar.__init__(self, sName, asParams, sType, sVarName, sConstValue);
…
             self.raiseDecodeError(sRawCode, off, 'Mixed up else/#ifdef or something confusing us.');
-
         return aoStmts;
-

     def decode(self):
trunk/src/VBox/VMM/VMMAll/IEMAllThreadedPython.py
r98951 → r98969

         self.offNewParam = 1024    ##< The bit offset in iNewParam.

-class ThreadedFunction(object):
-    """
-    A threaded function.
-    """
-
-    def __init__(self, oMcBlock):
-        self.oMcBlock    = oMcBlock  # type: IEMAllInstructionsPython.McBlock
-        ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
-        self.dVariables  = {}        # type: dict(str,McStmtVar)
-        ##
-        self.aoParamRefs = []        # type: list(ThreadedParamRef)
-        self.dParamRefs  = {}        # type: dict(str,list(ThreadedParamRef))
-        self.cMinParams  = 0;        ##< Minimum number of parameters to the threaded function.
+
+class ThreadedFunctionVariation(object):
+    """ Threaded function variation. """
+
+    ## @name Variations.
+    ## These variations will match translation block selection/distinctions as well.
+    ## @note Effective operand size is generally handled in the decoder, at present
+    ##       we only do variations on addressing and memory accessing.
+    ## @{
+    ksVariation_Default    = '';            ##< No variations.
+    ksVariation_Addr16     = '_Addr16';     ##< 16-bit addressing mode.
+    ksVariation_Addr32     = '_Addr32';     ##< 32-bit addressing mode.
+    ksVariation_Addr32Flat = '_Addr32Flat'; ##< 32-bit addressing mode with CS, DS, ES and SS flat and 4GB wide.
+    ksVariation_Addr64     = '_Addr64';     ##< 64-bit addressing mode.
+    ksVariation_Addr64_32  = '_Addr6432';   ##< 32-bit addressing in 64-bit mode.
+    kasVariations_EffAddr  = (
+        ksVariation_Addr16, ksVariation_Addr32, ksVariation_Addr32Flat, ksVariation_Addr64, ksVariation_Addr64_32
+    );
+    ## @}
+
+    def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
+        self.oParent     = oThreadedFunction  # type: ThreadedFunction
+        ##< ksVariation_Xxxx.
+        self.sVariation  = sVariation
+
+        ## Threaded function parameter references.
+        self.aoParamRefs = []  # type: list(ThreadedParamRef)
+        ## Unique parameter references.
+        self.dParamRefs  = {}  # type: dict(str,list(ThreadedParamRef))
+        ## Minimum number of parameters to the threaded function.
+        self.cMinParams  = 0;

         ## List/tree of statements for the threaded function.
         self.aoStmtsForThreadedFunction = []  # type list(McStmt)

-    @staticmethod
-    def dummyInstance():
-        """ Gets a dummy instance. """
-        return ThreadedFunction(iai.McBlock('null', 999999999, 999999999, 'nil', 999999999));
+    def getIndexName(self):
+        sName = self.oParent.oMcBlock.sFunction;
+        if sName.startswith('iemOp_'):
+            sName = sName[len('iemOp_'):];
+        if self.oParent.oMcBlock.iInFunction == 0:
+            return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
+        return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
+
+    def getFunctionName(self):
+        sName = self.oParent.oMcBlock.sFunction;
+        if sName.startswith('iemOp_'):
+            sName = sName[len('iemOp_'):];
+        if self.oParent.oMcBlock.iInFunction == 0:
+            return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
+        return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
+
+    #
+    # Analysis and code morphing.
+    #

     def raiseProblem(self, sMessage):
         """ Raises a problem. """
-        raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
-
-    def getIndexName(self):
-        sName = self.oMcBlock.sFunction;
-        if sName.startswith('iemOp_'):
-            sName = sName[len('iemOp_'):];
-        if self.oMcBlock.iInFunction == 0:
-            return 'kIemThreadedFunc_%s' % ( sName, );
-        return 'kIemThreadedFunc_%s_%s' % ( sName, self.oMcBlock.iInFunction, );
-
-    def getFunctionName(self):
-        sName = self.oMcBlock.sFunction;
-        if sName.startswith('iemOp_'):
-            sName = sName[len('iemOp_'):];
-        if self.oMcBlock.iInFunction == 0:
-            return 'iemThreadedFunc_%s' % ( sName, );
-        return 'iemThreadedFunc_%s_%s' % ( sName, self.oMcBlock.iInFunction, );
+        self.oParent.raiseProblem(sMessage);

     def analyzeReferenceToType(self, sRef):
…
             if sRef.startswith('i64'):
                 return 'int64_t';
-            if sRef in ('iReg', 'iSegReg', 'iSrcReg', 'iDstReg'):
+            if sRef in ('iReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg'):
                 return 'uint8_t';
         elif ch0 == 'p':
…
         elif ch0 == 'G' and sRef.startswith('GCPtr'):
             return 'uint64_t';
+        elif ch0 == 'e':
+            if sRef == 'enmEffOpSize':
+                return 'IEMMODE';
         elif sRef == 'cShift': ## @todo risky
             return 'uint8_t';
+
         self.raiseProblem('Unknown reference: %s' % (sRef,));
         return None; # Shut up pylint 2.16.2.
…
                              + oCurRef.sNewName \
                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

+            # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
+            if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
+                assert self.sVariation != self.ksVariation_Default;
+                oNewStmt.sName = 'IEM_MC_CALC_RM_EFF_ADDR_THREADED' + self.sVariation.upper();
+                assert len(oNewStmt.asParams) == 3;
+                if self.sVariation == self.ksVariation_Addr16:
+                    oNewStmt.asParams = [
+                        oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
+                    ];
+                elif self.sVariation in (self.ksVariation_Addr32, self.ksVariation_Addr32Flat):
+                    oNewStmt.asParams = [
+                        oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['bSib'][0].sNewName,
+                        self.dParamRefs['u32Disp'][0].sNewName,
+                    ];
+                else:
+                    oNewStmt.asParams = [
+                        oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, self.dParamRefs['bSib'][0].sNewName,
+                        self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
+                    ];
+            # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED ...
+            elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
+                                    'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):
+                oNewStmt.sName += '_THREADED';
+                oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
+            # ... and IEM_MC_CALL_CIMPL_[0-5] into *_THREADED ...
+            elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
+                oNewStmt.sName += '_THREADED';
+                oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

             # Process branches of conditionals recursively.
…
         dBySize = {}    # type: dict(str,str)
         for sStdRef, aoRefs in self.dParamRefs.items():
-            cBits = g_kdTypeInfo[aoRefs[0].sType][0];
-            assert(cBits <= 64);
+            if aoRefs[0].sType[0] != 'P':
+                cBits = g_kdTypeInfo[aoRefs[0].sType][0];
+                assert(cBits <= 64);
+            else:
+                cBits = 64;
+
             if cBits not in dBySize:
                 dBySize[cBits] = [sStdRef,]
…
         # Currently there are a few that requires 4 parameters, list these so we can figure out why:
-        if self.cMinParams >= 3:
+        if self.cMinParams >= 4:
             print('debug: cMinParams=%s cRawParams=%s - %s:%d'
-                  % (self.cMinParams, len(self.dParamRefs), self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine,));
+                  % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));

         return True;
…
         for oStmt in aoStmts:
             # Some statements we can skip alltogether.
-            if isinstance(oStmt, (iai.McStmtVar, iai.McCppPreProc)):
+            if isinstance(oStmt, iai.McCppPreProc):
                 continue;
             if oStmt.isCppStmt() and oStmt.fDecode:
                 continue;

+            if isinstance(oStmt, iai.McStmtVar):
+                if oStmt.sConstValue is None:
+                    continue;
+                aiSkipParams = { 0: True, 1: True, 3: True };
+            else:
+                aiSkipParams = {};
+
             # Several statements have implicit parameters.
             if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                                'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
-                               'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5' ):
+                               'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5', ):
                 self.aoParamRefs.append(ThreadedParamRef('cbInstr', 'uint4_t', oStmt));

-            # We can skip the rest for statements w/o parameters.
-            if not oStmt.asParams:
-                continue;
+            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
+                ## @todo figure out how to do this in the input part...
+                if self.sVariation == self.ksVariation_Addr16:
+                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
+                    self.aoParamRefs.append(ThreadedParamRef('u16Disp', 'uint16_t', oStmt));
+                elif self.sVariation in (self.ksVariation_Addr32, self.ksVariation_Addr32Flat):
+                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
+                    self.aoParamRefs.append(ThreadedParamRef('bSib', 'uint8_t', oStmt));
+                    self.aoParamRefs.append(ThreadedParamRef('u32Disp', 'uint32_t', oStmt));
+                else:
+                    assert self.sVariation in (self.ksVariation_Addr64, self.ksVariation_Addr64_32);
+                    self.aoParamRefs.append(ThreadedParamRef('bRmEx', 'uint8_t', oStmt));
+                    self.aoParamRefs.append(ThreadedParamRef('bSib', 'uint8_t', oStmt));
+                    self.aoParamRefs.append(ThreadedParamRef('u32Disp', 'uint32_t', oStmt));
+                    self.aoParamRefs.append(ThreadedParamRef('cbInstr', 'uint4_t', oStmt));

             # Inspect the target of calls to see if we need to pass down a
             # function pointer or function table pointer for it to work.
-            aiSkipParams = {};
             if isinstance(oStmt, iai.McStmtCall):
                 if oStmt.sFn[0] == 'p':
…
             # Check all the parameters for bogus references.
             for iParam, sParam in enumerate(oStmt.asParams):
-                if iParam not in aiSkipParams and sParam not in self.dVariables:
+                if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
                     # The parameter may contain a C expression, so we have to try
                     # extract the relevant bits, i.e. variables and fields while
…
                     # We can skip known variables.
-                    elif sRef in self.dVariables:
+                    elif sRef in self.oParent.dVariables:
                         pass;
…
                     elif (   sRef.startswith('IEM_OP_PRF_')
                           or sRef.startswith('IEM_ACCESS_')
+                          or sRef.startswith('IEMINT_')
                           or sRef.startswith('X86_GREG_')
                           or sRef.startswith('X86_SREG_')
…
                           or sRef.startswith('X86_FSW_')
                           or sRef.startswith('X86_FCW_')
+                          or sRef.startswith('X86_XCPT_')
+                          or sRef.startswith('IEMMODE_')
                           or sRef.startswith('g_')
                           or sRef in ( 'int8_t', 'int16_t', 'int32_t',
…
                                        'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
                                        'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
-                                       'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT' ) ):
+                                       'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
+                                       'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
+                                       'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
+                                       'NIL_RTGCPTR' ) ):
                         pass;
…
         return True;

+    def analyzeVariation(self, aoStmts):
+        """
+        2nd part of the analysis, done on each variation.
+
+        The variations may differ in parameter requirements and will end up with
+        slightly different MC sequences.  Thus this is done on each individually.
+
+        Returns dummy True - raises exception on trouble.
+        """
+        # Now scan the code for variables and field references that needs to
+        # be passed to the threaded function because they are related to the
+        # instruction decoding.
+        self.analyzeFindThreadedParamRefs(aoStmts);
+        self.analyzeConsolidateThreadedParamRefs();
+
+        # Morph the statement stream for the block into what we'll be using in the threaded function.
+        (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
+        if iParamRef != len(self.aoParamRefs):
+            raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
+
+        return True;
+
+
+class ThreadedFunction(object):
+    """
+    A threaded function.
+    """
+
+    def __init__(self, oMcBlock):
+        self.oMcBlock     = oMcBlock  # type: IEMAllInstructionsPython.McBlock
+        ## Variations for this block. There is at least one.
+        self.aoVariations = []        # type: list(ThreadedFunctionVariation)
+        ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
+        self.dVariables   = {}        # type: dict(str,McStmtVar)
+
+    @staticmethod
+    def dummyInstance():
+        """ Gets a dummy instance. """
+        return ThreadedFunction(iai.McBlock('null', 999999999, 999999999, 'nil', 999999999));
+
+    def raiseProblem(self, sMessage):
+        """ Raises a problem. """
+        raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));

     def analyzeFindVariablesAndCallArgs(self, aoStmts):
         """ Scans the statements for MC variables and call arguments. """
…
     def analyze(self):
         """
         Analyzes the code, identifying the number of parameters it requires and such.
-        May raise exceptions if we cannot grok the code.
+
+        Returns dummy True - raises exception on trouble.
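The consolidation pass packs the sub-64-bit decoder values collected above into the handful of 64-bit parameters the threaded function receives, and the generated body unpacks them with the shift-and-mask pattern shown further down (`(uint8_t)((uParam0 >> 8) & 0xff)` style). A minimal C sketch of that convention, assuming one 32-bit-addressing parameter set lands in `uParam0` at illustrative offsets; the real offsets are whatever analyzeConsolidateThreadedParamRefs chooses:

    #include <stdint.h>

    /* Pack several small decoded values into one 64-bit parameter, each at a
       fixed bit offset (sketch; offsets are illustrative). */
    static uint64_t PackAddr32Params(uint8_t bRm, uint8_t bSib, uint32_t u32Disp)
    {
        return (uint64_t)bRm                  /* bits  0..7  */
             | ((uint64_t)bSib    <<  8)      /* bits  8..15 */
             | ((uint64_t)u32Disp << 16);     /* bits 16..47 */
    }

    /* The generated threaded function recovers them the same way the script
       emits it: shift to the bit offset, mask to the type width, cast. */
    static void UnpackAddr32Params(uint64_t uParam0)
    {
        uint8_t  const bRm     = (uint8_t)(uParam0 & 0xff);
        uint8_t  const bSib    = (uint8_t)((uParam0 >> 8) & 0xff);
        uint32_t const u32Disp = (uint32_t)((uParam0 >> 16) & 0xffffffff);
        (void)bRm; (void)bSib; (void)u32Disp;
    }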
         """
…
         self.analyzeFindVariablesAndCallArgs(aoStmts);

-        # Now scan the code for variables and field references that needs to
-        # be passed to the threaded function because they are related to the
-        # instruction decoding.
-        self.analyzeFindThreadedParamRefs(aoStmts);
-        self.analyzeConsolidateThreadedParamRefs();
-
-        # Morph the statement stream for the block into what we'll be using in the threaded function.
-        (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
-        if iParamRef != len(self.aoParamRefs):
-            raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
+        # Create variations if needed.
+        if iai.McStmt.findStmtByNames(aoStmts, {'IEM_MC_CALC_RM_EFF_ADDR' : True,}):
+            self.aoVariations = [ThreadedFunctionVariation(self, sVar)
+                                 for sVar in ThreadedFunctionVariation.kasVariations_EffAddr];
+        else:
+            self.aoVariations = [ThreadedFunctionVariation(self),];
+
+        # Continue the analysis on each variation.
+        for oVariation in self.aoVariations:
+            oVariation.analyzeVariation(aoStmts);

         return True;
…
         self.aoParsers = iai.parseFiles(self.oOptions.asInFiles);

-        # Wrap MC blocks into threaded functions and analyze these.
+        # Create threaded functions for the MC blocks.
         self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
+
+        # Analyze the threaded functions.
         dRawParamCounts = {};
         dMinParamCounts = {};
         for oThreadedFunction in self.aoThreadedFuncs:
             oThreadedFunction.analyze();
-            dRawParamCounts[len(oThreadedFunction.dParamRefs)] = dRawParamCounts.get(len(oThreadedFunction.dParamRefs), 0) + 1;
-            dMinParamCounts[oThreadedFunction.cMinParams]      = dMinParamCounts.get(oThreadedFunction.cMinParams, 0) + 1;
+            for oVariation in oThreadedFunction.aoVariations:
+                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
+                dMinParamCounts[oVariation.cMinParams]      = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
         print('debug: param count distribution, raw and optimized:', file = sys.stderr);
         for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
…
             '    kIemThreadedFunc_Invalid = 0,',
         ];
-        for oFunc in self.aoThreadedFuncs:
-            asLines.append('    ' + oFunc.getIndexName() + ',');
+        for oThreadedFunction in self.aoThreadedFuncs:
+            for oVariation in oThreadedFunction.aoVariations:
+                asLines.append('    ' + oVariation.getIndexName() + ',');
         asLines += [
             '    kIemThreadedFunc_End',
…
         for oThreadedFunction in self.aoThreadedFuncs:
             oMcBlock = oThreadedFunction.oMcBlock;

The old single-function emission body is deleted here and re-emitted one indent
level deeper inside a new per-variation loop; apart from that re-scoping and the
renaming of oThreadedFunction to oVariation, the only substantive change is that
pointer-typed parameters are now treated as 64 bits wide:

+            for oVariation in oThreadedFunction.aoVariations:
+                # Function header
+                oOut.write(  '\n'
+                           + '\n'
+                           + '/**\n'
+                           + ' * %s at line %s offset %s in %s%s\n'
+                              % (oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
+                                 os.path.split(oMcBlock.sSrcFile)[1],
+                                 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
+                           + ' */\n'
+                           + 'static IEM_DECL_IMPL_DEF(VBOXSTRICTRC, ' + oVariation.getFunctionName() + ',\n'
+                           + '                         ' + sParamList
+                           + '{\n');
+
+                aasVars = [];
+                for aoRefs in oVariation.dParamRefs.values():
+                    oRef  = aoRefs[0];
+                    if oRef.sType[0] != 'P':
+                        cBits = g_kdTypeInfo[oRef.sType][0];
+                        sType = g_kdTypeInfo[oRef.sType][2];
+                    else:
+                        cBits = 64;
+                        sType = oRef.sType;
+
+                    sTypeDecl = sType + ' const';
+
+                    if cBits == 64:
+                        assert oRef.offNewParam == 0;
+                        if sType == 'uint64_t':
+                            sUnpack = 'uParam%s;' % (oRef.iNewParam,);
+                        else:
+                            sUnpack = '(%s)uParam%s;' % (sType, oRef.iNewParam,);
+                    elif oRef.offNewParam == 0:
+                        sUnpack = '(%s)(uParam%s & %s);' % (sType, oRef.iNewParam, self.ksBitsToIntMask[cBits]);
+                    else:
+                        sUnpack = '(%s)((uParam%s >> %s) & %s);' \
+                                % (sType, oRef.iNewParam, oRef.offNewParam, self.ksBitsToIntMask[cBits]);
+
+                    sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
+
+                    aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
+                                     sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
+
+                acchVars = [0, 0, 0, 0, 0];
+                for asVar in aasVars:
+                    for iCol, sStr in enumerate(asVar):
+                        acchVars[iCol] = max(acchVars[iCol], len(sStr));
+                sFmt = '    %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
+                for asVar in sorted(aasVars):
+                    oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
+
+                # RT_NOREF for unused parameters.
+                if oVariation.cMinParams < g_kcThreadedParams:
+                    oOut.write('    RT_NOREF('
+                               + ', '.join(['uParam%u' % (i,) for i in range(oVariation.cMinParams, g_kcThreadedParams)])
+                               + ');\n');
+
+                # Now for the actual statements.
+                oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
+
+                oOut.write('}\n');
…
                      + '{\n'
                      + '    /*Invalid*/ NULL, \n');
-        for iThreadedFunction, oThreadedFunction in enumerate(self.aoThreadedFuncs):
-            oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction + 1, oThreadedFunction.getFunctionName(),));
+        iThreadedFunction = 0;
+        for oThreadedFunction in self.aoThreadedFuncs:
+            for oVariation in oThreadedFunction.aoVariations:
+                iThreadedFunction += 1;
+                oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, oVariation.getFunctionName(),));
         oOut.write('};\n');
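Putting the generator output together, here is a hedged sketch of the emitted enum and dispatch table for a made-up one-block instruction `xyz_Ev_Gv` that contains IEM_MC_CALC_RM_EFF_ADDR and therefore gets all five addressing variations. The `PFNIEMTHREADEDFUNC` typedef and the stub signature are assumptions for the sketch; only the naming scheme and table shape follow the generator code above.

    #include <stdint.h>

    /* Assumed function-pointer shape; the real functions return VBOXSTRICTRC
       and take pVCpu plus the packed uParam0..uParam2 (see sParamList). */
    typedef int (*PFNIEMTHREADEDFUNC)(void *pVCpu, uint64_t uParam0,
                                      uint64_t uParam1, uint64_t uParam2);

    /* Generated enum: one entry per variation, kIemThreadedFunc_Invalid = 0. */
    typedef enum IEMTHREADEDFUNCS
    {
        kIemThreadedFunc_Invalid = 0,
        kIemThreadedFunc_xyz_Ev_Gv_Addr16,
        kIemThreadedFunc_xyz_Ev_Gv_Addr32,
        kIemThreadedFunc_xyz_Ev_Gv_Addr32Flat,
        kIemThreadedFunc_xyz_Ev_Gv_Addr64,
        kIemThreadedFunc_xyz_Ev_Gv_Addr6432,
        kIemThreadedFunc_End
    } IEMTHREADEDFUNCS;

    /* Stand-in bodies so the sketch is self-contained: */
    #define DEMO_STUB(a_Name) \
        static int a_Name(void *pVCpu, uint64_t u0, uint64_t u1, uint64_t u2) \
        { (void)pVCpu; (void)u0; (void)u1; (void)u2; return 0; }
    DEMO_STUB(iemThreadedFunc_xyz_Ev_Gv_Addr16)
    DEMO_STUB(iemThreadedFunc_xyz_Ev_Gv_Addr32)
    DEMO_STUB(iemThreadedFunc_xyz_Ev_Gv_Addr32Flat)
    DEMO_STUB(iemThreadedFunc_xyz_Ev_Gv_Addr64)
    DEMO_STUB(iemThreadedFunc_xyz_Ev_Gv_Addr6432)

    /* Generated dispatch table, indexed by the enum; entry 0 stays NULL. */
    static const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =
    {
        /*Invalid*/ 0,
        /*   1*/    iemThreadedFunc_xyz_Ev_Gv_Addr16,
        /*   2*/    iemThreadedFunc_xyz_Ev_Gv_Addr32,
        /*   3*/    iemThreadedFunc_xyz_Ev_Gv_Addr32Flat,
        /*   4*/    iemThreadedFunc_xyz_Ev_Gv_Addr64,
        /*   5*/    iemThreadedFunc_xyz_Ev_Gv_Addr6432,
    };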
trunk/src/VBox/VMM/VMMAll/IEMThreadedFunctions.cpp
r98961 → r98969

 #endif
 #define VMCPU_INCL_CPUM_GST_CTX
+#define IEM_WITH_OPAQUE_DECODER_STATE
 #include <VBox/vmm/iem.h>
 #include <VBox/vmm/cpum.h>
…
 *********************************************************************************************************************************/

+/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param. */
+#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED(a_cbInstr) \
+    return iemRegAddToRipAndFinishingClearingRF(pVCpu, a_cbInstr)
+#undef  IEM_MC_ADVANCE_RIP_AND_FINISH
+
+/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as param. */
+#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED(a_i8, a_cbInstr) \
+    return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), pVCpu->iem.s.enmEffOpSize)
+#undef  IEM_MC_REL_JMP_S8_AND_FINISH
+
+/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as param. */
+#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED(a_i16, a_cbInstr) \
+    return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))
+#undef  IEM_MC_REL_JMP_S16_AND_FINISH
+
+/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as param. */
+#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED(a_i32, a_cbInstr) \
+    return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), pVCpu->iem.s.enmEffOpSize)
+#undef  IEM_MC_REL_JMP_S32_AND_FINISH
+
+/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16(a_GCPtrEff, a_bRm, a_u16Disp) \
+    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp)
+
+/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
+    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)
+
+/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
+    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)
+
+/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
+    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)
+
+/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
+#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR6432(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
+    (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)
+
+/**
+ * Calculates the effective address of a ModR/M memory operand, 16-bit
+ * addressing variant.
+ *
+ * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16.
+ *
+ * @returns The effective address.
+ * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
+ * @param   bRm      The ModRM byte.
+ * @param   u16Disp  The displacement byte/word, if any.
+ */
The three new helpers (all added code):

 static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr16(PVMCPUCC pVCpu, uint8_t bRm, uint16_t u16Disp) RT_NOEXCEPT
 {
     Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: bRm=%#x\n", bRm));
     Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);

     /* Handle the disp16 form with no registers first. */
     if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
     {
         Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16Disp));
         return u16Disp;
     }

     /* Get the displacement. */
     /** @todo we can eliminate this step by making u16Disp have this value
      *        already! */
     uint16_t u16EffAddr;
     switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
     {
         case 0:  u16EffAddr = 0;                        break;
         case 1:  u16EffAddr = (int16_t)(int8_t)u16Disp; break;
         case 2:  u16EffAddr = u16Disp;                  break;
         default: AssertFailedStmt(u16EffAddr = 0);
     }

     /* Add the base and index registers to the disp. */
     switch (bRm & X86_MODRM_RM_MASK)
     {
         case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
         case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
         case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; break;
         case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; break;
         case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
         case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
         case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; break;
         case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
     }

     Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16EffAddr));
     return u16EffAddr;
 }


 /**
  * Calculates the effective address of a ModR/M memory operand, 32-bit
  * addressing variant.
  *
  * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32 and
  * IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT.
  *
  * @returns The effective address.
  * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
  * @param   bRm      The ModRM byte.
  * @param   bSib     The SIB byte, if any.
  * @param   u32Disp  The displacement byte/dword, if any.
  */
 static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr32(PVMCPUCC pVCpu, uint8_t bRm, uint8_t bSib, uint32_t u32Disp) RT_NOEXCEPT
 {
     Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: bRm=%#x\n", bRm));
     Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);

     /* Handle the disp32 form with no registers first. */
     if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
     {
         Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32Disp));
         return u32Disp;
     }

     /* Get the register (or SIB) value. */
     uint32_t u32EffAddr;
     switch (bRm & X86_MODRM_RM_MASK)
     {
         case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
         case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
         case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
         case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
         case 4: /* SIB */
         {
             /* Get the index and scale it. */
             switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
             {
                 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                 case 4: u32EffAddr = 0; /*none */ break;
                 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
             }
             u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

             /* add base */
             switch (bSib & X86_SIB_BASE_MASK)
             {
                 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; break;
                 case 5:
                     if ((bRm & X86_MODRM_MOD_MASK) != 0)
                         u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                     else
                         u32EffAddr += u32Disp;
                     break;
                 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
             }
             break;
         }
         case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
         case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
         case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
     }

     /* Get and add the displacement. */
     switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
     {
         case 0: break;
         case 1: u32EffAddr += (int8_t)u32Disp; break;
         case 2: u32EffAddr += u32Disp; break;
         default: AssertFailed();
     }

     Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32EffAddr));
     return u32EffAddr;
 }


 /**
  * Calculates the effective address of a ModR/M memory operand.
  *
  * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64.
  *
  * @returns The effective address.
  * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
  * @param   bRmEx    The ModRM byte but with bit 3 set to REX.B and
  *                   bit 4 to REX.X.  The two bits are part of the
  *                   REG sub-field, which isn't needed in this
  *                   function.
  * @param   bSib     The SIB byte, if any.
  * @param   u32Disp  The displacement byte/word/dword, if any.
  * @param   cbInstr  The size of the fully decoded instruction.  Used
  *                   for RIP relative addressing.
  * @todo combine cbInstr and cbImm!
  */
 static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr64(PVMCPUCC pVCpu, uint8_t bRmEx, uint8_t bSib,
                                                    uint32_t u32Disp, uint8_t cbInstr) RT_NOEXCEPT
 {
     Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: bRmEx=%#x\n", bRmEx));
     Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);

     uint64_t u64EffAddr;

     /* Handle the rip+disp32 form with no registers first. */
     if ((bRmEx & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
     {
         u64EffAddr = (int32_t)u32Disp;
         u64EffAddr += pVCpu->cpum.GstCtx.rip + cbInstr;
     }
     else
     {
         /* Get the register (or SIB) value. */
         switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */
         {
             case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
             case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
             case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
             case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
             case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
             case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
             case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
             case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
             case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
             case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
             case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
             case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
             case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
             case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
             /* SIB */
             case 4:
             case 12:
             {
                 /* Get the index and scale it. */
                 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | ((bRmEx & 0x10) >> 1)) /* bRmEx[bit 4] = REX.X */
                 {
                     case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                     case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                     case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                     case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                     case  4: u64EffAddr = 0; /*none */ break;
                     case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                     case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                     case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                     case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                     case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                     case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                     case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                     case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                     case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                     case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                     case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                 }
                 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                 /* add base */
                 switch ((bSib & X86_SIB_BASE_MASK) | (bRmEx & 0x8)) /* bRmEx[bit 3] = REX.B */
                 {
                     case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                     case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                     case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                     case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                     case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; break;
                     case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                     case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                     case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                     case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                     case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                     case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                     case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                     case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                     case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                     /* complicated encodings */
                     case 5:
                         if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                             u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                         else
                             u64EffAddr += (int32_t)u32Disp;
                         break;
                     case 13:
                         if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                             u64EffAddr += pVCpu->cpum.GstCtx.r13;
                         else
                             u64EffAddr += (int32_t)u32Disp;
                         break;
                 }
                 break;
             }
         }

         /* Get and add the displacement. */
         switch ((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
         {
             case 0: break;
             case 1: u64EffAddr += (int8_t)u32Disp; break;
             case 2: u64EffAddr += (int32_t)u32Disp; break;
             default: AssertFailed();
         }
     }

     Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: EffAddr=%#010RGv\n", u64EffAddr));
     return u64EffAddr;
 }

…
  */
 #include "IEMThreadedFunctions.cpp.h"
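The `bRmEx` encoding documented above packs everything the 64-bit helper needs into one byte: REX.B goes into bit 3 and REX.X into bit 4, overwriting part of the REG sub-field, which this function never reads. A hypothetical sketch of how a decoder might build it (the real decoder-side code is not part of this changeset; the mask value is quoted from x86.h for self-containment):

    #include <stdint.h>

    #define DEMO_MODRM_MOD_RM_MASK  0xc7 /* mod (bits 6-7) + r/m (bits 0-2) */

    /* Hypothetical: keep mod and r/m, repurpose reg bits 3 and 4 (unused by
       the effective-address helper) for REX.B and REX.X respectively. */
    static uint8_t CalcBRmEx(uint8_t bRm, int fRexB, int fRexX)
    {
        return (uint8_t)(  (bRm & DEMO_MODRM_MOD_RM_MASK)
                         | (fRexB ? 0x08 : 0)   /* bit 3 = REX.B, extends r/m/base */
                         | (fRexX ? 0x10 : 0)); /* bit 4 = REX.X, extends SIB index */
    }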
trunk/src/VBox/VMM/include/IEMInline.h
r98150 → r98969

Nearly all of this hunk is mechanical: the inline functions and macros that read
or write the IEM decoder state are now bracketed by

+#ifndef IEM_WITH_OPAQUE_DECODER_STATE
 ...
+#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */

and every preprocessor conditional nested inside the new guards gains one level
of indentation (#ifdef → # ifdef, # ifdef → #  ifdef, #else → # else, and so
on) with no functional change.  Guards are added around six spans:

 - the big block from the execution-state initializer (including its
   VBOX_INCLUDED_vmm_dbgf_h / iemInitPendingBreakpointsSlow section), the
   VBOX_WITH_NESTED_HWVIRT_SVM/VMX re-init helper, the uninit helper,
   IEMEXEC_ASSERT_INSTR_LEN_RETURN, and the complete family of opcode
   fetchers with their wrapper macros — iemOpcodeGetFirstU8Jmp /
   IEM_OPCODE_GET_FIRST_U8, iemOpcodeGetNextU8/U16/U32/U64 plus the signed
   and zero/sign-extending variants, and iemOpcodeGetNextRmJmp /
   IEM_OPCODE_GET_NEXT_RM, in both the IEM_WITH_SETJMP and the
   status-code flavours — ending right after IEM_OPCODE_GET_NEXT_U64;
 - the opcode-related helper section that follows it (new lines 1351-1425);
 - iemGRegRefU8();
 - the 8-bit general purpose register fetcher built on iemGRegRefU8();
 - the RIP/EIP/IP updater that wraps iemRegAddToRipAndFinishingClearingRF()
   with IEM_GET_INSTR_LEN(pVCpu);
 - the worker that updates the FOP, FPU.CS and FPUIP registers at the end
   of the hunk.
trunk/src/VBox/VMM/include/IEMInternal.h
r98921 → r98969

     /** @name Decoder state.
      * @{ */
-#ifdef IEM_WITH_CODE_TLB
+#ifndef IEM_WITH_OPAQUE_DECODER_STATE
+# ifdef IEM_WITH_CODE_TLB
     /** The offset of the next instruction byte. */
     uint32_t                offInstrNextByte;               /* 0x08 */
…
      */
     uint8_t const          *pbInstrBuf;                     /* 0x10 */
-# if ARCH_BITS == 32
+#  if ARCH_BITS == 32
     uint32_t                uInstrBufHigh;  /** The high dword of the host context pbInstrBuf member. */
-# endif
+#  endif
     /** The program counter corresponding to pbInstrBuf.
      * This is set to a non-canonical address when we need to invalidate it. */
…
     /** The offset of the ModR/M byte relative to the start of the instruction. */
     uint8_t                 offModRm;                       /* 0x2c */
-#else  /* !IEM_WITH_CODE_TLB */
+# else /* !IEM_WITH_CODE_TLB */
     /** The size of what has currently been fetched into abOpcode. */
     uint8_t                 cbOpcode;                       /* 0x08 */
…
     uint8_t                 uRexIndex;                      /* 0x12 */

-#endif /* !IEM_WITH_CODE_TLB */
+# endif /* !IEM_WITH_CODE_TLB */

     /** The effective operand mode. */
…
     /** The FPU opcode (FOP). */
     uint16_t                uFpuOpcode;                     /* 0x36, 0x1c */
-#ifndef IEM_WITH_CODE_TLB
+# ifndef IEM_WITH_CODE_TLB
     /** Explicit alignment padding. */
     uint8_t                 abAlignment2b[2];               /* 0x1e */
-#endif
+# endif

     /** The opcode bytes. */
     uint8_t                 abOpcode[15];                   /* 0x48, 0x20 */
     /** Explicit alignment padding. */
-#ifdef IEM_WITH_CODE_TLB
+# ifdef IEM_WITH_CODE_TLB
     uint8_t                 abAlignment2c[0x48 - 0x47];     /* 0x37 */
-#else
+# else
     uint8_t                 abAlignment2c[0x48 - 0x2f];     /* 0x2f */
-#endif
+# endif
+#else  /* IEM_WITH_OPAQUE_DECODER_STATE */
+    uint8_t                 abOpaqueDecoder[0x48 - 0x8];
+#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
     /** @} */
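The `abOpaqueDecoder[0x48 - 0x8]` blob keeps the structure size and the offsets of everything after the decoder state identical whether or not a translation unit can see the individual fields, which occupy 0x08 through 0x47. A minimal standalone sketch of the same pattern, with made-up field names and a compile-time check that both views agree (C11 for static_assert):

    #include <stdint.h>
    #include <assert.h>

    /* Consumers that define DEMO_STATE_OPAQUE before including the header see
       only a correctly-sized byte blob instead of the decoder fields. */
    typedef struct DEMOSTATE
    {
        uint64_t uAlwaysVisible;                /* 0x00 */
    #ifndef DEMO_STATE_OPAQUE
        uint32_t offNextByte;                   /* 0x08 */
        uint32_t cbLeft;                        /* 0x0c */
        uint8_t  abOpcode[15];                  /* 0x10 */
        uint8_t  abPadding[0x28 - 0x1f];        /* 0x1f */
    #else
        uint8_t  abOpaque[0x28 - 0x08];         /* 0x08 */
    #endif
        uint64_t uAlsoAlwaysVisible;            /* 0x28 */
    } DEMOSTATE;

    /* Both compilation views must agree on the layout for this to be safe. */
    static_assert(sizeof(DEMOSTATE) == 0x30, "layout drifted");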