Changeset 101722 in vbox
- Timestamp:
- Nov 3, 2023 12:36:45 AM (18 months ago)
- svn:sync-xref-src-repo-rev:
- 159818
- Location:
- trunk/src/VBox/VMM
- Files:
-
- 5 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
r101694 r101722 1973 1973 'IEM_CIMPL_F_XCPT': ('IEM_CIMPL_F_BRANCH_INDIRECT', 'IEM_CIMPL_F_BRANCH_FAR', 1974 1974 'IEM_CIMPL_F_MODE', 'IEM_CIMPL_F_RFLAGS', 'IEM_CIMPL_F_VMEXIT', ), 1975 'IEM_CIMPL_F_CALLS_CIMPL': (), 1976 'IEM_CIMPL_F_CALLS_AIMPL': (), 1977 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': (), 1975 1978 }; 1976 1979 class McBlock(object): … … 1978 1981 Microcode block (IEM_MC_BEGIN ... IEM_MC_END, IEM_MC_DEFER_TO_CIMPL_x_RET). 1979 1982 """ 1983 1984 ## @name Macro expansion types. 1985 ## @{ 1986 kMacroExp_None = 0; 1987 kMacroExp_Entire = 1; ##< Entire block (iBeginLine == iEndLine), original line may contain multiple blocks. 1988 kMacroExp_Partial = 2; ##< Partial/mixed (cmpxchg16b), safe to assume single block. 1989 ## @} 1980 1990 1981 1991 def __init__(self, sSrcFile, iBeginLine, offBeginLine, oFunction, iInFunction, cchIndent = None): … … 2001 2011 ##< The raw lines the block is made up of. 2002 2012 self.asLines = [] # type: List[str] 2013 ## Indicates whether the block includes macro expansion parts (kMacroExp_None, 2014 ## kMacroExp_Entrie, kMacroExp_Partial). 2015 self.iMacroExp = self.kMacroExp_None; 2003 2016 ## IEM_MC_BEGIN: Argument count. 2004 2017 self.cArgs = -1; … … 4870 4883 4871 4884 # 4872 # HACK ALERT! For blocks or ginating from macro expansion the start and4885 # HACK ALERT! For blocks originating from macro expansion the start and 4873 4886 # end line will be the same, but the line has multiple 4874 4887 # newlines inside it. So, we have to do some extra tricks … … 4880 4893 if not asLines[0].strip().startswith('IEM_MC_BEGIN'): 4881 4894 self.raiseError('IEM_MC_BEGIN is not the first word on the line'); 4895 4896 # Hack alert! Detect mixed tail/head macros a la cmpxchg16b and split up the lines 4897 # so we can deal correctly with IEM_MC_END below and everything else. 
4898 for sLine in asLines: 4899 cNewLines = sLine.count('\n'); 4900 assert cNewLines > 0; 4901 if cNewLines > 1: 4902 asLines = self.extractLinesFromMacroExpansionLine(''.join(asLines), 4903 self.oCurMcBlock.offBeginLine, 4904 offEndStatementInLine 4905 + sum(len(s) for s in asLines) 4906 - len(asLines[-1])); 4907 self.oCurMcBlock.iMacroExp = McBlock.kMacroExp_Partial; 4908 break; 4882 4909 else: 4910 self.oCurMcBlock.iMacroExp = McBlock.kMacroExp_Entire; 4883 4911 asLines = self.extractLinesFromMacroExpansionLine(self.asLines[self.iLine - 1], 4884 4912 self.oCurMcBlock.offBeginLine, offEndStatementInLine); … … 5207 5235 # nested macro expansion, just to avoid lots of extra work. 5208 5236 # 5237 # There is only limited support for macros expanding to partial MC blocks. 5238 # 5209 5239 # Note! IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX and other macros someone making 5210 5240 # use of IEMOP_RAISE_INVALID_LOCK_PREFIX_RET() will be ignored here and … … 5214 5244 # Also, this way we don't produce lots of unnecessary threaded functions. 5215 5245 # 5216 if sBody.find("IEM_MC_BEGIN") < 0 :5246 if sBody.find("IEM_MC_BEGIN") < 0 and sBody.find("IEM_MC_END") < 0: 5217 5247 #self.debug('workerPreProcessDefine: irrelevant (%s: %s)' % (sName, sBody)); 5218 5248 return True; -
trunk/src/VBox/VMM/VMMAll/IEMAllInstTwoByte0f.cpp.h
r101484 r101722 12428 12428 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fCmpXchg16b) 12429 12429 { 12430 IEM_MC_BEGIN(4, 3, IEM_MC_F_64BIT, 0); 12431 IEM_MC_ARG(PRTUINT128U, pu128MemDst, 0); 12432 IEM_MC_ARG(PRTUINT128U, pu128RaxRdx, 1); 12433 IEM_MC_ARG(PRTUINT128U, pu128RbxRcx, 2); 12434 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 3); 12435 IEM_MC_LOCAL(RTUINT128U, u128RaxRdx); 12436 IEM_MC_LOCAL(RTUINT128U, u128RbxRcx); 12437 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 12438 12439 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 12440 IEMOP_HLP_DONE_DECODING(); 12441 IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(GCPtrEffDst, 16); 12442 IEM_MC_MEM_MAP(pu128MemDst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); 12443 12444 IEM_MC_FETCH_GREG_U64(u128RaxRdx.s.Lo, X86_GREG_xAX); 12445 IEM_MC_FETCH_GREG_U64(u128RaxRdx.s.Hi, X86_GREG_xDX); 12446 IEM_MC_REF_LOCAL(pu128RaxRdx, u128RaxRdx); 12447 12448 IEM_MC_FETCH_GREG_U64(u128RbxRcx.s.Lo, X86_GREG_xBX); 12449 IEM_MC_FETCH_GREG_U64(u128RbxRcx.s.Hi, X86_GREG_xCX); 12450 IEM_MC_REF_LOCAL(pu128RbxRcx, u128RbxRcx); 12451 12452 IEM_MC_FETCH_EFLAGS(EFlags); 12430 /* 12431 * This is hairy, very hairy macro fun. We're walking a fine line 12432 * here to make the code parsable by IEMAllInstPython.py and fit into 12433 * the patterns IEMAllThrdPython.py requires for the code morphing. 
12434 */ 12435 #define BODY_CMPXCHG16B_HEAD \ 12436 IEM_MC_BEGIN(4, 3, IEM_MC_F_64BIT, 0); \ 12437 IEM_MC_ARG(PRTUINT128U, pu128MemDst, 0); \ 12438 IEM_MC_ARG(PRTUINT128U, pu128RaxRdx, 1); \ 12439 IEM_MC_ARG(PRTUINT128U, pu128RbxRcx, 2); \ 12440 IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 3); \ 12441 IEM_MC_LOCAL(RTUINT128U, u128RaxRdx); \ 12442 IEM_MC_LOCAL(RTUINT128U, u128RbxRcx); \ 12443 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ 12444 \ 12445 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); \ 12446 IEMOP_HLP_DONE_DECODING(); \ 12447 IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(GCPtrEffDst, 16); \ 12448 IEM_MC_MEM_MAP(pu128MemDst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/); \ 12449 \ 12450 IEM_MC_FETCH_GREG_U64(u128RaxRdx.s.Lo, X86_GREG_xAX); \ 12451 IEM_MC_FETCH_GREG_U64(u128RaxRdx.s.Hi, X86_GREG_xDX); \ 12452 IEM_MC_REF_LOCAL(pu128RaxRdx, u128RaxRdx); \ 12453 \ 12454 IEM_MC_FETCH_GREG_U64(u128RbxRcx.s.Lo, X86_GREG_xBX); \ 12455 IEM_MC_FETCH_GREG_U64(u128RbxRcx.s.Hi, X86_GREG_xCX); \ 12456 IEM_MC_REF_LOCAL(pu128RbxRcx, u128RbxRcx); \ 12457 \ 12458 IEM_MC_FETCH_EFLAGS(EFlags) 12459 12460 #define BODY_CMPXCHG16B_TAIL \ 12461 IEM_MC_MEM_COMMIT_AND_UNMAP(pu128MemDst, IEM_ACCESS_DATA_RW); \ 12462 IEM_MC_COMMIT_EFLAGS(EFlags); \ 12463 IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF) { \ 12464 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u128RaxRdx.s.Lo); \ 12465 IEM_MC_STORE_GREG_U64(X86_GREG_xDX, u128RaxRdx.s.Hi); \ 12466 } IEM_MC_ENDIF(); \ 12467 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 12468 IEM_MC_END() 12453 12469 12454 12470 #ifdef RT_ARCH_AMD64 /* some code duplication here because IEMAllInstPython.py cannot parse if/else/#if spaghetti. 
*/ … … 12457 12473 if ( !(pVCpu->iem.s.fExec & IEM_F_X86_DISREGARD_LOCK) 12458 12474 && (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 12475 { 12476 BODY_CMPXCHG16B_HEAD; 12459 12477 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12478 BODY_CMPXCHG16B_TAIL; 12479 } 12460 12480 else 12481 { 12482 BODY_CMPXCHG16B_HEAD; 12461 12483 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12484 BODY_CMPXCHG16B_TAIL; 12485 } 12462 12486 } 12463 12487 else 12464 12488 { /* (see comments in #else case below) */ 12465 12489 if (pVCpu->CTX_SUFF(pVM)->cCpus == 1) 12490 { 12491 BODY_CMPXCHG16B_HEAD; 12466 12492 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12493 BODY_CMPXCHG16B_TAIL; 12494 } 12467 12495 else 12496 { 12497 BODY_CMPXCHG16B_HEAD; 12468 12498 IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS, 12469 12499 iemCImpl_cmpxchg16b_fallback_rendezvous, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12500 IEM_MC_END(); 12501 } 12470 12502 } 12471 12503 … … 12473 12505 /** @todo may require fallback for unaligned accesses... */ 12474 12506 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK)) 12507 { 12508 BODY_CMPXCHG16B_HEAD; 12475 12509 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12510 BODY_CMPXCHG16B_TAIL; 12511 } 12476 12512 else 12513 { 12514 BODY_CMPXCHG16B_HEAD; 12477 12515 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_locked, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12516 BODY_CMPXCHG16B_TAIL; 12517 } 12478 12518 12479 12519 #else … … 12483 12523 but to use a rendezvous callback here. Sigh. 
*/ 12484 12524 if (pVCpu->CTX_SUFF(pVM)->cCpus == 1) 12525 { 12526 BODY_CMPXCHG16B_HEAD; 12485 12527 IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg16b_fallback, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12528 BODY_CMPXCHG16B_TAIL; 12529 } 12486 12530 else 12487 12531 { 12488 IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS, 12489 iemCImpl_cmpxchg16b_fallback_rendezvous, pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12532 BODY_CMPXCHG16B_HEAD; 12533 IEM_MC_CALL_CIMPL_4(IEM_CIMPL_F_STATUS_FLAGS, iemCImpl_cmpxchg16b_fallback_rendezvous, 12534 pu128MemDst, pu128RaxRdx, pu128RbxRcx, pEFlags); 12535 IEM_MC_END(); 12490 12536 /* Does not get here, tail code is duplicated in iemCImpl_cmpxchg16b_fallback_rendezvous. */ 12491 12537 } 12492 12538 #endif 12493 12539 12494 IEM_MC_MEM_COMMIT_AND_UNMAP(pu128MemDst, IEM_ACCESS_DATA_RW); 12495 IEM_MC_COMMIT_EFLAGS(EFlags); 12496 IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF) { 12497 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u128RaxRdx.s.Lo); 12498 IEM_MC_STORE_GREG_U64(X86_GREG_xDX, u128RaxRdx.s.Hi); 12499 } IEM_MC_ENDIF(); 12500 IEM_MC_ADVANCE_RIP_AND_FINISH(); 12501 12502 IEM_MC_END(); 12540 #undef BODY_CMPXCHG16B 12503 12541 } 12504 12542 Log(("cmpxchg16b -> #UD\n")); -
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
r101706 r101722 38 38 39 39 # Standard python imports: 40 #import sys;40 import copy; 41 41 42 42 # Out python imports: … … 196 196 Returns the native recompiler function body for this threaded variant. 197 197 """ 198 aoStmts = self.oVariation.aoStmtsForThreadedFunction # type: list(McStmt) 198 # Take the threaded function statement list and add detected 199 # IEM_CIMPL_F_XXX flags to the IEM_MC_BEGIN statement. 200 aoStmts = list(self.oVariation.aoStmtsForThreadedFunction) # type: list(McStmt) 201 for iStmt, oStmt in enumerate(aoStmts): 202 if oStmt.sName == 'IEM_MC_BEGIN' and self.oVariation.oParent.dsCImplFlags: 203 oNewStmt = copy.deepcopy(oStmt); 204 oNewStmt.asParams[3] = ' | '.join(sorted(self.oVariation.oParent.dsCImplFlags.keys())); 205 aoStmts[iStmt] = oNewStmt; 206 199 207 return iai.McStmt.renderCodeForList(aoStmts, cchIndent); 200 208 … … 272 280 g_dUnsupportedMcStmtLastOneStats[sStmt] = [oVariation,]; 273 281 274 if ( len(dUnsupportedStmts) == 1282 if ( len(dUnsupportedStmts) in (1,2) 275 283 and iai.McStmt.findStmtByNames(aoStmts, 276 284 { 'IEM_MC_LOCAL': 1, 'IEM_MC_LOCAL_CONST': 1, 'IEM_MC_ARG': 1, 'IEM_MC_ARG_CONST': 1, -
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
r101704 r101722 419 419 'IEM_CIMPL_F_END_TB': True, 420 420 'IEM_CIMPL_F_XCPT': True, 421 'IEM_CIMPL_F_CALLS_CIMPL': False, 422 'IEM_CIMPL_F_CALLS_AIMPL': False, 423 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False, 421 424 }; 422 425 … … 1331 1334 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True; 1332 1335 1336 # Check for CIMPL and AIMPL calls. 1337 if oStmt.sName.startswith('IEM_MC_CALL_'): 1338 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'): 1339 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True; 1340 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_') 1341 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_') 1342 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')): 1343 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True; 1344 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_') 1345 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_') 1346 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')): 1347 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True; 1348 else: 1349 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,)); 1350 1333 1351 # Process branches of conditionals recursively. 1334 1352 if isinstance(oStmt, iai.McStmtCond): … … 1361 1379 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy(); 1362 1380 self.analyzeCodeOperation(aoStmts); 1381 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags) 1382 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags) 1383 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1): 1384 self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls'); 1363 1385 1364 1386 # Create variations as needed. 
… … 1824 1846 if ian.g_dUnsupportedMcStmtLastOneVarStats: 1825 1847 asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneVarStats, reverse = True, 1826 key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneVarStats[sSortKey]))[:1 0];1848 key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneVarStats[sSortKey]))[:16]; 1827 1849 print('todo:', file = sys.stderr); 1828 print('todo: Top %s variations with variables and oneunsupported statement dependency:' % (len(asTopKeys),),1850 print('todo: Top %s variations with variables and 1-2 unsupported statement dependency:' % (len(asTopKeys),), 1829 1851 file = sys.stderr); 1830 1852 cchMaxKey = max([len(sKey) for sKey in asTopKeys]); … … 1861 1883 cbVars = 0; 1862 1884 for oVar in oThreadedFunction.oMcBlock.aoLocals: 1863 1885 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8; 1864 1886 cbMaxVars = max(cbMaxVars, cbVars); 1865 1887 cbMaxArgs = max(cbMaxArgs, cbArgs); … … 2366 2388 # 2367 2389 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine: 2368 assert sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1, 'sLine="%s"' % (sLine,); 2390 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1) 2391 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kMacroExp_Partial), 'sLine="%s"' % (sLine,); 2369 2392 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]); 2370 2393 sModified = oThreadedFunction.generateInputCode().strip(); … … 2373 2396 iLine = oThreadedFunction.oMcBlock.iEndLine; 2374 2397 sLine = oParser.asLines[iLine - 1]; 2375 assert sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1 or len(oThreadedFunction.oMcBlock.aoStmts) == 1; 2398 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1 2399 or len(oThreadedFunction.oMcBlock.aoStmts) == 1 2400 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kMacroExp_Partial); 2376 2401 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]); 2377 2402 -
trunk/src/VBox/VMM/include/IEMInternal.h
r101682 r101722 622 622 #define IEM_CIMPL_F_XCPT \ 623 623 (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT) 624 625 /** The block calls a C-implementation instruction function with two implicit arguments. 626 * Mutually exclusive with IEM_CIMPL_F_CALLS_AIMPL and 627 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE. 628 * @note The python scripts will add this if missing. */ 629 #define IEM_CIMPL_F_CALLS_CIMPL RT_BIT_32(16) 630 /** The block calls an ASM-implementation instruction function. 631 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and 632 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE. 633 * @note The python scripts will add this if missing. */ 634 #define IEM_CIMPL_F_CALLS_AIMPL RT_BIT_32(17) 635 /** The block calls an ASM-implementation instruction function with an implicit 636 * X86FXSTATE pointer argument. 637 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and IEM_CIMPL_F_CALLS_AIMPL. 638 * @note The python scripts will add this if missing. */ 639 #define IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE RT_BIT_32(18) 624 640 /** @} */ 625 641
Note:
See TracChangeset
for help on using the changeset viewer.