VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 102020

Last change on this file since 102020 was 102012, checked in by vboxsync, 13 months ago

VMM/IEM: If we use structure variables in MC blocks, we need special fetch and store MCs for them or it won't be possible to recompile the code (as variables references are translated to uint8_t indexes by name, no subfield access possible). So, added some variable checking to tstIEMCheckMc and addressed the issues found. (There is more to do here, but tomorrow.) bugref:10371

1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 102012 2023-11-09 02:09:51Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 102012 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53 ## Number of generic parameters for the threaded functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; #| g_kdTypeInfo; - requires 3.9
90g_kdTypeInfo2.update(g_kdTypeInfo);
91
92def getTypeBitCount(sType):
93 """
94 Translates a type to its size in bits.
95 """
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
103
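# A rough illustration of the lookup above (values follow from the tables;
# 'PSOMETYPE' is a made-up name showing the pointer fallback):
#
#   >>> getTypeBitCount('uint16_t')
#   16
#   >>> getTypeBitCount('IEMMODE')
#   2
#   >>> getTypeBitCount('PSOMETYPE')    # 'P' prefix => assumed 64-bit host pointer.
#   64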
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
131
132class ThreadedParamRef(object):
133 """
134 A parameter reference for a threaded function.
135 """
136
137 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
138 ## The name / reference in the original code.
139 self.sOrgRef = sOrgRef;
140 ## Normalized name to deal with spaces in macro invocations and such.
141 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
142 ## Indicates that sOrgRef may not match the parameter.
143 self.fCustomRef = sStdRef is not None;
144 ## The type (typically derived).
145 self.sType = sType;
146 ## The statement making the reference.
147 self.oStmt = oStmt;
148 ## The parameter containing the references. None if implicit.
149 self.iParam = iParam;
150 ## The offset in the parameter of the reference.
151 self.offParam = offParam;
152
153 ## The variable name in the threaded function.
154 self.sNewName = 'x';
155 ## The parameter this is packed into.
156 self.iNewParam = 99;
157 ## The bit offset in iNewParam.
158 self.offNewParam = 1024;
159
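# A rough illustration of how a reference is constructed and normalized
# (oStmt stands for whatever statement object made the reference):
#
#   oRef = ThreadedParamRef('IEM_GET_MODRM_REG(pVCpu, bRm)', 'uint8_t', oStmt, iParam = 1);
#   # oRef.sStdRef    == 'IEM_GET_MODRM_REG(pVCpu,bRm)'   - whitespace stripped.
#   # oRef.fCustomRef == False                            - no explicit sStdRef given.
#   # sNewName, iNewParam and offNewParam keep their placeholder values until
#   # analyzeConsolidateThreadedParamRefs() assigns the packed location.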
160
161class ThreadedFunctionVariation(object):
162 """ Threaded function variation. """
163
164 ## @name Variations.
165 ## These variations will match translation block selection/distinctions as well.
166 ## @{
167 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
168 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
169 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
170 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
171 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
172 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
173 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
174 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
175 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
176 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
177 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
178 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
179 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
180 ksVariation_64 = '_64'; ##< 64-bit mode code.
181 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
182 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
183 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
184 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
185 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
186 kasVariations = (
187 ksVariation_Default,
188 ksVariation_16,
189 ksVariation_16f,
190 ksVariation_16_Addr32,
191 ksVariation_16f_Addr32,
192 ksVariation_16_Pre386,
193 ksVariation_16f_Pre386,
194 ksVariation_32,
195 ksVariation_32f,
196 ksVariation_32_Flat,
197 ksVariation_32f_Flat,
198 ksVariation_32_Addr16,
199 ksVariation_32f_Addr16,
200 ksVariation_64,
201 ksVariation_64f,
202 ksVariation_64_FsGs,
203 ksVariation_64f_FsGs,
204 ksVariation_64_Addr32,
205 ksVariation_64f_Addr32,
206 );
207 kasVariationsWithoutAddress = (
208 ksVariation_16,
209 ksVariation_16f,
210 ksVariation_16_Pre386,
211 ksVariation_16f_Pre386,
212 ksVariation_32,
213 ksVariation_32f,
214 ksVariation_64,
215 ksVariation_64f,
216 );
217 kasVariationsWithoutAddressNot286 = (
218 ksVariation_16,
219 ksVariation_16f,
220 ksVariation_32,
221 ksVariation_32f,
222 ksVariation_64,
223 ksVariation_64f,
224 );
225 kasVariationsWithoutAddressNot286Not64 = (
226 ksVariation_16,
227 ksVariation_16f,
228 ksVariation_32,
229 ksVariation_32f,
230 );
231 kasVariationsWithoutAddressNot64 = (
232 ksVariation_16,
233 ksVariation_16f,
234 ksVariation_16_Pre386,
235 ksVariation_16f_Pre386,
236 ksVariation_32,
237 ksVariation_32f,
238 );
239 kasVariationsWithoutAddressOnly64 = (
240 ksVariation_64,
241 ksVariation_64f,
242 );
243 kasVariationsWithAddress = (
244 ksVariation_16,
245 ksVariation_16f,
246 ksVariation_16_Addr32,
247 ksVariation_16f_Addr32,
248 ksVariation_16_Pre386,
249 ksVariation_16f_Pre386,
250 ksVariation_32,
251 ksVariation_32f,
252 ksVariation_32_Flat,
253 ksVariation_32f_Flat,
254 ksVariation_32_Addr16,
255 ksVariation_32f_Addr16,
256 ksVariation_64,
257 ksVariation_64f,
258 ksVariation_64_FsGs,
259 ksVariation_64f_FsGs,
260 ksVariation_64_Addr32,
261 ksVariation_64f_Addr32,
262 );
263 kasVariationsWithAddressNot286 = (
264 ksVariation_16,
265 ksVariation_16f,
266 ksVariation_16_Addr32,
267 ksVariation_16f_Addr32,
268 ksVariation_32,
269 ksVariation_32f,
270 ksVariation_32_Flat,
271 ksVariation_32f_Flat,
272 ksVariation_32_Addr16,
273 ksVariation_32f_Addr16,
274 ksVariation_64,
275 ksVariation_64f,
276 ksVariation_64_FsGs,
277 ksVariation_64f_FsGs,
278 ksVariation_64_Addr32,
279 ksVariation_64f_Addr32,
280 );
281 kasVariationsWithAddressNot286Not64 = (
282 ksVariation_16,
283 ksVariation_16f,
284 ksVariation_16_Addr32,
285 ksVariation_16f_Addr32,
286 ksVariation_32,
287 ksVariation_32f,
288 ksVariation_32_Flat,
289 ksVariation_32f_Flat,
290 ksVariation_32_Addr16,
291 ksVariation_32f_Addr16,
292 );
293 kasVariationsWithAddressNot64 = (
294 ksVariation_16,
295 ksVariation_16f,
296 ksVariation_16_Addr32,
297 ksVariation_16f_Addr32,
298 ksVariation_16_Pre386,
299 ksVariation_16f_Pre386,
300 ksVariation_32,
301 ksVariation_32f,
302 ksVariation_32_Flat,
303 ksVariation_32f_Flat,
304 ksVariation_32_Addr16,
305 ksVariation_32f_Addr16,
306 );
307 kasVariationsWithAddressOnly64 = (
308 ksVariation_64,
309 ksVariation_64f,
310 ksVariation_64_FsGs,
311 ksVariation_64f_FsGs,
312 ksVariation_64_Addr32,
313 ksVariation_64f_Addr32,
314 );
315 kasVariationsOnlyPre386 = (
316 ksVariation_16_Pre386,
317 ksVariation_16f_Pre386,
318 );
319 kasVariationsEmitOrder = (
320 ksVariation_Default,
321 ksVariation_64,
322 ksVariation_64f,
323 ksVariation_64_FsGs,
324 ksVariation_64f_FsGs,
325 ksVariation_32_Flat,
326 ksVariation_32f_Flat,
327 ksVariation_32,
328 ksVariation_32f,
329 ksVariation_16,
330 ksVariation_16f,
331 ksVariation_16_Addr32,
332 ksVariation_16f_Addr32,
333 ksVariation_16_Pre386,
334 ksVariation_16f_Pre386,
335 ksVariation_32_Addr16,
336 ksVariation_32f_Addr16,
337 ksVariation_64_Addr32,
338 ksVariation_64f_Addr32,
339 );
340 kdVariationNames = {
341 ksVariation_Default: 'defer-to-cimpl',
342 ksVariation_16: '16-bit',
343 ksVariation_16f: '16-bit w/ eflag checking and clearing',
344 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
345 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
346 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
347 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
348 ksVariation_32: '32-bit',
349 ksVariation_32f: '32-bit w/ eflag checking and clearing',
350 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
351 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
352 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
353 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
354 ksVariation_64: '64-bit',
355 ksVariation_64f: '64-bit w/ eflag checking and clearing',
356 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
357 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
358 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
359 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
360 };
361 kdVariationsWithEflagsCheckingAndClearing = {
362 ksVariation_16f: True,
363 ksVariation_16f_Addr32: True,
364 ksVariation_16f_Pre386: True,
365 ksVariation_32f: True,
366 ksVariation_32f_Flat: True,
367 ksVariation_32f_Addr16: True,
368 ksVariation_64f: True,
369 ksVariation_64f_FsGs: True,
370 ksVariation_64f_Addr32: True,
371 };
372 kdVariationsWithFlatAddress = {
373 ksVariation_32_Flat: True,
374 ksVariation_32f_Flat: True,
375 ksVariation_64: True,
376 ksVariation_64f: True,
377 };
378 kdVariationsWithFlatAddr16 = {
379 ksVariation_16: True,
380 ksVariation_16f: True,
381 ksVariation_16_Pre386: True,
382 ksVariation_16f_Pre386: True,
383 ksVariation_32_Addr16: True,
384 ksVariation_32f_Addr16: True,
385 };
386 kdVariationsWithFlatAddr32No64 = {
387 ksVariation_16_Addr32: True,
388 ksVariation_16f_Addr32: True,
389 ksVariation_32: True,
390 ksVariation_32f: True,
391 ksVariation_32_Flat: True,
392 ksVariation_32f_Flat: True,
393 };
394 ## @}
395
396 ## IEM_CIMPL_F_XXX flags that we know.
397 ## The value indicates whether it terminates the TB or not. The goal is to
398 ## improve the recompiler so all but END_TB will be False.
399 ##
400 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
401 kdCImplFlags = {
402 'IEM_CIMPL_F_MODE': False,
403 'IEM_CIMPL_F_BRANCH_DIRECT': False,
404 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
405 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
406 'IEM_CIMPL_F_BRANCH_FAR': True,
407 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
408 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
409 'IEM_CIMPL_F_BRANCH_STACK': False,
410 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
411 'IEM_CIMPL_F_RFLAGS': False,
412 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
413 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
414 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
415 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
416 'IEM_CIMPL_F_STATUS_FLAGS': False,
417 'IEM_CIMPL_F_VMEXIT': False,
418 'IEM_CIMPL_F_FPU': False,
419 'IEM_CIMPL_F_REP': False,
420 'IEM_CIMPL_F_IO': False,
421 'IEM_CIMPL_F_END_TB': True,
422 'IEM_CIMPL_F_XCPT': True,
423 'IEM_CIMPL_F_CALLS_CIMPL': False,
424 'IEM_CIMPL_F_CALLS_AIMPL': False,
425 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
426 };
427
428 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
429 self.oParent = oThreadedFunction # type: ThreadedFunction
430 ##< ksVariation_Xxxx.
431 self.sVariation = sVariation
432
433 ## Threaded function parameter references.
434 self.aoParamRefs = [] # type: List[ThreadedParamRef]
435 ## Unique parameter references.
436 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
437 ## Minimum number of parameters to the threaded function.
438 self.cMinParams = 0;
439
440 ## List/tree of statements for the threaded function.
441 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
442
443 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
444 self.iEnumValue = -1;
445
446 ## Native recompilation details for this variation.
447 self.oNativeRecomp = None;
448
449 def getIndexName(self):
450 sName = self.oParent.oMcBlock.sFunction;
451 if sName.startswith('iemOp_'):
452 sName = sName[len('iemOp_'):];
453 if self.oParent.oMcBlock.iInFunction == 0:
454 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
455 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
456
457 def getThreadedFunctionName(self):
458 sName = self.oParent.oMcBlock.sFunction;
459 if sName.startswith('iemOp_'):
460 sName = sName[len('iemOp_'):];
461 if self.oParent.oMcBlock.iInFunction == 0:
462 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
463 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
464
465 def getNativeFunctionName(self):
466 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
467
468 def getShortName(self):
469 sName = self.oParent.oMcBlock.sFunction;
470 if sName.startswith('iemOp_'):
471 sName = sName[len('iemOp_'):];
472 if self.oParent.oMcBlock.iInFunction == 0:
473 return '%s%s' % ( sName, self.sVariation, );
474 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
475
476 def isWithFlagsCheckingAndClearingVariation(self):
477 """
478 Checks if this is a variation that checks and clears EFLAGS.
479 """
480 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
481
482 #
483 # Analysis and code morphing.
484 #
485
486 def raiseProblem(self, sMessage):
487 """ Raises a problem. """
488 self.oParent.raiseProblem(sMessage);
489
490 def warning(self, sMessage):
491 """ Emits a warning. """
492 self.oParent.warning(sMessage);
493
494 def analyzeReferenceToType(self, sRef):
495 """
496 Translates a variable or structure reference to a type.
497 Returns type name.
498 Raises exception if unable to figure it out.
499 """
500 ch0 = sRef[0];
501 if ch0 == 'u':
502 if sRef.startswith('u32'):
503 return 'uint32_t';
504 if sRef.startswith('u8') or sRef == 'uReg':
505 return 'uint8_t';
506 if sRef.startswith('u64'):
507 return 'uint64_t';
508 if sRef.startswith('u16'):
509 return 'uint16_t';
510 elif ch0 == 'b':
511 return 'uint8_t';
512 elif ch0 == 'f':
513 return 'bool';
514 elif ch0 == 'i':
515 if sRef.startswith('i8'):
516 return 'int8_t';
517 if sRef.startswith('i16'):
518 return 'int16_t';
519 if sRef.startswith('i32'):
520 return 'int32_t';
521 if sRef.startswith('i64'):
522 return 'int64_t';
523 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
524 return 'uint8_t';
525 elif ch0 == 'p':
526 if sRef.find('-') < 0:
527 return 'uintptr_t';
528 if sRef.startswith('pVCpu->iem.s.'):
529 sField = sRef[len('pVCpu->iem.s.') : ];
530 if sField in g_kdIemFieldToType:
531 if g_kdIemFieldToType[sField][0]:
532 return g_kdIemFieldToType[sField][0];
533 elif ch0 == 'G' and sRef.startswith('GCPtr'):
534 return 'uint64_t';
535 elif ch0 == 'e':
536 if sRef == 'enmEffOpSize':
537 return 'IEMMODE';
538 elif ch0 == 'o':
539 if sRef.startswith('off32'):
540 return 'uint32_t';
541 elif sRef == 'cbFrame': # enter
542 return 'uint16_t';
543 elif sRef == 'cShift': ## @todo risky
544 return 'uint8_t';
545
546 self.raiseProblem('Unknown reference: %s' % (sRef,));
547 return None; # Shut up pylint 2.16.2.
548
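# A rough illustration of the Hungarian-prefix heuristic above, assuming oVar
# is some ThreadedFunctionVariation instance:
#
#   >>> oVar.analyzeReferenceToType('u32Disp')
#   'uint32_t'
#   >>> oVar.analyzeReferenceToType('iSegReg')
#   'uint8_t'
#   >>> oVar.analyzeReferenceToType('pVCpu->iem.s.enmEffOpSize')
#   'IEMMODE'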
549 def analyzeCallToType(self, sFnRef):
550 """
551 Determines the type of an indirect function call.
552 """
553 assert sFnRef[0] == 'p';
554
555 #
556 # Simple?
557 #
558 if sFnRef.find('-') < 0:
559 oDecoderFunction = self.oParent.oMcBlock.oFunction;
560
561 # Try the argument list of the function definition macro invocation first.
562 iArg = 2;
563 while iArg < len(oDecoderFunction.asDefArgs):
564 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
565 return oDecoderFunction.asDefArgs[iArg - 1];
566 iArg += 1;
567
568 # Then check for a line that includes the word and looks like a variable declaration.
569 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
570 for sLine in oDecoderFunction.asLines:
571 oMatch = oRe.match(sLine);
572 if oMatch:
573 if not oMatch.group(1).startswith('const'):
574 return oMatch.group(1);
575 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
576
577 #
578 # Deal with the pImpl->pfnXxx:
579 #
580 elif sFnRef.startswith('pImpl->pfn'):
581 sMember = sFnRef[len('pImpl->') : ];
582 sBaseType = self.analyzeCallToType('pImpl');
583 offBits = sMember.rfind('U') + 1;
584 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
585 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
586 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
587 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
588 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
589 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
590 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
591 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
592 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
593 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
594
595 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
596
597 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
598 return None; # Shut up pylint 2.16.2.
599
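# A rough illustration of the pImpl->pfnXxx handling above: with pImpl declared
# as PCIEMOPBINSIZES in the decoder function, a reference such as
# 'pImpl->pfnNormalU16' (member name used for illustration) resolves to
# 'PFNIEMAIMPLBINU16' - the table entry plus whatever follows the last 'U' in
# the member name ('16').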
600 def analyze8BitGRegStmt(self, oStmt):
601 """
602 Gets the 8-bit general purpose register access details of the given statement.
603 ASSUMES the statement is one accessing an 8-bit GREG.
604 """
605 idxReg = 0;
606 if ( oStmt.sName.find('_FETCH_') > 0
607 or oStmt.sName.find('_REF_') > 0
608 or oStmt.sName.find('_TO_LOCAL') > 0):
609 idxReg = 1;
610
611 sRegRef = oStmt.asParams[idxReg];
612 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
613 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
614 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
615 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
616 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
617 else:
618 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
619
620 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
621 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
622 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
623 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
624 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
625 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
626 else:
627 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
628 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
629 sStdRef = 'bOther8Ex';
630
631 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
632 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
633 return (idxReg, sOrgExpr, sStdRef);
634
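# A rough illustration of the helper above for a statement such as
# IEM_MC_FETCH_GREG_U8(u8Value, IEM_GET_MODRM_RM(pVCpu, bRm)) (names illustrative):
#   idxReg   = 1                                    - '_FETCH_' => register ref is param #1.
#   sOrgExpr = 'IEM_GET_MODRM_RM_EX8(pVCpu, bRm)'   - REX-aware variant of the macro.
#   sStdRef  = 'bRmRm8Ex'                           - normalized name used for parameter packing.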
635
636 ## Maps memory related MCs to info for FLAT conversion.
637 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
638 ## segmentation checking for every memory access. Only applied to accesses
639 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment,
640 ## the latter (CS) is just to keep things simple (we could safely fetch via
641 ## it, but only in 64-bit mode could we safely write via it, IIRC).
642 kdMemMcToFlatInfo = {
643 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
644 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
645 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
646 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
647 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
648 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
649 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
650 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
651 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
652 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
653 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
654 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
655 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
656 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
657 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
658 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
659 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
660 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
661 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
662 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
663 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
664 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
665 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
666 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
667 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
668 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
669 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
670 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
671 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
672 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
673 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
674 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
675 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
676 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
677 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
678 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
679 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
680 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
681 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
682 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
683 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
684 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
685 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
686 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
687 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
688 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
689 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
690 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
691 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
692 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
693 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
694 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
695 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
696 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
697 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
698 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
699 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
700 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
701 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
702 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
703 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
704 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
705 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
706 'IEM_MC_MEM_MAP': ( 2, 'IEM_MC_MEM_FLAT_MAP' ),
707 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
708 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
709 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
710 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
711 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
712 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
713 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
714 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
715 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
716 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
717 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
718 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
719 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
720 };
721
722 kdMemMcToFlatInfoStack = {
723 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
724 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
725 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
726 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
727 'IEM_MC_POP_U16': ( 'IEM_MC_FLAT32_POP_U16', 'IEM_MC_FLAT64_POP_U16', ),
728 'IEM_MC_POP_U32': ( 'IEM_MC_FLAT32_POP_U32', 'IEM_MC_POP_U32', ),
729 'IEM_MC_POP_U64': ( 'IEM_MC_POP_U64', 'IEM_MC_FLAT64_POP_U64', ),
730 };
731
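# A rough illustration of how the two tables above are applied by
# analyzeMorphStmtForThreaded() in flat variations (argument names illustrative).
# The first tuple element of kdMemMcToFlatInfo is the index of the effective
# segment argument to drop, the second the FLAT replacement name:
#
#   IEM_MC_FETCH_MEM_U32(u32Dst, pVCpu->iem.s.iEffSeg, GCPtrEffSrc)
#       -> IEM_MC_FETCH_MEM_FLAT_U32(u32Dst, GCPtrEffSrc)
#
# Stack MCs pick the 32-bit or 64-bit flat form depending on the variation:
#
#   IEM_MC_PUSH_U16 -> IEM_MC_FLAT32_PUSH_U16 (flat 32-bit) or IEM_MC_FLAT64_PUSH_U16 (64-bit)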
732 kdThreadedCalcRmEffAddrMcByVariation = {
733 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
734 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
735 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
736 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
737 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
738 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
739 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
740 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
741 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
742 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
743 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
744 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
745 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
746 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
747 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
748 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
749 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
750 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
751 };
752
753 def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
754 """
755 Transforms (copies) the statements into those for the threaded function.
756
757 Returns list/tree of statements (aoStmts is not modified) and the new
758 iParamRef value.
759 """
760 #
761 # We'll be traversing aoParamRefs in parallel to the statements, so we
762 # must match the traversal in analyzeFindThreadedParamRefs exactly.
763 #
764 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
765 aoThreadedStmts = [];
766 for oStmt in aoStmts:
767 # Skip C++ statements that are purely related to decoding.
768 if not oStmt.isCppStmt() or not oStmt.fDecode:
769 # Copy the statement. Make a deep copy to make sure we've got our own
770 # copies of all instance variables, even if a bit overkill at the moment.
771 oNewStmt = copy.deepcopy(oStmt);
772 aoThreadedStmts.append(oNewStmt);
773 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
774
775 # If the statement has parameter references, process the relevant parameters.
776 # We grab the references relevant to this statement and apply them in reverse order.
777 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
778 iParamRefFirst = iParamRef;
779 while True:
780 iParamRef += 1;
781 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
782 break;
783
784 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
785 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
786 oCurRef = self.aoParamRefs[iCurRef];
787 if oCurRef.iParam is not None:
788 assert oCurRef.oStmt == oStmt;
789 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
790 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
791 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
792 or oCurRef.fCustomRef), \
793 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
794 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
795 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
796 + oCurRef.sNewName \
797 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
798
799 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
800 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
801 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
802 assert len(oNewStmt.asParams) == 3;
803
804 if self.sVariation in self.kdVariationsWithFlatAddr16:
805 oNewStmt.asParams = [
806 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
807 ];
808 else:
809 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
810 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
811 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
812
813 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
814 oNewStmt.asParams = [
815 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
816 ];
817 else:
818 oNewStmt.asParams = [
819 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
820 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
821 ];
822 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
823 elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
824 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):
825 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
826 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
827 and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
828 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
829 oNewStmt.sName += '_THREADED';
830 if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
831 oNewStmt.sName += '_PC64';
832 elif self.sVariation in (self.ksVariation_64f, self.ksVariation_64f_FsGs, self.ksVariation_64f_Addr32):
833 oNewStmt.sName += '_PC64_WITH_FLAGS';
834 elif self.sVariation == self.ksVariation_16_Pre386:
835 oNewStmt.sName += '_PC16';
836 elif self.sVariation == self.ksVariation_16f_Pre386:
837 oNewStmt.sName += '_PC16_WITH_FLAGS';
838 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
839 assert self.sVariation != self.ksVariation_Default;
840 oNewStmt.sName += '_PC32';
841 else:
842 oNewStmt.sName += '_PC32_WITH_FLAGS';
843
844 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
845 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
846 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
847 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
848 oNewStmt.sName += '_THREADED';
849
850 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
851 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
852 oNewStmt.sName += '_THREADED';
853 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
854
855 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
856 elif ( self.sVariation in self.kdVariationsWithFlatAddress
857 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
858 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
859 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
860 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
861 if idxEffSeg != -1:
862 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
863 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
864 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
865 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
866 oNewStmt.asParams.pop(idxEffSeg);
867 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
868
869 # ... PUSH and POP also needs flat variants, but these differ a little.
870 elif ( self.sVariation in self.kdVariationsWithFlatAddress
871 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
872 or oNewStmt.sName.startswith('IEM_MC_POP'))):
873 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in (self.ksVariation_64,
874 self.ksVariation_64f,))];
875
876
877 # Process branches of conditionals recursively.
878 if isinstance(oStmt, iai.McStmtCond):
879 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
880 if oStmt.aoElseBranch:
881 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);
882
883 return (aoThreadedStmts, iParamRef);
884
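# A rough illustration of a typical morph performed above: in the ksVariation_64
# variation an IEM_MC_ADVANCE_RIP_AND_FINISH() statement is copied and becomes
#   IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(cbInstrP)
# i.e. the implicit instruction length becomes an explicit parameter, using the
# consolidated name assigned to the 'cbInstr' reference ('cbInstrP' here).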
885
886 def analyzeConsolidateThreadedParamRefs(self):
887 """
888 Consolidate threaded function parameter references into a dictionary
889 with lists of the references to each variable/field.
890 """
891 # Gather unique parameters.
892 self.dParamRefs = {};
893 for oRef in self.aoParamRefs:
894 if oRef.sStdRef not in self.dParamRefs:
895 self.dParamRefs[oRef.sStdRef] = [oRef,];
896 else:
897 self.dParamRefs[oRef.sStdRef].append(oRef);
898
899 # Generate names for them for use in the threaded function.
900 dParamNames = {};
901 for sName, aoRefs in self.dParamRefs.items():
902 # Morph the reference expression into a name.
903 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
904 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
905 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
906 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
907 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
908 elif sName.find('.') >= 0 or sName.find('->') >= 0:
909 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
910 else:
911 sName += 'P';
912
913 # Ensure it's unique.
914 if sName in dParamNames:
915 for i in range(10):
916 if sName + str(i) not in dParamNames:
917 sName += str(i);
918 break;
919 dParamNames[sName] = True;
920
921 # Update all the references.
922 for oRef in aoRefs:
923 oRef.sNewName = sName;
924
925 # Organize them by size too for the purpose of optimizing them.
926 dBySize = {} # type: Dict[int, List[str]]
927 for sStdRef, aoRefs in self.dParamRefs.items():
928 if aoRefs[0].sType[0] != 'P':
929 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
930 assert(cBits <= 64);
931 else:
932 cBits = 64;
933
934 if cBits not in dBySize:
935 dBySize[cBits] = [sStdRef,];
936 else:
937 dBySize[cBits].append(sStdRef);
938
939 # Pack the parameters as best as we can, starting with the largest ones
940 # and ASSUMING a 64-bit parameter size.
941 self.cMinParams = 0;
942 offNewParam = 0;
943 for cBits in sorted(dBySize.keys(), reverse = True):
944 for sStdRef in dBySize[cBits]:
945 if offNewParam == 0 or offNewParam + cBits > 64:
946 self.cMinParams += 1;
947 offNewParam = cBits;
948 else:
949 offNewParam += cBits;
950 assert(offNewParam <= 64);
951
952 for oRef in self.dParamRefs[sStdRef]:
953 oRef.iNewParam = self.cMinParams - 1;
954 oRef.offNewParam = offNewParam - cBits;
955
956 # Currently there are a few that require 4 parameters, list these so we can figure out why:
957 if self.cMinParams >= 4:
958 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
959 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
960
961 return True;
962
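# A rough illustration of the greedy packing above: with unique references of
# 32, 16, 8 and 4 bits everything fits into a single 64-bit parameter,
#   32-bit ref -> iNewParam=0, offNewParam= 0
#   16-bit ref -> iNewParam=0, offNewParam=32
#    8-bit ref -> iNewParam=0, offNewParam=48
#    4-bit ref -> iNewParam=0, offNewParam=56
# giving cMinParams = 1; another 16-bit reference would not fit (60 + 16 > 64)
# and would start parameter 1.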
963 ksHexDigits = '0123456789abcdefABCDEF';
964
965 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
966 """
967 Scans the statements for things that have to be passed on to the threaded
968 function (populates self.aoParamRefs).
969 """
970 for oStmt in aoStmts:
971 # Some statements we can skip altogether.
972 if isinstance(oStmt, iai.McCppPreProc):
973 continue;
974 if oStmt.isCppStmt() and oStmt.fDecode:
975 continue;
976 if oStmt.sName in ('IEM_MC_BEGIN',):
977 continue;
978
979 if isinstance(oStmt, iai.McStmtVar):
980 if oStmt.sValue is None:
981 continue;
982 aiSkipParams = { 0: True, 1: True, 3: True };
983 else:
984 aiSkipParams = {};
985
986 # Several statements have implicit parameters and some have different parameters.
987 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
988 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
989 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
990 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
991 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
992 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
993
994 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
995 and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
996 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
997
998 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
999 # This is being pretty presumptive about bRm always being the RM byte...
1000 assert len(oStmt.asParams) == 3;
1001 assert oStmt.asParams[1] == 'bRm';
1002
1003 if self.sVariation in self.kdVariationsWithFlatAddr16:
1004 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1005 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1006 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1007 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1008 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1009 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1010 'uint8_t', oStmt, sStdRef = 'bSib'));
1011 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1012 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1013 else:
1014 assert self.sVariation in self.kasVariationsWithAddressOnly64;
1015 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1016 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1017 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1018 'uint8_t', oStmt, sStdRef = 'bSib'));
1019 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1020 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1021 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1022 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1023 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1024
1025 # 8-bit register accesses needs to have their index argument reworked to take REX into account.
1026 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1027 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1028 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
1029 aiSkipParams[idxReg] = True; # Skip the parameter below.
1030
1031 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1032 if ( self.sVariation in self.kdVariationsWithFlatAddress
1033 and oStmt.sName in self.kdMemMcToFlatInfo
1034 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1035 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1036
1037 # Inspect the target of calls to see if we need to pass down a
1038 # function pointer or function table pointer for it to work.
1039 if isinstance(oStmt, iai.McStmtCall):
1040 if oStmt.sFn[0] == 'p':
1041 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1042 elif ( oStmt.sFn[0] != 'i'
1043 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1044 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1045 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1046 aiSkipParams[oStmt.idxFn] = True;
1047
1048 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1049 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1050 assert oStmt.idxFn == 2;
1051 aiSkipParams[0] = True;
1052
1053
1054 # Check all the parameters for bogus references.
1055 for iParam, sParam in enumerate(oStmt.asParams):
1056 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1057 # The parameter may contain a C expression, so we have to try to
1058 # extract the relevant bits, i.e. variables and fields, while
1059 # ignoring operators and parentheses.
1060 offParam = 0;
1061 while offParam < len(sParam):
1062 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1063 ch = sParam[offParam];
1064 if ch.isalpha() or ch == '_':
1065 offStart = offParam;
1066 offParam += 1;
1067 while offParam < len(sParam):
1068 ch = sParam[offParam];
1069 if not ch.isalnum() and ch != '_' and ch != '.':
1070 if ch != '-' or sParam[offParam + 1] != '>':
1071 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1072 if ( ch == '('
1073 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1074 offParam += len('(pVM)->') - 1;
1075 else:
1076 break;
1077 offParam += 1;
1078 offParam += 1;
1079 sRef = sParam[offStart : offParam];
1080
1081 # For register references, we pass the full register indexes instead, as macros
1082 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg and the
1083 # threaded function will be more efficient if we just pass the register index
1084 # as a 4-bit param.
1085 if ( sRef.startswith('IEM_GET_MODRM')
1086 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
1087 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1088 if sParam[offParam] != '(':
1089 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1090 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1091 if asMacroParams is None:
1092 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1093 offParam = offCloseParam + 1;
1094 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1095 oStmt, iParam, offStart));
1096
1097 # We can skip known variables.
1098 elif sRef in self.oParent.dVariables:
1099 pass;
1100
1101 # Skip certain macro invocations.
1102 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1103 'IEM_GET_GUEST_CPU_FEATURES',
1104 'IEM_IS_GUEST_CPU_AMD',
1105 'IEM_IS_16BIT_CODE',
1106 'IEM_IS_32BIT_CODE',
1107 'IEM_IS_64BIT_CODE',
1108 ):
1109 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1110 if sParam[offParam] != '(':
1111 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1112 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1113 if asMacroParams is None:
1114 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1115 offParam = offCloseParam + 1;
1116
1117 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1118 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1119 'IEM_IS_16BIT_CODE',
1120 'IEM_IS_32BIT_CODE',
1121 'IEM_IS_64BIT_CODE',
1122 ):
1123 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1124 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1125 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1126 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1127 offParam += 1;
1128
1129 # Skip constants, globals, types (casts), sizeof and macros.
1130 elif ( sRef.startswith('IEM_OP_PRF_')
1131 or sRef.startswith('IEM_ACCESS_')
1132 or sRef.startswith('IEMINT_')
1133 or sRef.startswith('X86_GREG_')
1134 or sRef.startswith('X86_SREG_')
1135 or sRef.startswith('X86_EFL_')
1136 or sRef.startswith('X86_FSW_')
1137 or sRef.startswith('X86_FCW_')
1138 or sRef.startswith('X86_XCPT_')
1139 or sRef.startswith('IEMMODE_')
1140 or sRef.startswith('IEM_F_')
1141 or sRef.startswith('IEM_CIMPL_F_')
1142 or sRef.startswith('g_')
1143 or sRef.startswith('iemAImpl_')
1144 or sRef.startswith('kIemNativeGstReg_')
1145 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1146 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1147 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1148 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1149 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1150 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1151 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1152 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1153 'NIL_RTGCPTR',) ):
1154 pass;
1155
1156 # Any variables (non-fields) and decoder fields in IEMCPU will need to be parameterized.
1158 elif ( ( '.' not in sRef
1159 and '-' not in sRef
1160 and sRef not in ('pVCpu', ) )
1161 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1162 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1163 oStmt, iParam, offStart));
1164 # Number.
1165 elif ch.isdigit():
1166 if ( ch == '0'
1167 and offParam + 2 <= len(sParam)
1168 and sParam[offParam + 1] in 'xX'
1169 and sParam[offParam + 2] in self.ksHexDigits ):
1170 offParam += 2;
1171 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1172 offParam += 1;
1173 else:
1174 while offParam < len(sParam) and sParam[offParam].isdigit():
1175 offParam += 1;
1176 # Comment?
1177 elif ( ch == '/'
1178 and offParam + 4 <= len(sParam)
1179 and sParam[offParam + 1] == '*'):
1180 offParam += 2;
1181 offNext = sParam.find('*/', offParam);
1182 if offNext < offParam:
1183 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1184 offParam = offNext + 2;
1185 # Whatever else.
1186 else:
1187 offParam += 1;
1188
1189 # Traverse the branches of conditionals.
1190 if isinstance(oStmt, iai.McStmtCond):
1191 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1192 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1193 return True;
1194
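# A rough illustration of what the scanner above records for a statement like
# IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)) (names illustrative):
# the local u16Dst is skipped because it is in self.oParent.dVariables, while the
# whole 'IEM_GET_MODRM_RM(pVCpu, bRm)' invocation is recorded as one uint8_t
# ThreadedParamRef so the threaded function gets the resolved register index
# rather than the raw ModR/M byte.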
1195 def analyzeVariation(self, aoStmts):
1196 """
1197 2nd part of the analysis, done on each variation.
1198
1199 The variations may differ in parameter requirements and will end up with
1200 slightly different MC sequences. Thus this is done on each individually.
1201
1202 Returns dummy True - raises exception on trouble.
1203 """
1204 # Now scan the code for variables and field references that need to
1205 # be passed to the threaded function because they are related to the
1206 # instruction decoding.
1207 self.analyzeFindThreadedParamRefs(aoStmts);
1208 self.analyzeConsolidateThreadedParamRefs();
1209
1210 # Morph the statement stream for the block into what we'll be using in the threaded function.
1211 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1212 if iParamRef != len(self.aoParamRefs):
1213 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1214
1215 return True;
1216
1217 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1218 """
1219 Produces generic C++ statements that emit a call to the threaded function
1220 variation and any subsequent checks that may be necessary after that.
1221
1222 The sCallVarNm is an optional C++ variable name to use in place of the function index constant when emitting the call.
1223 """
1224 aoStmts = [
1225 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1226 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1227 cchIndent = cchIndent), # Scope and a hook for various stuff.
1228 ];
1229
1230 # The call to the threaded function.
1231 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1232 for iParam in range(self.cMinParams):
1233 asFrags = [];
1234 for aoRefs in self.dParamRefs.values():
1235 oRef = aoRefs[0];
1236 if oRef.iNewParam == iParam:
1237 sCast = '(uint64_t)';
1238 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1239 sCast = '(uint64_t)(u' + oRef.sType + ')';
1240 if oRef.offNewParam == 0:
1241 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1242 else:
1243 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1244 assert asFrags;
1245 asCallArgs.append(' | '.join(asFrags));
1246
1247 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1248
1249 # For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1250 # mask and maybe emit additional checks.
1251 if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1252 or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1253 or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1254 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1255 cchIndent = cchIndent));
1256
1257 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1258 if not sCImplFlags:
1259 sCImplFlags = '0';
1260 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1261
1262 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1263 # indicates we should do so.
1264 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1265 asEndTbFlags = [];
1266 asTbBranchedFlags = [];
1267 for sFlag in self.oParent.dsCImplFlags:
1268 if self.kdCImplFlags[sFlag] is True:
1269 asEndTbFlags.append(sFlag);
1270 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1271 asTbBranchedFlags.append(sFlag);
1272 if asTbBranchedFlags:
1273 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1274 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1275 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1276 if asEndTbFlags:
1277 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1278 cchIndent = cchIndent));
1279
1280 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1281 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1282
1283 return aoStmts;
1284
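# A rough illustration of the C++ shape emitted above for a variation with one
# packed 64-bit parameter and no special CIMPL flags ('a', 'b', 'c' stand in for
# the original reference expressions; the function index name is made up):
#
#   IEM_MC2_BEGIN_EMIT_CALLS(0);
#   IEM_MC2_EMIT_CALL_1(kIemThreadedFunc_xxx_64,
#                       (uint64_t)(a) | ((uint64_t)(b) << 16) | ((uint64_t)(c) << 24));
#   IEM_MC2_END_EMIT_CALLS(0);
#
# TB-ending or branching IEM_CIMPL_F_XXX flags additionally append the
# fEndTb / iemThreadedSetBranched() statements generated above.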
1285
1286class ThreadedFunction(object):
1287 """
1288 A threaded function.
1289 """
1290
1291 def __init__(self, oMcBlock: iai.McBlock) -> None:
1292 self.oMcBlock = oMcBlock # type: iai.McBlock
1293 # The remaining fields are only useful after analyze() has been called:
1294 ## Variations for this block. There is at least one.
1295 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1296 ## Variation dictionary containing the same as aoVariations.
1297 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1298 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1299 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1300 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1301 ## and those determined by analyzeCodeOperation().
1302 self.dsCImplFlags = {} # type: Dict[str, bool]
1303
1304 @staticmethod
1305 def dummyInstance():
1306 """ Gets a dummy instance. """
1307 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1308 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1309
1310 def hasWithFlagsCheckingAndClearingVariation(self):
1311 """
1312 Check if there are one or more flags-checking-and-clearing
1313 variations for this threaded function.
1314 """
1315 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1316 if sVarWithFlags in self.dVariations:
1317 return True;
1318 return False;
1319
1320 #
1321 # Analysis and code morphing.
1322 #
1323
1324 def raiseProblem(self, sMessage):
1325 """ Raises a problem. """
1326 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1327
1328 def warning(self, sMessage):
1329 """ Emits a warning. """
1330 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1331
1332 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1333 """ Scans the statements for MC variables and call arguments. """
1334 for oStmt in aoStmts:
1335 if isinstance(oStmt, iai.McStmtVar):
1336 if oStmt.sVarName in self.dVariables:
1337 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1338 self.dVariables[oStmt.sVarName] = oStmt;
1339
1340 # There shouldn't be any variables or arguments declared inside if/
1341 # else blocks, but scan them too to be on the safe side.
1342 if isinstance(oStmt, iai.McStmtCond):
1343 cBefore = len(self.dVariables);
1344 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1345 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1346 if len(self.dVariables) != cBefore:
1347 raise Exception('Variables/arguments defined in conditional branches!');
1348 return True;
1349
1350 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
1351 """
1352 Analyzes the code, looking for clues as to additional side-effects.
1353
1354 Currently this looks for branching and IEM_MC_CALL_* statements, adding the
1355 relevant IEM_CIMPL_F_XXX flags to dsCImplFlags. ASSUMES the caller
1356 pre-populates the dictionary with a copy of self.oMcBlock.dsCImplFlags.
1357 """
1358 for oStmt in aoStmts:
1359 # Set IEM_CIMPL_F_BRANCH_XXX if we see any branching MCs.
1360 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1361 assert not fSeenConditional;
1362 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1363 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1364 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1365 if fSeenConditional:
1366 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1367
1368 # Check for CIMPL and AIMPL calls.
1369 if oStmt.sName.startswith('IEM_MC_CALL_'):
1370 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1371 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1372 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1373 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1374 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1375 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1376 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1377 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1378 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1379 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1380 else:
1381 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1382
1383 # Process branches of conditionals recursively.
1384 if isinstance(oStmt, iai.McStmtCond):
1385 self.analyzeCodeOperation(oStmt.aoIfBranch, True);
1386 if oStmt.aoElseBranch:
1387 self.analyzeCodeOperation(oStmt.aoElseBranch, True);
1388
1389 return True;
1390
1391 def analyze(self):
1392 """
1393 Analyzes the code, identifying the number of parameters it requires and such.
1394
1395 Returns dummy True - raises exception on trouble.
1396 """
1397
1398 # Check the block for errors before we proceed (will decode it).
1399 asErrors = self.oMcBlock.check();
1400 if asErrors:
1401 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
1402 for sError in asErrors]));
1403
1404 # Decode the block into a list/tree of McStmt objects.
1405 aoStmts = self.oMcBlock.decode();
1406
1407 # Scan the statements for local variables and call arguments (self.dVariables).
1408 self.analyzeFindVariablesAndCallArgs(aoStmts);
1409
1410 # Scan the code for IEM_CIMPL_F_ and other clues.
1411 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
1412 self.analyzeCodeOperation(aoStmts);
1413 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
1414 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
1415 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
1416 self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');
1417
1418 # Create variations as needed.
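# - IEM_MC_DEFER_TO_CIMPL_*_RET blocks only get the default variation;
# - blocks doing IEM_MC_CALC_RM_EFF_ADDR get the address-size specific sets;
# - all other blocks get the plain mode-specific sets.
# The IEM_MC_F_XXX flags then narrow each set down to the applicable CPU modes.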
1419 if iai.McStmt.findStmtByNames(aoStmts,
1420 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
1421 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
1422 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
1423 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
1424 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
1425
1426 elif iai.McStmt.findStmtByNames(aoStmts, {'IEM_MC_CALC_RM_EFF_ADDR' : True,}):
1427 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1428 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
1429 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1430 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
1431 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1432 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
1433 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1434 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
1435 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1436 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1437 else:
1438 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
1439 else:
1440 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1441 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
1442 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1443 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
1444 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1445 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
1446 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1447 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
1448 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1449 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1450 else:
1451 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
1452
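# Blocks that do not advance RIP themselves (no *_AND_FINISH statement) do
# not need the eflags checking & clearing variations, so drop those again.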
1453 if not iai.McStmt.findStmtByNames(aoStmts,
1454 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
1455 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
1456 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
1457 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
1458 }):
1459 asVariations = [sVariation for sVariation in asVariations
1460 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
1461
1462 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
1463
1464 # Dictionary variant of the list.
1465 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
1466
1467 # Continue the analysis on each variation.
1468 for oVariation in self.aoVariations:
1469 oVariation.analyzeVariation(aoStmts);
1470
1471 return True;
1472
1473 ## Used by emitThreadedCallStmts.
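## These are the variations for which the generated switch statement must also
## consider the effective address size and segment prefixes, i.e. the ones
## contributing the '8' and '16' bits of the switch value.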
1474 kdVariationsWithNeedForPrefixCheck = {
1475 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
1476 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
1477 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
1478 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
1479 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
1480 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
1481 ThreadedFunctionVariation.ksVariation_32_Flat: True,
1482 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
1483 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
1484 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
1485 };
1486
1487 def emitThreadedCallStmts(self):
1488 """
1489 Worker for morphInputCode that returns a list of statements that emit
1490 the call to the threaded functions for the block.
1491 """
1492 # Special case for only default variation:
1493 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
1494 return self.aoVariations[0].emitThreadedCallStmts(0);
1495
1496 #
1497 # Case statement sub-class.
1498 #
1499 dByVari = self.dVariations;
1500 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
1501 class Case:
1502 def __init__(self, sCond, sVarNm = None):
1503 self.sCond = sCond;
1504 self.sVarNm = sVarNm;
1505 self.oVar = dByVari[sVarNm] if sVarNm else None;
1506 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
1507
1508 def toCode(self):
1509 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1510 if self.aoBody:
1511 aoStmts.extend(self.aoBody);
1512 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
1513 return aoStmts;
1514
1515 def toFunctionAssignment(self):
1516 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1517 if self.aoBody:
1518 aoStmts.extend([
1519 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
1520 iai.McCppGeneric('break;', cchIndent = 8),
1521 ]);
1522 return aoStmts;
1523
1524 def isSame(self, oThat):
1525 if not self.aoBody: # fall thru always matches.
1526 return True;
1527 if len(self.aoBody) != len(oThat.aoBody):
1528 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
1529 return False;
1530 for iStmt, oStmt in enumerate(self.aoBody):
1531 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
1532 assert isinstance(oStmt, iai.McCppGeneric);
1533 assert not isinstance(oStmt, iai.McStmtCond);
1534 if isinstance(oStmt, iai.McStmtCond):
1535 return False;
1536 if oStmt.sName != oThatStmt.sName:
1537 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
1538 return False;
1539 if len(oStmt.asParams) != len(oThatStmt.asParams):
1540 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
1541 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
1542 return False;
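# The parameters must match as well, except that the threaded function
# index passed to IEM_MC2_EMIT_CALL_* (parameter #1) is allowed to
# differ between the two variations.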
1543 for iParam, sParam in enumerate(oStmt.asParams):
1544 if ( sParam != oThatStmt.asParams[iParam]
1545 and ( iParam != 1
1546 or not isinstance(oStmt, iai.McCppCall)
1547 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
1548 or sParam != self.oVar.getIndexName()
1549 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
1550 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
1551 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
1552 return False;
1553 return True;
1554
1555 #
1556 # Determine what we're switching on.
1557 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
1558 #
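# The switch value is laid out as follows:
# bits 0-2: pVCpu->iem.s.fExec masked by IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
# bit 3 (8): the effective address size differs from the CPU mode;
# bit 4 (16): the effective segment is FS, GS or CS (non-flat penalty);
# bit 5 (32): the eflags checking & clearing variants are called for.
# Bits 3/4 and 5 are only ORed in when the corresponding variations exist.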
1559 fSimple = True;
1560 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
1561 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
1562 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
1563 # Accesses via FS, GS and CS go thru the non-FLAT functions. (CS is not
1564 # writable in 32-bit mode (at least), so accesses via it always take the
1565 # penalty path; that keeps things simple.)
1566 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
1567 fSimple = False; # threaded functions.
1568 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1569 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
1570 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
1571
1572 #
1573 # Generate the case statements.
1574 #
1575 # pylintx: disable=x
1576 aoCases = [];
1577 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
1578 assert not fSimple;
1579 aoCases.extend([
1580 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
1581 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
1582 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
1583 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
1584 ]);
1585 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
1586 aoCases.extend([
1587 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
1588 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
1589 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
1590 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
1591 ]);
1592 elif ThrdFnVar.ksVariation_64 in dByVari:
1593 assert fSimple;
1594 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
1595 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
1596 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
1597
1598 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
1599 assert not fSimple;
1600 aoCases.extend([
1601 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
1602 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
1603 Case('IEMMODE_32BIT | 16', None), # fall thru
1604 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1605 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
1606 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
1607 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
1608 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
1609 ]);
1610 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
1611 aoCases.extend([
1612 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
1613 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
1614 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
1615 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1616 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
1617 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
1618 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
1619 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
1620 ]);
1621 elif ThrdFnVar.ksVariation_32 in dByVari:
1622 assert fSimple;
1623 aoCases.extend([
1624 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1625 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1626 ]);
1627 if ThrdFnVar.ksVariation_32f in dByVari:
1628 aoCases.extend([
1629 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1630 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1631 ]);
1632
1633 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
1634 assert not fSimple;
1635 aoCases.extend([
1636 Case('IEMMODE_16BIT | 16', None), # fall thru
1637 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
1638 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
1639 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
1640 ]);
1641 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
1642 aoCases.extend([
1643 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
1644 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
1645 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
1646 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
1647 ]);
1648 elif ThrdFnVar.ksVariation_16 in dByVari:
1649 assert fSimple;
1650 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
1651 if ThrdFnVar.ksVariation_16f in dByVari:
1652 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
1653
1654 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
1655 if not fSimple:
1656 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
1657 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
1658 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
1659 if not fSimple:
1660 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
1661 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
1662
1663 #
1664 # If the case bodies are all the same, except for the function called,
1665 # we can reduce the code size and hopefully compile time.
1666 #
1667 iFirstCaseWithBody = 0;
1668 while not aoCases[iFirstCaseWithBody].aoBody:
1669 iFirstCaseWithBody += 1
1670 fAllSameCases = True
1671 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
1672 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
1673 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
1674 if fAllSameCases:
1675 aoStmts = [
1676 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
1677 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1678 iai.McCppGeneric('{'),
1679 ];
1680 for oCase in aoCases:
1681 aoStmts.extend(oCase.toFunctionAssignment());
1682 aoStmts.extend([
1683 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
1684 iai.McCppGeneric('}'),
1685 ]);
1686 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
1687
1688 else:
1689 #
1690 # Generate the generic switch statement.
1691 #
1692 aoStmts = [
1693 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1694 iai.McCppGeneric('{'),
1695 ];
1696 for oCase in aoCases:
1697 aoStmts.extend(oCase.toCode());
1698 aoStmts.extend([
1699 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
1700 iai.McCppGeneric('}'),
1701 ]);
1702
1703 return aoStmts;
1704
1705 def morphInputCode(self, aoStmts, fCallEmitted = False, cDepth = 0):
1706 """
1707 Adjusts (& copies) the statements for the input/decoder so it will emit
1708 calls to the right threaded functions for each block.
1709
1710 Returns list/tree of statements (aoStmts is not modified) and updated
1711 fCallEmitted status.
1712 """
1713 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
1714 aoDecoderStmts = [];
1715
1716 for oStmt in aoStmts:
1717 # Copy the statement. Make a deep copy to make sure we've got our own
1718 # copies of all instance variables, even if a bit overkill at the moment.
1719 oNewStmt = copy.deepcopy(oStmt);
1720 aoDecoderStmts.append(oNewStmt);
1721 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
1722 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
1723 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
1724
1725 # If we haven't emitted the threaded function call yet, look for
1726 # statements which it would naturally follow or precede.
1727 if not fCallEmitted:
1728 if not oStmt.isCppStmt():
1729 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
1730 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
1731 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
1732 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
1733 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
1734 aoDecoderStmts.pop();
1735 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1736 aoDecoderStmts.append(oNewStmt);
1737 fCallEmitted = True;
1738 elif ( oStmt.fDecode
1739 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
1740 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
1741 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1742 fCallEmitted = True;
1743
1744 # Process branches of conditionals recursively.
1745 if isinstance(oStmt, iai.McStmtCond):
1746 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fCallEmitted, cDepth + 1);
1747 if oStmt.aoElseBranch:
1748 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fCallEmitted, cDepth + 1);
1749 else:
1750 fCallEmitted2 = False;
1751 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
1752
1753 if not fCallEmitted and cDepth == 0:
1754 self.raiseProblem('Unable to insert call to threaded function.');
1755
1756 return (aoDecoderStmts, fCallEmitted);
1757
1758
1759 def generateInputCode(self):
1760 """
1761 Modifies the input code.
1762 """
1763 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
1764
1765 if len(self.oMcBlock.aoStmts) == 1:
1766 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
1767 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
1768 if self.dsCImplFlags:
1769 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
1770 else:
1771 sCode += '0;\n';
1772 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1773 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1774 sIndent = ' ' * (min(cchIndent, 2) - 2);
1775 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
1776 return sCode;
1777
1778 # IEM_MC_BEGIN/END block
1779 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
1780 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1781 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1782
1783# Short alias for ThreadedFunctionVariation.
1784ThrdFnVar = ThreadedFunctionVariation;
1785
1786
1787class IEMThreadedGenerator(object):
1788 """
1789 The threaded code generator & annotator.
1790 """
1791
1792 def __init__(self):
1793 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
1794 self.oOptions = None # type: argparse.Namespace
1795 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
1796 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers, giving the index of the first function.
1797
1798 #
1799 # Processing.
1800 #
1801
1802 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
1803 """
1804 Process the input files.
1805 """
1806
1807 # Parse the files.
1808 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
1809
1810 # Create threaded functions for the MC blocks.
1811 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
1812
1813 # Analyze the threaded functions.
1814 dRawParamCounts = {};
1815 dMinParamCounts = {};
1816 for oThreadedFunction in self.aoThreadedFuncs:
1817 oThreadedFunction.analyze();
1818 for oVariation in oThreadedFunction.aoVariations:
1819 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
1820 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
1821 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
1822 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
1823 print('debug: %s params: %4s raw, %4s min'
1824 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
1825 file = sys.stderr);
1826
1827 # Populate aidxFirstFunctions. This is ASSUMING that
1828 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
1829 iThreadedFunction = 0;
1830 oThreadedFunction = self.getThreadedFunctionByIndex(0);
1831 self.aidxFirstFunctions = [];
1832 for oParser in self.aoParsers:
1833 self.aidxFirstFunctions.append(iThreadedFunction);
1834
1835 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
1836 iThreadedFunction += 1;
1837 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
1838
1839 # Analyze the threaded functions and their variations for native recompilation.
1840 if fNativeRecompilerEnabled:
1841 print('todo:', file = sys.stderr);
1842 cTotal = 0;
1843 cNative = 0;
1844 for oThreadedFunction in self.aoThreadedFuncs:
1845 for oVariation in oThreadedFunction.aoVariations:
1846 cTotal += 1;
1847 oVariation.oNativeRecomp = ian.analyzeVariantForNativeRecomp(oVariation, sHostArch);
1848 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
1849 cNative += 1;
1850 print('todo: %.1f%% / %u out of %u threaded function variations are recompilable'
1851 % (cNative * 100.0 / cTotal, cNative, cTotal), file = sys.stderr);
1852 if ian.g_dUnsupportedMcStmtLastOneStats:
1853 asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneStats, reverse = True,
1854 key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneStats[sSortKey]))[:16];
1855 print('todo:', file = sys.stderr);
1856 print('todo: Top %s variations with one unsupported statement dependency:' % (len(asTopKeys),),
1857 file = sys.stderr);
1858 cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
1859 for sKey in asTopKeys:
1860 print('todo: %*s = %s (%s%s)'
1861 % (cchMaxKey, sKey, len(ian.g_dUnsupportedMcStmtLastOneStats[sKey]),
1862 ', '.join([oVar.getShortName() for oVar in ian.g_dUnsupportedMcStmtLastOneStats[sKey][:5]]),
1863 ',...' if len(ian.g_dUnsupportedMcStmtLastOneStats[sKey]) >= 5 else '', )
1864 , file = sys.stderr);
1865
1866 asTopKeys = sorted(ian.g_dUnsupportedMcStmtStats, reverse = True,
1867 key = lambda sSortKey: ian.g_dUnsupportedMcStmtStats[sSortKey])[:16];
1868 print('todo:', file = sys.stderr);
1869 print('todo: Top %d most used unimplemented statements:' % (len(asTopKeys),), file = sys.stderr);
1870 cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
1871 for i in range(0, len(asTopKeys), 2):
1872 print('todo: %*s = %4d %*s = %4d'
1873 % ( cchMaxKey, asTopKeys[i], ian.g_dUnsupportedMcStmtStats[asTopKeys[i]],
1874 cchMaxKey, asTopKeys[i + 1], ian.g_dUnsupportedMcStmtStats[asTopKeys[i + 1]],),
1875 file = sys.stderr);
1876 print('todo:', file = sys.stderr);
1877
1878 if ian.g_dUnsupportedMcStmtLastOneVarStats:
1879 asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneVarStats, reverse = True,
1880 key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneVarStats[sSortKey]))[:16];
1881 print('todo:', file = sys.stderr);
1882 print('todo: Top %s variations with variables and 1-2 unsupported statement dependency:' % (len(asTopKeys),),
1883 file = sys.stderr);
1884 cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
1885 for sKey in asTopKeys:
1886 print('todo: %*s = %s (%s%s)'
1887 % (cchMaxKey, sKey, len(ian.g_dUnsupportedMcStmtLastOneVarStats[sKey]),
1888 ', '.join([oVar.getShortName() for oVar in ian.g_dUnsupportedMcStmtLastOneVarStats[sKey][:5]]),
1889 ',...' if len(ian.g_dUnsupportedMcStmtLastOneVarStats[sKey]) >= 5 else '', )
1890 , file = sys.stderr);
1891
1892
1893 # Gather arguments + variable statistics for the MC blocks.
1894 cMaxArgs = 0;
1895 cMaxVars = 0;
1896 cMaxVarsAndArgs = 0;
1897 cbMaxArgs = 0;
1898 cbMaxVars = 0;
1899 cbMaxVarsAndArgs = 0;
1900 for oThreadedFunction in self.aoThreadedFuncs:
1901 if oThreadedFunction.oMcBlock.cLocals >= 0:
1902 # Counts.
1903 assert oThreadedFunction.oMcBlock.cArgs >= 0;
1904 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
1905 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
1906 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
1907 if cMaxVarsAndArgs > 9:
1908 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
1909 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
1910 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
1911 # Calc stack allocation size:
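# Each argument and local is rounded up to a whole 64-bit (8 byte) stack slot.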
1912 cbArgs = 0;
1913 for oArg in oThreadedFunction.oMcBlock.aoArgs:
1914 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
1915 cbVars = 0;
1916 for oVar in oThreadedFunction.oMcBlock.aoLocals:
1917 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
1918 cbMaxVars = max(cbMaxVars, cbVars);
1919 cbMaxArgs = max(cbMaxArgs, cbArgs);
1920 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
1921 if cbMaxVarsAndArgs >= 0xc0:
1922 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
1923 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
1924
1925 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
1926 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
1927
1928 return True;
1929
1930 #
1931 # Output
1932 #
1933
1934 def generateLicenseHeader(self):
1935 """
1936 Returns the lines for a license header.
1937 """
1938 return [
1939 '/*',
1940 ' * Autogenerated by $Id: IEMAllThrdPython.py 102012 2023-11-09 02:09:51Z vboxsync $ ',
1941 ' * Do not edit!',
1942 ' */',
1943 '',
1944 '/*',
1945 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
1946 ' *',
1947 ' * This file is part of VirtualBox base platform packages, as',
1948 ' * available from https://www.virtualbox.org.',
1949 ' *',
1950 ' * This program is free software; you can redistribute it and/or',
1951 ' * modify it under the terms of the GNU General Public License',
1952 ' * as published by the Free Software Foundation, in version 3 of the',
1953 ' * License.',
1954 ' *',
1955 ' * This program is distributed in the hope that it will be useful, but',
1956 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
1957 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
1958 ' * General Public License for more details.',
1959 ' *',
1960 ' * You should have received a copy of the GNU General Public License',
1961 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
1962 ' *',
1963 ' * The contents of this file may alternatively be used under the terms',
1964 ' * of the Common Development and Distribution License Version 1.0',
1965 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
1966 ' * in the VirtualBox distribution, in which case the provisions of the',
1967 ' * CDDL are applicable instead of those of the GPL.',
1968 ' *',
1969 ' * You may elect to license modified versions of this file under the',
1970 ' * terms and conditions of either the GPL or the CDDL or both.',
1971 ' *',
1972 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
1973 ' */',
1974 '',
1975 '',
1976 '',
1977 ];
1978
1979 ## List of built-in threaded functions with user argument counts and
1980 ## whether it has a native recompiler implementation.
1981 katBltIns = (
1982 ( 'DeferToCImpl0', 2, True ),
1983 ( 'CheckIrq', 0, True ),
1984 ( 'CheckMode', 1, True ),
1985 ( 'CheckHwInstrBps', 0, False ),
1986 ( 'CheckCsLim', 1, False ),
1987
1988 ( 'CheckCsLimAndOpcodes', 3, False ),
1989 ( 'CheckOpcodes', 3, False ),
1990 ( 'CheckOpcodesConsiderCsLim', 3, False ),
1991
1992 ( 'CheckCsLimAndPcAndOpcodes', 3, False ),
1993 ( 'CheckPcAndOpcodes', 3, False ),
1994 ( 'CheckPcAndOpcodesConsiderCsLim', 3, False ),
1995
1996 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, False ),
1997 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, False ),
1998 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, False ),
1999
2000 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, False ),
2001 ( 'CheckOpcodesLoadingTlb', 3, False ),
2002 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, False ),
2003
2004 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, False ),
2005 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, False ),
2006 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, False ),
2007
2008 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, False ),
2009 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, False ),
2010 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, False ),
2011 );
2012
2013 def generateThreadedFunctionsHeader(self, oOut):
2014 """
2015 Generates the threaded functions header file.
2016 Returns success indicator.
2017 """
2018
2019 asLines = self.generateLicenseHeader();
2020
2021 # Generate the threaded function table indexes.
2022 asLines += [
2023 'typedef enum IEMTHREADEDFUNCS',
2024 '{',
2025 ' kIemThreadedFunc_Invalid = 0,',
2026 '',
2027 ' /*',
2028 ' * Predefined',
2029 ' */',
2030 ];
2031 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2032
2033 iThreadedFunction = 1 + len(self.katBltIns);
2034 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2035 asLines += [
2036 '',
2037 ' /*',
2038 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2039 ' */',
2040 ];
2041 for oThreadedFunction in self.aoThreadedFuncs:
2042 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2043 if oVariation:
2044 iThreadedFunction += 1;
2045 oVariation.iEnumValue = iThreadedFunction;
2046 asLines.append(' ' + oVariation.getIndexName() + ',');
2047 asLines += [
2048 ' kIemThreadedFunc_End',
2049 '} IEMTHREADEDFUNCS;',
2050 '',
2051 ];
2052
2053 # Prototype the function table.
2054 asLines += [
2055 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2056 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2057 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2058 '#endif',
2059 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2060 ];
2061
2062 oOut.write('\n'.join(asLines));
2063 return True;
2064
2065 ksBitsToIntMask = {
2066 1: "UINT64_C(0x1)",
2067 2: "UINT64_C(0x3)",
2068 4: "UINT64_C(0xf)",
2069 8: "UINT64_C(0xff)",
2070 16: "UINT64_C(0xffff)",
2071 32: "UINT64_C(0xffffffff)",
2072 };
2073
2074 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2075 """
2076 Outputs code for unpacking parameters.
2077 This is shared by the threaded and native code generators.
2078 """
2079 aasVars = [];
2080 for aoRefs in oVariation.dParamRefs.values():
2081 oRef = aoRefs[0];
2082 if oRef.sType[0] != 'P':
2083 cBits = g_kdTypeInfo[oRef.sType][0];
2084 sType = g_kdTypeInfo[oRef.sType][2];
2085 else:
2086 cBits = 64;
2087 sType = oRef.sType;
2088
2089 sTypeDecl = sType + ' const';
2090
2091 if cBits == 64:
2092 assert oRef.offNewParam == 0;
2093 if sType == 'uint64_t':
2094 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2095 else:
2096 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2097 elif oRef.offNewParam == 0:
2098 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2099 else:
2100 sUnpack = '(%s)((%s >> %s) & %s);' \
2101 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2102
2103 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2104
2105 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2106 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2107 acchVars = [0, 0, 0, 0, 0];
2108 for asVar in aasVars:
2109 for iCol, sStr in enumerate(asVar):
2110 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2111 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2112 for asVar in sorted(aasVars):
2113 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2114 return True;
2115
2116 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2117 def generateThreadedFunctionsSource(self, oOut):
2118 """
2119 Generates the threaded functions source file.
2120 Returns success indicator.
2121 """
2122
2123 asLines = self.generateLicenseHeader();
2124 oOut.write('\n'.join(asLines));
2125
2126 #
2127 # Emit the function definitions.
2128 #
2129 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2130 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2131 oOut.write( '\n'
2132 + '\n'
2133 + '\n'
2134 + '\n'
2135 + '/*' + '*' * 128 + '\n'
2136 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2137 + '*' * 128 + '*/\n');
2138
2139 for oThreadedFunction in self.aoThreadedFuncs:
2140 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2141 if oVariation:
2142 oMcBlock = oThreadedFunction.oMcBlock;
2143
2144 # Function header
2145 oOut.write( '\n'
2146 + '\n'
2147 + '/**\n'
2148 + ' * #%u: %s at line %s offset %s in %s%s\n'
2149 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2150 os.path.split(oMcBlock.sSrcFile)[1],
2151 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2152 + ' */\n'
2153 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2154 + '{\n');
2155
2156 # Unpack parameters.
2157 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2158
2159 # RT_NOREF for unused parameters.
2160 if oVariation.cMinParams < g_kcThreadedParams:
2161 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
2162
2163 # Now for the actual statements.
2164 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2165
2166 oOut.write('}\n');
2167
2168
2169 #
2170 # Generate the output tables in parallel.
2171 #
2172 asFuncTable = [
2173 '/**',
2174 ' * Function pointer table.',
2175 ' */',
2176 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2177 '{',
2178 ' /*Invalid*/ NULL,',
2179 ];
2180 asNameTable = [
2181 '/**',
2182 ' * Function name table.',
2183 ' */',
2184 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2185 '{',
2186 ' "Invalid",',
2187 ];
2188 asArgCntTab = [
2189 '/**',
2190 ' * Argument count table.',
2191 ' */',
2192 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2193 '{',
2194 ' 0, /*Invalid*/',
2195 ];
2196 aasTables = (asFuncTable, asNameTable, asArgCntTab,);
2197
2198 for asTable in aasTables:
2199 asTable.extend((
2200 '',
2201 ' /*',
2202 ' * Predefined.',
2203 ' */',
2204 ));
2205 for sFuncNm, cArgs, _ in self.katBltIns:
2206 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2207 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2208 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2209
2210 iThreadedFunction = 1 + len(self.katBltIns);
2211 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2212 for asTable in aasTables:
2213 asTable.extend((
2214 '',
2215 ' /*',
2216 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
2217 ' */',
2218 ));
2219 for oThreadedFunction in self.aoThreadedFuncs:
2220 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2221 if oVariation:
2222 iThreadedFunction += 1;
2223 assert oVariation.iEnumValue == iThreadedFunction;
2224 sName = oVariation.getThreadedFunctionName();
2225 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
2226 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
2227 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
2228
2229 for asTable in aasTables:
2230 asTable.append('};');
2231
2232 #
2233 # Output the tables.
2234 #
2235 oOut.write( '\n'
2236 + '\n');
2237 oOut.write('\n'.join(asFuncTable));
2238 oOut.write( '\n'
2239 + '\n'
2240 + '\n'
2241 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
2242 oOut.write('\n'.join(asNameTable));
2243 oOut.write( '\n'
2244 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
2245 + '\n'
2246 + '\n');
2247 oOut.write('\n'.join(asArgCntTab));
2248 oOut.write('\n');
2249
2250 return True;
2251
2252 def generateNativeFunctionsHeader(self, oOut):
2253 """
2254 Generates the native recompiler functions header file.
2255 Returns success indicator.
2256 """
2257 if not self.oOptions.fNativeRecompilerEnabled:
2258 return True;
2259
2260 asLines = self.generateLicenseHeader();
2261
2262 # Prototype the function table.
2263 asLines += [
2264 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2265 '',
2266 ];
2267
2268 oOut.write('\n'.join(asLines));
2269 return True;
2270
2271 def generateNativeFunctionsSource(self, oOut):
2272 """
2273 Generates the native recompiler functions source file.
2274 Returns success indicator.
2275 """
2276 if not self.oOptions.fNativeRecompilerEnabled:
2277 return True;
2278
2279 #
2280 # The file header.
2281 #
2282 oOut.write('\n'.join(self.generateLicenseHeader()));
2283
2284 #
2285 # Emit the functions.
2286 #
2287 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2288 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2289 oOut.write( '\n'
2290 + '\n'
2291 + '\n'
2292 + '\n'
2293 + '/*' + '*' * 128 + '\n'
2294 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2295 + '*' * 128 + '*/\n');
2296
2297 for oThreadedFunction in self.aoThreadedFuncs:
2298 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2299 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2300 oMcBlock = oThreadedFunction.oMcBlock;
2301
2302 # Function header
2303 oOut.write( '\n'
2304 + '\n'
2305 + '/**\n'
2306 + ' * #%u: %s at line %s offset %s in %s%s\n'
2307 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2308 os.path.split(oMcBlock.sSrcFile)[1],
2309 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2310 + ' */\n'
2311 + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
2312 + '{\n');
2313
2314 # Unpack parameters.
2315 self.generateFunctionParameterUnpacking(oVariation, oOut,
2316 ('pCallEntry->auParams[0]',
2317 'pCallEntry->auParams[1]',
2318 'pCallEntry->auParams[2]',));
2319
2320 # Now for the actual statements.
2321 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2322
2323 oOut.write('}\n');
2324
2325 #
2326 # Output the function table.
2327 #
2328 oOut.write( '\n'
2329 + '\n'
2330 + '/*\n'
2331 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2332 + ' */\n'
2333 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
2334 + '{\n'
2335 + ' /*Invalid*/ NULL,'
2336 + '\n'
2337 + ' /*\n'
2338 + ' * Predefined.\n'
2339 + ' */\n'
2340 );
2341 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2342 if fHaveRecompFunc:
2343 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
2344 else:
2345 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
2346
2347 iThreadedFunction = 1 + len(self.katBltIns);
2348 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2349 oOut.write( ' /*\n'
2350 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2351 + ' */\n');
2352 for oThreadedFunction in self.aoThreadedFuncs:
2353 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2354 if oVariation:
2355 iThreadedFunction += 1;
2356 assert oVariation.iEnumValue == iThreadedFunction;
2357 sName = oVariation.getNativeFunctionName();
2358 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2359 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2360 else:
2361 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2362
2363 oOut.write( '};\n'
2364 + '\n');
2365 return True;
2366
2367
2368 def getThreadedFunctionByIndex(self, idx):
2369 """
2370 Returns a ThreadedFunction object for the given index. If the index is
2371 out of bounds, a dummy is returned.
2372 """
2373 if idx < len(self.aoThreadedFuncs):
2374 return self.aoThreadedFuncs[idx];
2375 return ThreadedFunction.dummyInstance();
2376
2377 def generateModifiedInput(self, oOut, idxFile):
2378 """
2379 Generates the combined modified input source/header file.
2380 Returns success indicator.
2381 """
2382 #
2383 # File header and assert assumptions.
2384 #
2385 oOut.write('\n'.join(self.generateLicenseHeader()));
2386 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
2387
2388 #
2389 # Iterate all parsers (input files) and output the ones related to the
2390 # file set given by idxFile.
2391 #
2392 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
2393 # Is this included in the file set?
2394 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
2395 fInclude = -1;
2396 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
2397 if sSrcBaseFile == aoInfo[0].lower():
2398 fInclude = aoInfo[2] in (-1, idxFile);
2399 break;
2400 if fInclude is not True:
2401 assert fInclude is False;
2402 continue;
2403
2404 # Output it.
2405 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
2406
2407 iThreadedFunction = self.aidxFirstFunctions[idxParser];
2408 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2409 iLine = 0;
2410 while iLine < len(oParser.asLines):
2411 sLine = oParser.asLines[iLine];
2412 iLine += 1; # iBeginLine and iEndLine are 1-based.
2413
2414 # Can we pass it thru?
2415 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
2416 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
2417 oOut.write(sLine);
2418 #
2419 # Single MC block. Just extract it and insert the replacement.
2420 #
2421 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
2422 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
2423 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
2424 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
2425 sModified = oThreadedFunction.generateInputCode().strip();
2426 oOut.write(sModified);
2427
2428 iLine = oThreadedFunction.oMcBlock.iEndLine;
2429 sLine = oParser.asLines[iLine - 1];
2430 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
2431 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
2432 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
2433 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
2434
2435 # Advance
2436 iThreadedFunction += 1;
2437 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2438 #
2439 # Macro expansion line that has sublines and may contain multiple MC blocks.
2440 #
2441 else:
2442 offLine = 0;
2443 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
2444 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
2445
2446 sModified = oThreadedFunction.generateInputCode().strip();
2447 assert ( sModified.startswith('IEM_MC_BEGIN')
2448 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
2449 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
2450 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
2451 ), 'sModified="%s"' % (sModified,);
2452 oOut.write(sModified);
2453
2454 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
2455
2456 # Advance
2457 iThreadedFunction += 1;
2458 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2459
2460 # Last line segment.
2461 if offLine < len(sLine):
2462 oOut.write(sLine[offLine : ]);
2463
2464 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
2465
2466 return True;
2467
2468 def generateModifiedInput1(self, oOut):
2469 """
2470 Generates the combined modified input source/header file, part 1.
2471 Returns success indicator.
2472 """
2473 return self.generateModifiedInput(oOut, 1);
2474
2475 def generateModifiedInput2(self, oOut):
2476 """
2477 Generates the combined modified input source/header file, part 2.
2478 Returns success indicator.
2479 """
2480 return self.generateModifiedInput(oOut, 2);
2481
2482 def generateModifiedInput3(self, oOut):
2483 """
2484 Generates the combined modified input source/header file, part 3.
2485 Returns success indicator.
2486 """
2487 return self.generateModifiedInput(oOut, 3);
2488
2489 def generateModifiedInput4(self, oOut):
2490 """
2491 Generates the combined modified input source/header file, part 4.
2492 Returns success indicator.
2493 """
2494 return self.generateModifiedInput(oOut, 4);
2495
2496
2497 #
2498 # Main
2499 #
2500
2501 def main(self, asArgs):
2502 """
2503 C-like main function.
2504 Returns exit code.
2505 """
2506
2507 #
2508 # Parse arguments
2509 #
2510 sScriptDir = os.path.dirname(__file__);
2511 oParser = argparse.ArgumentParser(add_help = False);
2512 oParser.add_argument('asInFiles',
2513 metavar = 'input.cpp.h',
2514 nargs = '*',
2515 default = [os.path.join(sScriptDir, aoInfo[0])
2516 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2517 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2518 oParser.add_argument('--host-arch',
2519 metavar = 'arch',
2520 dest = 'sHostArch',
2521 action = 'store',
2522 default = None,
2523 help = 'The host architecture.');
2524
2525 oParser.add_argument('--out-thrd-funcs-hdr',
2526 metavar = 'file-thrd-funcs.h',
2527 dest = 'sOutFileThrdFuncsHdr',
2528 action = 'store',
2529 default = '-',
2530 help = 'The output header file for the threaded functions.');
2531 oParser.add_argument('--out-thrd-funcs-cpp',
2532 metavar = 'file-thrd-funcs.cpp',
2533 dest = 'sOutFileThrdFuncsCpp',
2534 action = 'store',
2535 default = '-',
2536 help = 'The output C++ file for the threaded functions.');
2537 oParser.add_argument('--out-n8ve-funcs-hdr',
2538 metavar = 'file-n8tv-funcs.h',
2539 dest = 'sOutFileN8veFuncsHdr',
2540 action = 'store',
2541 default = '-',
2542 help = 'The output header file for the native recompiler functions.');
2543 oParser.add_argument('--out-n8ve-funcs-cpp',
2544 metavar = 'file-n8tv-funcs.cpp',
2545 dest = 'sOutFileN8veFuncsCpp',
2546 action = 'store',
2547 default = '-',
2548 help = 'The output C++ file for the native recompiler functions.');
2549 oParser.add_argument('--native',
2550 dest = 'fNativeRecompilerEnabled',
2551 action = 'store_true',
2552 default = False,
2553 help = 'Enables generating the files related to native recompilation.');
2554 oParser.add_argument('--out-mod-input1',
2555 metavar = 'file-instr.cpp.h',
2556 dest = 'sOutFileModInput1',
2557 action = 'store',
2558 default = '-',
2559 help = 'The output C++/header file for modified input instruction files part 1.');
2560 oParser.add_argument('--out-mod-input2',
2561 metavar = 'file-instr.cpp.h',
2562 dest = 'sOutFileModInput2',
2563 action = 'store',
2564 default = '-',
2565 help = 'The output C++/header file for modified input instruction files part 2.');
2566 oParser.add_argument('--out-mod-input3',
2567 metavar = 'file-instr.cpp.h',
2568 dest = 'sOutFileModInput3',
2569 action = 'store',
2570 default = '-',
2571 help = 'The output C++/header file for modified input instruction files part 3.');
2572 oParser.add_argument('--out-mod-input4',
2573 metavar = 'file-instr.cpp.h',
2574 dest = 'sOutFileModInput4',
2575 action = 'store',
2576 default = '-',
2577 help = 'The output C++/header file for modified input instruction files part 4.');
2578 oParser.add_argument('--help', '-h', '-?',
2579 action = 'help',
2580 help = 'Display help and exit.');
2581 oParser.add_argument('--version', '-V',
2582 action = 'version',
2583 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
2584 % (__version__.split()[1], iai.__version__.split()[1],),
2585 help = 'Displays the version/revision of the script and exit.');
2586 self.oOptions = oParser.parse_args(asArgs[1:]);
2587 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
2588
2589 #
2590 # Process the instructions specified in the IEM sources.
2591 #
2592 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
2593 #
2594 # Generate the output files.
2595 #
2596 aaoOutputFiles = (
2597 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
2598 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
2599 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
2600 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
2601 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
2602 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
2603 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
2604 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
2605 );
2606 fRc = True;
2607 for sOutFile, fnGenMethod in aaoOutputFiles:
2608 if sOutFile == '-':
2609 fRc = fnGenMethod(sys.stdout) and fRc;
2610 else:
2611 try:
2612 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
2613 except Exception as oXcpt:
2614 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
2615 return 1;
2616 fRc = fnGenMethod(oOut) and fRc;
2617 oOut.close();
2618 if fRc:
2619 return 0;
2620
2621 return 1;
2622
2623
2624if __name__ == '__main__':
2625 sys.exit(IEMThreadedGenerator().main(sys.argv));
2626