VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@102781

Last change on this file since 102781 was 102769, checked in by vboxsync, 13 months ago

VMM/IEM: Tweaked the PUSH/POP -> FLAT64 variants since all stack accesses are flat in 64-bit mode, and any fs/gs segment prefixes are only applicable to operands. (TLB lookup code generator asserted.) bugref:10371

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 131.6 KB
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 102769 2024-01-04 23:10:56Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 102769 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the thread functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; # Would be `... | g_kdTypeInfo` inline, but that requires Python 3.9.
90g_kdTypeInfo2.update(g_kdTypeInfo);
91
92def getTypeBitCount(sType):
93 """
94 Translates a type to its size in bits.
95 """
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
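# Informal examples of the lookup above (the type names are only illustrations):
#   getTypeBitCount('uint16_t') -> 16   (from g_kdTypeInfo2 / g_kdTypeInfo)
#   getTypeBitCount('IEMMODE')  -> 2
#   getTypeBitCount('PVMCPUCC') -> 64   (leading 'P' or '*' => assumed host pointer)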
103
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
131
132class ThreadedParamRef(object):
133 """
134 A parameter reference for a threaded function.
135 """
136
137 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
138 ## The name / reference in the original code.
139 self.sOrgRef = sOrgRef;
140 ## Normalized name to deal with spaces in macro invocations and such.
141 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
142 ## Indicates that sOrgRef may not match the parameter.
143 self.fCustomRef = sStdRef is not None;
144 ## The type (typically derived).
145 self.sType = sType;
146 ## The statement making the reference.
147 self.oStmt = oStmt;
148 ## The parameter containing the references. None if implicit.
149 self.iParam = iParam;
150 ## The offset in the parameter of the reference.
151 self.offParam = offParam;
152
153 ## The variable name in the threaded function.
154 self.sNewName = 'x';
155 ## The threaded function parameter this is packed into.
156 self.iNewParam = 99;
157 ## The bit offset in iNewParam.
158 self.offNewParam = 1024
159
160
161class ThreadedFunctionVariation(object):
162 """ Threaded function variation. """
163
164 ## @name Variations.
165 ## These variations will match translation block selection/distinctions as well.
166 ## @{
167 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
168 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
169 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
170 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
171 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
172 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
173 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
174 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
175 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
176 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
177 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
178 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
179 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
180 ksVariation_64 = '_64'; ##< 64-bit mode code.
181 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
182 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
183 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
184 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
185 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
186 kasVariations = (
187 ksVariation_Default,
188 ksVariation_16,
189 ksVariation_16f,
190 ksVariation_16_Addr32,
191 ksVariation_16f_Addr32,
192 ksVariation_16_Pre386,
193 ksVariation_16f_Pre386,
194 ksVariation_32,
195 ksVariation_32f,
196 ksVariation_32_Flat,
197 ksVariation_32f_Flat,
198 ksVariation_32_Addr16,
199 ksVariation_32f_Addr16,
200 ksVariation_64,
201 ksVariation_64f,
202 ksVariation_64_FsGs,
203 ksVariation_64f_FsGs,
204 ksVariation_64_Addr32,
205 ksVariation_64f_Addr32,
206 );
207 kasVariationsWithoutAddress = (
208 ksVariation_16,
209 ksVariation_16f,
210 ksVariation_16_Pre386,
211 ksVariation_16f_Pre386,
212 ksVariation_32,
213 ksVariation_32f,
214 ksVariation_64,
215 ksVariation_64f,
216 );
217 kasVariationsWithoutAddressNot286 = (
218 ksVariation_16,
219 ksVariation_16f,
220 ksVariation_32,
221 ksVariation_32f,
222 ksVariation_64,
223 ksVariation_64f,
224 );
225 kasVariationsWithoutAddressNot286Not64 = (
226 ksVariation_16,
227 ksVariation_16f,
228 ksVariation_32,
229 ksVariation_32f,
230 );
231 kasVariationsWithoutAddressNot64 = (
232 ksVariation_16,
233 ksVariation_16f,
234 ksVariation_16_Pre386,
235 ksVariation_16f_Pre386,
236 ksVariation_32,
237 ksVariation_32f,
238 );
239 kasVariationsWithoutAddressOnly64 = (
240 ksVariation_64,
241 ksVariation_64f,
242 );
243 kasVariationsWithAddress = (
244 ksVariation_16,
245 ksVariation_16f,
246 ksVariation_16_Addr32,
247 ksVariation_16f_Addr32,
248 ksVariation_16_Pre386,
249 ksVariation_16f_Pre386,
250 ksVariation_32,
251 ksVariation_32f,
252 ksVariation_32_Flat,
253 ksVariation_32f_Flat,
254 ksVariation_32_Addr16,
255 ksVariation_32f_Addr16,
256 ksVariation_64,
257 ksVariation_64f,
258 ksVariation_64_FsGs,
259 ksVariation_64f_FsGs,
260 ksVariation_64_Addr32,
261 ksVariation_64f_Addr32,
262 );
263 kasVariationsWithAddressNot286 = (
264 ksVariation_16,
265 ksVariation_16f,
266 ksVariation_16_Addr32,
267 ksVariation_16f_Addr32,
268 ksVariation_32,
269 ksVariation_32f,
270 ksVariation_32_Flat,
271 ksVariation_32f_Flat,
272 ksVariation_32_Addr16,
273 ksVariation_32f_Addr16,
274 ksVariation_64,
275 ksVariation_64f,
276 ksVariation_64_FsGs,
277 ksVariation_64f_FsGs,
278 ksVariation_64_Addr32,
279 ksVariation_64f_Addr32,
280 );
281 kasVariationsWithAddressNot286Not64 = (
282 ksVariation_16,
283 ksVariation_16f,
284 ksVariation_16_Addr32,
285 ksVariation_16f_Addr32,
286 ksVariation_32,
287 ksVariation_32f,
288 ksVariation_32_Flat,
289 ksVariation_32f_Flat,
290 ksVariation_32_Addr16,
291 ksVariation_32f_Addr16,
292 );
293 kasVariationsWithAddressNot64 = (
294 ksVariation_16,
295 ksVariation_16f,
296 ksVariation_16_Addr32,
297 ksVariation_16f_Addr32,
298 ksVariation_16_Pre386,
299 ksVariation_16f_Pre386,
300 ksVariation_32,
301 ksVariation_32f,
302 ksVariation_32_Flat,
303 ksVariation_32f_Flat,
304 ksVariation_32_Addr16,
305 ksVariation_32f_Addr16,
306 );
307 kasVariationsWithAddressOnly64 = (
308 ksVariation_64,
309 ksVariation_64f,
310 ksVariation_64_FsGs,
311 ksVariation_64f_FsGs,
312 ksVariation_64_Addr32,
313 ksVariation_64f_Addr32,
314 );
315 kasVariationsOnlyPre386 = (
316 ksVariation_16_Pre386,
317 ksVariation_16f_Pre386,
318 );
319 kasVariationsEmitOrder = (
320 ksVariation_Default,
321 ksVariation_64,
322 ksVariation_64f,
323 ksVariation_64_FsGs,
324 ksVariation_64f_FsGs,
325 ksVariation_32_Flat,
326 ksVariation_32f_Flat,
327 ksVariation_32,
328 ksVariation_32f,
329 ksVariation_16,
330 ksVariation_16f,
331 ksVariation_16_Addr32,
332 ksVariation_16f_Addr32,
333 ksVariation_16_Pre386,
334 ksVariation_16f_Pre386,
335 ksVariation_32_Addr16,
336 ksVariation_32f_Addr16,
337 ksVariation_64_Addr32,
338 ksVariation_64f_Addr32,
339 );
340 kdVariationNames = {
341 ksVariation_Default: 'defer-to-cimpl',
342 ksVariation_16: '16-bit',
343 ksVariation_16f: '16-bit w/ eflag checking and clearing',
344 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
345 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
346 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
347 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
348 ksVariation_32: '32-bit',
349 ksVariation_32f: '32-bit w/ eflag checking and clearing',
350 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
351 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
352 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
353 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
354 ksVariation_64: '64-bit',
355 ksVariation_64f: '64-bit w/ eflag checking and clearing',
356 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
357 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
358 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
359 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
360 };
361 kdVariationsWithEflagsCheckingAndClearing = {
362 ksVariation_16f: True,
363 ksVariation_16f_Addr32: True,
364 ksVariation_16f_Pre386: True,
365 ksVariation_32f: True,
366 ksVariation_32f_Flat: True,
367 ksVariation_32f_Addr16: True,
368 ksVariation_64f: True,
369 ksVariation_64f_FsGs: True,
370 ksVariation_64f_Addr32: True,
371 };
372 kdVariationsWithFlatAddress = {
373 ksVariation_32_Flat: True,
374 ksVariation_32f_Flat: True,
375 ksVariation_64: True,
376 ksVariation_64f: True,
377 };
378 kdVariationsWithFlatStackAddress = {
379 ksVariation_32_Flat: True,
380 ksVariation_32f_Flat: True,
381 ksVariation_64: True,
382 ksVariation_64f: True,
383 ksVariation_64_FsGs: True,
384 ksVariation_64f_FsGs: True,
385 ksVariation_64_Addr32: True,
386 ksVariation_64f_Addr32: True,
387 };
388 kdVariationsWithFlat64StackAddress = {
389 ksVariation_64: True,
390 ksVariation_64f: True,
391 ksVariation_64_FsGs: True,
392 ksVariation_64f_FsGs: True,
393 ksVariation_64_Addr32: True,
394 ksVariation_64f_Addr32: True,
395 };
396 kdVariationsWithFlatAddr16 = {
397 ksVariation_16: True,
398 ksVariation_16f: True,
399 ksVariation_16_Pre386: True,
400 ksVariation_16f_Pre386: True,
401 ksVariation_32_Addr16: True,
402 ksVariation_32f_Addr16: True,
403 };
404 kdVariationsWithFlatAddr32No64 = {
405 ksVariation_16_Addr32: True,
406 ksVariation_16f_Addr32: True,
407 ksVariation_32: True,
408 ksVariation_32f: True,
409 ksVariation_32_Flat: True,
410 ksVariation_32f_Flat: True,
411 };
412 ## @}
413
414 ## IEM_CIMPL_F_XXX flags that we know.
415 ## The value indicates whether it terminates the TB or not. The goal is to
416 ## improve the recompiler so all but END_TB will be False.
417 ##
418 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
419 kdCImplFlags = {
420 'IEM_CIMPL_F_MODE': False,
421 'IEM_CIMPL_F_BRANCH_DIRECT': False,
422 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
423 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
424 'IEM_CIMPL_F_BRANCH_FAR': True,
425 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
426 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
427 'IEM_CIMPL_F_BRANCH_STACK': False,
428 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
429 'IEM_CIMPL_F_RFLAGS': False,
430 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
431 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
432 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
433 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
434 'IEM_CIMPL_F_STATUS_FLAGS': False,
435 'IEM_CIMPL_F_VMEXIT': False,
436 'IEM_CIMPL_F_FPU': False,
437 'IEM_CIMPL_F_REP': False,
438 'IEM_CIMPL_F_IO': False,
439 'IEM_CIMPL_F_END_TB': True,
440 'IEM_CIMPL_F_XCPT': True,
441 'IEM_CIMPL_F_CALLS_CIMPL': False,
442 'IEM_CIMPL_F_CALLS_AIMPL': False,
443 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
444 };
445
446 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
447 self.oParent = oThreadedFunction # type: ThreadedFunction
448 ##< ksVariation_Xxxx.
449 self.sVariation = sVariation
450
451 ## Threaded function parameter references.
452 self.aoParamRefs = [] # type: List[ThreadedParamRef]
453 ## Unique parameter references.
454 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
455 ## Minimum number of parameters to the threaded function.
456 self.cMinParams = 0;
457
458 ## List/tree of statements for the threaded function.
459 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
460
461 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
462 self.iEnumValue = -1;
463
464 ## Native recompilation details for this variation.
465 self.oNativeRecomp = None;
466
467 def getIndexName(self):
468 sName = self.oParent.oMcBlock.sFunction;
469 if sName.startswith('iemOp_'):
470 sName = sName[len('iemOp_'):];
471 if self.oParent.oMcBlock.iInFunction == 0:
472 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
473 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
474
475 def getThreadedFunctionName(self):
476 sName = self.oParent.oMcBlock.sFunction;
477 if sName.startswith('iemOp_'):
478 sName = sName[len('iemOp_'):];
479 if self.oParent.oMcBlock.iInFunction == 0:
480 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
481 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
482
483 def getNativeFunctionName(self):
484 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
485
486 def getShortName(self):
487 sName = self.oParent.oMcBlock.sFunction;
488 if sName.startswith('iemOp_'):
489 sName = sName[len('iemOp_'):];
490 if self.oParent.oMcBlock.iInFunction == 0:
491 return '%s%s' % ( sName, self.sVariation, );
492 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
493
494 def isWithFlagsCheckingAndClearingVariation(self):
495 """
496 Checks if this is a variation that checks and clears EFLAGS.
497 """
498 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
499
500 #
501 # Analysis and code morphing.
502 #
503
504 def raiseProblem(self, sMessage):
505 """ Raises a problem. """
506 self.oParent.raiseProblem(sMessage);
507
508 def warning(self, sMessage):
509 """ Emits a warning. """
510 self.oParent.warning(sMessage);
511
512 def analyzeReferenceToType(self, sRef):
513 """
514 Translates a variable or structure reference to a type.
515 Returns type name.
516 Raises exception if unable to figure it out.
517 """
518 ch0 = sRef[0];
519 if ch0 == 'u':
520 if sRef.startswith('u32'):
521 return 'uint32_t';
522 if sRef.startswith('u8') or sRef == 'uReg':
523 return 'uint8_t';
524 if sRef.startswith('u64'):
525 return 'uint64_t';
526 if sRef.startswith('u16'):
527 return 'uint16_t';
528 elif ch0 == 'b':
529 return 'uint8_t';
530 elif ch0 == 'f':
531 return 'bool';
532 elif ch0 == 'i':
533 if sRef.startswith('i8'):
534 return 'int8_t';
535 if sRef.startswith('i16'):
536 return 'int16_t';
537 if sRef.startswith('i32'):
538 return 'int32_t';
539 if sRef.startswith('i64'):
540 return 'int64_t';
541 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
542 return 'uint8_t';
543 elif ch0 == 'p':
544 if sRef.find('-') < 0:
545 return 'uintptr_t';
546 if sRef.startswith('pVCpu->iem.s.'):
547 sField = sRef[len('pVCpu->iem.s.') : ];
548 if sField in g_kdIemFieldToType:
549 if g_kdIemFieldToType[sField][0]:
550 return g_kdIemFieldToType[sField][0];
551 elif ch0 == 'G' and sRef.startswith('GCPtr'):
552 return 'uint64_t';
553 elif ch0 == 'e':
554 if sRef == 'enmEffOpSize':
555 return 'IEMMODE';
556 elif ch0 == 'o':
557 if sRef.startswith('off32'):
558 return 'uint32_t';
559 elif sRef == 'cbFrame': # enter
560 return 'uint16_t';
561 elif sRef == 'cShift': ## @todo risky
562 return 'uint8_t';
563
564 self.raiseProblem('Unknown reference: %s' % (sRef,));
565 return None; # Shut up pylint 2.16.2.
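# A few informal examples of the prefix-based inference above (the names are
# only illustrative):
#   'u16Disp'              -> 'uint16_t'      'fRepPrefix' -> 'bool'
#   'i32Imm'               -> 'int32_t'       'pfnAImpl'   -> 'uintptr_t' (no '-' in the name)
#   'pVCpu->iem.s.iEffSeg' -> 'uint8_t' via g_kdIemFieldToType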
566
567 def analyzeCallToType(self, sFnRef):
568 """
569 Determines the type of an indirect function call.
570 """
571 assert sFnRef[0] == 'p';
572
573 #
574 # Simple?
575 #
576 if sFnRef.find('-') < 0:
577 oDecoderFunction = self.oParent.oMcBlock.oFunction;
578
579 # Try the argument list of the function definition macro invocation first.
580 iArg = 2;
581 while iArg < len(oDecoderFunction.asDefArgs):
582 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
583 return oDecoderFunction.asDefArgs[iArg - 1];
584 iArg += 1;
585
586 # Then check for a line that includes the word and looks like a variable declaration.
587 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
588 for sLine in oDecoderFunction.asLines:
589 oMatch = oRe.match(sLine);
590 if oMatch:
591 if not oMatch.group(1).startswith('const'):
592 return oMatch.group(1);
593 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
594
595 #
596 # Deal with the pImpl->pfnXxx:
597 #
598 elif sFnRef.startswith('pImpl->pfn'):
599 sMember = sFnRef[len('pImpl->') : ];
600 sBaseType = self.analyzeCallToType('pImpl');
601 offBits = sMember.rfind('U') + 1;
602 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
603 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
604 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
605 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
606 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
607 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
608 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
609 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
610 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
611 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
612
613 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
614
615 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
616 return None; # Shut up pylint 2.16.2.
617
618 def analyze8BitGRegStmt(self, oStmt):
619 """
620 Gets the 8-bit general purpose register access details of the given statement.
621 ASSUMES the statement is one accessing an 8-bit GREG.
622 """
623 idxReg = 0;
624 if ( oStmt.sName.find('_FETCH_') > 0
625 or oStmt.sName.find('_REF_') > 0
626 or oStmt.sName.find('_TO_LOCAL') > 0):
627 idxReg = 1;
628
629 sRegRef = oStmt.asParams[idxReg];
630 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
631 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
632 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
633 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
634 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
635 else:
636 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
637
638 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
639 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
640 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
641 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
642 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
643 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
644 else:
645 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
646 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
647 sStdRef = 'bOther8Ex';
648
649 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
650 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
651 return (idxReg, sOrgExpr, sStdRef);
652
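# Rough example of what the rewrite above produces for a typical MODRM-based
# MC (statement and variable names are illustrative):
#   IEM_MC_FETCH_GREG_U8(u8Value, IEM_GET_MODRM_RM(pVCpu, bRm))
#     -> idxReg=1, sOrgExpr='IEM_GET_MODRM_RM_EX8(pVCpu, bRm)', sStdRef='bRmRm8Ex'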
653
654 ## Maps memory related MCs to info for FLAT conversion.
655 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
656 ## segmentation checking for every memory access. Only applied to accesses
657 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment,
658 ## the latter (CS) is just to keep things simple (we could safely fetch via
659 ## it, but only in 64-bit mode could we safely write via it, IIRC).
660 kdMemMcToFlatInfo = {
661 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
662 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
663 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
664 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
665 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
666 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
667 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
668 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
669 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
670 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
671 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
672 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
673 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
674 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
675 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
676 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
677 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
678 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
679 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
680 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
681 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
682 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
683 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
684 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
685 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
686 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
687 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
688 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
689 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
690 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
691 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
692 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
693 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
694 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
695 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
696 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
697 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
698 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
699 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
700 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
701 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
702 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
703 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
704 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
705 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
706 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
707 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
708 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
709 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
710 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
711 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
712 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
713 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
714 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
715 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
716 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
717 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
718 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
719 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
720 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
721 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
722 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
723 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
724 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
725 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
726 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
727 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
728 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
729 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
730 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
731 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
732 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
733 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
734 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
735 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
736 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
737 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
738 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
739 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
740 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
741 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
742 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
743 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
744 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
745 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
746 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
747 };
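# Sketch of the morphing driven by this table (parameter names illustrative):
#   IEM_MC_FETCH_MEM_U16(u16Dst, pVCpu->iem.s.iEffSeg, GCPtrEffSrc)
# becomes, in a flat variation,
#   IEM_MC_FETCH_MEM_FLAT_U16(u16Dst, GCPtrEffSrc)
# i.e. the segment argument at the index given above (1 here) is dropped and
# the MC is renamed to its FLAT counterpart.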
748
749 kdMemMcToFlatInfoStack = {
750 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
751 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
752 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
753 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
754 'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
755 'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
756 'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
757 };
758
759 kdThreadedCalcRmEffAddrMcByVariation = {
760 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
761 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
762 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
763 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
764 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
765 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
766 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
767 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
768 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
769 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
770 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
771 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
772 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
773 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
774 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
775 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
776 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
777 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
778 };
779
780 def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
781 """
782 Transforms (copy) the statements into those for the threaded function.
783
784 Returns list/tree of statements (aoStmts is not modified) and the new
785 iParamRef value.
786 """
787 #
788 # We'll be traversing aoParamRefs in parallel to the statements, so we
789 # must match the traversal in analyzeFindThreadedParamRefs exactly.
790 #
791 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
792 aoThreadedStmts = [];
793 for oStmt in aoStmts:
794 # Skip C++ statements that are purely related to decoding.
795 if not oStmt.isCppStmt() or not oStmt.fDecode:
796 # Copy the statement. Make a deep copy to make sure we've got our own
797 # copies of all instance variables, even if a bit overkill at the moment.
798 oNewStmt = copy.deepcopy(oStmt);
799 aoThreadedStmts.append(oNewStmt);
800 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
801
802 # If the statement has parameter references, process the relevant parameters.
803 # We grab the references relevant to this statement and apply them in reverse order.
804 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
805 iParamRefFirst = iParamRef;
806 while True:
807 iParamRef += 1;
808 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
809 break;
810
811 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
812 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
813 oCurRef = self.aoParamRefs[iCurRef];
814 if oCurRef.iParam is not None:
815 assert oCurRef.oStmt == oStmt;
816 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
817 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
818 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
819 or oCurRef.fCustomRef), \
820 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
821 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
822 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
823 + oCurRef.sNewName \
824 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
825
826 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
827 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
828 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
829 assert len(oNewStmt.asParams) == 3;
830
831 if self.sVariation in self.kdVariationsWithFlatAddr16:
832 oNewStmt.asParams = [
833 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
834 ];
835 else:
836 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
837 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
838 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
839
840 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
841 oNewStmt.asParams = [
842 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
843 ];
844 else:
845 oNewStmt.asParams = [
846 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
847 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
848 ];
849 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
850 elif ( oNewStmt.sName
851 in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
852 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
853 'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
854 if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
855 'IEM_MC_SET_RIP_U64_AND_FINISH', ):
856 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
857 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
858 and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
859 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
860 oNewStmt.sName += '_THREADED';
861 if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
862 oNewStmt.sName += '_PC64';
863 elif self.sVariation in (self.ksVariation_64f, self.ksVariation_64f_FsGs, self.ksVariation_64f_Addr32):
864 oNewStmt.sName += '_PC64_WITH_FLAGS';
865 elif self.sVariation == self.ksVariation_16_Pre386:
866 oNewStmt.sName += '_PC16';
867 elif self.sVariation == self.ksVariation_16f_Pre386:
868 oNewStmt.sName += '_PC16_WITH_FLAGS';
869 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
870 assert self.sVariation != self.ksVariation_Default;
871 oNewStmt.sName += '_PC32';
872 else:
873 oNewStmt.sName += '_PC32_WITH_FLAGS';
874
875 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
876 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
877 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
878 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
879 oNewStmt.sName += '_THREADED';
880
881 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
882 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
883 oNewStmt.sName += '_THREADED';
884 oNewStmt.idxFn += 1;
885 oNewStmt.idxParams += 1;
886 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
887
888 # ... and in FLAT modes we must morph memory accesses into FLAT accesses ...
889 elif ( self.sVariation in self.kdVariationsWithFlatAddress
890 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
891 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
892 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
893 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
894 if idxEffSeg != -1:
895 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
896 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
897 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
898 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
899 oNewStmt.asParams.pop(idxEffSeg);
900 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
901
902 # ... PUSH and POP also need flat variants, but these differ a little.
903 elif ( self.sVariation in self.kdVariationsWithFlatStackAddress
904 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
905 or oNewStmt.sName.startswith('IEM_MC_POP'))):
906 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
907 self.kdVariationsWithFlat64StackAddress)];
908
909
910 # Process branches of conditionals recursively.
911 if isinstance(oStmt, iai.McStmtCond):
912 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
913 if oStmt.aoElseBranch:
914 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);
915
916 return (aoThreadedStmts, iParamRef);
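# Morphing sketch (informal): in the '_64' variation a statement such as
#   IEM_MC_ADVANCE_RIP_AND_FINISH()
# comes out as
#   IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(cbInstrP)
# i.e. the implicit instruction length turns into an explicit threaded parameter.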
917
918
919 def analyzeConsolidateThreadedParamRefs(self):
920 """
921 Consolidate threaded function parameter references into a dictionary
922 with lists of the references to each variable/field.
923 """
924 # Gather unique parameters.
925 self.dParamRefs = {};
926 for oRef in self.aoParamRefs:
927 if oRef.sStdRef not in self.dParamRefs:
928 self.dParamRefs[oRef.sStdRef] = [oRef,];
929 else:
930 self.dParamRefs[oRef.sStdRef].append(oRef);
931
932 # Generate names for them for use in the threaded function.
933 dParamNames = {};
934 for sName, aoRefs in self.dParamRefs.items():
935 # Morph the reference expression into a name.
936 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
937 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
938 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
939 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
940 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
941 elif sName.find('.') >= 0 or sName.find('->') >= 0:
942 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
943 else:
944 sName += 'P';
945
946 # Ensure it's unique.
947 if sName in dParamNames:
948 for i in range(10):
949 if sName + str(i) not in dParamNames:
950 sName += str(i);
951 break;
952 dParamNames[sName] = True;
953
954 # Update all the references.
955 for oRef in aoRefs:
956 oRef.sNewName = sName;
957
958 # Organize them by size too, for the purpose of optimizing them.
959 dBySize = {} # type: Dict[str, str]
960 for sStdRef, aoRefs in self.dParamRefs.items():
961 if aoRefs[0].sType[0] != 'P':
962 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
963 assert(cBits <= 64);
964 else:
965 cBits = 64;
966
967 if cBits not in dBySize:
968 dBySize[cBits] = [sStdRef,]
969 else:
970 dBySize[cBits].append(sStdRef);
971
972 # Pack the parameters as best as we can, starting with the largest ones
973 # and ASSUMING a 64-bit parameter size.
974 self.cMinParams = 0;
975 offNewParam = 0;
976 for cBits in sorted(dBySize.keys(), reverse = True):
977 for sStdRef in dBySize[cBits]:
978 if offNewParam == 0 or offNewParam + cBits > 64:
979 self.cMinParams += 1;
980 offNewParam = cBits;
981 else:
982 offNewParam += cBits;
983 assert(offNewParam <= 64);
984
985 for oRef in self.dParamRefs[sStdRef]:
986 oRef.iNewParam = self.cMinParams - 1;
987 oRef.offNewParam = offNewParam - cBits;
988
989 # Currently there are a few that require 4 parameters; list these so we can figure out why:
990 if self.cMinParams >= 4:
991 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
992 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
993
994 return True;
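# Packing sketch (informal): given one 32-bit, one 16-bit and one 8-bit
# reference, the loop above packs all three into a single 64-bit parameter
# (32-bit ref at bit 0, 16-bit at bit 32, 8-bit at bit 48), so cMinParams
# ends up as 1; a 64-bit reference would claim a parameter of its own.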
995
996 ksHexDigits = '0123456789abcdefABCDEF';
997
998 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
999 """
1000 Scans the statements for things that have to be passed on to the threaded
1001 function (populates self.aoParamRefs).
1002 """
1003 for oStmt in aoStmts:
1004 # Some statements we can skip altogether.
1005 if isinstance(oStmt, iai.McCppPreProc):
1006 continue;
1007 if oStmt.isCppStmt() and oStmt.fDecode:
1008 continue;
1009 if oStmt.sName in ('IEM_MC_BEGIN',):
1010 continue;
1011
1012 if isinstance(oStmt, iai.McStmtVar):
1013 if oStmt.sValue is None:
1014 continue;
1015 aiSkipParams = { 0: True, 1: True, 3: True };
1016 else:
1017 aiSkipParams = {};
1018
1019 # Several statements have implicit parameters and some have different parameters.
1020 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1021 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
1022 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1023 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1024 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1025 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1026
1027 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
1028 and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
1029 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1030
1031 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1032 # This is being pretty presumptive about bRm always being the RM byte...
1033 assert len(oStmt.asParams) == 3;
1034 assert oStmt.asParams[1] == 'bRm';
1035
1036 if self.sVariation in self.kdVariationsWithFlatAddr16:
1037 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1038 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1039 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1040 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1041 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1042 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1043 'uint8_t', oStmt, sStdRef = 'bSib'));
1044 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1045 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1046 else:
1047 assert self.sVariation in self.kasVariationsWithAddressOnly64;
1048 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1049 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1050 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1051 'uint8_t', oStmt, sStdRef = 'bSib'));
1052 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1053 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1054 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1055 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1056 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1057
1058 # 8-bit register accesses need to have their index argument reworked to take REX into account.
1059 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1060 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1061 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
1062 aiSkipParams[idxReg] = True; # Skip the parameter below.
1063
1064 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1065 if ( self.sVariation in self.kdVariationsWithFlatAddress
1066 and oStmt.sName in self.kdMemMcToFlatInfo
1067 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1068 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1069
1070 # Inspect the target of calls to see if we need to pass down a
1071 # function pointer or function table pointer for it to work.
1072 if isinstance(oStmt, iai.McStmtCall):
1073 if oStmt.sFn[0] == 'p':
1074 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1075 elif ( oStmt.sFn[0] != 'i'
1076 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1077 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1078 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1079 aiSkipParams[oStmt.idxFn] = True;
1080
1081 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1082 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1083 assert oStmt.idxFn == 2;
1084 aiSkipParams[0] = True;
1085
1086
1087 # Check all the parameters for bogus references.
1088 for iParam, sParam in enumerate(oStmt.asParams):
1089 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1090 # The parameter may contain a C expression, so we have to try
1091 # to extract the relevant bits, i.e. variables and fields while
1092 # ignoring operators and parentheses.
1093 offParam = 0;
1094 while offParam < len(sParam):
1095 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1096 ch = sParam[offParam];
1097 if ch.isalpha() or ch == '_':
1098 offStart = offParam;
1099 offParam += 1;
1100 while offParam < len(sParam):
1101 ch = sParam[offParam];
1102 if not ch.isalnum() and ch != '_' and ch != '.':
1103 if ch != '-' or sParam[offParam + 1] != '>':
1104 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1105 if ( ch == '('
1106 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1107 offParam += len('(pVM)->') - 1;
1108 else:
1109 break;
1110 offParam += 1;
1111 offParam += 1;
1112 sRef = sParam[offStart : offParam];
1113
1114 # For register references, we pass the full register indexes instead, as macros
1115 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg and the
1116 # threaded function will be more efficient if we just pass the register index
1117 # as a 4-bit param.
1118 if ( sRef.startswith('IEM_GET_MODRM')
1119 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
1120 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1121 if sParam[offParam] != '(':
1122 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1123 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1124 if asMacroParams is None:
1125 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1126 offParam = offCloseParam + 1;
1127 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1128 oStmt, iParam, offStart));
1129
1130 # We can skip known variables.
1131 elif sRef in self.oParent.dVariables:
1132 pass;
1133
1134 # Skip certain macro invocations.
1135 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1136 'IEM_GET_GUEST_CPU_FEATURES',
1137 'IEM_IS_GUEST_CPU_AMD',
1138 'IEM_IS_16BIT_CODE',
1139 'IEM_IS_32BIT_CODE',
1140 'IEM_IS_64BIT_CODE',
1141 ):
1142 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1143 if sParam[offParam] != '(':
1144 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1145 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1146 if asMacroParams is None:
1147 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1148 offParam = offCloseParam + 1;
1149
1150 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1151 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1152 'IEM_IS_16BIT_CODE',
1153 'IEM_IS_32BIT_CODE',
1154 'IEM_IS_64BIT_CODE',
1155 ):
1156 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1157 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1158 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1159 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1160 offParam += 1;
1161
1162 # Skip constants, globals, types (casts), sizeof and macros.
1163 elif ( sRef.startswith('IEM_OP_PRF_')
1164 or sRef.startswith('IEM_ACCESS_')
1165 or sRef.startswith('IEMINT_')
1166 or sRef.startswith('X86_GREG_')
1167 or sRef.startswith('X86_SREG_')
1168 or sRef.startswith('X86_EFL_')
1169 or sRef.startswith('X86_FSW_')
1170 or sRef.startswith('X86_FCW_')
1171 or sRef.startswith('X86_XCPT_')
1172 or sRef.startswith('IEMMODE_')
1173 or sRef.startswith('IEM_F_')
1174 or sRef.startswith('IEM_CIMPL_F_')
1175 or sRef.startswith('g_')
1176 or sRef.startswith('iemAImpl_')
1177 or sRef.startswith('kIemNativeGstReg_')
1178 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1179 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1180 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1181 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1182 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1183 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1184 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1185 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1186 'NIL_RTGCPTR',) ):
1187 pass;
1188
1189 # Otherwise it is probably something that needs to be passed to the threaded function:
1190 # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1191 elif ( ( '.' not in sRef
1192 and '-' not in sRef
1193 and sRef not in ('pVCpu', ) )
1194 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1195 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1196 oStmt, iParam, offStart));
1197 # Number.
1198 elif ch.isdigit():
1199 if ( ch == '0'
1200 and offParam + 2 <= len(sParam)
1201 and sParam[offParam + 1] in 'xX'
1202 and sParam[offParam + 2] in self.ksHexDigits ):
1203 offParam += 2;
1204 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1205 offParam += 1;
1206 else:
1207 while offParam < len(sParam) and sParam[offParam].isdigit():
1208 offParam += 1;
1209 # Comment?
1210 elif ( ch == '/'
1211 and offParam + 4 <= len(sParam)
1212 and sParam[offParam + 1] == '*'):
1213 offParam += 2;
1214 offNext = sParam.find('*/', offParam);
1215 if offNext < offParam:
1216 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1217 offParam = offNext + 2;
1218 # Whatever else.
1219 else:
1220 offParam += 1;
1221
1222 # Traverse the branches of conditionals.
1223 if isinstance(oStmt, iai.McStmtCond):
1224 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1225 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1226 return True;
1227
1228 def analyzeVariation(self, aoStmts):
1229 """
1230 2nd part of the analysis, done on each variation.
1231
1232 The variations may differ in parameter requirements and will end up with
1233 slightly different MC sequences. Thus this is done on each individually.
1234
1235 Returns dummy True - raises exception on trouble.
1236 """
1237 # Now scan the code for variables and field references that need to
1238 # be passed to the threaded function because they are related to the
1239 # instruction decoding.
1240 self.analyzeFindThreadedParamRefs(aoStmts);
1241 self.analyzeConsolidateThreadedParamRefs();
1242
1243 # Morph the statement stream for the block into what we'll be using in the threaded function.
1244 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1245 if iParamRef != len(self.aoParamRefs):
1246 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1247
1248 return True;
1249
1250 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1251 """
1252 Produces generic C++ statements that emit a call to the threaded function
1253 variation and any subsequent checks that may be necessary after that.
1254
1255 The sCallVarNm is for emitting the call via a variable holding the function index instead of the constant from getIndexName().
1256 """
1257 aoStmts = [
1258 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1259 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1260 cchIndent = cchIndent), # Scope and a hook for various stuff.
1261 ];
1262
1263 # The call to the threaded function.
1264 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1265 for iParam in range(self.cMinParams):
1266 asFrags = [];
1267 for aoRefs in self.dParamRefs.values():
1268 oRef = aoRefs[0];
1269 if oRef.iNewParam == iParam:
1270 sCast = '(uint64_t)'
1271 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1272 sCast = '(uint64_t)(u' + oRef.sType + ')';
1273 if oRef.offNewParam == 0:
1274 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1275 else:
1276 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1277 assert asFrags;
1278 asCallArgs.append(' | '.join(asFrags));
1279
1280 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1281
1282 # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
1283 # emit this mode check from the compilation loop. On the
1284 # plus side, this means we eliminate an unnecessary call at
1285 # the end of the TB. :-)
1286 ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1287 ## mask and maybe emit additional checks.
1288 #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1289 # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1290 # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1291 # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1292 # cchIndent = cchIndent));
1293
1294 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1295 if not sCImplFlags:
1296 sCImplFlags = '0'
1297 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1298
1299 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1300 # indicates we should do so.
1301 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1302 asEndTbFlags = [];
1303 asTbBranchedFlags = [];
1304 for sFlag in self.oParent.dsCImplFlags:
1305 if self.kdCImplFlags[sFlag] is True:
1306 asEndTbFlags.append(sFlag);
1307 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1308 asTbBranchedFlags.append(sFlag);
1309 if asTbBranchedFlags:
1310 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1311 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1312 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1313 if asEndTbFlags:
1314 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1315 cchIndent = cchIndent));
1316
1317 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1318 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1319
1320 return aoStmts;
1321
1322
1323class ThreadedFunction(object):
1324 """
1325 A threaded function.
1326 """
1327
1328 def __init__(self, oMcBlock: iai.McBlock) -> None:
1329 self.oMcBlock = oMcBlock # type: iai.McBlock
1330 # The remaining fields are only useful after analyze() has been called:
1331 ## Variations for this block. There is at least one.
1332 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1333 ## Variation dictionary containing the same as aoVariations.
1334 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1335 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1336 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1337 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1338 ## and those determined by analyzeCodeOperation().
1339 self.dsCImplFlags = {} # type: Dict[str, bool]
1340
1341 @staticmethod
1342 def dummyInstance():
1343 """ Gets a dummy instance. """
1344 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1345 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1346
1347 def hasWithFlagsCheckingAndClearingVariation(self):
1348 """
1349 Check if there are one or more with-flags-checking-and-clearing
1350 variations for this threaded function.
1351 """
1352 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1353 if sVarWithFlags in self.dVariations:
1354 return True;
1355 return False;
1356
1357 #
1358 # Analysis and code morphing.
1359 #
1360
1361 def raiseProblem(self, sMessage):
1362 """ Raises a problem. """
1363 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1364
1365 def warning(self, sMessage):
1366 """ Emits a warning. """
1367 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1368
1369 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1370 """ Scans the statements for MC variables and call arguments. """
1371 for oStmt in aoStmts:
1372 if isinstance(oStmt, iai.McStmtVar):
1373 if oStmt.sVarName in self.dVariables:
1374 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1375 self.dVariables[oStmt.sVarName] = oStmt;
1376
1377 # There shouldn't be any variables or arguments declared inside if/
1378 # else blocks, but scan them too to be on the safe side.
1379 if isinstance(oStmt, iai.McStmtCond):
1380 cBefore = len(self.dVariables);
1381 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1382 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1383 #if len(self.dVariables) != cBefore:
1384 # raise Exception('Variables/arguments defined in conditional branches!');
1385 return True;
1386
1387 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
1388 """
1389 Analyzes the code looking for clues as to additional side-effects.
1390
1391 Currently this is simply looking for branching and adding the relevant
1392 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1393 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1394 """
1395 for oStmt in aoStmts:
1396 # Set IEM_CIMPL_F_BRANCH_XXX if we see any branching MCs.
1397 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1398 assert not fSeenConditional;
1399 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1400 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1401 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1402 if fSeenConditional:
1403 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1404
1405 # Check for CIMPL and AIMPL calls.
1406 if oStmt.sName.startswith('IEM_MC_CALL_'):
1407 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1408 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1409 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1410 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1411 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1412 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1413 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1414 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1415 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1416 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1417 else:
1418 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1419
1420 # Process branches of conditionals recursively.
1421 if isinstance(oStmt, iai.McStmtCond):
1422 self.analyzeCodeOperation(oStmt.aoIfBranch, True);
1423 if oStmt.aoElseBranch:
1424 self.analyzeCodeOperation(oStmt.aoElseBranch, True);
1425
1426 return True;
1427
1428 def analyze(self):
1429 """
1430 Analyzes the code, identifying the number of parameters it requires and such.
1431
1432 Returns dummy True - raises exception on trouble.
1433 """
1434
1435 # Check the block for errors before we proceed (will decode it).
1436 asErrors = self.oMcBlock.check();
1437 if asErrors:
1438 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
1439 for sError in asErrors]));
1440
1441 # Decode the block into a list/tree of McStmt objects.
1442 aoStmts = self.oMcBlock.decode();
1443
1444 # Scan the statements for local variables and call arguments (self.dVariables).
1445 self.analyzeFindVariablesAndCallArgs(aoStmts);
1446
1447 # Scan the code for IEM_CIMPL_F_ and other clues.
1448 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
1449 self.analyzeCodeOperation(aoStmts);
1450 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
1451 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
1452 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
1453 self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');
1454
1455 # Create variations as needed.
1456 if iai.McStmt.findStmtByNames(aoStmts,
1457 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
1458 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
1459 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
1460 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
1461 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
1462
1463 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
1464 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
1465 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
1466 'IEM_MC_FETCH_MEM_U32' : True,
1467 'IEM_MC_FETCH_MEM_U64' : True,
1468 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
1469 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
1470 'IEM_MC_STORE_MEM_U32' : True,
1471 'IEM_MC_STORE_MEM_U64' : True, }):
1472 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1473 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
1474 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1475 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
1476 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1477 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
1478 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1479 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
1480 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1481 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1482 else:
1483 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
1484 else:
1485 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1486 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
1487 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1488 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
1489 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1490 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
1491 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1492 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
1493 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1494 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1495 else:
1496 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
1497
1498 if not iai.McStmt.findStmtByNames(aoStmts,
1499 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
1500 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
1501 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
1502 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
1503 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
1504 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
1505 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
1506 }):
1507 asVariations = [sVariation for sVariation in asVariations
1508 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
1509
1510 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
1511
1512 # Dictionary variant of the list.
1513 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
1514
1515 # Continue the analysis on each variation.
1516 for oVariation in self.aoVariations:
1517 oVariation.analyzeVariation(aoStmts);
1518
1519 return True;
1520
1521 ## Used by emitThreadedCallStmts.
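 ## (These are the variations that depend on address-size / segment prefixes,
 ## i.e. the ones requiring the extra switch-value bits 8 and 16 below.)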
1522 kdVariationsWithNeedForPrefixCheck = {
1523 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
1524 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
1525 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
1526 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
1527 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
1528 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
1529 ThreadedFunctionVariation.ksVariation_32_Flat: True,
1530 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
1531 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
1532 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
1533 };
1534
1535 def emitThreadedCallStmts(self):
1536 """
1537 Worker for morphInputCode that returns a list of statements that emit
1538 the call to the threaded functions for the block.
1539 """
1540 # Special case for only default variation:
1541 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
1542 return self.aoVariations[0].emitThreadedCallStmts(0);
1543
1544 #
1545 # Case statement sub-class.
1546 #
1547 dByVari = self.dVariations;
1548 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
1549 class Case:
1550 def __init__(self, sCond, sVarNm = None):
1551 self.sCond = sCond;
1552 self.sVarNm = sVarNm;
1553 self.oVar = dByVari[sVarNm] if sVarNm else None;
1554 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
1555
1556 def toCode(self):
1557 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1558 if self.aoBody:
1559 aoStmts.extend(self.aoBody);
1560 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
1561 return aoStmts;
1562
1563 def toFunctionAssignment(self):
1564 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1565 if self.aoBody:
1566 aoStmts.extend([
1567 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
1568 iai.McCppGeneric('break;', cchIndent = 8),
1569 ]);
1570 return aoStmts;
1571
1572 def isSame(self, oThat):
1573 if not self.aoBody: # fall thru always matches.
1574 return True;
1575 if len(self.aoBody) != len(oThat.aoBody):
1576 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
1577 return False;
1578 for iStmt, oStmt in enumerate(self.aoBody):
1579 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
1580 assert isinstance(oStmt, iai.McCppGeneric);
1581 assert not isinstance(oStmt, iai.McStmtCond);
1582 if isinstance(oStmt, iai.McStmtCond):
1583 return False;
1584 if oStmt.sName != oThatStmt.sName:
1585 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
1586 return False;
1587 if len(oStmt.asParams) != len(oThatStmt.asParams):
1588 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
1589 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
1590 return False;
1591 for iParam, sParam in enumerate(oStmt.asParams):
1592 if ( sParam != oThatStmt.asParams[iParam]
1593 and ( iParam != 1
1594 or not isinstance(oStmt, iai.McCppCall)
1595 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
1596 or sParam != self.oVar.getIndexName()
1597 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
1598 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
1599 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
1600 return False;
1601 return True;
1602
1603 #
1604 # Determine what we're switch on.
1605 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
1606 #
1607 fSimple = True;
1608 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
1609 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
1610 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
1611 # Accesses via FS, GS and CS go through the non-FLAT functions. (CS
1612 # is not writable in 32-bit mode (at least), thus the penalty mode
1613 # for any accesses via it (simpler this way).)
1614 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
1615 fSimple = False; # threaded functions.
1616 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1617 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
1618 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
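 # Switch value layout: bits 0-2 = CPU mode + flat/pre-386 (per the ASSUMES
 # above), bit 3 (8) = effective address size differs from the mode, bit 4
 # (16) = non-flat segment (FS/GS/CS), bit 5 (32) = eflags/IRQ-shadow/
 # breakpoint check needed.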
1619
1620 #
1621 # Generate the case statements.
1622 #
1623 # pylintx: disable=x
1624 aoCases = [];
1625 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
1626 assert not fSimple;
1627 aoCases.extend([
1628 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
1629 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
1630 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
1631 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
1632 ]);
1633 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
1634 aoCases.extend([
1635 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
1636 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
1637 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
1638 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
1639 ]);
1640 elif ThrdFnVar.ksVariation_64 in dByVari:
1641 assert fSimple;
1642 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
1643 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
1644 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
1645
1646 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
1647 assert not fSimple;
1648 aoCases.extend([
1649 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
1650 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
1651 Case('IEMMODE_32BIT | 16', None), # fall thru
1652 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1653 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
1654 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
1655 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
1656 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
1657 ]);
1658 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
1659 aoCases.extend([
1660 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
1661 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
1662 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
1663 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1664 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
1665 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
1666 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
1667 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
1668 ]);
1669 elif ThrdFnVar.ksVariation_32 in dByVari:
1670 assert fSimple;
1671 aoCases.extend([
1672 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1673 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1674 ]);
1675 if ThrdFnVar.ksVariation_32f in dByVari:
1676 aoCases.extend([
1677 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1678 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1679 ]);
1680
1681 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
1682 assert not fSimple;
1683 aoCases.extend([
1684 Case('IEMMODE_16BIT | 16', None), # fall thru
1685 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
1686 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
1687 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
1688 ]);
1689 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
1690 aoCases.extend([
1691 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
1692 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
1693 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
1694 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
1695 ]);
1696 elif ThrdFnVar.ksVariation_16 in dByVari:
1697 assert fSimple;
1698 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
1699 if ThrdFnVar.ksVariation_16f in dByVari:
1700 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
1701
1702 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
1703 if not fSimple:
1704 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
1705 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
1706 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
1707 if not fSimple:
1708 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
1709 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
1710
1711 #
1712 # If the case bodies are all the same, except for the function called,
1713 # we can reduce the code size and hopefully compile time.
1714 #
1715 iFirstCaseWithBody = 0;
1716 while not aoCases[iFirstCaseWithBody].aoBody:
1717 iFirstCaseWithBody += 1;
1718 fAllSameCases = True;
1719 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
1720 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
1721 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
1722 if fAllSameCases:
1723 aoStmts = [
1724 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
1725 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1726 iai.McCppGeneric('{'),
1727 ];
1728 for oCase in aoCases:
1729 aoStmts.extend(oCase.toFunctionAssignment());
1730 aoStmts.extend([
1731 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
1732 iai.McCppGeneric('}'),
1733 ]);
1734 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
1735
1736 else:
1737 #
1738 # Generate the generic switch statement.
1739 #
1740 aoStmts = [
1741 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1742 iai.McCppGeneric('{'),
1743 ];
1744 for oCase in aoCases:
1745 aoStmts.extend(oCase.toCode());
1746 aoStmts.extend([
1747 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
1748 iai.McCppGeneric('}'),
1749 ]);
1750
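 # Either way the result is a list of C++ statements built around a
 # 'switch (<sSwitchValue>)'; in the all-same-bodies case the switch only
 # selects enmFunction and a single call sequence follows it.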
1751 return aoStmts;
1752
1753 def morphInputCode(self, aoStmts, fCallEmitted = False, cDepth = 0):
1754 """
1755 Adjusts (& copies) the statements for the input/decoder so it will emit
1756 calls to the right threaded functions for each block.
1757
1758 Returns list/tree of statements (aoStmts is not modified) and updated
1759 fCallEmitted status.
1760 """
1761 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
1762 aoDecoderStmts = [];
1763
1764 for oStmt in aoStmts:
1765 # Copy the statement. Make a deep copy to make sure we've got our own
1766 # copies of all instance variables, even if a bit overkill at the moment.
1767 oNewStmt = copy.deepcopy(oStmt);
1768 aoDecoderStmts.append(oNewStmt);
1769 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
1770 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
1771 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
1772
1773 # If we haven't emitted the threaded function call yet, look for
1774 # statements which it would naturally follow or precede.
1775 if not fCallEmitted:
1776 if not oStmt.isCppStmt():
1777 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
1778 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
1779 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
1780 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
1781 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
1782 aoDecoderStmts.pop();
1783 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1784 aoDecoderStmts.append(oNewStmt);
1785 fCallEmitted = True;
1786 elif ( oStmt.fDecode
1787 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
1788 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
1789 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1790 fCallEmitted = True;
1791
1792 # Process branches of conditionals recursively.
1793 if isinstance(oStmt, iai.McStmtCond):
1794 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fCallEmitted, cDepth + 1);
1795 if oStmt.aoElseBranch:
1796 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fCallEmitted, cDepth + 1);
1797 else:
1798 fCallEmitted2 = False;
1799 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
1800
1801 if not fCallEmitted and cDepth == 0:
1802 self.raiseProblem('Unable to insert call to threaded function.');
1803
1804 return (aoDecoderStmts, fCallEmitted);
1805
1806
1807 def generateInputCode(self):
1808 """
1809 Modifies the input code.
1810 """
1811 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
1812
1813 if len(self.oMcBlock.aoStmts) == 1:
1814 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
1815 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
1816 if self.dsCImplFlags:
1817 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
1818 else:
1819 sCode += '0;\n';
1820 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1821 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1822 sIndent = ' ' * (min(cchIndent, 2) - 2);
1823 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
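 # Shape of the result: '{ pVCpu->iem.s.fTbCurInstr = <flags or 0>; <morphed statements> }'.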
1824 return sCode;
1825
1826 # IEM_MC_BEGIN/END block
1827 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
1828 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1829 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1830
1831# Short alias for ThreadedFunctionVariation.
1832ThrdFnVar = ThreadedFunctionVariation;
1833
1834
1835class IEMThreadedGenerator(object):
1836 """
1837 The threaded code generator & annotator.
1838 """
1839
1840 def __init__(self):
1841 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
1842 self.oOptions = None # type: argparse.Namespace
1843 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
1844 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers, giving the index of the first function.
1845
1846 #
1847 # Processing.
1848 #
1849
1850 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
1851 """
1852 Process the input files.
1853 """
1854
1855 # Parse the files.
1856 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
1857
1858 # Create threaded functions for the MC blocks.
1859 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
1860
1861 # Analyze the threaded functions.
1862 dRawParamCounts = {};
1863 dMinParamCounts = {};
1864 for oThreadedFunction in self.aoThreadedFuncs:
1865 oThreadedFunction.analyze();
1866 for oVariation in oThreadedFunction.aoVariations:
1867 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
1868 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
1869 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
1870 for cCount in sorted({cCnt: True for cCnt in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
1871 print('debug: %s params: %4s raw, %4s min'
1872 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
1873 file = sys.stderr);
1874
1875 # Populate aidxFirstFunctions. This is ASSUMING that
1876 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
1877 iThreadedFunction = 0;
1878 oThreadedFunction = self.getThreadedFunctionByIndex(0);
1879 self.aidxFirstFunctions = [];
1880 for oParser in self.aoParsers:
1881 self.aidxFirstFunctions.append(iThreadedFunction);
1882
1883 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
1884 iThreadedFunction += 1;
1885 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
1886
1887 # Analyze the threaded functions and their variations for native recompilation.
1888 if fNativeRecompilerEnabled:
1889 ian.displayStatistics(self.aoThreadedFuncs, sHostArch);
1890
1891 # Gather arguments + variable statistics for the MC blocks.
1892 cMaxArgs = 0;
1893 cMaxVars = 0;
1894 cMaxVarsAndArgs = 0;
1895 cbMaxArgs = 0;
1896 cbMaxVars = 0;
1897 cbMaxVarsAndArgs = 0;
1898 for oThreadedFunction in self.aoThreadedFuncs:
1899 if oThreadedFunction.oMcBlock.cLocals >= 0:
1900 # Counts.
1901 assert oThreadedFunction.oMcBlock.cArgs >= 0;
1902 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
1903 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
1904 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
1905 if cMaxVarsAndArgs > 9:
1906 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
1907 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
1908 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
1909 # Calc stack allocation size:
1910 cbArgs = 0;
1911 for oArg in oThreadedFunction.oMcBlock.aoArgs:
1912 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
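 # (Each item is rounded up to whole 8-byte stack slots; e.g. an 80-bit
 # RTFLOAT80U counts as 16 bytes.)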
1913 cbVars = 0;
1914 for oVar in oThreadedFunction.oMcBlock.aoLocals:
1915 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
1916 cbMaxVars = max(cbMaxVars, cbVars);
1917 cbMaxArgs = max(cbMaxArgs, cbArgs);
1918 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
1919 if cbMaxVarsAndArgs >= 0xc0:
1920 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
1921 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
1922
1923 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
1924 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
1925
1926 return True;
1927
1928 #
1929 # Output
1930 #
1931
1932 def generateLicenseHeader(self):
1933 """
1934 Returns the lines for a license header.
1935 """
1936 return [
1937 '/*',
1938 ' * Autogenerated by $Id: IEMAllThrdPython.py 102769 2024-01-04 23:10:56Z vboxsync $ ',
1939 ' * Do not edit!',
1940 ' */',
1941 '',
1942 '/*',
1943 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
1944 ' *',
1945 ' * This file is part of VirtualBox base platform packages, as',
1946 ' * available from https://www.virtualbox.org.',
1947 ' *',
1948 ' * This program is free software; you can redistribute it and/or',
1949 ' * modify it under the terms of the GNU General Public License',
1950 ' * as published by the Free Software Foundation, in version 3 of the',
1951 ' * License.',
1952 ' *',
1953 ' * This program is distributed in the hope that it will be useful, but',
1954 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
1955 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
1956 ' * General Public License for more details.',
1957 ' *',
1958 ' * You should have received a copy of the GNU General Public License',
1959 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
1960 ' *',
1961 ' * The contents of this file may alternatively be used under the terms',
1962 ' * of the Common Development and Distribution License Version 1.0',
1963 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
1964 ' * in the VirtualBox distribution, in which case the provisions of the',
1965 ' * CDDL are applicable instead of those of the GPL.',
1966 ' *',
1967 ' * You may elect to license modified versions of this file under the',
1968 ' * terms and conditions of either the GPL or the CDDL or both.',
1969 ' *',
1970 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
1971 ' */',
1972 '',
1973 '',
1974 '',
1975 ];
1976
1977 ## List of built-in threaded functions with user argument counts and
1978 ## whether each has a native recompiler implementation.
1979 katBltIns = (
1980 ( 'Nop', 0, True ),
1981 ( 'LogCpuState', 0, True ),
1982
1983 ( 'DeferToCImpl0', 2, True ),
1984 ( 'CheckIrq', 0, True ),
1985 ( 'CheckMode', 1, True ),
1986 ( 'CheckHwInstrBps', 0, False ),
1987 ( 'CheckCsLim', 1, True ),
1988
1989 ( 'CheckCsLimAndOpcodes', 3, True ),
1990 ( 'CheckOpcodes', 3, True ),
1991 ( 'CheckOpcodesConsiderCsLim', 3, True ),
1992
1993 ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
1994 ( 'CheckPcAndOpcodes', 3, True ),
1995 ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),
1996
1997 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
1998 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
1999 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),
2000
2001 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
2002 ( 'CheckOpcodesLoadingTlb', 3, True ),
2003 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),
2004
2005 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
2006 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
2007 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),
2008
2009 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
2010 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
2011 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
2012 );
2013
2014 def generateThreadedFunctionsHeader(self, oOut):
2015 """
2016 Generates the threaded functions header file.
2017 Returns success indicator.
2018 """
2019
2020 asLines = self.generateLicenseHeader();
2021
2022 # Generate the threaded function table indexes.
2023 asLines += [
2024 'typedef enum IEMTHREADEDFUNCS',
2025 '{',
2026 ' kIemThreadedFunc_Invalid = 0,',
2027 '',
2028 ' /*',
2029 ' * Predefined',
2030 ' */',
2031 ];
2032 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
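 # E.g. ' kIemThreadedFunc_BltIn_Nop,' for the first katBltIns entry.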
2033
2034 iThreadedFunction = 1 + len(self.katBltIns);
2035 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2036 asLines += [
2037 '',
2038 ' /*',
2039 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2040 ' */',
2041 ];
2042 for oThreadedFunction in self.aoThreadedFuncs:
2043 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2044 if oVariation:
2045 iThreadedFunction += 1;
2046 oVariation.iEnumValue = iThreadedFunction;
2047 asLines.append(' ' + oVariation.getIndexName() + ',');
2048 asLines += [
2049 ' kIemThreadedFunc_End',
2050 '} IEMTHREADEDFUNCS;',
2051 '',
2052 ];
2053
2054 # Prototype the function table.
2055 asLines += [
2056 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2057 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2058 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2059 '#endif',
2060 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2061 ];
2062
2063 oOut.write('\n'.join(asLines));
2064 return True;
2065
2066 ksBitsToIntMask = {
2067 1: "UINT64_C(0x1)",
2068 2: "UINT64_C(0x3)",
2069 4: "UINT64_C(0xf)",
2070 8: "UINT64_C(0xff)",
2071 16: "UINT64_C(0xffff)",
2072 32: "UINT64_C(0xffffffff)",
2073 };
2074
2075 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2076 """
2077 Outputs code for unpacking parameters.
2078 This is shared by the threaded and native code generators.
2079 """
2080 aasVars = [];
2081 for aoRefs in oVariation.dParamRefs.values():
2082 oRef = aoRefs[0];
2083 if oRef.sType[0] != 'P':
2084 cBits = g_kdTypeInfo[oRef.sType][0];
2085 sType = g_kdTypeInfo[oRef.sType][2];
2086 else:
2087 cBits = 64;
2088 sType = oRef.sType;
2089
2090 sTypeDecl = sType + ' const';
2091
2092 if cBits == 64:
2093 assert oRef.offNewParam == 0;
2094 if sType == 'uint64_t':
2095 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2096 else:
2097 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2098 elif oRef.offNewParam == 0:
2099 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2100 else:
2101 sUnpack = '(%s)((%s >> %s) & %s);' \
2102 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
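 # E.g. with the threaded parameter names, an 8-bit value stored at bit
 # offset 8 of uParam0 unpacks as '(uint8_t)((uParam0 >> 8) & UINT64_C(0xff));'.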
2103
2104 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2105
2106 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2107 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2108 acchVars = [0, 0, 0, 0, 0];
2109 for asVar in aasVars:
2110 for iCol, sStr in enumerate(asVar):
2111 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2112 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2113 for asVar in sorted(aasVars):
2114 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2115 return True;
2116
2117 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2118 def generateThreadedFunctionsSource(self, oOut):
2119 """
2120 Generates the threaded functions source file.
2121 Returns success indicator.
2122 """
2123
2124 asLines = self.generateLicenseHeader();
2125 oOut.write('\n'.join(asLines));
2126
2127 #
2128 # Emit the function definitions.
2129 #
2130 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2131 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2132 oOut.write( '\n'
2133 + '\n'
2134 + '\n'
2135 + '\n'
2136 + '/*' + '*' * 128 + '\n'
2137 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2138 + '*' * 128 + '*/\n');
2139
2140 for oThreadedFunction in self.aoThreadedFuncs:
2141 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2142 if oVariation:
2143 oMcBlock = oThreadedFunction.oMcBlock;
2144
2145 # Function header
2146 oOut.write( '\n'
2147 + '\n'
2148 + '/**\n'
2149 + ' * #%u: %s at line %s offset %s in %s%s\n'
2150 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2151 os.path.split(oMcBlock.sSrcFile)[1],
2152 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2153 + ' */\n'
2154 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2155 + '{\n');
2156
2157 # Unpack parameters.
2158 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2159
2160 # RT_NOREF for unused parameters.
2161 if oVariation.cMinParams < g_kcThreadedParams:
2162 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
2163
2164 # Now for the actual statements.
2165 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2166
2167 oOut.write('}\n');
2168
2169
2170 #
2171 # Generate the output tables in parallel.
2172 #
2173 asFuncTable = [
2174 '/**',
2175 ' * Function pointer table.',
2176 ' */',
2177 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2178 '{',
2179 ' /*Invalid*/ NULL,',
2180 ];
2181 asNameTable = [
2182 '/**',
2183 ' * Function name table.',
2184 ' */',
2185 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2186 '{',
2187 ' "Invalid",',
2188 ];
2189 asArgCntTab = [
2190 '/**',
2191 ' * Argument count table.',
2192 ' */',
2193 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2194 '{',
2195 ' 0, /*Invalid*/',
2196 ];
2197 aasTables = (asFuncTable, asNameTable, asArgCntTab,);
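 # The three tables are filled in lock-step below so their entries stay in
 # sync with the IEMTHREADEDFUNCS enum values (asserted via iEnumValue).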
2198
2199 for asTable in aasTables:
2200 asTable.extend((
2201 '',
2202 ' /*',
2203 ' * Predefined.',
2204 ' */',
2205 ));
2206 for sFuncNm, cArgs, _ in self.katBltIns:
2207 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2208 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2209 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2210
2211 iThreadedFunction = 1 + len(self.katBltIns);
2212 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2213 for asTable in aasTables:
2214 asTable.extend((
2215 '',
2216 ' /*',
2217 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
2218 ' */',
2219 ));
2220 for oThreadedFunction in self.aoThreadedFuncs:
2221 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2222 if oVariation:
2223 iThreadedFunction += 1;
2224 assert oVariation.iEnumValue == iThreadedFunction;
2225 sName = oVariation.getThreadedFunctionName();
2226 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
2227 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
2228 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
2229
2230 for asTable in aasTables:
2231 asTable.append('};');
2232
2233 #
2234 # Output the tables.
2235 #
2236 oOut.write( '\n'
2237 + '\n');
2238 oOut.write('\n'.join(asFuncTable));
2239 oOut.write( '\n'
2240 + '\n'
2241 + '\n'
2242 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
2243 oOut.write('\n'.join(asNameTable));
2244 oOut.write( '\n'
2245 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
2246 + '\n'
2247 + '\n');
2248 oOut.write('\n'.join(asArgCntTab));
2249 oOut.write('\n');
2250
2251 return True;
2252
2253 def generateNativeFunctionsHeader(self, oOut):
2254 """
2255 Generates the native recompiler functions header file.
2256 Returns success indicator.
2257 """
2258 if not self.oOptions.fNativeRecompilerEnabled:
2259 return True;
2260
2261 asLines = self.generateLicenseHeader();
2262
2263 # Prototype the function table.
2264 asLines += [
2265 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2266 '',
2267 ];
2268
2269 oOut.write('\n'.join(asLines));
2270 return True;
2271
2272 def generateNativeFunctionsSource(self, oOut):
2273 """
2274 Generates the native recompiler functions source file.
2275 Returns success indicator.
2276 """
2277 if not self.oOptions.fNativeRecompilerEnabled:
2278 return True;
2279
2280 #
2281 # The file header.
2282 #
2283 oOut.write('\n'.join(self.generateLicenseHeader()));
2284
2285 #
2286 # Emit the functions.
2287 #
2288 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2289 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2290 oOut.write( '\n'
2291 + '\n'
2292 + '\n'
2293 + '\n'
2294 + '/*' + '*' * 128 + '\n'
2295 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2296 + '*' * 128 + '*/\n');
2297
2298 for oThreadedFunction in self.aoThreadedFuncs:
2299 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2300 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2301 oMcBlock = oThreadedFunction.oMcBlock;
2302
2303 # Function header
2304 oOut.write( '\n'
2305 + '\n'
2306 + '/**\n'
2307 + ' * #%u: %s at line %s offset %s in %s%s\n'
2308 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2309 os.path.split(oMcBlock.sSrcFile)[1],
2310 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2311 + ' */\n'
2312 + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
2313 + '{\n');
2314
2315 # Unpack parameters.
2316 self.generateFunctionParameterUnpacking(oVariation, oOut,
2317 ('pCallEntry->auParams[0]',
2318 'pCallEntry->auParams[1]',
2319 'pCallEntry->auParams[2]',));
2320
2321 # Now for the actual statements.
2322 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2323
2324 oOut.write('}\n');
2325
2326 #
2327 # Output the function table.
2328 #
2329 oOut.write( '\n'
2330 + '\n'
2331 + '/*\n'
2332 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2333 + ' */\n'
2334 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
2335 + '{\n'
2336 + ' /*Invalid*/ NULL,'
2337 + '\n'
2338 + ' /*\n'
2339 + ' * Predefined.\n'
2340 + ' */\n'
2341 );
2342 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2343 if fHaveRecompFunc:
2344 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
2345 else:
2346 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
2347
2348 iThreadedFunction = 1 + len(self.katBltIns);
2349 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2350 oOut.write( ' /*\n'
2351 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2352 + ' */\n');
2353 for oThreadedFunction in self.aoThreadedFuncs:
2354 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2355 if oVariation:
2356 iThreadedFunction += 1;
2357 assert oVariation.iEnumValue == iThreadedFunction;
2358 sName = oVariation.getNativeFunctionName();
2359 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2360 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2361 else:
2362 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2363
2364 oOut.write( '};\n'
2365 + '\n');
2366 return True;
2367
2368
2369 def getThreadedFunctionByIndex(self, idx):
2370 """
2371 Returns a ThreadedFunction object for the given index. If the index is
2372 out of bounds, a dummy is returned.
2373 """
2374 if idx < len(self.aoThreadedFuncs):
2375 return self.aoThreadedFuncs[idx];
2376 return ThreadedFunction.dummyInstance();
2377
2378 def generateModifiedInput(self, oOut, idxFile):
2379 """
2380 Generates the combined modified input source/header file.
2381 Returns success indicator.
2382 """
2383 #
2384 # File header and assert assumptions.
2385 #
2386 oOut.write('\n'.join(self.generateLicenseHeader()));
2387 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
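 # The AssertCompile backs the assumption in ThreadedFunction.emitThreadedCallStmts
 # that the mode bits occupy the low three bits of the switch value.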
2388
2389 #
2390 # Iterate all parsers (input files) and output the ones related to the
2391 # file set given by idxFile.
2392 #
2393 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
2394 # Is this included in the file set?
2395 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
2396 fInclude = -1;
2397 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
2398 if sSrcBaseFile == aoInfo[0].lower():
2399 fInclude = aoInfo[2] in (-1, idxFile);
2400 break;
2401 if fInclude is not True:
2402 assert fInclude is False;
2403 continue;
2404
2405 # Output it.
2406 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
2407
2408 iThreadedFunction = self.aidxFirstFunctions[idxParser];
2409 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2410 iLine = 0;
2411 while iLine < len(oParser.asLines):
2412 sLine = oParser.asLines[iLine];
2413 iLine += 1; # iBeginLine and iEndLine are 1-based.
2414
2415 # Can we pass it thru?
2416 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
2417 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
2418 oOut.write(sLine);
2419 #
2420 # Single MC block. Just extract it and insert the replacement.
2421 #
2422 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
2423 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
2424 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
2425 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
2426 sModified = oThreadedFunction.generateInputCode().strip();
2427 oOut.write(sModified);
2428
2429 iLine = oThreadedFunction.oMcBlock.iEndLine;
2430 sLine = oParser.asLines[iLine - 1];
2431 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
2432 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
2433 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
2434 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
2435
2436 # Advance
2437 iThreadedFunction += 1;
2438 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2439 #
2440 # Macro expansion line that has sublines and may contain multiple MC blocks.
2441 #
2442 else:
2443 offLine = 0;
2444 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
2445 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
2446
2447 sModified = oThreadedFunction.generateInputCode().strip();
2448 assert ( sModified.startswith('IEM_MC_BEGIN')
2449 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
2450 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
2451 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
2452 ), 'sModified="%s"' % (sModified,);
2453 oOut.write(sModified);
2454
2455 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
2456
2457 # Advance
2458 iThreadedFunction += 1;
2459 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2460
2461 # Last line segment.
2462 if offLine < len(sLine):
2463 oOut.write(sLine[offLine : ]);
2464
2465 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
2466
2467 return True;
2468
2469 def generateModifiedInput1(self, oOut):
2470 """
2471 Generates the combined modified input source/header file, part 1.
2472 Returns success indicator.
2473 """
2474 return self.generateModifiedInput(oOut, 1);
2475
2476 def generateModifiedInput2(self, oOut):
2477 """
2478 Generates the combined modified input source/header file, part 2.
2479 Returns success indicator.
2480 """
2481 return self.generateModifiedInput(oOut, 2);
2482
2483 def generateModifiedInput3(self, oOut):
2484 """
2485 Generates the combined modified input source/header file, part 3.
2486 Returns success indicator.
2487 """
2488 return self.generateModifiedInput(oOut, 3);
2489
2490 def generateModifiedInput4(self, oOut):
2491 """
2492 Generates the combined modified input source/header file, part 4.
2493 Returns success indicator.
2494 """
2495 return self.generateModifiedInput(oOut, 4);
2496
2497
2498 #
2499 # Main
2500 #
2501
2502 def main(self, asArgs):
2503 """
2504 C-like main function.
2505 Returns exit code.
2506 """
2507
2508 #
2509 # Parse arguments
2510 #
2511 sScriptDir = os.path.dirname(__file__);
2512 oParser = argparse.ArgumentParser(add_help = False);
2513 oParser.add_argument('asInFiles',
2514 metavar = 'input.cpp.h',
2515 nargs = '*',
2516 default = [os.path.join(sScriptDir, aoInfo[0])
2517 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2518 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2519 oParser.add_argument('--host-arch',
2520 metavar = 'arch',
2521 dest = 'sHostArch',
2522 action = 'store',
2523 default = None,
2524 help = 'The host architecture.');
2525
2526 oParser.add_argument('--out-thrd-funcs-hdr',
2527 metavar = 'file-thrd-funcs.h',
2528 dest = 'sOutFileThrdFuncsHdr',
2529 action = 'store',
2530 default = '-',
2531 help = 'The output header file for the threaded functions.');
2532 oParser.add_argument('--out-thrd-funcs-cpp',
2533 metavar = 'file-thrd-funcs.cpp',
2534 dest = 'sOutFileThrdFuncsCpp',
2535 action = 'store',
2536 default = '-',
2537 help = 'The output C++ file for the threaded functions.');
2538 oParser.add_argument('--out-n8ve-funcs-hdr',
2539 metavar = 'file-n8tv-funcs.h',
2540 dest = 'sOutFileN8veFuncsHdr',
2541 action = 'store',
2542 default = '-',
2543 help = 'The output header file for the native recompiler functions.');
2544 oParser.add_argument('--out-n8ve-funcs-cpp',
2545 metavar = 'file-n8tv-funcs.cpp',
2546 dest = 'sOutFileN8veFuncsCpp',
2547 action = 'store',
2548 default = '-',
2549 help = 'The output C++ file for the native recompiler functions.');
2550 oParser.add_argument('--native',
2551 dest = 'fNativeRecompilerEnabled',
2552 action = 'store_true',
2553 default = False,
2554 help = 'Enables generating the files related to native recompilation.');
2555 oParser.add_argument('--out-mod-input1',
2556 metavar = 'file-instr.cpp.h',
2557 dest = 'sOutFileModInput1',
2558 action = 'store',
2559 default = '-',
2560 help = 'The output C++/header file for modified input instruction files part 1.');
2561 oParser.add_argument('--out-mod-input2',
2562 metavar = 'file-instr.cpp.h',
2563 dest = 'sOutFileModInput2',
2564 action = 'store',
2565 default = '-',
2566 help = 'The output C++/header file for modified input instruction files part 2.');
2567 oParser.add_argument('--out-mod-input3',
2568 metavar = 'file-instr.cpp.h',
2569 dest = 'sOutFileModInput3',
2570 action = 'store',
2571 default = '-',
2572 help = 'The output C++/header file for modified input instruction files part 3.');
2573 oParser.add_argument('--out-mod-input4',
2574 metavar = 'file-instr.cpp.h',
2575 dest = 'sOutFileModInput4',
2576 action = 'store',
2577 default = '-',
2578 help = 'The output C++/header file for modified input instruction files part 4.');
2579 oParser.add_argument('--help', '-h', '-?',
2580 action = 'help',
2581 help = 'Display help and exit.');
2582 oParser.add_argument('--version', '-V',
2583 action = 'version',
2584 version = 'r%s (IEMAllThrdPython.py), r%s (IEMAllInstPython.py)'
2585 % (__version__.split()[1], iai.__version__.split()[1],),
2586 help = 'Displays the version/revision of the script and exit.');
2587 self.oOptions = oParser.parse_args(asArgs[1:]);
2588 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
2589
2590 #
2591 # Process the instructions specified in the IEM sources.
2592 #
2593 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
2594 #
2595 # Generate the output files.
2596 #
2597 aaoOutputFiles = (
2598 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
2599 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
2600 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
2601 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
2602 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
2603 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
2604 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
2605 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
2606 );
2607 fRc = True;
2608 for sOutFile, fnGenMethod in aaoOutputFiles:
2609 if sOutFile == '-':
2610 fRc = fnGenMethod(sys.stdout) and fRc;
2611 else:
2612 try:
2613 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
2614 except Exception as oXcpt:
2615 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
2616 return 1;
2617 fRc = fnGenMethod(oOut) and fRc;
2618 oOut.close();
2619 if fRc:
2620 return 0;
2621
2622 return 1;
2623
2624
2625if __name__ == '__main__':
2626 sys.exit(IEMThreadedGenerator().main(sys.argv));
2627