VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 102010

Last change on this file since 102010 was 102010, checked in by vboxsync, 16 months ago

VMM/IEM: More on the subject of correctly flushing guest register shadow copies when making CIMPL calls. bugref:10371

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 131.1 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 102010 2023-11-08 21:36:54Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 102010 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
if sys.version_info[0] >= 3:
    # Alias 'long' to 'int' so python-2 style code in this script keeps working.
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;
55
## Basic type info used when packing threaded function parameters.
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type      )
    'int8_t':       (    8,    True, 'int8_t',   ),
    'int16_t':      (   16,    True, 'int16_t',  ),
    'int32_t':      (   32,    True, 'int32_t',  ),
    'int64_t':      (   64,    True, 'int64_t',  ),
    'uint4_t':      (    4,   False, 'uint8_t',  ),
    'uint8_t':      (    8,   False, 'uint8_t',  ),
    'uint16_t':     (   16,   False, 'uint16_t', ),
    'uint32_t':     (   32,   False, 'uint32_t', ),
    'uint64_t':     (   64,   False, 'uint64_t', ),
    'uintptr_t':    (   64,   False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
    'bool':         (    1,   False, 'bool',     ),
    'IEMMODE':      (    2,   False, 'IEMMODE',  ),
};

## Extended type info - only for getTypeBitCount/variables.
g_kdTypeInfo2 = {
    'RTFLOAT32U':       (       32, False, 'RTFLOAT32U',      ),
    'RTFLOAT64U':       (       64, False, 'RTFLOAT64U',      ),
    'RTUINT64U':        (       64, False, 'RTUINT64U',       ),
    'RTGCPTR':          (       64, False, 'RTGCPTR',         ),
    'RTPBCD80U':        (       80, False, 'RTPBCD80U',       ),
    'RTFLOAT80U':       (       80, False, 'RTFLOAT80U',      ),
    'IEMFPURESULT':     (    80+16, False, 'IEMFPURESULT',    ),
    'IEMFPURESULTTWO':  ( 80+16+80, False, 'IEMFPURESULTTWO', ),
    'RTUINT128U':       (      128, False, 'RTUINT128U',      ),
    'X86XMMREG':        (      128, False, 'X86XMMREG',       ),
    'IEMSSERESULT':     (   128+32, False, 'IEMSSERESULT',    ),
    'IEMMEDIAF2XMMSRC': (      256, False, 'IEMMEDIAF2XMMSRC',),
    'RTUINT256U':       (      256, False, 'RTUINT256U',      ),
    'IEMPCMPISTRXSRC':  (      256, False, 'IEMPCMPISTRXSRC', ),
    'IEMPCMPESTRXSRC':  (      384, False, 'IEMPCMPESTRXSRC', ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);

def getTypeBitCount(sType):
    """
    Translates a type name to its width in bits.

    Unknown pointer types ('*' in the name or a leading 'P') are assumed to be
    64-bit host pointers; any other unknown type is reported on stdout and
    also treated as 64 bits wide.
    """
    aoInfo = g_kdTypeInfo2.get(sType);
    if aoInfo is not None:
        return aoInfo[0];
    if '*' in sType or sType[0] == 'P':
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
103
## Maps pVCpu->iem.s.<field> names to the type to use when the field is
## referenced from an MC block.  Entries with a None type mark decoder-state
## fields that must not be referenced (lookups on them yield no type and the
## caller raises a problem).
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte':     ( None, ),
    'cbInstrBuf':           ( None, ),
    'pbInstrBuf':           ( None, ),
    'uInstrBufPc':          ( None, ),
    'cbInstrBufTotal':      ( None, ),
    'offCurInstrStart':     ( None, ),
    'cbOpcode':             ( None, ),
    'offOpcode':            ( None, ),
    'offModRm':             ( None, ),
    # Okay ones.
    'fPrefixes':            ( 'uint32_t', ),
    'uRexReg':              ( 'uint8_t', ),
    'uRexB':                ( 'uint8_t', ),
    'uRexIndex':            ( 'uint8_t', ),
    'iEffSeg':              ( 'uint8_t', ),
    'enmEffOpSize':         ( 'IEMMODE', ),
    'enmDefAddrMode':       ( 'IEMMODE', ),
    'enmEffAddrMode':       ( 'IEMMODE', ),
    'enmDefOpSize':         ( 'IEMMODE', ),
    'idxPrefix':            ( 'uint8_t', ),
    'uVex3rdReg':           ( 'uint8_t', ),
    'uVexLength':           ( 'uint8_t', ),
    'fEvexStuff':           ( 'uint8_t', ),
    'uFpuOpcode':           ( 'uint16_t', ),
};
131
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.

    Records where a value is referenced in the original decoder code and how
    it gets packed into the generic threaded function parameters.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef     = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        if sStdRef:
            self.sStdRef = sStdRef;
        else:
            self.sStdRef = ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef  = sStdRef is not None;
        ## The type (typically derived).
        self.sType       = sType;
        ## The statement making the reference.
        self.oStmt       = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam      = iParam;
        ## The offset in the parameter of the reference.
        self.offParam    = offParam;

        ## The variable name in the threaded function.
        self.sNewName    = 'x';
        ## The threaded function parameter this is packed into.
        self.iNewParam   = 99;
        ## The bit offset in iNewParam.
        self.offNewParam = 1024;
160
161class ThreadedFunctionVariation(object):
162 """ Threaded function variation. """
163
    ## @name Variations.
    ## These variations will match translation block selection/distinctions as well.
    ## @{
    ksVariation_Default     = '';               ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
    ksVariation_16          = '_16';            ##< 16-bit mode code (386+).
    ksVariation_16f         = '_16f';           ##< 16-bit mode code (386+), check+clear eflags.
    ksVariation_16_Addr32   = '_16_Addr32';     ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
    ksVariation_16f_Addr32  = '_16f_Addr32';    ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
    ksVariation_16_Pre386   = '_16_Pre386';     ##< 16-bit mode code, pre-386 CPU target.
    ksVariation_16f_Pre386  = '_16f_Pre386';    ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
    ksVariation_32          = '_32';            ##< 32-bit mode code (386+).
    ksVariation_32f         = '_32f';           ##< 32-bit mode code (386+), check+clear eflags.
    ksVariation_32_Flat     = '_32_Flat';       ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide.
    ksVariation_32f_Flat    = '_32f_Flat';      ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide, eflags.
    ksVariation_32_Addr16   = '_32_Addr16';     ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    ksVariation_32f_Addr16  = '_32f_Addr16';    ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64          = '_64';            ##< 64-bit mode code.
    ksVariation_64f         = '_64f';           ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_FsGs     = '_64_FsGs';       ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs    = '_64f_FsGs';      ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32   = '_64_Addr32';     ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32  = '_64f_Addr32';    ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
    ## All variations.
    kasVariations = (
        ksVariation_Default,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variation subsets for blocks without effective-address calculation.
    kasVariationsWithoutAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    kasVariationsWithoutAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    kasVariationsWithoutAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
    );
    kasVariationsWithoutAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
    );
    kasVariationsWithoutAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variation subsets for blocks doing effective-address calculation.
    kasVariationsWithAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    kasVariationsWithAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    kasVariationsWithAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    kasVariationsWithAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    kasVariationsWithAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    kasVariationsOnlyPre386 = (
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
    );
    ## The order the variations are emitted in (most frequently selected first, presumably).
    kasVariationsEmitOrder = (
        ksVariation_Default,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Human readable descriptions of each variation.
    kdVariationNames = {
        ksVariation_Default:    'defer-to-cimpl',
        ksVariation_16:         '16-bit',
        ksVariation_16f:        '16-bit w/ eflag checking and clearing',
        ksVariation_16_Addr32:  '16-bit w/ address prefix (Addr32)',
        ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
        ksVariation_16_Pre386:  '16-bit on pre-386 CPU',
        ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
        ksVariation_32:         '32-bit',
        ksVariation_32f:        '32-bit w/ eflag checking and clearing',
        ksVariation_32_Flat:    '32-bit flat and wide open CS, SS, DS and ES',
        ksVariation_32f_Flat:   '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
        ksVariation_32_Addr16:  '32-bit w/ address prefix (Addr16)',
        ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
        ksVariation_64:         '64-bit',
        ksVariation_64f:        '64-bit w/ eflag checking and clearing',
        ksVariation_64_FsGs:    '64-bit with memory accessed via FS or GS',
        ksVariation_64f_FsGs:   '64-bit with memory accessed via FS or GS and eflag checking and clearing',
        ksVariation_64_Addr32:  '64-bit w/ address prefix (Addr32)',
        ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
    };
    ## Set (dict used as set) of the variations that check + clear EFLAGS.
    kdVariationsWithEflagsCheckingAndClearing = {
        ksVariation_16f: True,
        ksVariation_16f_Addr32: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32f: True,
        ksVariation_32f_Flat: True,
        ksVariation_32f_Addr16: True,
        ksVariation_64f: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## Set of the variations using flat (unsegmented) addressing.
    kdVariationsWithFlatAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
    };
    ## Set of the variations using 16-bit effective addresses.
    kdVariationsWithFlatAddr16 = {
        ksVariation_16: True,
        ksVariation_16f: True,
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32_Addr16: True,
        ksVariation_32f_Addr16: True,
    };
    ## Set of the non-64-bit variations using 32-bit effective addresses.
    kdVariationsWithFlatAddr32No64 = {
        ksVariation_16_Addr32: True,
        ksVariation_16f_Addr32: True,
        ksVariation_32: True,
        ksVariation_32f: True,
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
    };
    ## @}

    ## IEM_CIMPL_F_XXX flags that we know.
    ## The value indicates whether it terminates the TB or not. The goal is to
    ## improve the recompiler so all but END_TB will be False.
    ##
    ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
    kdCImplFlags = {
        'IEM_CIMPL_F_MODE':                         False,
        'IEM_CIMPL_F_BRANCH_DIRECT':                False,
        'IEM_CIMPL_F_BRANCH_INDIRECT':              False,
        'IEM_CIMPL_F_BRANCH_RELATIVE':              False,
        'IEM_CIMPL_F_BRANCH_FAR':                   True,
        'IEM_CIMPL_F_BRANCH_CONDITIONAL':           False,
        # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
        'IEM_CIMPL_F_BRANCH_STACK':                 False,
        'IEM_CIMPL_F_BRANCH_STACK_FAR':             False,
        'IEM_CIMPL_F_RFLAGS':                       False,
        'IEM_CIMPL_F_INHIBIT_SHADOW':               False,
        'IEM_CIMPL_F_CHECK_IRQ_AFTER':              False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE':             False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER':   False, # (ignore)
        'IEM_CIMPL_F_STATUS_FLAGS':                 False,
        'IEM_CIMPL_F_VMEXIT':                       False,
        'IEM_CIMPL_F_FPU':                          False,
        'IEM_CIMPL_F_REP':                          False,
        'IEM_CIMPL_F_IO':                           False,
        'IEM_CIMPL_F_END_TB':                       True,
        'IEM_CIMPL_F_XCPT':                         True,
        'IEM_CIMPL_F_CALLS_CIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE':     False,
    };
427
428 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
429 self.oParent = oThreadedFunction # type: ThreadedFunction
430 ##< ksVariation_Xxxx.
431 self.sVariation = sVariation
432
433 ## Threaded function parameter references.
434 self.aoParamRefs = [] # type: List[ThreadedParamRef]
435 ## Unique parameter references.
436 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
437 ## Minimum number of parameters to the threaded function.
438 self.cMinParams = 0;
439
440 ## List/tree of statements for the threaded function.
441 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
442
443 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
444 self.iEnumValue = -1;
445
446 ## Native recompilation details for this variation.
447 self.oNativeRecomp = None;
448
449 def getIndexName(self):
450 sName = self.oParent.oMcBlock.sFunction;
451 if sName.startswith('iemOp_'):
452 sName = sName[len('iemOp_'):];
453 if self.oParent.oMcBlock.iInFunction == 0:
454 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
455 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
456
457 def getThreadedFunctionName(self):
458 sName = self.oParent.oMcBlock.sFunction;
459 if sName.startswith('iemOp_'):
460 sName = sName[len('iemOp_'):];
461 if self.oParent.oMcBlock.iInFunction == 0:
462 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
463 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
464
465 def getNativeFunctionName(self):
466 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
467
468 def getShortName(self):
469 sName = self.oParent.oMcBlock.sFunction;
470 if sName.startswith('iemOp_'):
471 sName = sName[len('iemOp_'):];
472 if self.oParent.oMcBlock.iInFunction == 0:
473 return '%s%s' % ( sName, self.sVariation, );
474 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
475
476 def isWithFlagsCheckingAndClearingVariation(self):
477 """
478 Checks if this is a variation that checks and clears EFLAGS.
479 """
480 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
481
482 #
483 # Analysis and code morphing.
484 #
485
    def raiseProblem(self, sMessage):
        """ Raises a problem. Delegates to the parent ThreadedFunction. """
        self.oParent.raiseProblem(sMessage);
489
    def warning(self, sMessage):
        """ Emits a warning. Delegates to the parent ThreadedFunction. """
        self.oParent.warning(sMessage);
493
494 def analyzeReferenceToType(self, sRef):
495 """
496 Translates a variable or structure reference to a type.
497 Returns type name.
498 Raises exception if unable to figure it out.
499 """
500 ch0 = sRef[0];
501 if ch0 == 'u':
502 if sRef.startswith('u32'):
503 return 'uint32_t';
504 if sRef.startswith('u8') or sRef == 'uReg':
505 return 'uint8_t';
506 if sRef.startswith('u64'):
507 return 'uint64_t';
508 if sRef.startswith('u16'):
509 return 'uint16_t';
510 elif ch0 == 'b':
511 return 'uint8_t';
512 elif ch0 == 'f':
513 return 'bool';
514 elif ch0 == 'i':
515 if sRef.startswith('i8'):
516 return 'int8_t';
517 if sRef.startswith('i16'):
518 return 'int16_t';
519 if sRef.startswith('i32'):
520 return 'int32_t';
521 if sRef.startswith('i64'):
522 return 'int64_t';
523 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
524 return 'uint8_t';
525 elif ch0 == 'p':
526 if sRef.find('-') < 0:
527 return 'uintptr_t';
528 if sRef.startswith('pVCpu->iem.s.'):
529 sField = sRef[len('pVCpu->iem.s.') : ];
530 if sField in g_kdIemFieldToType:
531 if g_kdIemFieldToType[sField][0]:
532 return g_kdIemFieldToType[sField][0];
533 elif ch0 == 'G' and sRef.startswith('GCPtr'):
534 return 'uint64_t';
535 elif ch0 == 'e':
536 if sRef == 'enmEffOpSize':
537 return 'IEMMODE';
538 elif ch0 == 'o':
539 if sRef.startswith('off32'):
540 return 'uint32_t';
541 elif sRef == 'cbFrame': # enter
542 return 'uint16_t';
543 elif sRef == 'cShift': ## @todo risky
544 return 'uint8_t';
545
546 self.raiseProblem('Unknown reference: %s' % (sRef,));
547 return None; # Shut up pylint 2.16.2.
548
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        Returns a (pointer-to-)function-type name, or raises a problem via
        raiseProblem if the reference cannot be resolved.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?  A plain name (no '->' / '-') is resolved against the decoder function.
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first
            # (the argument preceding the matching name is its type).
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable declaration.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # 'const IEMOPXXX *' declarations are rewritten as 'PCIEMOPXXX'.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx:  resolve pImpl's type, then map the member
        # suffix (everything after the last 'U') onto the corresponding PFN type.
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            offBits   = sMember.rfind('U') + 1;
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
599
600 def analyze8BitGRegStmt(self, oStmt):
601 """
602 Gets the 8-bit general purpose register access details of the given statement.
603 ASSUMES the statement is one accessing an 8-bit GREG.
604 """
605 idxReg = 0;
606 if ( oStmt.sName.find('_FETCH_') > 0
607 or oStmt.sName.find('_REF_') > 0
608 or oStmt.sName.find('_TO_LOCAL') > 0):
609 idxReg = 1;
610
611 sRegRef = oStmt.asParams[idxReg];
612 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
613 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
614 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
615 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
616 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
617 else:
618 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
619
620 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
621 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
622 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
623 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
624 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
625 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
626 else:
627 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
628 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
629 sStdRef = 'bOther8Ex';
630
631 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
632 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
633 return (idxReg, sOrgExpr, sStdRef);
634
635
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ## NOTE(review): the integer appears to be the index of the segment-register
    ## parameter dropped by the FLAT form - confirm with the morphing code.
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':              ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':              ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':          ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP':                     ( 2, 'IEM_MC_MEM_FLAT_MAP' ),
        'IEM_MC_MEM_MAP_U8_RW':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_EX':                  ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };

    ## Maps stack related MCs to their flat variants:
    ## index 0 is the 32-bit flat form, index 1 the 64-bit flat form.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':        ( 'IEM_MC_FLAT32_PUSH_U16',      'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':        ( 'IEM_MC_FLAT32_PUSH_U32',      'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':        ( 'IEM_MC_PUSH_U64',             'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':   ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_U16':         ( 'IEM_MC_FLAT32_POP_U16',       'IEM_MC_FLAT64_POP_U16', ),
        'IEM_MC_POP_U32':         ( 'IEM_MC_FLAT32_POP_U32',       'IEM_MC_POP_U32', ),
        'IEM_MC_POP_U64':         ( 'IEM_MC_POP_U64',              'IEM_MC_FLAT64_POP_U64', ),
    };

    ## Maps each variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_XXX MC to
    ## substitute for IEM_MC_CALC_RM_EFF_ADDR (see analyzeMorphStmtForThreaded).
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
744
    def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.

        Note! The iParamRef cursor is threaded through recursive calls (for
              conditional branches) and returned, so the traversal here must
              match the one in analyzeFindThreadedParamRefs exactly.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that are purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so earlier offsets within the same parameter stay valid as we splice.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            # The original reference text must still be at the recorded offset,
                            # unless the reference was custom constructed (fCustomRef).
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            # Splice in the threaded function parameter name.
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                + oCurRef.sNewName \
                                + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        # 16-bit flat only needs the 16-bit displacement.
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            # The 64-bit variations also need the extended ModRM byte and instruction length.
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):
                    oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
                        and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
                        # S8 jumps on 386+ additionally need the effective operand size.
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in (self.ksVariation_64f, self.ksVariation_64f_FsGs, self.ksVariation_64f_Addr32):
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation == self.ksVariation_16_Pre386:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation == self.ksVariation_16f_Pre386:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        # Sanity check that the parameter we drop really is the effective segment.
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    # Index 0 = 32-bit flat, index 1 = 64-bit.
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in (self.ksVariation_64,
                                                                                                         self.ksVariation_64f,))];


                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);

        return (aoThreadedStmts, iParamRef);
876
877
878 def analyzeConsolidateThreadedParamRefs(self):
879 """
880 Consolidate threaded function parameter references into a dictionary
881 with lists of the references to each variable/field.
882 """
883 # Gather unique parameters.
884 self.dParamRefs = {};
885 for oRef in self.aoParamRefs:
886 if oRef.sStdRef not in self.dParamRefs:
887 self.dParamRefs[oRef.sStdRef] = [oRef,];
888 else:
889 self.dParamRefs[oRef.sStdRef].append(oRef);
890
891 # Generate names for them for use in the threaded function.
892 dParamNames = {};
893 for sName, aoRefs in self.dParamRefs.items():
894 # Morph the reference expression into a name.
895 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
896 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
897 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
898 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
899 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
900 elif sName.find('.') >= 0 or sName.find('->') >= 0:
901 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
902 else:
903 sName += 'P';
904
905 # Ensure it's unique.
906 if sName in dParamNames:
907 for i in range(10):
908 if sName + str(i) not in dParamNames:
909 sName += str(i);
910 break;
911 dParamNames[sName] = True;
912
913 # Update all the references.
914 for oRef in aoRefs:
915 oRef.sNewName = sName;
916
917 # Organize them by size too for the purpose of optimize them.
918 dBySize = {} # type: Dict[str, str]
919 for sStdRef, aoRefs in self.dParamRefs.items():
920 if aoRefs[0].sType[0] != 'P':
921 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
922 assert(cBits <= 64);
923 else:
924 cBits = 64;
925
926 if cBits not in dBySize:
927 dBySize[cBits] = [sStdRef,]
928 else:
929 dBySize[cBits].append(sStdRef);
930
931 # Pack the parameters as best as we can, starting with the largest ones
932 # and ASSUMING a 64-bit parameter size.
933 self.cMinParams = 0;
934 offNewParam = 0;
935 for cBits in sorted(dBySize.keys(), reverse = True):
936 for sStdRef in dBySize[cBits]:
937 if offNewParam == 0 or offNewParam + cBits > 64:
938 self.cMinParams += 1;
939 offNewParam = cBits;
940 else:
941 offNewParam += cBits;
942 assert(offNewParam <= 64);
943
944 for oRef in self.dParamRefs[sStdRef]:
945 oRef.iNewParam = self.cMinParams - 1;
946 oRef.offNewParam = offNewParam - cBits;
947
948 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
949 if self.cMinParams >= 4:
950 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
951 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
952
953 return True;
954
    ## Characters accepted in a C hexadecimal literal after the '0x'/'0X' prefix.
    ksHexDigits = '0123456789abcdefABCDEF';
956
    def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
        """
        Scans the statements for things that have to passed on to the threaded
        function (populates self.aoParamRefs).

        Note! The traversal order here must be matched exactly by
              analyzeMorphStmtForThreaded, which walks aoParamRefs in parallel.
        """
        for oStmt in aoStmts:
            # Some statements we can skip altogether.
            if isinstance(oStmt, iai.McCppPreProc):
                continue;
            if oStmt.isCppStmt() and oStmt.fDecode:
                continue;
            if oStmt.sName in ('IEM_MC_BEGIN',):
                continue;

            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sValue is None:
                    continue;
                # Only the value expression may carry decoder references; skip the
                # type and name parameters (and the trailing one - presumably the
                # argument index of IEM_MC_ARG_CONST; confirm against the MC macros).
                aiSkipParams = { 0: True, 1: True, 3: True };
            else:
                aiSkipParams = {};

            # Several statements have implicit parameters and some have different parameters.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                               'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
                               'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
                               'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
                               'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
                # These all get the instruction length as an implicit parameter.
                self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));

            if (    oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
                and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
                # S8 jumps on 386+ also need the effective operand size.
                self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));

            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                # This is being pretty presumptive about bRm always being the RM byte...
                assert len(oStmt.asParams) == 3;
                assert oStmt.asParams[1] == 'bRm';

                if self.sVariation in self.kdVariationsWithFlatAddr16:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
                                                             'uint16_t', oStmt, sStdRef = 'u16Disp'));
                elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
                    assert self.sVariation in self.kasVariationsWithAddressOnly64;
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
                                                             'uint4_t', oStmt, sStdRef = 'cbInstr'));
                    aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.

            # 8-bit register accesses needs to have their index argument reworked to take REX into account.
            if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
                (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in self.kdVariationsWithFlatAddress
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
            # function pointer or function table pointer for it to work.
            if isinstance(oStmt, iai.McStmtCall):
                if oStmt.sFn[0] == 'p':
                    # Function (table) pointer variable: pass it along.
                    self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
                elif (    oStmt.sFn[0] != 'i'
                      and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
                      and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
                    self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
                aiSkipParams[oStmt.idxFn] = True;

                # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    assert oStmt.idxFn == 1;
                    aiSkipParams[0] = True;


            # Check all the parameters for bogus references.
            for iParam, sParam in enumerate(oStmt.asParams):
                if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
                    # The parameter may contain a C expression, so we have to try
                    # extract the relevant bits, i.e. variables and fields while
                    # ignoring operators and parentheses.
                    offParam = 0;
                    while offParam < len(sParam):
                        # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
                        ch = sParam[offParam];
                        if ch.isalpha() or ch == '_':
                            offStart = offParam;
                            offParam += 1;
                            while offParam < len(sParam):
                                ch = sParam[offParam];
                                if not ch.isalnum() and ch != '_' and ch != '.':
                                    if ch != '-' or sParam[offParam + 1] != '>':
                                        # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
                                        if (    ch == '('
                                            and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
                                            offParam += len('(pVM)->') - 1;
                                        else:
                                            break;
                                    # '->' (or the hack): skip the extra character so the scan continues.
                                    offParam += 1;
                                offParam += 1;
                            sRef = sParam[offStart : offParam];

                            # For register references, we pass the full register indexes instead as macros
                            # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
                            # threaded function will be more efficient if we just pass the register index
                            # as a 4-bit param.
                            if (   sRef.startswith('IEM_GET_MODRM')
                                or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;
                                self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
                                                                         oStmt, iParam, offStart));

                            # We can skip known variables.
                            elif sRef in self.oParent.dVariables:
                                pass;

                            # Skip certain macro invocations.
                            elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
                                          'IEM_GET_GUEST_CPU_FEATURES',
                                          'IEM_IS_GUEST_CPU_AMD',
                                          'IEM_IS_16BIT_CODE',
                                          'IEM_IS_32BIT_CODE',
                                          'IEM_IS_64BIT_CODE',
                                          ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;

                                # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
                                if sRef not in ('IEM_IS_GUEST_CPU_AMD',
                                                'IEM_IS_16BIT_CODE',
                                                'IEM_IS_32BIT_CODE',
                                                'IEM_IS_64BIT_CODE',
                                                ):
                                    offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                    if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
                                        offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
                                        while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
                                            offParam += 1;

                            # Skip constants, globals, types (casts), sizeof and macros.
                            elif (   sRef.startswith('IEM_OP_PRF_')
                                  or sRef.startswith('IEM_ACCESS_')
                                  or sRef.startswith('IEMINT_')
                                  or sRef.startswith('X86_GREG_')
                                  or sRef.startswith('X86_SREG_')
                                  or sRef.startswith('X86_EFL_')
                                  or sRef.startswith('X86_FSW_')
                                  or sRef.startswith('X86_FCW_')
                                  or sRef.startswith('X86_XCPT_')
                                  or sRef.startswith('IEMMODE_')
                                  or sRef.startswith('IEM_F_')
                                  or sRef.startswith('IEM_CIMPL_F_')
                                  or sRef.startswith('g_')
                                  or sRef.startswith('iemAImpl_')
                                  or sRef.startswith('kIemNativeGstReg_')
                                  or sRef in ( 'int8_t',    'int16_t',    'int32_t',    'int64_t',
                                               'INT8_C',    'INT16_C',    'INT32_C',    'INT64_C',
                                               'UINT8_C',   'UINT16_C',   'UINT32_C',   'UINT64_C',
                                               'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
                                               'INT8_MAX',  'INT16_MAX',  'INT32_MAX',  'INT64_MAX',
                                               'INT8_MIN',  'INT16_MIN',  'INT32_MIN',  'INT64_MIN',
                                               'sizeof',    'NOREF',      'RT_NOREF',   'IEMMODE_64BIT',
                                               'RT_BIT_32', 'RT_BIT_64',  'true',       'false',
                                               'NIL_RTGCPTR',) ):
                                pass;

                            # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
                            elif (   (    '.' not in sRef
                                      and '-' not in sRef
                                      and sRef not in ('pVCpu', ) )
                                  or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
                                self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
                                                                         oStmt, iParam, offStart));
                        # Number.
                        elif ch.isdigit():
                            if (    ch == '0'
                                and offParam + 2 <= len(sParam)
                                and sParam[offParam + 1] in 'xX'
                                and sParam[offParam + 2] in self.ksHexDigits ):
                                # Hexadecimal literal: skip the 0x prefix and all hex digits.
                                offParam += 2;
                                while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
                                    offParam += 1;
                            else:
                                while offParam < len(sParam) and sParam[offParam].isdigit():
                                    offParam += 1;
                        # Comment?
                        elif (    ch == '/'
                              and offParam + 4 <= len(sParam)
                              and sParam[offParam + 1] == '*'):
                            offParam += 2;
                            offNext = sParam.find('*/', offParam);
                            if offNext < offParam:
                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
                            offParam = offNext + 2;
                        # Whatever else.
                        else:
                            offParam += 1;

            # Traverse the branches of conditionals.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
                self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
        return True;
1186
1187 def analyzeVariation(self, aoStmts):
1188 """
1189 2nd part of the analysis, done on each variation.
1190
1191 The variations may differ in parameter requirements and will end up with
1192 slightly different MC sequences. Thus this is done on each individually.
1193
1194 Returns dummy True - raises exception on trouble.
1195 """
1196 # Now scan the code for variables and field references that needs to
1197 # be passed to the threaded function because they are related to the
1198 # instruction decoding.
1199 self.analyzeFindThreadedParamRefs(aoStmts);
1200 self.analyzeConsolidateThreadedParamRefs();
1201
1202 # Morph the statement stream for the block into what we'll be using in the threaded function.
1203 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1204 if iParamRef != len(self.aoParamRefs):
1205 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1206
1207 return True;
1208
    def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
        """
        Produces generic C++ statements that emit a call to the threaded
        function variation and any subsequent checks that may be necessary
        after that.

        When sCallVarNm is given, it names a C++ expression/variable holding
        the function index to call instead of this variation's own index
        constant (presumably for runtime-selected dispatch - confirm against
        the callers).
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            # OR together all reference fragments packed into this 64-bit parameter.
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));

        # For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        # mask and maybe emit additional checks.
        if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
            or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
            or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
                                         cchIndent = cchIndent));

        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags      = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        if asTbBranchedFlags:
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1276
1277
1278class ThreadedFunction(object):
1279 """
1280 A threaded function.
1281 """
1282
    def __init__(self, oMcBlock: iai.McBlock) -> None:
        ## The MC block this threaded function is generated from.
        self.oMcBlock       = oMcBlock      # type: iai.McBlock
        # The remaining fields are only useful after analyze() has been called:
        ## Variations for this block. There is at least one.
        self.aoVariations   = []            # type: List[ThreadedFunctionVariation]
        ## Variation dictionary containing the same as aoVariations, keyed by variation identifier.
        self.dVariations    = {}            # type: Dict[str, ThreadedFunctionVariation]
        ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
        self.dVariables     = {}            # type: Dict[str, iai.McStmtVar]
        ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
        ## and those determined by analyzeCodeOperation().
        self.dsCImplFlags   = {}            # type: Dict[str, bool]
1295
1296 @staticmethod
1297 def dummyInstance():
1298 """ Gets a dummy instance. """
1299 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1300 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1301
1302 def hasWithFlagsCheckingAndClearingVariation(self):
1303 """
1304 Check if there is one or more with flags checking and clearing
1305 variations for this threaded function.
1306 """
1307 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1308 if sVarWithFlags in self.dVariations:
1309 return True;
1310 return False;
1311
1312 #
1313 # Analysis and code morphing.
1314 #
1315
1316 def raiseProblem(self, sMessage):
1317 """ Raises a problem. """
1318 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1319
1320 def warning(self, sMessage):
1321 """ Emits a warning. """
1322 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1323
1324 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1325 """ Scans the statements for MC variables and call arguments. """
1326 for oStmt in aoStmts:
1327 if isinstance(oStmt, iai.McStmtVar):
1328 if oStmt.sVarName in self.dVariables:
1329 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1330 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1331
1332 # There shouldn't be any variables or arguments declared inside if/
1333 # else blocks, but scan them too to be on the safe side.
1334 if isinstance(oStmt, iai.McStmtCond):
1335 cBefore = len(self.dVariables);
1336 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1337 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1338 if len(self.dVariables) != cBefore:
1339 raise Exception('Variables/arguments defined in conditional branches!');
1340 return True;
1341
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
        """
        Analyzes the code looking for clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.
        """
        for oStmt in aoStmts:
            # Set IEM_CIMPL_F_BRANCH_XXX flags if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                # Indirect branches inside conditional branches are not expected here.
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeCodeOperation(oStmt.aoIfBranch, True);
                if oStmt.aoElseBranch:
                    self.analyzeCodeOperation(oStmt.aoElseBranch, True);

        return True;
1382
    def analyze(self):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        Returns dummy True - raises exception on trouble.
        """

        # Check the block for errors before we proceed (will decode it).
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        # Decode the block into a list/tree of McStmt objects.
        aoStmts = self.oMcBlock.decode();

        # Scan the statements for local variables and call arguments (self.dVariables).
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        # Scan the code for IEM_CIMPL_F_ and other clues.
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        self.analyzeCodeOperation(aoStmts);
        # A block may belong to at most one of the three call categories.
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
            self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');

        # Create variations as needed.
        # Blocks deferring straight to a C implementation only get the default variation.
        # NOTE(review): IEM_MC_DEFER_TO_CIMPL_4_RET/5_RET are handled elsewhere in this
        # file but are absent from this set - confirm that is intentional.
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        # Blocks doing effective address calculation get the with-address variation
        # set corresponding to the IEM_MC_F_XXX mode restrictions ...
        elif iai.McStmt.findStmtByNames(aoStmts, {'IEM_MC_CALC_RM_EFF_ADDR' : True,}):
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        # ... and everything else gets the corresponding without-address set.
        else:
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Drop the eflags checking-and-clearing variations when the block doesn't
        # finish by advancing/jumping RIP.
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH':  True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                            }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        # Continue the analysis on each variation.
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;
1464
    ## Used by emitThreadedCallStmts.
    ## Variations that presumably require an additional runtime prefix/mode check
    ## before dispatch (segment override or non-default address size) - confirm
    ## against the use in emitThreadedCallStmts.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs:    True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs:   True,
        ThreadedFunctionVariation.ksVariation_32_Addr16:  True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat:    True,
        ThreadedFunctionVariation.ksVariation_32f_Flat:   True,
        ThreadedFunctionVariation.ksVariation_16_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
1478
    def emitThreadedCallStmts(self):
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        With a single default variation the call statements are returned
        directly.  Otherwise a C 'switch' is generated dispatching on the
        execution mode (optionally with extra prefix/eflags bits mixed in) to
        the matching variation.  When all case bodies turn out identical
        except for the function index, the switch is reduced to a function
        enum assignment followed by one shared call sequence.
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            return self.aoVariations[0].emitThreadedCallStmts(0);

        #
        # Case statement sub-class.
        #
        dByVari = self.dVariations;
        #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            """One switch case: condition label plus optional variation body (None body = fall-thru label)."""
            def __init__(self, sCond, sVarNm = None):
                self.sCond = sCond;
                self.sVarNm = sVarNm;
                self.oVar = dByVari[sVarNm] if sVarNm else None;
                self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;

            def toCode(self):
                # Full form: 'case X:' plus the call statements and a 'break;'.
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                # Reduced form used when all case bodies are identical: only
                # assigns the per-variation function enum value.
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                # Compares the case bodies statement by statement, disregarding
                # the function index parameter of IEM_MC2_EMIT_CALL_* so that
                # structurally identical variations can share one body.
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    if isinstance(oStmt, iai.McStmtCond): # release-build guard; unreachable while the assert above holds.
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        # Params must be identical, except param #1 of an
                        # IEM_MC2_EMIT_CALL_* call where each side is expected
                        # to reference its own variation's function index.
                        if ( sParam != oThatStmt.asParams[iParam]
                            and ( iParam != 1
                                or not isinstance(oStmt, iai.McCppCall)
                                or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                or sParam != self.oVar.getIndexName()
                                or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        #
        fSimple = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            # Value 8: the effective address size differs from the default of
            # the current CPU mode (address-size prefix in effect).
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple = False; # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            # Value 32: the eflags checking & clearing variation must be used
            # (previous instruction touched RFLAGS/inhibit or breakpoints pend).
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));

        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
                Case('IEMMODE_32BIT | 16', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16', None), # fall thru
                Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));

        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # Reduced form: the switch only picks enmFunction, then one shared
            # call sequence (from the first case with a body) follows it.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
1696
    def morphInputCode(self, aoStmts, fCallEmitted = False, cDepth = 0):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        The insertion point is found heuristically: the threaded call is placed
        right before the first raise/finish/CIMPL statement, or right after a
        IEMOP_HLP_DONE_* / IEMOP_HLP_DECODED_* decoding helper.  Branches of
        conditional statements are processed recursively.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.  Raises (via raiseProblem) if no insertion point
        was found at the top level.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for oStmt in aoStmts:
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                # Patch the accumulated CIMPL flags into IEM_MC_BEGIN's 4th parameter.
                oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or precede.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        # Insert the threaded call *before* this statement.
                        aoDecoderStmts.pop();
                        aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;
                    elif ( oStmt.fDecode
                        and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                            or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                        # Insert the threaded call *after* the decoding-done helper.
                        aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fCallEmitted, cDepth + 1);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fCallEmitted, cDepth + 1);
                else:
                    fCallEmitted2 = False;
                # The call only counts as emitted when *both* branches did it.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
1749
1750
1751 def generateInputCode(self):
1752 """
1753 Modifies the input code.
1754 """
1755 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
1756
1757 if len(self.oMcBlock.aoStmts) == 1:
1758 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
1759 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
1760 if self.dsCImplFlags:
1761 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
1762 else:
1763 sCode += '0;\n';
1764 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1765 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1766 sIndent = ' ' * (min(cchIndent, 2) - 2);
1767 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
1768 return sCode;
1769
1770 # IEM_MC_BEGIN/END block
1771 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
1772 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1773 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1774
# Short alias for ThreadedFunctionVariation, mainly keeping the long case
# dispatch expressions in ThreadedFunction.emitThreadedCallStmts readable.
ThrdFnVar = ThreadedFunctionVariation;
1777
1778
1779class IEMThreadedGenerator(object):
1780 """
1781 The threaded code generator & annotator.
1782 """
1783
1784 def __init__(self):
1785 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
1786 self.oOptions = None # type: argparse.Namespace
1787 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
1788 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
1789
1790 #
1791 # Processing.
1792 #
1793
1794 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
1795 """
1796 Process the input files.
1797 """
1798
1799 # Parse the files.
1800 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
1801
1802 # Create threaded functions for the MC blocks.
1803 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
1804
1805 # Analyze the threaded functions.
1806 dRawParamCounts = {};
1807 dMinParamCounts = {};
1808 for oThreadedFunction in self.aoThreadedFuncs:
1809 oThreadedFunction.analyze();
1810 for oVariation in oThreadedFunction.aoVariations:
1811 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
1812 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
1813 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
1814 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
1815 print('debug: %s params: %4s raw, %4s min'
1816 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
1817 file = sys.stderr);
1818
1819 # Populate aidxFirstFunctions. This is ASSUMING that
1820 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
1821 iThreadedFunction = 0;
1822 oThreadedFunction = self.getThreadedFunctionByIndex(0);
1823 self.aidxFirstFunctions = [];
1824 for oParser in self.aoParsers:
1825 self.aidxFirstFunctions.append(iThreadedFunction);
1826
1827 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
1828 iThreadedFunction += 1;
1829 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
1830
1831 # Analyze the threaded functions and their variations for native recompilation.
1832 if fNativeRecompilerEnabled:
1833 print('todo:', file = sys.stderr);
1834 cTotal = 0;
1835 cNative = 0;
1836 for oThreadedFunction in self.aoThreadedFuncs:
1837 for oVariation in oThreadedFunction.aoVariations:
1838 cTotal += 1;
1839 oVariation.oNativeRecomp = ian.analyzeVariantForNativeRecomp(oVariation, sHostArch);
1840 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
1841 cNative += 1;
1842 print('todo: %.1f%% / %u out of %u threaded function variations are recompilable'
1843 % (cNative * 100.0 / cTotal, cNative, cTotal), file = sys.stderr);
1844 if ian.g_dUnsupportedMcStmtLastOneStats:
1845 asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneStats, reverse = True,
1846 key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneStats[sSortKey]))[:16];
1847 print('todo:', file = sys.stderr);
1848 print('todo: Top %s variations with one unsupported statement dependency:' % (len(asTopKeys),),
1849 file = sys.stderr);
1850 cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
1851 for sKey in asTopKeys:
1852 print('todo: %*s = %s (%s%s)'
1853 % (cchMaxKey, sKey, len(ian.g_dUnsupportedMcStmtLastOneStats[sKey]),
1854 ', '.join([oVar.getShortName() for oVar in ian.g_dUnsupportedMcStmtLastOneStats[sKey][:5]]),
1855 ',...' if len(ian.g_dUnsupportedMcStmtLastOneStats[sKey]) >= 5 else '', )
1856 , file = sys.stderr);
1857
1858 asTopKeys = sorted(ian.g_dUnsupportedMcStmtStats, reverse = True,
1859 key = lambda sSortKey: ian.g_dUnsupportedMcStmtStats[sSortKey])[:16];
1860 print('todo:', file = sys.stderr);
1861 print('todo: Top %d most used unimplemented statements:' % (len(asTopKeys),), file = sys.stderr);
1862 cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
1863 for i in range(0, len(asTopKeys), 2):
1864 print('todo: %*s = %4d %*s = %4d'
1865 % ( cchMaxKey, asTopKeys[i], ian.g_dUnsupportedMcStmtStats[asTopKeys[i]],
1866 cchMaxKey, asTopKeys[i + 1], ian.g_dUnsupportedMcStmtStats[asTopKeys[i + 1]],),
1867 file = sys.stderr);
1868 print('todo:', file = sys.stderr);
1869
1870 if ian.g_dUnsupportedMcStmtLastOneVarStats:
1871 asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneVarStats, reverse = True,
1872 key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneVarStats[sSortKey]))[:16];
1873 print('todo:', file = sys.stderr);
1874 print('todo: Top %s variations with variables and 1-2 unsupported statement dependency:' % (len(asTopKeys),),
1875 file = sys.stderr);
1876 cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
1877 for sKey in asTopKeys:
1878 print('todo: %*s = %s (%s%s)'
1879 % (cchMaxKey, sKey, len(ian.g_dUnsupportedMcStmtLastOneVarStats[sKey]),
1880 ', '.join([oVar.getShortName() for oVar in ian.g_dUnsupportedMcStmtLastOneVarStats[sKey][:5]]),
1881 ',...' if len(ian.g_dUnsupportedMcStmtLastOneVarStats[sKey]) >= 5 else '', )
1882 , file = sys.stderr);
1883
1884
1885 # Gather arguments + variable statistics for the MC blocks.
1886 cMaxArgs = 0;
1887 cMaxVars = 0;
1888 cMaxVarsAndArgs = 0;
1889 cbMaxArgs = 0;
1890 cbMaxVars = 0;
1891 cbMaxVarsAndArgs = 0;
1892 for oThreadedFunction in self.aoThreadedFuncs:
1893 if oThreadedFunction.oMcBlock.cLocals >= 0:
1894 # Counts.
1895 assert oThreadedFunction.oMcBlock.cArgs >= 0;
1896 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
1897 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
1898 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
1899 if cMaxVarsAndArgs > 9:
1900 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
1901 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
1902 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
1903 # Calc stack allocation size:
1904 cbArgs = 0;
1905 for oArg in oThreadedFunction.oMcBlock.aoArgs:
1906 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
1907 cbVars = 0;
1908 for oVar in oThreadedFunction.oMcBlock.aoLocals:
1909 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
1910 cbMaxVars = max(cbMaxVars, cbVars);
1911 cbMaxArgs = max(cbMaxArgs, cbArgs);
1912 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
1913 if cbMaxVarsAndArgs >= 0xc0:
1914 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
1915 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
1916
1917 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
1918 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
1919
1920 return True;
1921
1922 #
1923 # Output
1924 #
1925
1926 def generateLicenseHeader(self):
1927 """
1928 Returns the lines for a license header.
1929 """
1930 return [
1931 '/*',
1932 ' * Autogenerated by $Id: IEMAllThrdPython.py 102010 2023-11-08 21:36:54Z vboxsync $ ',
1933 ' * Do not edit!',
1934 ' */',
1935 '',
1936 '/*',
1937 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
1938 ' *',
1939 ' * This file is part of VirtualBox base platform packages, as',
1940 ' * available from https://www.virtualbox.org.',
1941 ' *',
1942 ' * This program is free software; you can redistribute it and/or',
1943 ' * modify it under the terms of the GNU General Public License',
1944 ' * as published by the Free Software Foundation, in version 3 of the',
1945 ' * License.',
1946 ' *',
1947 ' * This program is distributed in the hope that it will be useful, but',
1948 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
1949 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
1950 ' * General Public License for more details.',
1951 ' *',
1952 ' * You should have received a copy of the GNU General Public License',
1953 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
1954 ' *',
1955 ' * The contents of this file may alternatively be used under the terms',
1956 ' * of the Common Development and Distribution License Version 1.0',
1957 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
1958 ' * in the VirtualBox distribution, in which case the provisions of the',
1959 ' * CDDL are applicable instead of those of the GPL.',
1960 ' *',
1961 ' * You may elect to license modified versions of this file under the',
1962 ' * terms and conditions of either the GPL or the CDDL or both.',
1963 ' *',
1964 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
1965 ' */',
1966 '',
1967 '',
1968 '',
1969 ];
1970
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry is (function name suffix, cUserArgs, fHasNativeRecompiler);
    ## the suffix is expanded to kIemThreadedFunc_BltIn_%s / iemThreadedFunc_BltIn_%s
    ## by the header/source generators below.
    katBltIns = (
        ( 'DeferToCImpl0', 2, True ),
        ( 'CheckIrq', 0, True ),
        ( 'CheckMode', 1, True ),
        ( 'CheckHwInstrBps', 0, False ),
        ( 'CheckCsLim', 1, False ),

        ( 'CheckCsLimAndOpcodes', 3, False ),
        ( 'CheckOpcodes', 3, False ),
        ( 'CheckOpcodesConsiderCsLim', 3, False ),

        ( 'CheckCsLimAndPcAndOpcodes', 3, False ),
        ( 'CheckPcAndOpcodes', 3, False ),
        ( 'CheckPcAndOpcodesConsiderCsLim', 3, False ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, False ),
        ( 'CheckOpcodesAcrossPageLoadingTlb', 3, False ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, False ),

        ( 'CheckCsLimAndOpcodesLoadingTlb', 3, False ),
        ( 'CheckOpcodesLoadingTlb', 3, False ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, False ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, False ),
        ( 'CheckOpcodesOnNextPageLoadingTlb', 2, False ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, False ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, False ),
        ( 'CheckOpcodesOnNewPageLoadingTlb', 2, False ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, False ),
    );
2004
2005 def generateThreadedFunctionsHeader(self, oOut):
2006 """
2007 Generates the threaded functions header file.
2008 Returns success indicator.
2009 """
2010
2011 asLines = self.generateLicenseHeader();
2012
2013 # Generate the threaded function table indexes.
2014 asLines += [
2015 'typedef enum IEMTHREADEDFUNCS',
2016 '{',
2017 ' kIemThreadedFunc_Invalid = 0,',
2018 '',
2019 ' /*',
2020 ' * Predefined',
2021 ' */',
2022 ];
2023 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2024
2025 iThreadedFunction = 1 + len(self.katBltIns);
2026 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2027 asLines += [
2028 '',
2029 ' /*',
2030 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2031 ' */',
2032 ];
2033 for oThreadedFunction in self.aoThreadedFuncs:
2034 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2035 if oVariation:
2036 iThreadedFunction += 1;
2037 oVariation.iEnumValue = iThreadedFunction;
2038 asLines.append(' ' + oVariation.getIndexName() + ',');
2039 asLines += [
2040 ' kIemThreadedFunc_End',
2041 '} IEMTHREADEDFUNCS;',
2042 '',
2043 ];
2044
2045 # Prototype the function table.
2046 asLines += [
2047 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2048 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2049 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2050 '#endif',
2051 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2052 ];
2053
2054 oOut.write('\n'.join(asLines));
2055 return True;
2056
    ## Maps a field width in bits to the UINT64_C mask literal used when
    ## unpacking that field from a packed 64-bit threaded-function parameter
    ## (see generateFunctionParameterUnpacking).
    ksBitsToIntMask = {
        1: "UINT64_C(0x1)",
        2: "UINT64_C(0x3)",
        4: "UINT64_C(0xf)",
        8: "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2065
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        oVariation's dParamRefs describes where each referenced value lives in
        the packed 64-bit parameters; asParams supplies the C expressions for
        those packed parameters.  Emits one
        'type const name = <unpack>; /* origin */' line per distinct reference,
        column-aligned and ordered by 'param:offset'.  Returns success
        indicator.
        """
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef = aoRefs[0];
            # Pointer types ('P...') are passed as full 64-bit values; other
            # types get their bit count and C type from g_kdTypeInfo.
            if oRef.sType[0] != 'P':
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            # Build the unpacking expression: plain copy/cast for full 64-bit
            # values, otherwise mask (and shift when not at bit offset zero).
            if cBits == 64:
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                sUnpack = '(%s)((%s >> %s) & %s);' \
                    % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # Column 0 is a 'param:offset' sort key only; columns 1-4 get emitted.
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
        # Compute per-column widths so the declarations line up.
        acchVars = [0, 0, 0, 0, 0];
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
        return True;
2107
2108 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut):
        """
        Generates the threaded functions source file.

        Emits one static IEM_DECL_IEMTHREADEDFUNC_DEF function per threaded
        function variation, grouped by variation kind in emit order, followed
        by three parallel tables indexed by IEMTHREADEDFUNCS: function
        pointers, function names (IN_RING3/LOG_ENABLED only) and used argument
        counts.

        Returns success indicator.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Banner comment for the variation group.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                                % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                   os.path.split(oMcBlock.sSrcFile)[1],
                                   ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' /*Invalid*/ NULL,',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' "Invalid",',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            ' 0, /*Invalid*/',
        ];
        aasTables = (asFuncTable, asNameTable, asArgCntTab,);

        # The predefined (built-in) entries come first in all three tables.
        for asTable in aasTables:
            asTable.extend((
                '',
                ' /*',
                ' * Predefined.',
                ' */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
            asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));

        # Then one entry per variation, in the same order as the enum; the
        # assert checks agreement with generateThreadedFunctionsHeader.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    ' /*',
                    ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    ' */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write( '\n'
                  + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write( '\n'
                  + '\n'
                  + '\n'
                  + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                  + '\n'
                  + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write('\n');

        return True;
2243
2244 def generateNativeFunctionsHeader(self, oOut):
2245 """
2246 Generates the native recompiler functions header file.
2247 Returns success indicator.
2248 """
2249 if not self.oOptions.fNativeRecompilerEnabled:
2250 return True;
2251
2252 asLines = self.generateLicenseHeader();
2253
2254 # Prototype the function table.
2255 asLines += [
2256 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2257 '',
2258 ];
2259
2260 oOut.write('\n'.join(asLines));
2261 return True;
2262
    def generateNativeFunctionsSource(self, oOut):
        """
        Generates the native recompiler functions source file.

        Emits one IEM_DECL_IEMNATIVERECOMPFUNC_DEF function body per threaded
        function variation that has a working native recompiler, then the
        g_apfnIemNativeRecompileFunctions table which runs parallel to
        g_apfnIemThreadedFunctions (NULL entries for variations without a
        native recompiler).

        Returns success indicator.
        """
        # No output at all unless native recompilation is enabled (--native).
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big banner comment separating each variation section in the output.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Only variations with a recompilable native translation get a body here.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                                % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                   os.path.split(oMcBlock.sSrcFile)[1],
                                   ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.  All three generic call-entry parameters are
                    # made available; the unpacking code uses what the variation needs.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                           ('pCallEntry->auParams[0]',
                                                            'pCallEntry->auParams[1]',
                                                            'pCallEntry->auParams[2]',));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write( '\n'
                  + '\n'
                  + '/*\n'
                  + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                  + ' */\n'
                  + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                  + '{\n'
                  + ' /*Invalid*/ NULL,'
                  + '\n'
                  + ' /*\n'
                  + ' * Predefined.\n'
                  + ' */\n'
                  );
        # Built-in entries come first; NULL for those without a recompiler function.
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # Table indices continue right after the built-ins; each index must
        # match the variation's enum value emitted in the header (asserted below).
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write( ' /*\n'
                      + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                      + ' */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getNativeFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write( '};\n'
                  + '\n');
        return True;
2358
2359
2360 def getThreadedFunctionByIndex(self, idx):
2361 """
2362 Returns a ThreadedFunction object for the given index. If the index is
2363 out of bounds, a dummy is returned.
2364 """
2365 if idx < len(self.aoThreadedFuncs):
2366 return self.aoThreadedFuncs[idx];
2367 return ThreadedFunction.dummyInstance();
2368
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.

        Copies the parsed input sources to oOut, replacing each MC block with
        the code produced by ThreadedFunction.generateInputCode().  Only the
        parsers whose source file belongs to the file set identified by
        idxFile (1..4) are emitted.

        Returns success indicator.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?  A set value of -1 in the info
            # table means the file belongs to every set.
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude     = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                assert fInclude is False;   # every input file must be present in the info table
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the file's lines in lockstep with its threaded functions
            # (aidxFirstFunctions maps parser index -> first function index).
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine             = 0;
            while iLine < len(oParser.asLines):
                sLine  = oParser.asLines[iLine];
                iLine += 1;                 # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if (   iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block.  Just extract it and insert the replacement.
                # offBeginLine/offAfterEnd are character offsets into the
                # begin/end lines, delimiting the block within those lines.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    assert (   (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the block's end line and emit its tail.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert (   sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                # Loop while consecutive blocks begin on this same (1-based) line.
                #
                else:
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        assert (   sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
2459
2460 def generateModifiedInput1(self, oOut):
2461 """
2462 Generates the combined modified input source/header file, part 1.
2463 Returns success indicator.
2464 """
2465 return self.generateModifiedInput(oOut, 1);
2466
2467 def generateModifiedInput2(self, oOut):
2468 """
2469 Generates the combined modified input source/header file, part 2.
2470 Returns success indicator.
2471 """
2472 return self.generateModifiedInput(oOut, 2);
2473
2474 def generateModifiedInput3(self, oOut):
2475 """
2476 Generates the combined modified input source/header file, part 3.
2477 Returns success indicator.
2478 """
2479 return self.generateModifiedInput(oOut, 3);
2480
2481 def generateModifiedInput4(self, oOut):
2482 """
2483 Generates the combined modified input source/header file, part 4.
2484 Returns success indicator.
2485 """
2486 return self.generateModifiedInput(oOut, 4);
2487
2488
2489 #
2490 # Main
2491 #
2492
2493 def main(self, asArgs):
2494 """
2495 C-like main function.
2496 Returns exit code.
2497 """
2498
2499 #
2500 # Parse arguments
2501 #
2502 sScriptDir = os.path.dirname(__file__);
2503 oParser = argparse.ArgumentParser(add_help = False);
2504 oParser.add_argument('asInFiles',
2505 metavar = 'input.cpp.h',
2506 nargs = '*',
2507 default = [os.path.join(sScriptDir, aoInfo[0])
2508 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2509 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2510 oParser.add_argument('--host-arch',
2511 metavar = 'arch',
2512 dest = 'sHostArch',
2513 action = 'store',
2514 default = None,
2515 help = 'The host architecture.');
2516
2517 oParser.add_argument('--out-thrd-funcs-hdr',
2518 metavar = 'file-thrd-funcs.h',
2519 dest = 'sOutFileThrdFuncsHdr',
2520 action = 'store',
2521 default = '-',
2522 help = 'The output header file for the threaded functions.');
2523 oParser.add_argument('--out-thrd-funcs-cpp',
2524 metavar = 'file-thrd-funcs.cpp',
2525 dest = 'sOutFileThrdFuncsCpp',
2526 action = 'store',
2527 default = '-',
2528 help = 'The output C++ file for the threaded functions.');
2529 oParser.add_argument('--out-n8ve-funcs-hdr',
2530 metavar = 'file-n8tv-funcs.h',
2531 dest = 'sOutFileN8veFuncsHdr',
2532 action = 'store',
2533 default = '-',
2534 help = 'The output header file for the native recompiler functions.');
2535 oParser.add_argument('--out-n8ve-funcs-cpp',
2536 metavar = 'file-n8tv-funcs.cpp',
2537 dest = 'sOutFileN8veFuncsCpp',
2538 action = 'store',
2539 default = '-',
2540 help = 'The output C++ file for the native recompiler functions.');
2541 oParser.add_argument('--native',
2542 dest = 'fNativeRecompilerEnabled',
2543 action = 'store_true',
2544 default = False,
2545 help = 'Enables generating the files related to native recompilation.');
2546 oParser.add_argument('--out-mod-input1',
2547 metavar = 'file-instr.cpp.h',
2548 dest = 'sOutFileModInput1',
2549 action = 'store',
2550 default = '-',
2551 help = 'The output C++/header file for modified input instruction files part 1.');
2552 oParser.add_argument('--out-mod-input2',
2553 metavar = 'file-instr.cpp.h',
2554 dest = 'sOutFileModInput2',
2555 action = 'store',
2556 default = '-',
2557 help = 'The output C++/header file for modified input instruction files part 2.');
2558 oParser.add_argument('--out-mod-input3',
2559 metavar = 'file-instr.cpp.h',
2560 dest = 'sOutFileModInput3',
2561 action = 'store',
2562 default = '-',
2563 help = 'The output C++/header file for modified input instruction files part 3.');
2564 oParser.add_argument('--out-mod-input4',
2565 metavar = 'file-instr.cpp.h',
2566 dest = 'sOutFileModInput4',
2567 action = 'store',
2568 default = '-',
2569 help = 'The output C++/header file for modified input instruction files part 4.');
2570 oParser.add_argument('--help', '-h', '-?',
2571 action = 'help',
2572 help = 'Display help and exit.');
2573 oParser.add_argument('--version', '-V',
2574 action = 'version',
2575 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
2576 % (__version__.split()[1], iai.__version__.split()[1],),
2577 help = 'Displays the version/revision of the script and exit.');
2578 self.oOptions = oParser.parse_args(asArgs[1:]);
2579 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
2580
2581 #
2582 # Process the instructions specified in the IEM sources.
2583 #
2584 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
2585 #
2586 # Generate the output files.
2587 #
2588 aaoOutputFiles = (
2589 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
2590 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
2591 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
2592 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
2593 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
2594 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
2595 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
2596 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
2597 );
2598 fRc = True;
2599 for sOutFile, fnGenMethod in aaoOutputFiles:
2600 if sOutFile == '-':
2601 fRc = fnGenMethod(sys.stdout) and fRc;
2602 else:
2603 try:
2604 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
2605 except Exception as oXcpt:
2606 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
2607 return 1;
2608 fRc = fnGenMethod(oOut) and fRc;
2609 oOut.close();
2610 if fRc:
2611 return 0;
2612
2613 return 1;
2614
2615
if __name__ == '__main__':
    # Script entry point: run the generator on the command line arguments and
    # propagate its status as the process exit code.
    sys.exit(IEMThreadedGenerator().main(sys.argv));
2618
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette