VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 102621

Last change on this file since 102621 was 102593, checked in by vboxsync, 14 months ago

VMM/IEM: Native translation of Blt_CheckCsLim. bugref:10371

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 130.9 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 102593 2023-12-13 22:41:23Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 102593 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
if sys.version_info[0] >= 3:
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;

## Types that can be passed as (packed) threaded function parameters.
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type      )
    'int8_t':       (    8,    True, 'int8_t',    ),
    'int16_t':      (   16,    True, 'int16_t',   ),
    'int32_t':      (   32,    True, 'int32_t',   ),
    'int64_t':      (   64,    True, 'int64_t',   ),
    'uint4_t':      (    4,   False, 'uint8_t',   ),
    'uint8_t':      (    8,   False, 'uint8_t',   ),
    'uint16_t':     (   16,   False, 'uint16_t',  ),
    'uint32_t':     (   32,   False, 'uint32_t',  ),
    'uint64_t':     (   64,   False, 'uint64_t',  ),
    'uintptr_t':    (   64,   False, 'uintptr_t', ), # ASSUMES 64-bit host pointer size.
    'bool':         (    1,   False, 'bool',      ),
    'IEMMODE':      (    2,   False, 'IEMMODE',   ),
};

# Only for getTypeBitCount/variables.
# Superset of g_kdTypeInfo (see update() below) adding larger types that
# cannot be passed as threaded parameters but may appear as variables.
g_kdTypeInfo2 = {
    'RTFLOAT32U':           (        32, False, 'RTFLOAT32U',        ),
    'RTFLOAT64U':           (        64, False, 'RTFLOAT64U',        ),
    'RTUINT64U':            (        64, False, 'RTUINT64U',         ),
    'RTGCPTR':              (        64, False, 'RTGCPTR',           ),
    'RTPBCD80U':            (        80, False, 'RTPBCD80U',         ),
    'RTFLOAT80U':           (        80, False, 'RTFLOAT80U',        ),
    'IEMFPURESULT':         (     80+16, False, 'IEMFPURESULT',      ),
    'IEMFPURESULTTWO':      (  80+16+80, False, 'IEMFPURESULTTWO',   ),
    'RTUINT128U':           (       128, False, 'RTUINT128U',        ),
    'X86XMMREG':            (       128, False, 'X86XMMREG',         ),
    'IEMSSERESULT':         (    128+32, False, 'IEMSSERESULT',      ),
    'IEMMEDIAF2XMMSRC':     (       256, False, 'IEMMEDIAF2XMMSRC',  ),
    'RTUINT256U':           (       256, False, 'RTUINT256U',        ),
    'IEMPCMPISTRXSRC':      (       256, False, 'IEMPCMPISTRXSRC',   ),
    'IEMPCMPESTRXSRC':      (       384, False, 'IEMPCMPESTRXSRC',   ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);
91
def getTypeBitCount(sType):
    """
    Translate a type to size in bits.

    Unknown types are assumed to be 64-bit pointers/handles and 64 is
    returned after emitting a diagnostic.  The diagnostic goes to stderr so
    it cannot end up mixed into generated code written to stdout.
    """
    if sType in g_kdTypeInfo2:
        return g_kdTypeInfo2[sType][0];
    # Pointer-ish types (contains '*' or uses the P-prefix naming convention).
    if '*' in sType or sType[0] == 'P':
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,), file = sys.stderr);
    return 64;
103
## Maps IEMCPU (pVCpu->iem.s.) field names to their parameter type.
## Entries mapped to (None,) are decoder-internal state that threaded code
## must not reference; the rest give the type used for packing.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte':     ( None, ),
    'cbInstrBuf':           ( None, ),
    'pbInstrBuf':           ( None, ),
    'uInstrBufPc':          ( None, ),
    'cbInstrBufTotal':      ( None, ),
    'offCurInstrStart':     ( None, ),
    'cbOpcode':             ( None, ),
    'offOpcode':            ( None, ),
    'offModRm':             ( None, ),
    # Okay ones.
    'fPrefixes':            ( 'uint32_t', ),
    'uRexReg':              ( 'uint8_t', ),
    'uRexB':                ( 'uint8_t', ),
    'uRexIndex':            ( 'uint8_t', ),
    'iEffSeg':              ( 'uint8_t', ),
    'enmEffOpSize':         ( 'IEMMODE', ),
    'enmDefAddrMode':       ( 'IEMMODE', ),
    'enmEffAddrMode':       ( 'IEMMODE', ),
    'enmDefOpSize':         ( 'IEMMODE', ),
    'idxPrefix':            ( 'uint8_t', ),
    'uVex3rdReg':           ( 'uint8_t', ),
    'uVexLength':           ( 'uint8_t', ),
    'fEvexStuff':           ( 'uint8_t', ),
    'uFpuOpcode':           ( 'uint16_t', ),
};
131
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.

    Records where a value is referenced in the original decoder code and how
    it gets renamed/packed when emitted as a threaded function parameter.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef    = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        self.sStdRef    = sStdRef if sStdRef else ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef = sStdRef is not None;
        ## The type (typically derived).
        self.sType      = sType;
        ## The statement making the reference.
        self.oStmt      = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam     = iParam;
        ## The offset in the parameter of the reference.
        self.offParam   = offParam;

        ## The variable name in the threaded function.
        self.sNewName    = 'x';
        ## The threaded function parameter this reference is packed into.
        self.iNewParam   = 99;
        ## The bit offset in iNewParam.
        self.offNewParam = 1024;
159
160
161class ThreadedFunctionVariation(object):
162 """ Threaded function variation. """
163
    ## @name Variations.
    ## These variations will match translation block selection/distinctions as well.
    ## @{
    ksVariation_Default     = '';               ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
    ksVariation_16          = '_16';            ##< 16-bit mode code (386+).
    ksVariation_16f         = '_16f';           ##< 16-bit mode code (386+), check+clear eflags.
    ksVariation_16_Addr32   = '_16_Addr32';     ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
    ksVariation_16f_Addr32  = '_16f_Addr32';    ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
    ksVariation_16_Pre386   = '_16_Pre386';     ##< 16-bit mode code, pre-386 CPU target.
    ksVariation_16f_Pre386  = '_16f_Pre386';    ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
    ksVariation_32          = '_32';            ##< 32-bit mode code (386+).
    ksVariation_32f         = '_32f';           ##< 32-bit mode code (386+), check+clear eflags.
    ksVariation_32_Flat     = '_32_Flat';       ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
    ksVariation_32f_Flat    = '_32f_Flat';      ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
    ksVariation_32_Addr16   = '_32_Addr16';     ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    ksVariation_32f_Addr16  = '_32f_Addr16';    ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64          = '_64';            ##< 64-bit mode code.
    ksVariation_64f         = '_64f';           ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_FsGs     = '_64_FsGs';       ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs    = '_64f_FsGs';      ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32   = '_64_Addr32';     ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32  = '_64f_Addr32';    ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
    ## All variations, in no particular order.
    kasVariations           = (
        ksVariation_Default,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for blocks without memory addressing.
    kasVariationsWithoutAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    kasVariationsWithoutAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    kasVariationsWithoutAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
    );
    kasVariationsWithoutAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
    );
    kasVariationsWithoutAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variations for blocks with memory addressing (adds Addr/FsGs/Flat forms).
    kasVariationsWithAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    kasVariationsWithAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    kasVariationsWithAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    kasVariationsWithAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    kasVariationsWithAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    kasVariationsOnlyPre386 = (
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
    );
    ## The order the variations are emitted in (most frequently executed first).
    kasVariationsEmitOrder = (
        ksVariation_Default,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Human readable descriptions of each variation, for stats/logging.
    kdVariationNames = {
        ksVariation_Default:    'defer-to-cimpl',
        ksVariation_16:         '16-bit',
        ksVariation_16f:        '16-bit w/ eflag checking and clearing',
        ksVariation_16_Addr32:  '16-bit w/ address prefix (Addr32)',
        ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
        ksVariation_16_Pre386:  '16-bit on pre-386 CPU',
        ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
        ksVariation_32:         '32-bit',
        ksVariation_32f:        '32-bit w/ eflag checking and clearing',
        ksVariation_32_Flat:    '32-bit flat and wide open CS, SS, DS and ES',
        ksVariation_32f_Flat:   '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
        ksVariation_32_Addr16:  '32-bit w/ address prefix (Addr16)',
        ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
        ksVariation_64:         '64-bit',
        ksVariation_64f:        '64-bit w/ eflag checking and clearing',
        ksVariation_64_FsGs:    '64-bit with memory accessed via FS or GS',
        ksVariation_64f_FsGs:   '64-bit with memory accessed via FS or GS and eflag checking and clearing',
        ksVariation_64_Addr32:  '64-bit w/ address prefix (Addr32)',
        ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
    };
    ## Set (dict used as a set) of the variations that check+clear EFLAGS.
    kdVariationsWithEflagsCheckingAndClearing = {
        ksVariation_16f: True,
        ksVariation_16f_Addr32: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32f: True,
        ksVariation_32f_Flat: True,
        ksVariation_32f_Addr16: True,
        ksVariation_64f: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## Set of the variations using flat (unsegmented) addressing.
    kdVariationsWithFlatAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
    };
    ## Set of the variations using 16-bit effective addressing.
    kdVariationsWithFlatAddr16 = {
        ksVariation_16: True,
        ksVariation_16f: True,
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32_Addr16: True,
        ksVariation_32f_Addr16: True,
    };
    ## Set of the variations using 32-bit effective addressing, excluding 64-bit mode.
    kdVariationsWithFlatAddr32No64 = {
        ksVariation_16_Addr32: True,
        ksVariation_16f_Addr32: True,
        ksVariation_32: True,
        ksVariation_32f: True,
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
    };
    ## @}

    ## IEM_CIMPL_F_XXX flags that we know.
    ## The value indicates whether it terminates the TB or not. The goal is to
    ## improve the recompiler so all but END_TB will be False.
    ##
    ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
    kdCImplFlags = {
        'IEM_CIMPL_F_MODE':                         False,
        'IEM_CIMPL_F_BRANCH_DIRECT':                False,
        'IEM_CIMPL_F_BRANCH_INDIRECT':              False,
        'IEM_CIMPL_F_BRANCH_RELATIVE':              False,
        'IEM_CIMPL_F_BRANCH_FAR':                   True,
        'IEM_CIMPL_F_BRANCH_CONDITIONAL':           False,
        # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
        'IEM_CIMPL_F_BRANCH_STACK':                 False,
        'IEM_CIMPL_F_BRANCH_STACK_FAR':             False,
        'IEM_CIMPL_F_RFLAGS':                       False,
        'IEM_CIMPL_F_INHIBIT_SHADOW':               False,
        'IEM_CIMPL_F_CHECK_IRQ_AFTER':              False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE':             False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER':   False, # (ignore)
        'IEM_CIMPL_F_STATUS_FLAGS':                 False,
        'IEM_CIMPL_F_VMEXIT':                       False,
        'IEM_CIMPL_F_FPU':                          False,
        'IEM_CIMPL_F_REP':                          False,
        'IEM_CIMPL_F_IO':                           False,
        'IEM_CIMPL_F_END_TB':                       True,
        'IEM_CIMPL_F_XCPT':                         True,
        'IEM_CIMPL_F_CALLS_CIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE':     False,
    };
427
428 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
429 self.oParent = oThreadedFunction # type: ThreadedFunction
430 ##< ksVariation_Xxxx.
431 self.sVariation = sVariation
432
433 ## Threaded function parameter references.
434 self.aoParamRefs = [] # type: List[ThreadedParamRef]
435 ## Unique parameter references.
436 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
437 ## Minimum number of parameters to the threaded function.
438 self.cMinParams = 0;
439
440 ## List/tree of statements for the threaded function.
441 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
442
443 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
444 self.iEnumValue = -1;
445
446 ## Native recompilation details for this variation.
447 self.oNativeRecomp = None;
448
449 def getIndexName(self):
450 sName = self.oParent.oMcBlock.sFunction;
451 if sName.startswith('iemOp_'):
452 sName = sName[len('iemOp_'):];
453 if self.oParent.oMcBlock.iInFunction == 0:
454 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
455 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
456
457 def getThreadedFunctionName(self):
458 sName = self.oParent.oMcBlock.sFunction;
459 if sName.startswith('iemOp_'):
460 sName = sName[len('iemOp_'):];
461 if self.oParent.oMcBlock.iInFunction == 0:
462 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
463 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
464
465 def getNativeFunctionName(self):
466 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
467
468 def getShortName(self):
469 sName = self.oParent.oMcBlock.sFunction;
470 if sName.startswith('iemOp_'):
471 sName = sName[len('iemOp_'):];
472 if self.oParent.oMcBlock.iInFunction == 0:
473 return '%s%s' % ( sName, self.sVariation, );
474 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
475
476 def isWithFlagsCheckingAndClearingVariation(self):
477 """
478 Checks if this is a variation that checks and clears EFLAGS.
479 """
480 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
481
482 #
483 # Analysis and code morphing.
484 #
485
    def raiseProblem(self, sMessage):
        """ Raises a problem. Delegates to the parent ThreadedFunction; does not return. """
        self.oParent.raiseProblem(sMessage);
489
    def warning(self, sMessage):
        """ Emits a warning. Delegates to the parent ThreadedFunction. """
        self.oParent.warning(sMessage);
493
494 def analyzeReferenceToType(self, sRef):
495 """
496 Translates a variable or structure reference to a type.
497 Returns type name.
498 Raises exception if unable to figure it out.
499 """
500 ch0 = sRef[0];
501 if ch0 == 'u':
502 if sRef.startswith('u32'):
503 return 'uint32_t';
504 if sRef.startswith('u8') or sRef == 'uReg':
505 return 'uint8_t';
506 if sRef.startswith('u64'):
507 return 'uint64_t';
508 if sRef.startswith('u16'):
509 return 'uint16_t';
510 elif ch0 == 'b':
511 return 'uint8_t';
512 elif ch0 == 'f':
513 return 'bool';
514 elif ch0 == 'i':
515 if sRef.startswith('i8'):
516 return 'int8_t';
517 if sRef.startswith('i16'):
518 return 'int16_t';
519 if sRef.startswith('i32'):
520 return 'int32_t';
521 if sRef.startswith('i64'):
522 return 'int64_t';
523 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
524 return 'uint8_t';
525 elif ch0 == 'p':
526 if sRef.find('-') < 0:
527 return 'uintptr_t';
528 if sRef.startswith('pVCpu->iem.s.'):
529 sField = sRef[len('pVCpu->iem.s.') : ];
530 if sField in g_kdIemFieldToType:
531 if g_kdIemFieldToType[sField][0]:
532 return g_kdIemFieldToType[sField][0];
533 elif ch0 == 'G' and sRef.startswith('GCPtr'):
534 return 'uint64_t';
535 elif ch0 == 'e':
536 if sRef == 'enmEffOpSize':
537 return 'IEMMODE';
538 elif ch0 == 'o':
539 if sRef.startswith('off32'):
540 return 'uint32_t';
541 elif sRef == 'cbFrame': # enter
542 return 'uint16_t';
543 elif sRef == 'cShift': ## @todo risky
544 return 'uint8_t';
545
546 self.raiseProblem('Unknown reference: %s' % (sRef,));
547 return None; # Shut up pylint 2.16.2.
548
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        sFnRef is the expression used as the callee (always starts with 'p').
        Returns the function pointer type name; raises a problem via
        raiseProblem() if the type cannot be determined.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?  A plain variable name (no '->' dereference).
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first;
            # the type name precedes the argument name in asDefArgs.
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then scan the function body for a line that looks like a variable
            # declaration of sFnRef (e.g. 'PFNXXX pfnFoo;' or '= ...').
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # 'const IEMOPXXX *' declarations map to the 'PC' pointer-to-const typedef.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx:  derive the member function pointer type
        # from the type of pImpl and the size suffix of the member name.
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            offBits   = sMember.rfind('U') + 1;
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
599
600 def analyze8BitGRegStmt(self, oStmt):
601 """
602 Gets the 8-bit general purpose register access details of the given statement.
603 ASSUMES the statement is one accessing an 8-bit GREG.
604 """
605 idxReg = 0;
606 if ( oStmt.sName.find('_FETCH_') > 0
607 or oStmt.sName.find('_REF_') > 0
608 or oStmt.sName.find('_TO_LOCAL') > 0):
609 idxReg = 1;
610
611 sRegRef = oStmt.asParams[idxReg];
612 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
613 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
614 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
615 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
616 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
617 else:
618 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
619
620 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
621 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
622 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
623 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
624 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
625 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
626 else:
627 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
628 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
629 sStdRef = 'bOther8Ex';
630
631 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
632 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
633 return (idxReg, sOrgExpr, sStdRef);
634
635
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ## Value: (index of the segment-register parameter to drop, FLAT MC name).
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':              ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':              ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':             ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM':     ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':           ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':           ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':          ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP_D80_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
        'IEM_MC_MEM_MAP_I16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
        'IEM_MC_MEM_MAP_I32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
        'IEM_MC_MEM_MAP_I64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
        'IEM_MC_MEM_MAP_R32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
        'IEM_MC_MEM_MAP_R64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
        'IEM_MC_MEM_MAP_R80_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
        'IEM_MC_MEM_MAP_U8_RW':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_U128_RW':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
        'IEM_MC_MEM_MAP_U128_RO':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
        'IEM_MC_MEM_MAP_U128_WO':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
        'IEM_MC_MEM_MAP_EX':                  ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
730
    ## Maps stack related MCs to their flat replacements:
    ## value is (32-bit flat replacement, 64-bit flat replacement).
    ## Entries mapping to themselves need no conversion in that mode.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':        ( 'IEM_MC_FLAT32_PUSH_U16',       'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':        ( 'IEM_MC_FLAT32_PUSH_U32',       'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':        ( 'IEM_MC_PUSH_U64',              'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':   ( 'IEM_MC_FLAT32_PUSH_U32_SREG',  'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16':    ( 'IEM_MC_FLAT32_POP_GREG_U16',   'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32':    ( 'IEM_MC_FLAT32_POP_GREG_U32',   'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64':    ( 'IEM_MC_POP_GREG_U64',          'IEM_MC_FLAT64_POP_GREG_U64', ),
    };
740
    ## Maps each variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_XXX MC to
    ## use for it (selected by effective address size / mode).
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
761
    def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Rewrites parameter references to their packed threaded-parameter names
        (sNewName) and renames/reshapes the MCs that need a _THREADED (or FLAT)
        form for this variation.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that is purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so the recorded offsets (offParam) stay valid as the string changes.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            # Splice the new (packed parameter) name in over the original reference text.
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    # The parameter lists differ per address size; must match the refs
                    # created for this MC in analyzeFindThreadedParamRefs.
                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (   oNewStmt.sName
                      in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                          'IEM_MC_REL_JMP_S8_AND_FINISH',  'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                          'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
                    # The advance/relative-jump forms take the instruction length; SET_RIP does not.
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
                        and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    # Suffix selection: PC64 for 64-bit variations, PC16 for pre-386, PC32
                    # otherwise; the 'f' (eflags checking & clearing) variations get _WITH_FLAGS.
                    oNewStmt.sName += '_THREADED';
                    if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in (self.ksVariation_64f, self.ksVariation_64f_FsGs, self.ksVariation_64f_Addr32):
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation == self.ksVariation_16_Pre386:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation == self.ksVariation_16f_Pre386:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.idxFn += 1;        # cbInstr is prepended, shifting fn and params by one.
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    # Drop the effective segment parameter (not needed in flat mode) and rename.
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    # Index 1 selects the 64-bit flat form, index 0 the 32-bit one.
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in (self.ksVariation_64,
                                                                                                         self.ksVariation_64f,))];


            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);

        return (aoThreadedStmts, iParamRef);
899
900
901 def analyzeConsolidateThreadedParamRefs(self):
902 """
903 Consolidate threaded function parameter references into a dictionary
904 with lists of the references to each variable/field.
905 """
906 # Gather unique parameters.
907 self.dParamRefs = {};
908 for oRef in self.aoParamRefs:
909 if oRef.sStdRef not in self.dParamRefs:
910 self.dParamRefs[oRef.sStdRef] = [oRef,];
911 else:
912 self.dParamRefs[oRef.sStdRef].append(oRef);
913
914 # Generate names for them for use in the threaded function.
915 dParamNames = {};
916 for sName, aoRefs in self.dParamRefs.items():
917 # Morph the reference expression into a name.
918 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
919 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
920 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
921 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
922 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
923 elif sName.find('.') >= 0 or sName.find('->') >= 0:
924 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
925 else:
926 sName += 'P';
927
928 # Ensure it's unique.
929 if sName in dParamNames:
930 for i in range(10):
931 if sName + str(i) not in dParamNames:
932 sName += str(i);
933 break;
934 dParamNames[sName] = True;
935
936 # Update all the references.
937 for oRef in aoRefs:
938 oRef.sNewName = sName;
939
940 # Organize them by size too for the purpose of optimize them.
941 dBySize = {} # type: Dict[str, str]
942 for sStdRef, aoRefs in self.dParamRefs.items():
943 if aoRefs[0].sType[0] != 'P':
944 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
945 assert(cBits <= 64);
946 else:
947 cBits = 64;
948
949 if cBits not in dBySize:
950 dBySize[cBits] = [sStdRef,]
951 else:
952 dBySize[cBits].append(sStdRef);
953
954 # Pack the parameters as best as we can, starting with the largest ones
955 # and ASSUMING a 64-bit parameter size.
956 self.cMinParams = 0;
957 offNewParam = 0;
958 for cBits in sorted(dBySize.keys(), reverse = True):
959 for sStdRef in dBySize[cBits]:
960 if offNewParam == 0 or offNewParam + cBits > 64:
961 self.cMinParams += 1;
962 offNewParam = cBits;
963 else:
964 offNewParam += cBits;
965 assert(offNewParam <= 64);
966
967 for oRef in self.dParamRefs[sStdRef]:
968 oRef.iNewParam = self.cMinParams - 1;
969 oRef.offNewParam = offNewParam - cBits;
970
971 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
972 if self.cMinParams >= 4:
973 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
974 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
975
976 return True;
977
978 ksHexDigits = '0123456789abcdefABCDEF';
979
    def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
        """
        Scans the statements for things that have to passed on to the threaded
        function (populates self.aoParamRefs).

        Works recursively through conditional branches.  The traversal order
        here must be matched exactly by analyzeMorphStmtForThreaded, which
        walks self.aoParamRefs in parallel with the statements.
        """
        for oStmt in aoStmts:
            # Some statements we can skip alltogether.
            if isinstance(oStmt, iai.McCppPreProc):
                continue;
            if oStmt.isCppStmt() and oStmt.fDecode:
                continue;
            if oStmt.sName in ('IEM_MC_BEGIN',):
                continue;

            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sValue is None:
                    continue;
                # For variable declarations w/ value, only the value param (idx 2) is scanned.
                aiSkipParams = { 0: True, 1: True, 3: True };
            else:
                aiSkipParams = {};

            # Several statements have implicit parameters and some have different parameters.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                               'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
                               'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
                               'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
                               'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
                self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));

            if (    oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
                and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
                self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));

            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                # This is being pretty presumptive about bRm always being the RM byte...
                assert len(oStmt.asParams) == 3;
                assert oStmt.asParams[1] == 'bRm';

                # The reference sets below must match the parameter lists built in
                # analyzeMorphStmtForThreaded for the corresponding variation classes.
                if self.sVariation in self.kdVariationsWithFlatAddr16:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
                                                             'uint16_t', oStmt, sStdRef = 'u16Disp'));
                elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
                    assert self.sVariation in self.kasVariationsWithAddressOnly64;
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
                                                             'uint4_t', oStmt, sStdRef = 'cbInstr'));
                aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.

            # 8-bit register accesses needs to have their index argument reworked to take REX into account.
            if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
                (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in self.kdVariationsWithFlatAddress
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
            # function pointer or function table pointer for it to work.
            if isinstance(oStmt, iai.McStmtCall):
                if oStmt.sFn[0] == 'p':
                    self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
                elif (    oStmt.sFn[0] != 'i'
                      and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
                      and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
                    self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
                aiSkipParams[oStmt.idxFn] = True;

            # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
            if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                assert oStmt.idxFn == 2;
                aiSkipParams[0] = True;


            # Check all the parameters for bogus references.
            for iParam, sParam in enumerate(oStmt.asParams):
                if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
                    # The parameter may contain a C expression, so we have to try
                    # extract the relevant bits, i.e. variables and fields while
                    # ignoring operators and parentheses.
                    offParam = 0;
                    while offParam < len(sParam):
                        # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
                        ch = sParam[offParam];
                        if ch.isalpha() or ch == '_':
                            offStart = offParam;
                            offParam += 1;
                            while offParam < len(sParam):
                                ch = sParam[offParam];
                                if not ch.isalnum() and ch != '_' and ch != '.':
                                    if ch != '-' or sParam[offParam + 1] != '>':
                                        # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
                                        if (    ch == '('
                                            and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
                                            offParam += len('(pVM)->') - 1;
                                        else:
                                            break;
                                    offParam += 1;
                                offParam += 1;
                            sRef = sParam[offStart : offParam];

                            # For register references, we pass the full register indexes instead as macros
                            # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
                            # threaded function will be more efficient if we just pass the register index
                            # as a 4-bit param.
                            if (   sRef.startswith('IEM_GET_MODRM')
                                or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;
                                self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
                                                                         oStmt, iParam, offStart));

                            # We can skip known variables.
                            elif sRef in self.oParent.dVariables:
                                pass;

                            # Skip certain macro invocations.
                            elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
                                          'IEM_GET_GUEST_CPU_FEATURES',
                                          'IEM_IS_GUEST_CPU_AMD',
                                          'IEM_IS_16BIT_CODE',
                                          'IEM_IS_32BIT_CODE',
                                          'IEM_IS_64BIT_CODE',
                                          ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;

                                # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
                                if sRef not in ('IEM_IS_GUEST_CPU_AMD',
                                                'IEM_IS_16BIT_CODE',
                                                'IEM_IS_32BIT_CODE',
                                                'IEM_IS_64BIT_CODE',
                                                ):
                                    offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                    if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
                                        offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
                                        while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
                                            offParam += 1;

                            # Skip constants, globals, types (casts), sizeof and macros.
                            elif (   sRef.startswith('IEM_OP_PRF_')
                                  or sRef.startswith('IEM_ACCESS_')
                                  or sRef.startswith('IEMINT_')
                                  or sRef.startswith('X86_GREG_')
                                  or sRef.startswith('X86_SREG_')
                                  or sRef.startswith('X86_EFL_')
                                  or sRef.startswith('X86_FSW_')
                                  or sRef.startswith('X86_FCW_')
                                  or sRef.startswith('X86_XCPT_')
                                  or sRef.startswith('IEMMODE_')
                                  or sRef.startswith('IEM_F_')
                                  or sRef.startswith('IEM_CIMPL_F_')
                                  or sRef.startswith('g_')
                                  or sRef.startswith('iemAImpl_')
                                  or sRef.startswith('kIemNativeGstReg_')
                                  or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
                                               'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
                                               'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
                                               'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
                                               'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
                                               'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
                                               'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
                                               'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
                                               'NIL_RTGCPTR',) ):
                                pass;

                            # Skip certain macro invocations.
                            # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
                            elif (   (    '.' not in sRef
                                      and '-' not in sRef
                                      and sRef not in ('pVCpu', ) )
                                  or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
                                self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
                                                                         oStmt, iParam, offStart));
                        # Number.
                        elif ch.isdigit():
                            # Hex literal (0x...) or plain decimal run.
                            if (    ch == '0'
                                and offParam + 2 <= len(sParam)
                                and sParam[offParam + 1] in 'xX'
                                and sParam[offParam + 2] in self.ksHexDigits ):
                                offParam += 2;
                                while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
                                    offParam += 1;
                            else:
                                while offParam < len(sParam) and sParam[offParam].isdigit():
                                    offParam += 1;
                        # Comment?
                        elif (    ch == '/'
                              and offParam + 4 <= len(sParam)
                              and sParam[offParam + 1] == '*'):
                            offParam += 2;
                            offNext = sParam.find('*/', offParam);
                            if offNext < offParam:
                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
                            offParam = offNext + 2;
                        # Whatever else.
                        else:
                            offParam += 1;

            # Traverse the branches of conditionals.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
                self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
        return True;
1209
1210 def analyzeVariation(self, aoStmts):
1211 """
1212 2nd part of the analysis, done on each variation.
1213
1214 The variations may differ in parameter requirements and will end up with
1215 slightly different MC sequences. Thus this is done on each individually.
1216
1217 Returns dummy True - raises exception on trouble.
1218 """
1219 # Now scan the code for variables and field references that needs to
1220 # be passed to the threaded function because they are related to the
1221 # instruction decoding.
1222 self.analyzeFindThreadedParamRefs(aoStmts);
1223 self.analyzeConsolidateThreadedParamRefs();
1224
1225 # Morph the statement stream for the block into what we'll be using in the threaded function.
1226 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1227 if iParamRef != len(self.aoParamRefs):
1228 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1229
1230 return True;
1231
    def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
        """
        Produces generic C++ statments that emits a call to the thread function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm, when given, is used as the first IEM_MC2_EMIT_CALL_N
        argument in place of the function index constant (self.getIndexName()),
        i.e. the call is made via a C variable holding the function index.
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            # Each 64-bit parameter is assembled by OR'ing together the (shifted)
            # references packed into it by analyzeConsolidateThreadedParamRefs.
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        # emit this mode check from the compilation loop. On the
        # plus side, this means we eliminate unnecessary call at
        # end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
        # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
        # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        # cchIndent = cchIndent));

        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        if asTbBranchedFlags:
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        # Force an IRQ check on the next instruction when requested.
        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1303
1304
1305class ThreadedFunction(object):
1306 """
1307 A threaded function.
1308 """
1309
1310 def __init__(self, oMcBlock: iai.McBlock) -> None:
1311 self.oMcBlock = oMcBlock # type: iai.McBlock
1312 # The remaining fields are only useful after analyze() has been called:
1313 ## Variations for this block. There is at least one.
1314 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1315 ## Variation dictionary containing the same as aoVariations.
1316 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1317 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1318 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1319 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1320 ## and those determined by analyzeCodeOperation().
1321 self.dsCImplFlags = {} # type: Dict[str, bool]
1322
1323 @staticmethod
1324 def dummyInstance():
1325 """ Gets a dummy instance. """
1326 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1327 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1328
1329 def hasWithFlagsCheckingAndClearingVariation(self):
1330 """
1331 Check if there is one or more with flags checking and clearing
1332 variations for this threaded function.
1333 """
1334 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1335 if sVarWithFlags in self.dVariations:
1336 return True;
1337 return False;
1338
1339 #
1340 # Analysis and code morphing.
1341 #
1342
1343 def raiseProblem(self, sMessage):
1344 """ Raises a problem. """
1345 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1346
1347 def warning(self, sMessage):
1348 """ Emits a warning. """
1349 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1350
1351 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1352 """ Scans the statements for MC variables and call arguments. """
1353 for oStmt in aoStmts:
1354 if isinstance(oStmt, iai.McStmtVar):
1355 if oStmt.sVarName in self.dVariables:
1356 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1357 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1358
1359 # There shouldn't be any variables or arguments declared inside if/
1360 # else blocks, but scan them too to be on the safe side.
1361 if isinstance(oStmt, iai.McStmtCond):
1362 cBefore = len(self.dVariables);
1363 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1364 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1365 #if len(self.dVariables) != cBefore:
1366 # raise Exception('Variables/arguments defined in conditional branches!');
1367 return True;
1368
1369 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
1370 """
1371 Analyzes the code looking clues as to additional side-effects.
1372
1373 Currently this is simply looking for branching and adding the relevant
1374 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1375 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1376 """
1377 for oStmt in aoStmts:
1378 # Set IEM_IMPL_C_F_BRANCH if we see any branching MCs.
1379 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1380 assert not fSeenConditional;
1381 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1382 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1383 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1384 if fSeenConditional:
1385 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1386
1387 # Check for CIMPL and AIMPL calls.
1388 if oStmt.sName.startswith('IEM_MC_CALL_'):
1389 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1390 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1391 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1392 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1393 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1394 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1395 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1396 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1397 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1398 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1399 else:
1400 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1401
1402 # Process branches of conditionals recursively.
1403 if isinstance(oStmt, iai.McStmtCond):
1404 self.analyzeCodeOperation(oStmt.aoIfBranch, True);
1405 if oStmt.aoElseBranch:
1406 self.analyzeCodeOperation(oStmt.aoElseBranch, True);
1407
1408 return True;
1409
    def analyze(self):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        Decodes the MC block, gathers variables and IEM_CIMPL_F_XXX clues,
        selects the variation set the block needs, and runs the per-variation
        analysis on each of them.

        Returns dummy True - raises exception on trouble.
        """

        # Check the block for errors before we proceed (will decode it).
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        # Decode the block into a list/tree of McStmt objects.
        aoStmts = self.oMcBlock.decode();

        # Scan the statements for local variables and call arguments (self.dVariables).
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        # Scan the code for IEM_CIMPL_F_ and other clues.
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        self.analyzeCodeOperation(aoStmts);
        # The three CALLS flags are mutually exclusive; booleans sum as 0/1 here.
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
            self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');

        # Create variations as needed.
        # Pure defer-to-CIMPL blocks only need the default variation; otherwise the
        # set depends on whether the block does memory accesses and on the mode
        # restrictions in dsMcFlags.
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
                                                   'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
                                                   'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
                                                   'IEM_MC_FETCH_MEM_U32' : True,
                                                   'IEM_MC_FETCH_MEM_U64' : True,
                                                   'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
                                                   'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
                                                   'IEM_MC_STORE_MEM_U32' : True,
                                                   'IEM_MC_STORE_MEM_U64' : True, }):
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        else:
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Blocks without any RIP-advancing/branching MCs don't need the eflags
        # checking & clearing variations; filter those out.
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U16_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U64_AND_FINISH': True,
                                            }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        # Continue the analysis on each variation.
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;
1502
    ## Used by emitThreadedCallStmts.
    ## Variations whose selection depends on address-size and/or segment
    ## prefixes; when any of these are present, emitThreadedCallStmts must
    ## fold the effective address mode and effective segment register into
    ## the value being switched on (the '| 8' and '| 16' bits).
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs:    True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs:   True,
        ThreadedFunctionVariation.ksVariation_32_Addr16:  True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat:    True,
        ThreadedFunctionVariation.ksVariation_32f_Flat:   True,
        ThreadedFunctionVariation.ksVariation_16_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
1516
    def emitThreadedCallStmts(self):
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        When the block has more than the default variation, a C 'switch' on the
        execution mode (plus prefix/eflags bits, see below) is generated that
        dispatches to the per-variation call statements.  If all case bodies
        turn out identical except for the function index, a compact form is
        emitted that only switches on the function enum value.
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            return self.aoVariations[0].emitThreadedCallStmts(0);

        #
        # Case statement sub-class.
        #
        dByVari = self.dVariations;
        #fDbg   = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            """A single 'case' label in the dispatch switch; sVarNm is None for fall-thru labels."""
            def __init__(self, sCond, sVarNm = None):
                self.sCond  = sCond;
                self.sVarNm = sVarNm;
                self.oVar   = dByVari[sVarNm] if sVarNm else None;
                # Pre-rendered call statements for the variation (indent parameter 8).
                self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;

            def toCode(self):
                # Renders 'case X:' plus the call body and 'break;' (label only for fall-thru).
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                # Compact form: 'case X:' plus an enmFunction assignment and 'break;'
                # (label only for fall-thru).
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                # Returns True if the case bodies are identical, disregarding only the
                # threaded function index argument of IEM_MC2_EMIT_CALL_* invocations.
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    if isinstance(oStmt, iai.McStmtCond):
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        if (    sParam != oThatStmt.asParams[iParam]
                            and (   iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        #
        fSimple      = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        # Bit 8: non-default effective address mode; bit 16: FS/GS/CS segment penalty.
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple       = False;                                              # threaded functions.
        # Bit 32: eflags-checking-and-clearing variant required.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_64BIT',       ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16',  ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_64BIT | 8',   ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32',       ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16',  ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8',   ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));

        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',          ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16',     None), # fall thru
                Case('IEMMODE_32BIT | 16',                                           None), # fall thru
                Case('IEMMODE_32BIT',                                                ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8',      None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8',                                            ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',          ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16',     None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16',                                           None), # fall thru
                    Case('IEMMODE_32BIT | 32',                                                ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8',      None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8',                                            ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',                                       ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',                                       ThrdFnVar.ksVariation_32f),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16', None), # fall thru
                Case('IEMMODE_16BIT',      ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_16BIT | 8',  ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32',      ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8',  ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));

        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        # Skip leading fall-thru labels to find the first case with a body.
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # Compact form: the switch only selects enmFunction, and a single copy
            # of the call body (from the first real case) follows the switch.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
1734
1735 def morphInputCode(self, aoStmts, fCallEmitted = False, cDepth = 0):
1736 """
1737 Adjusts (& copies) the statements for the input/decoder so it will emit
1738 calls to the right threaded functions for each block.
1739
1740 Returns list/tree of statements (aoStmts is not modified) and updated
1741 fCallEmitted status.
1742 """
1743 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
1744 aoDecoderStmts = [];
1745
1746 for oStmt in aoStmts:
1747 # Copy the statement. Make a deep copy to make sure we've got our own
1748 # copies of all instance variables, even if a bit overkill at the moment.
1749 oNewStmt = copy.deepcopy(oStmt);
1750 aoDecoderStmts.append(oNewStmt);
1751 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
1752 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
1753 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
1754
1755 # If we haven't emitted the threaded function call yet, look for
1756 # statements which it would naturally follow or preceed.
1757 if not fCallEmitted:
1758 if not oStmt.isCppStmt():
1759 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
1760 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
1761 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
1762 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
1763 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
1764 aoDecoderStmts.pop();
1765 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1766 aoDecoderStmts.append(oNewStmt);
1767 fCallEmitted = True;
1768 elif ( oStmt.fDecode
1769 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
1770 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
1771 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1772 fCallEmitted = True;
1773
1774 # Process branches of conditionals recursively.
1775 if isinstance(oStmt, iai.McStmtCond):
1776 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fCallEmitted, cDepth + 1);
1777 if oStmt.aoElseBranch:
1778 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fCallEmitted, cDepth + 1);
1779 else:
1780 fCallEmitted2 = False;
1781 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
1782
1783 if not fCallEmitted and cDepth == 0:
1784 self.raiseProblem('Unable to insert call to threaded function.');
1785
1786 return (aoDecoderStmts, fCallEmitted);
1787
1788
1789 def generateInputCode(self):
1790 """
1791 Modifies the input code.
1792 """
1793 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
1794
1795 if len(self.oMcBlock.aoStmts) == 1:
1796 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
1797 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
1798 if self.dsCImplFlags:
1799 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
1800 else:
1801 sCode += '0;\n';
1802 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1803 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1804 sIndent = ' ' * (min(cchIndent, 2) - 2);
1805 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
1806 return sCode;
1807
1808 # IEM_MC_BEGIN/END block
1809 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
1810 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1811 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1812
## Short alias for ThreadedFunctionVariation, used to keep the case table
## lines in ThreadedFunction.emitThreadedCallStmts shorter.
ThrdFnVar = ThreadedFunctionVariation;
1815
1816
1817class IEMThreadedGenerator(object):
1818 """
1819 The threaded code generator & annotator.
1820 """
1821
1822 def __init__(self):
1823 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
1824 self.oOptions = None # type: argparse.Namespace
1825 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
1826 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
1827
1828 #
1829 # Processing.
1830 #
1831
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the input files, creates and analyzes a ThreadedFunction for
        each MC block, populates aidxFirstFunctions, and prints parameter and
        variable/argument statistics to stderr.  Returns True on success;
        raises on problems (e.g. too many variables or too much stack).
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyze();
            for oVariation in oThreadedFunction.aoVariations:
                # Histograms: raw parameter reference count vs. packed/optimized count.
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams]      = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction       = 0;
        oThreadedFunction       = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            # Skip ahead past all the functions originating from this parser's file.
            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.displayStatistics(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs         = 0;
        cMaxVars         = 0;
        cMaxVarsAndArgs  = 0;
        cbMaxArgs        = 0;
        cbMaxVars        = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.cLocals >= 0:
                # Counts.
                assert oThreadedFunction.oMcBlock.cArgs >= 0;
                cMaxVars        = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
                cMaxArgs        = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
                cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
                # NOTE(review): the message says 'max 10' but the check fires at > 9
                # (i.e. allows at most 9) - confirm which limit is intended.
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
                # Calc stack allocation size:
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars        = max(cbMaxVars, cbVars);
                cbMaxArgs        = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        return True;
1909
1910 #
1911 # Output
1912 #
1913
    def generateLicenseHeader(self):
        """
        Returns the lines for a license header.

        The lines carry no trailing newlines; the callers join them with '\\n'.
        Note that the generated files are dual GPL-3.0-only / CDDL-1.0 licensed,
        unlike this generator itself.
        """
        return [
            '/*',
            ' * Autogenerated by $Id: IEMAllThrdPython.py 102593 2023-12-13 22:41:23Z vboxsync $ ',
            ' * Do not edit!',
            ' */',
            '',
            '/*',
            ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
            ' *',
            ' * This file is part of VirtualBox base platform packages, as',
            ' * available from https://www.virtualbox.org.',
            ' *',
            ' * This program is free software; you can redistribute it and/or',
            ' * modify it under the terms of the GNU General Public License',
            ' * as published by the Free Software Foundation, in version 3 of the',
            ' * License.',
            ' *',
            ' * This program is distributed in the hope that it will be useful, but',
            ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
            ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
            ' * General Public License for more details.',
            ' *',
            ' * You should have received a copy of the GNU General Public License',
            ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
            ' *',
            ' * The contents of this file may alternatively be used under the terms',
            ' * of the Common Development and Distribution License Version 1.0',
            ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
            ' * in the VirtualBox distribution, in which case the provisions of the',
            ' * CDDL are applicable instead of those of the GPL.',
            ' *',
            ' * You may elect to license modified versions of this file under the',
            ' * terms and conditions of either the GPL or the CDDL or both.',
            ' *',
            ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
            ' */',
            '',
            '',
            '',
        ];
1958
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry is a (function name suffix, argument count, native recomp) tuple.
    katBltIns = (
        ( 'DeferToCImpl0',                                      2, True  ),
        ( 'CheckIrq',                                           0, True  ),
        ( 'CheckMode',                                          1, True  ),
        ( 'CheckHwInstrBps',                                    0, False ),
        ( 'CheckCsLim',                                         1, True  ),

        ( 'CheckCsLimAndOpcodes',                               3, False ),
        ( 'CheckOpcodes',                                       3, False ),
        ( 'CheckOpcodesConsiderCsLim',                          3, False ),

        ( 'CheckCsLimAndPcAndOpcodes',                          3, False ),
        ( 'CheckPcAndOpcodes',                                  3, False ),
        ( 'CheckPcAndOpcodesConsiderCsLim',                     3, False ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb',           3, False ),
        ( 'CheckOpcodesAcrossPageLoadingTlb',                   3, False ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim',      2, False ),

        ( 'CheckCsLimAndOpcodesLoadingTlb',                     3, False ),
        ( 'CheckOpcodesLoadingTlb',                             3, False ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim',                3, False ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb',           2, False ),
        ( 'CheckOpcodesOnNextPageLoadingTlb',                   2, False ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim',      2, False ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb',            2, False ),
        ( 'CheckOpcodesOnNewPageLoadingTlb',                    2, False ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim',       2, False ),
    );
1992
    def generateThreadedFunctionsHeader(self, oOut):
        """
        Generates the threaded functions header file.
        Returns success indicator.

        Side effect: assigns oVariation.iEnumValue for every emitted variation;
        generateThreadedFunctionsSource asserts against these values later.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            '    kIemThreadedFunc_Invalid = 0,',
            '',
            '    /*',
            '     * Predefined',
            '     */',
        ];
        asLines += ['    kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        # The variation enum values start right after the built-ins.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                '    /*',
                '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                '     */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    oVariation.iEnumValue = iThreadedFunction;
                    asLines.append('    ' + oVariation.getIndexName() + ',');
        asLines += [
            '    kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
        ];

        oOut.write('\n'.join(asLines));
        return True;
2044
    ## C mask expressions indexed by bit count, used when unpacking sub-fields
    ## from the packed 64-bit parameters (see generateFunctionParameterUnpacking).
    ksBitsToIntMask = {
        1:  "UINT64_C(0x1)",
        2:  "UINT64_C(0x3)",
        4:  "UINT64_C(0xf)",
        8:  "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2053
2054 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2055 """
2056 Outputs code for unpacking parameters.
2057 This is shared by the threaded and native code generators.
2058 """
2059 aasVars = [];
2060 for aoRefs in oVariation.dParamRefs.values():
2061 oRef = aoRefs[0];
2062 if oRef.sType[0] != 'P':
2063 cBits = g_kdTypeInfo[oRef.sType][0];
2064 sType = g_kdTypeInfo[oRef.sType][2];
2065 else:
2066 cBits = 64;
2067 sType = oRef.sType;
2068
2069 sTypeDecl = sType + ' const';
2070
2071 if cBits == 64:
2072 assert oRef.offNewParam == 0;
2073 if sType == 'uint64_t':
2074 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2075 else:
2076 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2077 elif oRef.offNewParam == 0:
2078 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2079 else:
2080 sUnpack = '(%s)((%s >> %s) & %s);' \
2081 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2082
2083 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2084
2085 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2086 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2087 acchVars = [0, 0, 0, 0, 0];
2088 for asVar in aasVars:
2089 for iCol, sStr in enumerate(asVar):
2090 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2091 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2092 for asVar in sorted(aasVars):
2093 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2094 return True;
2095
    ## Names of the generic threaded function parameters (one per
    ## g_kcThreadedParams entry).
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut):
        """
        Generates the threaded functions source file.
        Returns success indicator.

        Emits one C function per threaded-function variation, followed by the
        function pointer, name and argument count tables (which run parallel
        to the IEMTHREADEDFUNCS enum generated by generateThreadedFunctionsHeader).
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big banner comment separating each variation section (130 columns wide).
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write('    RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    /*Invalid*/ NULL,',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    "Invalid",',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            '    0, /*Invalid*/',
        ];
        aasTables = (asFuncTable, asNameTable, asArgCntTab,);

        # Built-in (predefined) entries first, matching the enum order.
        for asTable in aasTables:
            asTable.extend((
                '',
                '    /*',
                '     * Predefined.',
                '     */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append('    iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asNameTable.append('    "BltIn_%s",' % (sFuncNm,));
            asArgCntTab.append('    %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));

        # Then the per-variation entries; the assert cross-checks the enum
        # values assigned by generateThreadedFunctionsHeader.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    '    /*',
                    '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    '     */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append('    /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append('    /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append('    /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write(  '\n'
                   + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write(  '\n'
                   + '\n'
                   + '\n'
                   + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                   + '\n'
                   + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write('\n');

        return True;
2231
2232 def generateNativeFunctionsHeader(self, oOut):
2233 """
2234 Generates the native recompiler functions header file.
2235 Returns success indicator.
2236 """
2237 if not self.oOptions.fNativeRecompilerEnabled:
2238 return True;
2239
2240 asLines = self.generateLicenseHeader();
2241
2242 # Prototype the function table.
2243 asLines += [
2244 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2245 '',
2246 ];
2247
2248 oOut.write('\n'.join(asLines));
2249 return True;
2250
    def generateNativeFunctionsSource(self, oOut):
        """
        Generates the native recompiler functions source file.

        Emits one IEM_DECL_IEMNATIVERECOMPFUNC_DEF function body per threaded
        function variation that has a recompilable native translation, then
        the g_apfnIemNativeRecompileFunctions table which runs parallel to
        g_apfnIemThreadedFunctions (NULL entries where no native recompiler
        exists).

        Returns success indicator.
        """
        # Without --native there is nothing to generate.
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # Outer loop: one banner + group per variation, in the fixed emit order.
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # The space padding right-aligns the closing '*' of the banner box.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Only variations with an actual native translation get a body here;
                # the rest become NULL slots in the table below.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header - doc comment points back at the originating
                    # MC block in the instruction source file.
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.  Native functions pull their arguments
                    # from the threaded call entry's parameter array.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        # Layout: entry 0 is the invalid slot, followed by the predefined
        # built-ins, followed by the per-variation entries.
        oOut.write( '\n'
                  + '\n'
                  + '/*\n'
                  + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                  + ' */\n'
                  + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                  + '{\n'
                  + '    /*Invalid*/ NULL,'
                  + '\n'
                  + '    /*\n'
                  + '     * Predefined.\n'
                  + '     */\n'
                  );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write('    iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # First variation entry comes right after the invalid slot and the
        # built-ins; the assert below checks this stays in sync with the enum.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write( '    /*\n'
                      + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                      + '     */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getNativeFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        # No native translation - keep the slot so indexes line up.
                        oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write( '};\n'
                  + '\n');
        return True;
2346
2347
2348 def getThreadedFunctionByIndex(self, idx):
2349 """
2350 Returns a ThreadedFunction object for the given index. If the index is
2351 out of bounds, a dummy is returned.
2352 """
2353 if idx < len(self.aoThreadedFuncs):
2354 return self.aoThreadedFuncs[idx];
2355 return ThreadedFunction.dummyInstance();
2356
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.

        Streams each selected parser's source lines to oOut, splicing in the
        generated replacement code wherever an MC block (threaded function)
        was found.  idxFile (1..4) selects which output file set to produce;
        a parser is included when its set index is -1 (all sets) or equals
        idxFile.

        Returns success indicator.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude     = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                # fInclude == -1 would mean the file is missing from the master table.
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # aidxFirstFunctions maps parser index -> index of its first
            # threaded function; we advance through both in lockstep.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine             = 0;
            while iLine < len(oParser.asLines):
                sLine  = oParser.asLines[iLine];
                iLine += 1;                 # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if (   iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block.  Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    # Begin line should hold exactly one IEM_MC_* macro invocation
                    # (IEM_MC_F_* doesn't count), unless it's a partial macro expansion.
                    assert (   (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    # Emit the prefix before the block, then the generated replacement.
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the block's end line and emit its tail.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert (   sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    # Walk the single physical line, replacing each block that
                    # begins (and ends) on it; offLine tracks the current offset.
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        # Sanity check: the replacement must be one of the known
                        # generated code shapes.
                        assert (   sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
2447
2448 def generateModifiedInput1(self, oOut):
2449 """
2450 Generates the combined modified input source/header file, part 1.
2451 Returns success indicator.
2452 """
2453 return self.generateModifiedInput(oOut, 1);
2454
2455 def generateModifiedInput2(self, oOut):
2456 """
2457 Generates the combined modified input source/header file, part 2.
2458 Returns success indicator.
2459 """
2460 return self.generateModifiedInput(oOut, 2);
2461
2462 def generateModifiedInput3(self, oOut):
2463 """
2464 Generates the combined modified input source/header file, part 3.
2465 Returns success indicator.
2466 """
2467 return self.generateModifiedInput(oOut, 3);
2468
2469 def generateModifiedInput4(self, oOut):
2470 """
2471 Generates the combined modified input source/header file, part 4.
2472 Returns success indicator.
2473 """
2474 return self.generateModifiedInput(oOut, 4);
2475
2476
2477 #
2478 # Main
2479 #
2480
2481 def main(self, asArgs):
2482 """
2483 C-like main function.
2484 Returns exit code.
2485 """
2486
2487 #
2488 # Parse arguments
2489 #
2490 sScriptDir = os.path.dirname(__file__);
2491 oParser = argparse.ArgumentParser(add_help = False);
2492 oParser.add_argument('asInFiles',
2493 metavar = 'input.cpp.h',
2494 nargs = '*',
2495 default = [os.path.join(sScriptDir, aoInfo[0])
2496 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2497 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2498 oParser.add_argument('--host-arch',
2499 metavar = 'arch',
2500 dest = 'sHostArch',
2501 action = 'store',
2502 default = None,
2503 help = 'The host architecture.');
2504
2505 oParser.add_argument('--out-thrd-funcs-hdr',
2506 metavar = 'file-thrd-funcs.h',
2507 dest = 'sOutFileThrdFuncsHdr',
2508 action = 'store',
2509 default = '-',
2510 help = 'The output header file for the threaded functions.');
2511 oParser.add_argument('--out-thrd-funcs-cpp',
2512 metavar = 'file-thrd-funcs.cpp',
2513 dest = 'sOutFileThrdFuncsCpp',
2514 action = 'store',
2515 default = '-',
2516 help = 'The output C++ file for the threaded functions.');
2517 oParser.add_argument('--out-n8ve-funcs-hdr',
2518 metavar = 'file-n8tv-funcs.h',
2519 dest = 'sOutFileN8veFuncsHdr',
2520 action = 'store',
2521 default = '-',
2522 help = 'The output header file for the native recompiler functions.');
2523 oParser.add_argument('--out-n8ve-funcs-cpp',
2524 metavar = 'file-n8tv-funcs.cpp',
2525 dest = 'sOutFileN8veFuncsCpp',
2526 action = 'store',
2527 default = '-',
2528 help = 'The output C++ file for the native recompiler functions.');
2529 oParser.add_argument('--native',
2530 dest = 'fNativeRecompilerEnabled',
2531 action = 'store_true',
2532 default = False,
2533 help = 'Enables generating the files related to native recompilation.');
2534 oParser.add_argument('--out-mod-input1',
2535 metavar = 'file-instr.cpp.h',
2536 dest = 'sOutFileModInput1',
2537 action = 'store',
2538 default = '-',
2539 help = 'The output C++/header file for modified input instruction files part 1.');
2540 oParser.add_argument('--out-mod-input2',
2541 metavar = 'file-instr.cpp.h',
2542 dest = 'sOutFileModInput2',
2543 action = 'store',
2544 default = '-',
2545 help = 'The output C++/header file for modified input instruction files part 2.');
2546 oParser.add_argument('--out-mod-input3',
2547 metavar = 'file-instr.cpp.h',
2548 dest = 'sOutFileModInput3',
2549 action = 'store',
2550 default = '-',
2551 help = 'The output C++/header file for modified input instruction files part 3.');
2552 oParser.add_argument('--out-mod-input4',
2553 metavar = 'file-instr.cpp.h',
2554 dest = 'sOutFileModInput4',
2555 action = 'store',
2556 default = '-',
2557 help = 'The output C++/header file for modified input instruction files part 4.');
2558 oParser.add_argument('--help', '-h', '-?',
2559 action = 'help',
2560 help = 'Display help and exit.');
2561 oParser.add_argument('--version', '-V',
2562 action = 'version',
2563 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
2564 % (__version__.split()[1], iai.__version__.split()[1],),
2565 help = 'Displays the version/revision of the script and exit.');
2566 self.oOptions = oParser.parse_args(asArgs[1:]);
2567 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
2568
2569 #
2570 # Process the instructions specified in the IEM sources.
2571 #
2572 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
2573 #
2574 # Generate the output files.
2575 #
2576 aaoOutputFiles = (
2577 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
2578 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
2579 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
2580 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
2581 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
2582 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
2583 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
2584 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
2585 );
2586 fRc = True;
2587 for sOutFile, fnGenMethod in aaoOutputFiles:
2588 if sOutFile == '-':
2589 fRc = fnGenMethod(sys.stdout) and fRc;
2590 else:
2591 try:
2592 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
2593 except Exception as oXcpt:
2594 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
2595 return 1;
2596 fRc = fnGenMethod(oOut) and fRc;
2597 oOut.close();
2598 if fRc:
2599 return 0;
2600
2601 return 1;
2602
2603
# Script entry point: run the generator and propagate its exit code.
if __name__ == '__main__':
    sys.exit(IEMThreadedGenerator().main(sys.argv));
2606
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette