VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 101984

Last change on this file since 101984 was 101984, checked in by vboxsync, 18 months ago

VMM/IEM: Added a flush mask for guest register shadows to the IEM_MC_DEFER_TO_CIMPL_X_RET macros to better manage register optimizations when recompiling to native code. bugref:10371

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 130.4 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 101984 2023-11-08 15:56:18Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 101984 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
if sys.version_info[0] >= 3:
    # Python 3 dropped the 'long' type; alias it to 'int' so code written for
    # the python 2 + 3 overlap keeps working.
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;
55
## Maps the C type names used by parameter references to their basic properties.
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type            )
    'int8_t':       (    8,    True, 'int8_t',         ),
    'int16_t':      (   16,    True, 'int16_t',        ),
    'int32_t':      (   32,    True, 'int32_t',        ),
    'int64_t':      (   64,    True, 'int64_t',        ),
    'uint4_t':      (    4,   False, 'uint8_t',        ),  # Not a real C type; stored in a uint8_t.
    'uint8_t':      (    8,   False, 'uint8_t',        ),
    'uint16_t':     (   16,   False, 'uint16_t',       ),
    'uint32_t':     (   32,   False, 'uint32_t',       ),
    'uint64_t':     (   64,   False, 'uint64_t',       ),
    'uintptr_t':    (   64,   False, 'uintptr_t',      ), # ASSUMES 64-bit host pointer size.
    'bool':         (    1,   False, 'bool',           ),
    'IEMMODE':      (    2,   False, 'IEMMODE',        ),  # Enum squeezed into 2 bits.
};

# Only for getTypeBitCount/variables.
## Superset of g_kdTypeInfo adding the larger VBox/IPRT structure types.
g_kdTypeInfo2 = {
    'RTFLOAT32U':        (      32, False, 'RTFLOAT32U',      ),
    'RTFLOAT64U':        (      64, False, 'RTFLOAT64U',      ),
    'RTUINT64U':         (      64, False, 'RTUINT64U',       ),
    'RTGCPTR':           (      64, False, 'RTGCPTR',         ),
    'RTPBCD80U':         (      80, False, 'RTPBCD80U',       ),
    'RTFLOAT80U':        (      80, False, 'RTFLOAT80U',      ),
    'IEMFPURESULT':      (   80+16, False, 'IEMFPURESULT',    ),
    'IEMFPURESULTTWO':   (80+16+80, False, 'IEMFPURESULTTWO', ),
    'RTUINT128U':        (     128, False, 'RTUINT128U',      ),
    'X86XMMREG':         (     128, False, 'X86XMMREG',       ),
    'IEMSSERESULT':      (  128+32, False, 'IEMSSERESULT',    ),
    'IEMMEDIAF2XMMSRC':  (     256, False, 'IEMMEDIAF2XMMSRC',),
    'RTUINT256U':        (     256, False, 'RTUINT256U',      ),
    'IEMPCMPISTRXSRC':   (     256, False, 'IEMPCMPISTRXSRC', ),
    'IEMPCMPESTRXSRC':   (     384, False, 'IEMPCMPESTRXSRC', ),
}; #| g_kdTypeInfo; - requires 3.9
# Dict union operator needs python 3.9, so merge the basic types the old way:
g_kdTypeInfo2.update(g_kdTypeInfo);
91
def getTypeBitCount(sType):
    """
    Translate a type to size in bits
    """
    # Known type?  Use the table entry.
    try:
        return g_kdTypeInfo2[sType][0];
    except KeyError:
        pass;
    # Pointer types ('Pxxx' / anything with a '*') are assumed 64-bit (host).
    if '*' in sType or sType[0] == 'P':
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
103
## Maps pVCpu->iem.s.* field names to the type used when passing them as
## threaded function parameters.  Fields mapped to (None,) may not be
## referenced by an MC block - analyzeReferenceToType() raises a problem
## for them because their values are decoder-internal state.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte':     ( None, ),
    'cbInstrBuf':           ( None, ),
    'pbInstrBuf':           ( None, ),
    'uInstrBufPc':          ( None, ),
    'cbInstrBufTotal':      ( None, ),
    'offCurInstrStart':     ( None, ),
    'cbOpcode':             ( None, ),
    'offOpcode':            ( None, ),
    'offModRm':             ( None, ),
    # Okay ones.
    'fPrefixes':            ( 'uint32_t', ),
    'uRexReg':              ( 'uint8_t', ),
    'uRexB':                ( 'uint8_t', ),
    'uRexIndex':            ( 'uint8_t', ),
    'iEffSeg':              ( 'uint8_t', ),
    'enmEffOpSize':         ( 'IEMMODE', ),
    'enmDefAddrMode':       ( 'IEMMODE', ),
    'enmEffAddrMode':       ( 'IEMMODE', ),
    'enmDefOpSize':         ( 'IEMMODE', ),
    'idxPrefix':            ( 'uint8_t', ),
    'uVex3rdReg':           ( 'uint8_t', ),
    'uVexLength':           ( 'uint8_t', ),
    'fEvexStuff':           ( 'uint8_t', ),
    'uFpuOpcode':           ( 'uint16_t', ),
};
131
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        """
        Initializes the parameter reference.

        sOrgRef  - The name / reference as it appears in the original code.
        sType    - The (typically derived) type of the reference.
        oStmt    - The statement (McStmt) making the reference.
        iParam   - Index of the statement parameter containing the reference,
                   None if the reference is implicit.
        offParam - Character offset of the reference within that parameter.
        sStdRef  - Optional standardized name to use instead of the normalized
                   sOrgRef (for complex expressions); marks the ref as custom.
        """
        ## The name / reference in the original code.
        self.sOrgRef    = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        self.sStdRef    = sStdRef if sStdRef else ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef = sStdRef is not None;
        ## The type (typically derived).
        self.sType      = sType;
        ## The statement making the reference.
        self.oStmt      = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam     = iParam;
        ## The offset in the parameter of the reference.
        self.offParam   = offParam;

        ## The variable name in the threaded function.
        self.sNewName     = 'x';
        ## The threaded function parameter this value is packed into.
        self.iNewParam    = 99;
        ## The bit offset in iNewParam.
        self.offNewParam  = 1024;  # Fixed: trailing semicolon for consistency with file style.
159
160
161class ThreadedFunctionVariation(object):
162 """ Threaded function variation. """
163
    ## @name Variations.
    ## These variations will match translation block selection/distinctions as well.
    ## The '_Xf' suffixed variants are the same as their unsuffixed siblings but
    ## additionally check + clear eflags (see kdVariationsWithEflagsCheckingAndClearing).
    ## @{
    ksVariation_Default     = '';               ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
    ksVariation_16          = '_16';            ##< 16-bit mode code (386+).
    ksVariation_16f         = '_16f';           ##< 16-bit mode code (386+), check+clear eflags.
    ksVariation_16_Addr32   = '_16_Addr32';     ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
    ksVariation_16f_Addr32  = '_16f_Addr32';    ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
    ksVariation_16_Pre386   = '_16_Pre386';     ##< 16-bit mode code, pre-386 CPU target.
    ksVariation_16f_Pre386  = '_16f_Pre386';    ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
    ksVariation_32          = '_32';            ##< 32-bit mode code (386+).
    ksVariation_32f         = '_32f';           ##< 32-bit mode code (386+), check+clear eflags.
    ksVariation_32_Flat     = '_32_Flat';       ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
    ksVariation_32f_Flat    = '_32f_Flat';      ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
    ksVariation_32_Addr16   = '_32_Addr16';     ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    ksVariation_32f_Addr16  = '_32f_Addr16';    ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64          = '_64';            ##< 64-bit mode code.
    ksVariation_64f         = '_64f';           ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_FsGs     = '_64_FsGs';       ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs    = '_64f_FsGs';      ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32   = '_64_Addr32';     ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32  = '_64f_Addr32';    ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
    ## All variations, including the defer-to-cimpl default.
    kasVariations           = (
        ksVariation_Default,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for MC blocks without memory addressing (no Addr/FsGs/Flat forms).
    kasVariationsWithoutAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Same as kasVariationsWithoutAddress, but excluding the pre-386 targets.
    kasVariationsWithoutAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Same, additionally excluding the 64-bit variants.
    kasVariationsWithoutAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
    );
    ## No-address variations minus the 64-bit ones (pre-386 included).
    kasVariationsWithoutAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
    );
    ## No-address variations for 64-bit only instructions.
    kasVariationsWithoutAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variations for MC blocks with memory addressing (all except the default).
    kasVariationsWithAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Same as kasVariationsWithAddress, but excluding the pre-386 targets.
    kasVariationsWithAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Same, additionally excluding the 64-bit variants.
    kasVariationsWithAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Address variations minus the 64-bit ones (pre-386 included).
    kasVariationsWithAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Address variations for 64-bit only instructions.
    kasVariationsWithAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for instructions only valid on pre-386 CPUs.
    kasVariationsOnlyPre386 = (
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
    );
    ## The order in which the variations are emitted into the generated sources.
    kasVariationsEmitOrder = (
        ksVariation_Default,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Human readable descriptions of the variations (for generated comments and such).
    kdVariationNames = {
        ksVariation_Default:    'defer-to-cimpl',
        ksVariation_16:         '16-bit',
        ksVariation_16f:        '16-bit w/ eflag checking and clearing',
        ksVariation_16_Addr32:  '16-bit w/ address prefix (Addr32)',
        ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
        ksVariation_16_Pre386:  '16-bit on pre-386 CPU',
        ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
        ksVariation_32:         '32-bit',
        ksVariation_32f:        '32-bit w/ eflag checking and clearing',
        ksVariation_32_Flat:    '32-bit flat and wide open CS, SS, DS and ES',
        ksVariation_32f_Flat:   '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
        ksVariation_32_Addr16:  '32-bit w/ address prefix (Addr16)',
        ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
        ksVariation_64:         '64-bit',
        ksVariation_64f:        '64-bit w/ eflag checking and clearing',
        ksVariation_64_FsGs:    '64-bit with memory accessed via FS or GS',
        ksVariation_64f_FsGs:   '64-bit with memory accessed via FS or GS and eflag checking and clearing',
        ksVariation_64_Addr32:  '64-bit w/ address prefix (Addr32)',
        ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
    };
    ## The variations that perform eflags checking and clearing (the '_Xf' ones).
    kdVariationsWithEflagsCheckingAndClearing = {
        ksVariation_16f: True,
        ksVariation_16f_Addr32: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32f: True,
        ksVariation_32f_Flat: True,
        ksVariation_32f_Addr16: True,
        ksVariation_64f: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## The variations where memory is accessed flat (no segmentation checking).
    kdVariationsWithFlatAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
    };
    ## The variations using 16-bit effective addressing.
    kdVariationsWithFlatAddr16 = {
        ksVariation_16: True,
        ksVariation_16f: True,
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32_Addr16: True,
        ksVariation_32f_Addr16: True,
    };
    ## The variations using 32-bit effective addressing outside 64-bit mode.
    kdVariationsWithFlatAddr32No64 = {
        ksVariation_16_Addr32: True,
        ksVariation_16f_Addr32: True,
        ksVariation_32: True,
        ksVariation_32f: True,
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
    };
    ## @}
395
    ## IEM_CIMPL_F_XXX flags that we know.
    ## The value indicates whether it terminates the TB or not. The goal is to
    ## improve the recompiler so all but END_TB will be False.
    ## Currently only BRANCH_FAR, END_TB and XCPT are marked as terminating.
    ##
    ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
    kdCImplFlags = {
        'IEM_CIMPL_F_MODE':                     False,
        'IEM_CIMPL_F_BRANCH_DIRECT':            False,
        'IEM_CIMPL_F_BRANCH_INDIRECT':          False,
        'IEM_CIMPL_F_BRANCH_RELATIVE':          False,
        'IEM_CIMPL_F_BRANCH_FAR':               True,
        'IEM_CIMPL_F_BRANCH_CONDITIONAL':       False,
        # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
        'IEM_CIMPL_F_BRANCH_STACK':             False,
        'IEM_CIMPL_F_BRANCH_STACK_FAR':         False,
        'IEM_CIMPL_F_RFLAGS':                   False,
        'IEM_CIMPL_F_INHIBIT_SHADOW':           False,
        'IEM_CIMPL_F_CHECK_IRQ_AFTER':          False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE':         False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
        'IEM_CIMPL_F_STATUS_FLAGS':             False,
        'IEM_CIMPL_F_VMEXIT':                   False,
        'IEM_CIMPL_F_FPU':                      False,
        'IEM_CIMPL_F_REP':                      False,
        'IEM_CIMPL_F_IO':                       False,
        'IEM_CIMPL_F_END_TB':                   True,
        'IEM_CIMPL_F_XCPT':                     True,
        'IEM_CIMPL_F_CALLS_CIMPL':              False,
        'IEM_CIMPL_F_CALLS_AIMPL':              False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
    };
427
    def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
        """
        Instantiates a variation of a threaded function.

        oThreadedFunction - The ThreadedFunction instance this is a variation of.
        sVariation        - One of the ksVariation_Xxxx constants.
        """
        ## The ThreadedFunction instance this is a variation of.
        self.oParent = oThreadedFunction # type: ThreadedFunction
        ##< ksVariation_Xxxx.
        self.sVariation = sVariation

        ## Threaded function parameter references.
        self.aoParamRefs = [] # type: List[ThreadedParamRef]
        ## Unique parameter references.
        self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
        ## Minimum number of parameters to the threaded function.
        self.cMinParams = 0;

        ## List/tree of statements for the threaded function.
        self.aoStmtsForThreadedFunction = [] # type: List[McStmt]

        ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
        self.iEnumValue = -1;

        ## Native recompilation details for this variation.
        self.oNativeRecomp = None;
448
449 def getIndexName(self):
450 sName = self.oParent.oMcBlock.sFunction;
451 if sName.startswith('iemOp_'):
452 sName = sName[len('iemOp_'):];
453 if self.oParent.oMcBlock.iInFunction == 0:
454 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
455 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
456
457 def getThreadedFunctionName(self):
458 sName = self.oParent.oMcBlock.sFunction;
459 if sName.startswith('iemOp_'):
460 sName = sName[len('iemOp_'):];
461 if self.oParent.oMcBlock.iInFunction == 0:
462 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
463 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
464
465 def getNativeFunctionName(self):
466 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
467
468 def getShortName(self):
469 sName = self.oParent.oMcBlock.sFunction;
470 if sName.startswith('iemOp_'):
471 sName = sName[len('iemOp_'):];
472 if self.oParent.oMcBlock.iInFunction == 0:
473 return '%s%s' % ( sName, self.sVariation, );
474 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
475
476 #
477 # Analysis and code morphing.
478 #
479
480 def raiseProblem(self, sMessage):
481 """ Raises a problem. """
482 self.oParent.raiseProblem(sMessage);
483
484 def warning(self, sMessage):
485 """ Emits a warning. """
486 self.oParent.warning(sMessage);
487
488 def analyzeReferenceToType(self, sRef):
489 """
490 Translates a variable or structure reference to a type.
491 Returns type name.
492 Raises exception if unable to figure it out.
493 """
494 ch0 = sRef[0];
495 if ch0 == 'u':
496 if sRef.startswith('u32'):
497 return 'uint32_t';
498 if sRef.startswith('u8') or sRef == 'uReg':
499 return 'uint8_t';
500 if sRef.startswith('u64'):
501 return 'uint64_t';
502 if sRef.startswith('u16'):
503 return 'uint16_t';
504 elif ch0 == 'b':
505 return 'uint8_t';
506 elif ch0 == 'f':
507 return 'bool';
508 elif ch0 == 'i':
509 if sRef.startswith('i8'):
510 return 'int8_t';
511 if sRef.startswith('i16'):
512 return 'int16_t';
513 if sRef.startswith('i32'):
514 return 'int32_t';
515 if sRef.startswith('i64'):
516 return 'int64_t';
517 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
518 return 'uint8_t';
519 elif ch0 == 'p':
520 if sRef.find('-') < 0:
521 return 'uintptr_t';
522 if sRef.startswith('pVCpu->iem.s.'):
523 sField = sRef[len('pVCpu->iem.s.') : ];
524 if sField in g_kdIemFieldToType:
525 if g_kdIemFieldToType[sField][0]:
526 return g_kdIemFieldToType[sField][0];
527 elif ch0 == 'G' and sRef.startswith('GCPtr'):
528 return 'uint64_t';
529 elif ch0 == 'e':
530 if sRef == 'enmEffOpSize':
531 return 'IEMMODE';
532 elif ch0 == 'o':
533 if sRef.startswith('off32'):
534 return 'uint32_t';
535 elif sRef == 'cbFrame': # enter
536 return 'uint16_t';
537 elif sRef == 'cShift': ## @todo risky
538 return 'uint8_t';
539
540 self.raiseProblem('Unknown reference: %s' % (sRef,));
541 return None; # Shut up pylint 2.16.2.
542
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        sFnRef is the expression used to make the call; it is expected to start
        with 'p' (a pointer).  Returns the function pointer type name; raises a
        problem if the type cannot be determined.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first.
            # The argument preceding the function pointer name is taken to be its type.
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable declaration,
            # e.g. '    PFNIEMAIMPLBINU8 pfnAImpl;' or '    const IEMOPBINSIZES *pImpl ='.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # 'const IEMOPXXX *' declarations map to the 'PCIEMOPXXX' pointer typedef.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx:
        #
        elif sFnRef.startswith('pImpl->pfn'):
            # Derive the member function pointer type from pImpl's structure type.
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            # The bit-width suffix of the member (e.g. 'U32') carries over to the typedef name.
            offBits   = sMember.rfind('U') + 1;
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
593
594 def analyze8BitGRegStmt(self, oStmt):
595 """
596 Gets the 8-bit general purpose register access details of the given statement.
597 ASSUMES the statement is one accessing an 8-bit GREG.
598 """
599 idxReg = 0;
600 if ( oStmt.sName.find('_FETCH_') > 0
601 or oStmt.sName.find('_REF_') > 0
602 or oStmt.sName.find('_TO_LOCAL') > 0):
603 idxReg = 1;
604
605 sRegRef = oStmt.asParams[idxReg];
606 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
607 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
608 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
609 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
610 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
611 else:
612 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
613
614 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
615 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
616 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
617 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
618 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
619 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
620 else:
621 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
622 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
623 sStdRef = 'bOther8Ex';
624
625 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
626 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
627 return (idxReg, sOrgExpr, sStdRef);
628
629
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ## Each entry maps an MC name to (integer, flat MC name).
    ## NOTE(review): the integer appears to be the index of the segment-register
    ## parameter the flat form drops - confirm against the conversion code.
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':              ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':              ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':          ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP':                     ( 2, 'IEM_MC_MEM_FLAT_MAP' ),
        'IEM_MC_MEM_MAP_U8_RW':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_EX':                  ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
707
    ## Maps the stack push/pop MCs to their flat counterparts.
    ## Each entry is (presumably) (32-bit flat replacement, 64-bit replacement);
    ## where no dedicated flat variant exists the original MC name is kept.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':      ( 'IEM_MC_FLAT32_PUSH_U16',      'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':      ( 'IEM_MC_FLAT32_PUSH_U32',      'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':      ( 'IEM_MC_PUSH_U64',             'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_U16':       ( 'IEM_MC_FLAT32_POP_U16',       'IEM_MC_FLAT64_POP_U16', ),
        'IEM_MC_POP_U32':       ( 'IEM_MC_FLAT32_POP_U32',       'IEM_MC_POP_U32', ),
        'IEM_MC_POP_U64':       ( 'IEM_MC_POP_U64',              'IEM_MC_FLAT64_POP_U64', ),
    };
717
    ## Maps each variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_XXX MC that
    ## replaces IEM_MC_CALC_RM_EFF_ADDR for it (see analyzeMorphStmtForThreaded).
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:         'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:  'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:  'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:  'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:         'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:    'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:   'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:         'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:    'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:   'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:  'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
738
    def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.

        The iParamRef parameter is the index of the next self.aoParamRefs entry
        to consume; it is threaded through the recursion into conditional
        branches so the reference list stays in lock-step with the statements.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that is purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so the recorded offParam offsets stay valid while substituting.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    # Advance iParamRef past all references belonging to this statement.
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            # Sanity: the original reference text must still be at the recorded offset
                            # (unless it is a custom/synthesized reference).
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            # Splice in the new (threaded function parameter) name.
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):
                    oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    # S8 jumps additionally need the effective operand size, except pre-386 where it is fixed.
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
                        and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in (self.ksVariation_64f, self.ksVariation_64f_FsGs, self.ksVariation_64f_Addr32):
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation == self.ksVariation_16_Pre386:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation == self.ksVariation_16f_Pre386:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    # Drop the effective segment parameter (it is implied flat); kdMemMcToFlatInfo
                    # supplies both the parameter index and the FLAT MC name.
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    # Index 1 selects the 64-bit flat stack variant, index 0 the 32-bit one.
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in (self.ksVariation_64,
                                                                                                         self.ksVariation_64f,))];


                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef)        = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef)  = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);

        return (aoThreadedStmts, iParamRef);
870
871
872 def analyzeConsolidateThreadedParamRefs(self):
873 """
874 Consolidate threaded function parameter references into a dictionary
875 with lists of the references to each variable/field.
876 """
877 # Gather unique parameters.
878 self.dParamRefs = {};
879 for oRef in self.aoParamRefs:
880 if oRef.sStdRef not in self.dParamRefs:
881 self.dParamRefs[oRef.sStdRef] = [oRef,];
882 else:
883 self.dParamRefs[oRef.sStdRef].append(oRef);
884
885 # Generate names for them for use in the threaded function.
886 dParamNames = {};
887 for sName, aoRefs in self.dParamRefs.items():
888 # Morph the reference expression into a name.
889 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
890 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
891 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
892 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
893 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
894 elif sName.find('.') >= 0 or sName.find('->') >= 0:
895 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
896 else:
897 sName += 'P';
898
899 # Ensure it's unique.
900 if sName in dParamNames:
901 for i in range(10):
902 if sName + str(i) not in dParamNames:
903 sName += str(i);
904 break;
905 dParamNames[sName] = True;
906
907 # Update all the references.
908 for oRef in aoRefs:
909 oRef.sNewName = sName;
910
911 # Organize them by size too for the purpose of optimize them.
912 dBySize = {} # type: Dict[str, str]
913 for sStdRef, aoRefs in self.dParamRefs.items():
914 if aoRefs[0].sType[0] != 'P':
915 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
916 assert(cBits <= 64);
917 else:
918 cBits = 64;
919
920 if cBits not in dBySize:
921 dBySize[cBits] = [sStdRef,]
922 else:
923 dBySize[cBits].append(sStdRef);
924
925 # Pack the parameters as best as we can, starting with the largest ones
926 # and ASSUMING a 64-bit parameter size.
927 self.cMinParams = 0;
928 offNewParam = 0;
929 for cBits in sorted(dBySize.keys(), reverse = True):
930 for sStdRef in dBySize[cBits]:
931 if offNewParam == 0 or offNewParam + cBits > 64:
932 self.cMinParams += 1;
933 offNewParam = cBits;
934 else:
935 offNewParam += cBits;
936 assert(offNewParam <= 64);
937
938 for oRef in self.dParamRefs[sStdRef]:
939 oRef.iNewParam = self.cMinParams - 1;
940 oRef.offNewParam = offNewParam - cBits;
941
942 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
943 if self.cMinParams >= 4:
944 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
945 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
946
947 return True;
948
    ## Characters that may appear in a C hexadecimal literal after the '0x'
    ## prefix (both lower and upper case accepted).
    ksHexDigits = '0123456789abcdefABCDEF';
950
    def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
        """
        Scans the statements for things that have to passed on to the threaded
        function (populates self.aoParamRefs).

        Note! The traversal order here must be matched exactly by
              analyzeMorphStmtForThreaded, which walks self.aoParamRefs in
              parallel with the statement tree.
        """
        for oStmt in aoStmts:
            # Some statements we can skip altogether.
            if isinstance(oStmt, iai.McCppPreProc):
                continue;
            if oStmt.isCppStmt() and oStmt.fDecode:
                continue;
            if oStmt.sName in ('IEM_MC_BEGIN',):
                continue;

            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sValue is None:
                    continue;
                # Only the value expression (param 2) needs scanning for variable
                # declarations; params 0/1/3 are presumably type/name/extra - TODO confirm.
                aiSkipParams = { 0: True, 1: True, 3: True };
            else:
                aiSkipParams = {};

            # Several statements have implicit parameters and some have different parameters.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                               'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
                               'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
                               'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
                               'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
                self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));

            # S8 jumps also need the effective operand size, except pre-386 where it is fixed.
            if (    oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
                and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
                self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));

            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                # This is being pretty presumptive about bRm always being the RM byte...
                assert len(oStmt.asParams) == 3;
                assert oStmt.asParams[1] == 'bRm';

                # The parameter set depends on the variation; uEffAddrInfo packs the
                # displacement and (for 32/64-bit addressing) the SIB byte.
                if self.sVariation in self.kdVariationsWithFlatAddr16:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
                                                             'uint16_t', oStmt, sStdRef = 'u16Disp'));
                elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
                    assert self.sVariation in self.kasVariationsWithAddressOnly64;
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
                                                             'uint4_t', oStmt, sStdRef = 'cbInstr'));
                    aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.

            # 8-bit register accesses needs to have their index argument reworked to take REX into account.
            if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
                (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in self.kdVariationsWithFlatAddress
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
            # function pointer or function table pointer for it to work.
            if isinstance(oStmt, iai.McStmtCall):
                if oStmt.sFn[0] == 'p':
                    self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
                elif (    oStmt.sFn[0] != 'i'
                      and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
                      and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
                    self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
                aiSkipParams[oStmt.idxFn] = True;

            # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
            if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                assert oStmt.idxFn == 1;
                aiSkipParams[0] = True;


            # Check all the parameters for bogus references.
            for iParam, sParam in enumerate(oStmt.asParams):
                if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
                    # The parameter may contain a C expression, so we have to try
                    # extract the relevant bits, i.e. variables and fields while
                    # ignoring operators and parentheses.
                    offParam = 0;
                    while offParam < len(sParam):
                        # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
                        ch = sParam[offParam];
                        if ch.isalpha() or ch == '_':
                            offStart = offParam;
                            offParam += 1;
                            while offParam < len(sParam):
                                ch = sParam[offParam];
                                if not ch.isalnum() and ch != '_' and ch != '.':
                                    # NOTE(review): sParam[offParam + 1] can index past the end if
                                    # '-' is the very last character - presumably never happens in
                                    # practice; confirm before relying on it.
                                    if ch != '-' or sParam[offParam + 1] != '>':
                                        # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
                                        if (    ch == '('
                                            and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
                                            offParam += len('(pVM)->') - 1;
                                        else:
                                            break;
                                    offParam += 1;
                                offParam += 1;
                            sRef = sParam[offStart : offParam];

                            # For register references, we pass the full register indexes instead as macros
                            # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
                            # threaded function will be more efficient if we just pass the register index
                            # as a 4-bit param.
                            if (   sRef.startswith('IEM_GET_MODRM')
                                or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;
                                self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
                                                                         oStmt, iParam, offStart));

                            # We can skip known variables.
                            elif sRef in self.oParent.dVariables:
                                pass;

                            # Skip certain macro invocations.
                            elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
                                          'IEM_GET_GUEST_CPU_FEATURES',
                                          'IEM_IS_GUEST_CPU_AMD',
                                          'IEM_IS_16BIT_CODE',
                                          'IEM_IS_32BIT_CODE',
                                          'IEM_IS_64BIT_CODE',
                                          ):
                                # Consume the argument list of the macro invocation.
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;

                                # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
                                if sRef not in ('IEM_IS_GUEST_CPU_AMD',
                                                'IEM_IS_16BIT_CODE',
                                                'IEM_IS_32BIT_CODE',
                                                'IEM_IS_64BIT_CODE',
                                                ):
                                    offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                    if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
                                        offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
                                        while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
                                            offParam += 1;

                            # Skip constants, globals, types (casts), sizeof and macros.
                            elif (   sRef.startswith('IEM_OP_PRF_')
                                  or sRef.startswith('IEM_ACCESS_')
                                  or sRef.startswith('IEMINT_')
                                  or sRef.startswith('X86_GREG_')
                                  or sRef.startswith('X86_SREG_')
                                  or sRef.startswith('X86_EFL_')
                                  or sRef.startswith('X86_FSW_')
                                  or sRef.startswith('X86_FCW_')
                                  or sRef.startswith('X86_XCPT_')
                                  or sRef.startswith('IEMMODE_')
                                  or sRef.startswith('IEM_F_')
                                  or sRef.startswith('IEM_CIMPL_F_')
                                  or sRef.startswith('g_')
                                  or sRef.startswith('iemAImpl_')
                                  or sRef.startswith('kIemNativeGstReg_')
                                  or sRef in ( 'int8_t',    'int16_t',    'int32_t',    'int64_t',
                                               'INT8_C',    'INT16_C',    'INT32_C',    'INT64_C',
                                               'UINT8_C',   'UINT16_C',   'UINT32_C',   'UINT64_C',
                                               'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
                                               'INT8_MAX',  'INT16_MAX',  'INT32_MAX',  'INT64_MAX',
                                               'INT8_MIN',  'INT16_MIN',  'INT32_MIN',  'INT64_MIN',
                                               'sizeof',    'NOREF',      'RT_NOREF',   'IEMMODE_64BIT',
                                               'RT_BIT_32', 'RT_BIT_64',  'true',       'false',
                                               'NIL_RTGCPTR',) ):
                                pass;

                            # Anything else is parameterized:
                            # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
                            elif (   (    '.' not in sRef
                                      and '-' not in sRef
                                      and sRef not in ('pVCpu', ) )
                                  or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
                                self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
                                                                         oStmt, iParam, offStart));
                        # Number.
                        elif ch.isdigit():
                            # NOTE(review): the '0x' check indexes offParam + 2 while only requiring
                            # offParam + 2 <= len(sParam); a parameter ending in '0x' would raise an
                            # IndexError - presumably never produced by the decoder; confirm.
                            if (    ch == '0'
                                and offParam + 2 <= len(sParam)
                                and sParam[offParam + 1] in 'xX'
                                and sParam[offParam + 2] in self.ksHexDigits ):
                                offParam += 2;
                                while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
                                    offParam += 1;
                            else:
                                while offParam < len(sParam) and sParam[offParam].isdigit():
                                    offParam += 1;
                        # Comment?
                        elif (    ch == '/'
                              and offParam + 4 <= len(sParam)
                              and sParam[offParam + 1] == '*'):
                            offParam += 2;
                            offNext = sParam.find('*/', offParam);
                            if offNext < offParam:
                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
                            offParam = offNext + 2;
                        # Whatever else.
                        else:
                            offParam += 1;

            # Traverse the branches of conditionals.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
                self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
        return True;
1180
1181 def analyzeVariation(self, aoStmts):
1182 """
1183 2nd part of the analysis, done on each variation.
1184
1185 The variations may differ in parameter requirements and will end up with
1186 slightly different MC sequences. Thus this is done on each individually.
1187
1188 Returns dummy True - raises exception on trouble.
1189 """
1190 # Now scan the code for variables and field references that needs to
1191 # be passed to the threaded function because they are related to the
1192 # instruction decoding.
1193 self.analyzeFindThreadedParamRefs(aoStmts);
1194 self.analyzeConsolidateThreadedParamRefs();
1195
1196 # Morph the statement stream for the block into what we'll be using in the threaded function.
1197 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1198 if iParamRef != len(self.aoParamRefs):
1199 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1200
1201 return True;
1202
    def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
        """
        Produces generic C++ statements that emits a call to the thread function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm, when given, is the name of a C++ variable holding the
        function index to emit in place of the constant from getIndexName().
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.  Each emitted argument ORs together
        # the source expressions packed into that 64-bit parameter (see
        # analyzeConsolidateThreadedParamRefs for iNewParam/offNewParam).
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));

        # For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        # mask and maybe emit additional checks.
        if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
            or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
            or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
                                         cchIndent = cchIndent));

        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags      = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        if asTbBranchedFlags:
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        # Force an IRQ check on the next instruction if requested.
        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1270
1271
1272class ThreadedFunction(object):
1273 """
1274 A threaded function.
1275 """
1276
1277 def __init__(self, oMcBlock: iai.McBlock) -> None:
1278 self.oMcBlock = oMcBlock # type: iai.McBlock
1279 # The remaining fields are only useful after analyze() has been called:
1280 ## Variations for this block. There is at least one.
1281 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1282 ## Variation dictionary containing the same as aoVariations.
1283 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1284 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1285 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1286 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1287 ## and those determined by analyzeCodeOperation().
1288 self.dsCImplFlags = {} # type: Dict[str, bool]
1289
1290 @staticmethod
1291 def dummyInstance():
1292 """ Gets a dummy instance. """
1293 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1294 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1295
1296 def raiseProblem(self, sMessage):
1297 """ Raises a problem. """
1298 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1299
1300 def warning(self, sMessage):
1301 """ Emits a warning. """
1302 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1303
1304 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1305 """ Scans the statements for MC variables and call arguments. """
1306 for oStmt in aoStmts:
1307 if isinstance(oStmt, iai.McStmtVar):
1308 if oStmt.sVarName in self.dVariables:
1309 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1310 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1311
1312 # There shouldn't be any variables or arguments declared inside if/
1313 # else blocks, but scan them too to be on the safe side.
1314 if isinstance(oStmt, iai.McStmtCond):
1315 cBefore = len(self.dVariables);
1316 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1317 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1318 if len(self.dVariables) != cBefore:
1319 raise Exception('Variables/arguments defined in conditional branches!');
1320 return True;
1321
1322 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
1323 """
1324 Analyzes the code looking clues as to additional side-effects.
1325
1326 Currently this is simply looking for branching and adding the relevant
1327 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1328 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1329 """
1330 for oStmt in aoStmts:
1331 # Set IEM_IMPL_C_F_BRANCH if we see any branching MCs.
1332 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1333 assert not fSeenConditional;
1334 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1335 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1336 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1337 if fSeenConditional:
1338 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1339
1340 # Check for CIMPL and AIMPL calls.
1341 if oStmt.sName.startswith('IEM_MC_CALL_'):
1342 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1343 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1344 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1345 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1346 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1347 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1348 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1349 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1350 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1351 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1352 else:
1353 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1354
1355 # Process branches of conditionals recursively.
1356 if isinstance(oStmt, iai.McStmtCond):
1357 self.analyzeCodeOperation(oStmt.aoIfBranch, True);
1358 if oStmt.aoElseBranch:
1359 self.analyzeCodeOperation(oStmt.aoElseBranch, True);
1360
1361 return True;
1362
    def analyze(self):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        Decodes the MC block, gathers variables/arguments and IEM_CIMPL_F_XXX
        clues, selects the set of variations to generate, and runs the
        per-variation analysis on each of them.

        Returns dummy True - raises exception on trouble.
        """

        # Check the block for errors before we proceed (will decode it).
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        # Decode the block into a list/tree of McStmt objects.
        aoStmts = self.oMcBlock.decode();

        # Scan the statements for local variables and call arguments (self.dVariables).
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        # Scan the code for IEM_CIMPL_F_ and other clues.
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        self.analyzeCodeOperation(aoStmts);
        # At most one of the CALLS_xxx classifications may apply to a block.
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
            self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');

        # Create variations as needed.
        # NOTE(review): the _4_RET and _5_RET variants are absent from this check,
        # unlike the list in analyzeFindThreadedParamRefs - presumably no block
        # uses them for deferral yet; confirm.
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        elif iai.McStmt.findStmtByNames(aoStmts, {'IEM_MC_CALC_RM_EFF_ADDR' : True,}):
            # Blocks doing effective address calculation get the "WithAddress" variation sets.
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        else:
            # No address calculation: the "WithoutAddress" variation sets.
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Blocks without any RIP-advancing/jumping MC don't need the
        # eflags-checking-and-clearing variations; filter those out.
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                          }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        # Continue the analysis on each variation.
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;
1444
    ## Used by emitThreadedCallStmts.
    ## Variations using a non-default addressing mode or segment base
    ## (Addr32/Addr16/FsGs/Flat) - presumably these require checking the
    ## instruction prefixes when emitting the call; confirm against the user.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs:    True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs:   True,
        ThreadedFunctionVariation.ksVariation_32_Addr16:  True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat:    True,
        ThreadedFunctionVariation.ksVariation_32f_Flat:   True,
        ThreadedFunctionVariation.ksVariation_16_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
1458
    def emitThreadedCallStmts(self):
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        When more than the default variation exists, a C 'switch' statement is
        generated whose value encodes the CPU mode in the low bits plus, when
        needed, an address-size-prefix bit (8), a segment-prefix bit (16) and
        an eflags-checking bit (32).
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            return self.aoVariations[0].emitThreadedCallStmts(0);

        #
        # Case statement sub-class.
        #
        dByVari = self.dVariations;
        #fDbg   = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            """One 'case' label of the generated switch; sVarNm is None for a fall-thru label."""
            def __init__(self, sCond, sVarNm = None):
                self.sCond  = sCond;    # The case expression (C source fragment).
                self.sVarNm = sVarNm;   # Variation key, or None for fall-thru.
                self.oVar   = dByVari[sVarNm] if sVarNm else None;
                self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;

            def toCode(self):
                # Renders 'case X:' followed by the full call body and 'break;'.
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                # Variant of toCode used by the all-cases-identical optimization:
                # the case only assigns the function enum value, the shared body
                # is emitted once after the switch.
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                # Compares the emitted bodies statement by statement, ignoring
                # only the function-index argument of IEM_MC2_EMIT_CALL_* calls.
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    # Paranoia for non-debug runs (asserts are stripped under -O):
                    if isinstance(oStmt, iai.McStmtCond):
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        # Mismatch is okay only for parameter #1 of an
                        # IEM_MC2_EMIT_CALL_* invocation when both sides pass
                        # their own variation's function index name.
                        if (   sParam != oThatStmt.asParams[iParam]
                            and (   iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        #
        fSimple      = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            # Value 8: set when the effective address size differs from the one
            # implied by the CPU mode (i.e. an address-size prefix is active).
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple = False; # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            # Value 32: set when pending eflags/breakpoint work requires the
            # eflags-checking-and-clearing variations.
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));

        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
                Case('IEMMODE_32BIT | 16', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16', None), # fall thru
                Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));

        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari:  # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # Optimized form: the switch only picks the enum value, the call
            # body is emitted once using the 'enmFunction' variable.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
1676
    def morphInputCode(self, aoStmts, fCallEmitted = False, cDepth = 0):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.

        @param aoStmts       Statements to process (deep-copied, not modified).
        @param fCallEmitted  Whether the threaded call has already been emitted
                             on this path (recursion state).
        @param cDepth        Conditional nesting depth; the could-not-insert
                             check only fires at depth 0.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for oStmt in aoStmts:
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            # Rewrite the IEM_MC_BEGIN flags argument to include the collected CIMPL flags.
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    # Raise/finish/CIMPL statements: insert the call *before* them
                    # (pop the copied statement, emit the call, re-append it).
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        aoDecoderStmts.pop();
                        aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;
                    # Decode-done markers: insert the call *after* them.
                    elif (    oStmt.fDecode
                          and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                               or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                        aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fCallEmitted, cDepth + 1);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fCallEmitted, cDepth + 1);
                else:
                    fCallEmitted2 = False;
                # The call only counts as emitted if *both* branches emitted it.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
1729
1730
    def generateInputCode(self):
        """
        Modifies the input code.

        Returns the replacement source text for the MC block with the threaded
        function call(s) inserted by morphInputCode; the first generated line
        is tagged with a '/* gen */' comment.
        """
        cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;  # round up to a multiple of 4.

        if len(self.oMcBlock.aoStmts) == 1:
            # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
            sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
            if self.dsCImplFlags:
                sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
            else:
                sCode += '0;\n';
            sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
                                                  cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
            # NOTE(review): min(cchIndent, 2) - 2 is <= 0 for any cchIndent, so
            # sIndent is always the empty string here.  Possibly a typo for
            # max(); confirm against the intended brace indentation.
            sIndent = ' ' * (min(cchIndent, 2) - 2);
            sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
            return sCode;

        # IEM_MC_BEGIN/END block
        assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
        return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
                                            cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1754
# Short alias for ThreadedFunctionVariation, used to keep the long case tables
# in emitThreadedCallStmts within line-length limits.
ThrdFnVar = ThreadedFunctionVariation;
1757
1758
1759class IEMThreadedGenerator(object):
1760 """
1761 The threaded code generator & annotator.
1762 """
1763
    def __init__(self):
        """
        Initializes empty generator state; populated by processInputFiles.
        """
        self.aoThreadedFuncs = []       # type: List[ThreadedFunction]  ##< One entry per parsed MC block.
        self.oOptions        = None     # type: argparse.Namespace      ##< Parsed command line options (set by caller).
        self.aoParsers       = []       # type: List[IEMAllInstPython.SimpleParser]
        self.aidxFirstFunctions = []    # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
1769
1770 #
1771 # Processing.
1772 #
1773
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the instruction files, creates and analyzes a ThreadedFunction
        for each MC block, prints parameter/stack statistics to stderr, and
        optionally analyzes each variation for native recompilation.
        Returns success indicator (True; raises on fatal problems).
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        dRawParamCounts = {};   # Histogram: number of parameter references -> count.
        dMinParamCounts = {};   # Histogram: minimum packed parameter count -> count.
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyze();
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction = 0;
        oThreadedFunction = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            # Skip past all functions originating from this parser's source file.
            # (Presumably getThreadedFunctionByIndex returns a sentinel with a
            # non-matching sSrcFile once past the end - TODO confirm.)
            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            print('todo:', file = sys.stderr);
            cTotal = 0;
            cNative = 0;
            for oThreadedFunction in self.aoThreadedFuncs:
                for oVariation in oThreadedFunction.aoVariations:
                    cTotal += 1;
                    oVariation.oNativeRecomp = ian.analyzeVariantForNativeRecomp(oVariation, sHostArch);
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        cNative += 1;
            print('todo: %.1f%% / %u out of %u threaded function variations are recompilable'
                  % (cNative * 100.0 / cTotal, cNative, cTotal), file = sys.stderr);
            if ian.g_dUnsupportedMcStmtLastOneStats:
                asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneStats, reverse = True,
                                   key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneStats[sSortKey]))[:16];
                print('todo:', file = sys.stderr);
                print('todo: Top %s variations with one unsupported statement dependency:' % (len(asTopKeys),),
                      file = sys.stderr);
                cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
                for sKey in asTopKeys:
                    print('todo: %*s = %s (%s%s)'
                          % (cchMaxKey, sKey, len(ian.g_dUnsupportedMcStmtLastOneStats[sKey]),
                             ', '.join([oVar.getShortName() for oVar in ian.g_dUnsupportedMcStmtLastOneStats[sKey][:5]]),
                             ',...' if len(ian.g_dUnsupportedMcStmtLastOneStats[sKey]) >= 5 else '', )
                          , file = sys.stderr);

                asTopKeys = sorted(ian.g_dUnsupportedMcStmtStats, reverse = True,
                                   key = lambda sSortKey: ian.g_dUnsupportedMcStmtStats[sSortKey])[:16];
                print('todo:', file = sys.stderr);
                print('todo: Top %d most used unimplemented statements:' % (len(asTopKeys),), file = sys.stderr);
                cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
                # NOTE(review): prints two entries per line; an odd len(asTopKeys)
                # would raise IndexError on asTopKeys[i + 1] - confirm the stats
                # dict always yields an even (or >= 16) number of entries.
                for i in range(0, len(asTopKeys), 2):
                    print('todo: %*s = %4d %*s = %4d'
                          % ( cchMaxKey, asTopKeys[i], ian.g_dUnsupportedMcStmtStats[asTopKeys[i]],
                              cchMaxKey, asTopKeys[i + 1], ian.g_dUnsupportedMcStmtStats[asTopKeys[i + 1]],),
                          file = sys.stderr);
                print('todo:', file = sys.stderr);

            if ian.g_dUnsupportedMcStmtLastOneVarStats:
                asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneVarStats, reverse = True,
                                   key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneVarStats[sSortKey]))[:16];
                print('todo:', file = sys.stderr);
                print('todo: Top %s variations with variables and 1-2 unsupported statement dependency:' % (len(asTopKeys),),
                      file = sys.stderr);
                cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
                for sKey in asTopKeys:
                    print('todo: %*s = %s (%s%s)'
                          % (cchMaxKey, sKey, len(ian.g_dUnsupportedMcStmtLastOneVarStats[sKey]),
                             ', '.join([oVar.getShortName() for oVar in ian.g_dUnsupportedMcStmtLastOneVarStats[sKey][:5]]),
                             ',...' if len(ian.g_dUnsupportedMcStmtLastOneVarStats[sKey]) >= 5 else '', )
                          , file = sys.stderr);


        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs = 0;
        cMaxVars = 0;
        cMaxVarsAndArgs = 0;
        cbMaxArgs = 0;
        cbMaxVars = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.cLocals >= 0:
                # Counts.
                assert oThreadedFunction.oMcBlock.cArgs >= 0;
                cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
                cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
                cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
                # Calc stack allocation size:
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;     # round each up to a whole uint64_t.
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars = max(cbMaxVars, cbVars);
                cbMaxArgs = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        return True;
1901
1902 #
1903 # Output
1904 #
1905
1906 def generateLicenseHeader(self):
1907 """
1908 Returns the lines for a license header.
1909 """
1910 return [
1911 '/*',
1912 ' * Autogenerated by $Id: IEMAllThrdPython.py 101984 2023-11-08 15:56:18Z vboxsync $ ',
1913 ' * Do not edit!',
1914 ' */',
1915 '',
1916 '/*',
1917 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
1918 ' *',
1919 ' * This file is part of VirtualBox base platform packages, as',
1920 ' * available from https://www.virtualbox.org.',
1921 ' *',
1922 ' * This program is free software; you can redistribute it and/or',
1923 ' * modify it under the terms of the GNU General Public License',
1924 ' * as published by the Free Software Foundation, in version 3 of the',
1925 ' * License.',
1926 ' *',
1927 ' * This program is distributed in the hope that it will be useful, but',
1928 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
1929 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
1930 ' * General Public License for more details.',
1931 ' *',
1932 ' * You should have received a copy of the GNU General Public License',
1933 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
1934 ' *',
1935 ' * The contents of this file may alternatively be used under the terms',
1936 ' * of the Common Development and Distribution License Version 1.0',
1937 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
1938 ' * in the VirtualBox distribution, in which case the provisions of the',
1939 ' * CDDL are applicable instead of those of the GPL.',
1940 ' *',
1941 ' * You may elect to license modified versions of this file under the',
1942 ' * terms and conditions of either the GPL or the CDDL or both.',
1943 ' *',
1944 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
1945 ' */',
1946 '',
1947 '',
1948 '',
1949 ];
1950
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry is (name suffix, cUserArgs, fHaveNativeRecompilerFunc);
    ## the third field selects between iemNativeRecompFunc_BltIn_* and NULL
    ## in generateNativeFunctionsSource.
    katBltIns = (
        ( 'DeferToCImpl0',                                      2, True  ),
        ( 'CheckIrq',                                           0, True  ),
        ( 'CheckMode',                                          1, True  ),
        ( 'CheckHwInstrBps',                                    0, False ),
        ( 'CheckCsLim',                                         1, False ),

        ( 'CheckCsLimAndOpcodes',                               3, False ),
        ( 'CheckOpcodes',                                       3, False ),
        ( 'CheckOpcodesConsiderCsLim',                          3, False ),

        ( 'CheckCsLimAndPcAndOpcodes',                          3, False ),
        ( 'CheckPcAndOpcodes',                                  3, False ),
        ( 'CheckPcAndOpcodesConsiderCsLim',                     3, False ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb',           3, False ),
        ( 'CheckOpcodesAcrossPageLoadingTlb',                   3, False ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim',      2, False ),

        ( 'CheckCsLimAndOpcodesLoadingTlb',                     3, False ),
        ( 'CheckOpcodesLoadingTlb',                             3, False ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim',                3, False ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb',           2, False ),
        ( 'CheckOpcodesOnNextPageLoadingTlb',                   2, False ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim',      2, False ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb',            2, False ),
        ( 'CheckOpcodesOnNewPageLoadingTlb',                    2, False ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim',       2, False ),
    );
1984
    def generateThreadedFunctionsHeader(self, oOut):
        """
        Generates the threaded functions header file.
        Returns success indicator.

        Side effect: assigns oVariation.iEnumValue for every variation; the
        source generator asserts these values when emitting the tables, so
        both generators must iterate in the same order.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            '    kIemThreadedFunc_Invalid = 0,',
            '',
            '    /*',
            '     * Predefined',
            '     */',
        ];
        asLines += ['    kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                '    /*',
                '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                '     */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Record the enum value for use by the table generators.
                    oVariation.iEnumValue = iThreadedFunction;
                    asLines.append('    ' + oVariation.getIndexName() + ',');
        asLines += [
            '    kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
        ];

        oOut.write('\n'.join(asLines));
        return True;
2036
    ## Maps a field bit width to the UINT64_C mask expression used when
    ## unpacking packed parameter fields in generateFunctionParameterUnpacking.
    ksBitsToIntMask = {
        1:  "UINT64_C(0x1)",
        2:  "UINT64_C(0x3)",
        4:  "UINT64_C(0xf)",
        8:  "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2045
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        @param oVariation  The variation whose dParamRefs describe the packing.
        @param oOut        Output stream the C declarations are written to.
        @param asParams    C expressions for the packed uint64_t parameters
                           (e.g. 'uParam0' or 'pCallEntry->auParams[0]').
        """
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef = aoRefs[0];
            # Pointer types occupy a whole 64-bit parameter; other types get
            # their bit count and C type name from g_kdTypeInfo.
            if oRef.sType[0] != 'P':
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            if cBits == 64:
                # Full-width value: no mask/shift, just an optional cast.
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                # Field at bit 0: mask only.
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                # Field at a higher bit offset: shift then mask.
                sUnpack = '(%s)((%s >> %s) & %s);' \
                        % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # The 'param:offset' key in column 0 sorts declarations in packing order.
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
        # Compute column widths so the emitted declarations line up.
        acchVars = [0, 0, 0, 0, 0];
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = '    %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
        return True;
2087
    ## Names of the generic uint64_t parameters of a threaded function; used
    ## as unpack sources and RT_NOREF arguments by generateThreadedFunctionsSource.
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut):
        """
        Generates the threaded functions source file.
        Returns success indicator.

        Emits one static function per variation, then three parallel tables
        (function pointers, names, used-argument counts) indexed by the
        IEMTHREADEDFUNCS enum produced by generateThreadedFunctionsHeader.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Banner comment separating each variation group.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write('    RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    /*Invalid*/ NULL,',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    "Invalid",',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            '    0, /*Invalid*/',
        ];
        aasTables = (asFuncTable, asNameTable, asArgCntTab,);

        # Built-in (predefined) entries first, in katBltIns order.
        for asTable in aasTables:
            asTable.extend((
                '',
                '    /*',
                '     * Predefined.',
                '     */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append('    iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asNameTable.append('    "BltIn_%s",' % (sFuncNm,));
            asArgCntTab.append('    %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));

        # Then the per-variation entries, asserting the enum values assigned
        # by generateThreadedFunctionsHeader stay in sync.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    '    /*',
                    '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    '     */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append('    /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append('    /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append('    /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write(  '\n'
                   + '\n');
        oOut.write('\n'.join(asFuncTable));
        # The name table is only needed for ring-3 / logging builds.
        oOut.write(  '\n'
                   + '\n'
                   + '\n'
                   + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                   + '\n'
                   + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write('\n');

        return True;
2223
2224 def generateNativeFunctionsHeader(self, oOut):
2225 """
2226 Generates the native recompiler functions header file.
2227 Returns success indicator.
2228 """
2229 if not self.oOptions.fNativeRecompilerEnabled:
2230 return True;
2231
2232 asLines = self.generateLicenseHeader();
2233
2234 # Prototype the function table.
2235 asLines += [
2236 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2237 '',
2238 ];
2239
2240 oOut.write('\n'.join(asLines));
2241 return True;
2242
    def generateNativeFunctionsSource(self, oOut):
        """
        Generates the native recompiler functions source file.
        Returns success indicator.

        Emits one C function per recompilable threaded-function variation,
        followed by g_apfnIemNativeRecompileFunctions, a table running
        parallel to g_apfnIemThreadedFunctions (NULL entries for variations
        that could not be recompiled).
        """
        # Nothing to do unless native recompilation was requested on the command line.
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # Grouped by variation, in the same order used when building the
        # threaded function tables, so enum values line up (asserted below).
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big banner comment separating each variation section in the output.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Only emit a body when this variation exists and was successfully recompiled.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header: doc comment locating the originating MC block.
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters from the three call-entry parameter slots.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + ' /*Invalid*/ NULL,'
                   + '\n'
                   + ' /*\n'
                   + ' * Predefined.\n'
                   + ' */\n'
                   );
        # Built-in (predefined) entries come right after the Invalid slot.
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # Table index bookkeeping; must stay in sync with the enum values
        # assigned when the threaded function tables were generated.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  ' /*\n'
                       + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + ' */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getNativeFunctionName();
                    # NULL for variations we have no native recompilation for.
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
2338
2339
2340 def getThreadedFunctionByIndex(self, idx):
2341 """
2342 Returns a ThreadedFunction object for the given index. If the index is
2343 out of bounds, a dummy is returned.
2344 """
2345 if idx < len(self.aoThreadedFuncs):
2346 return self.aoThreadedFuncs[idx];
2347 return ThreadedFunction.dummyInstance();
2348
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.
        Returns success indicator.

        The selected input files are replayed verbatim, except that each MC
        block is replaced by the code generated for its threaded function.
        idxFile picks which output file set a given input belongs to (see
        iai.g_aaoAllInstrFilesAndDefaultMapAndSet).
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude = -1; # -1 = not found in the info table; trips the assertion below.
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile); # aoInfo[2] == -1 means "include in every set".
                    break;
            if fInclude is not True:
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the raw input lines, copying them thru while substituting
            # each MC block with generated code.  Note that the MC block line
            # numbers (iBeginLine/iEndLine) are 1-based while iLine below
            # starts 0-based and is bumped before use.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine = 0;
            while iLine < len(oParser.asLines):
                sLine = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if (   iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block. Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    # Sanity: the begin line holds exactly one IEM_MC_ statement
                    # (IEM_MC_F_ flag tokens excluded), unless it came from a
                    # partial macro expansion.
                    assert (   (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Jump to the block's end line and emit whatever trails it.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert (   sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    # offLine tracks how far into sLine we have emitted so far.
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        # Sanity-check the shape of the generated replacement code.
                        assert (   sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
2439
2440 def generateModifiedInput1(self, oOut):
2441 """
2442 Generates the combined modified input source/header file, part 1.
2443 Returns success indicator.
2444 """
2445 return self.generateModifiedInput(oOut, 1);
2446
2447 def generateModifiedInput2(self, oOut):
2448 """
2449 Generates the combined modified input source/header file, part 2.
2450 Returns success indicator.
2451 """
2452 return self.generateModifiedInput(oOut, 2);
2453
2454 def generateModifiedInput3(self, oOut):
2455 """
2456 Generates the combined modified input source/header file, part 3.
2457 Returns success indicator.
2458 """
2459 return self.generateModifiedInput(oOut, 3);
2460
2461 def generateModifiedInput4(self, oOut):
2462 """
2463 Generates the combined modified input source/header file, part 4.
2464 Returns success indicator.
2465 """
2466 return self.generateModifiedInput(oOut, 4);
2467
2468
2469 #
2470 # Main
2471 #
2472
2473 def main(self, asArgs):
2474 """
2475 C-like main function.
2476 Returns exit code.
2477 """
2478
2479 #
2480 # Parse arguments
2481 #
2482 sScriptDir = os.path.dirname(__file__);
2483 oParser = argparse.ArgumentParser(add_help = False);
2484 oParser.add_argument('asInFiles',
2485 metavar = 'input.cpp.h',
2486 nargs = '*',
2487 default = [os.path.join(sScriptDir, aoInfo[0])
2488 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2489 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2490 oParser.add_argument('--host-arch',
2491 metavar = 'arch',
2492 dest = 'sHostArch',
2493 action = 'store',
2494 default = None,
2495 help = 'The host architecture.');
2496
2497 oParser.add_argument('--out-thrd-funcs-hdr',
2498 metavar = 'file-thrd-funcs.h',
2499 dest = 'sOutFileThrdFuncsHdr',
2500 action = 'store',
2501 default = '-',
2502 help = 'The output header file for the threaded functions.');
2503 oParser.add_argument('--out-thrd-funcs-cpp',
2504 metavar = 'file-thrd-funcs.cpp',
2505 dest = 'sOutFileThrdFuncsCpp',
2506 action = 'store',
2507 default = '-',
2508 help = 'The output C++ file for the threaded functions.');
2509 oParser.add_argument('--out-n8ve-funcs-hdr',
2510 metavar = 'file-n8tv-funcs.h',
2511 dest = 'sOutFileN8veFuncsHdr',
2512 action = 'store',
2513 default = '-',
2514 help = 'The output header file for the native recompiler functions.');
2515 oParser.add_argument('--out-n8ve-funcs-cpp',
2516 metavar = 'file-n8tv-funcs.cpp',
2517 dest = 'sOutFileN8veFuncsCpp',
2518 action = 'store',
2519 default = '-',
2520 help = 'The output C++ file for the native recompiler functions.');
2521 oParser.add_argument('--native',
2522 dest = 'fNativeRecompilerEnabled',
2523 action = 'store_true',
2524 default = False,
2525 help = 'Enables generating the files related to native recompilation.');
2526 oParser.add_argument('--out-mod-input1',
2527 metavar = 'file-instr.cpp.h',
2528 dest = 'sOutFileModInput1',
2529 action = 'store',
2530 default = '-',
2531 help = 'The output C++/header file for modified input instruction files part 1.');
2532 oParser.add_argument('--out-mod-input2',
2533 metavar = 'file-instr.cpp.h',
2534 dest = 'sOutFileModInput2',
2535 action = 'store',
2536 default = '-',
2537 help = 'The output C++/header file for modified input instruction files part 2.');
2538 oParser.add_argument('--out-mod-input3',
2539 metavar = 'file-instr.cpp.h',
2540 dest = 'sOutFileModInput3',
2541 action = 'store',
2542 default = '-',
2543 help = 'The output C++/header file for modified input instruction files part 3.');
2544 oParser.add_argument('--out-mod-input4',
2545 metavar = 'file-instr.cpp.h',
2546 dest = 'sOutFileModInput4',
2547 action = 'store',
2548 default = '-',
2549 help = 'The output C++/header file for modified input instruction files part 4.');
2550 oParser.add_argument('--help', '-h', '-?',
2551 action = 'help',
2552 help = 'Display help and exit.');
2553 oParser.add_argument('--version', '-V',
2554 action = 'version',
2555 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
2556 % (__version__.split()[1], iai.__version__.split()[1],),
2557 help = 'Displays the version/revision of the script and exit.');
2558 self.oOptions = oParser.parse_args(asArgs[1:]);
2559 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
2560
2561 #
2562 # Process the instructions specified in the IEM sources.
2563 #
2564 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
2565 #
2566 # Generate the output files.
2567 #
2568 aaoOutputFiles = (
2569 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
2570 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
2571 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
2572 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
2573 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
2574 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
2575 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
2576 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
2577 );
2578 fRc = True;
2579 for sOutFile, fnGenMethod in aaoOutputFiles:
2580 if sOutFile == '-':
2581 fRc = fnGenMethod(sys.stdout) and fRc;
2582 else:
2583 try:
2584 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
2585 except Exception as oXcpt:
2586 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
2587 return 1;
2588 fRc = fnGenMethod(oOut) and fRc;
2589 oOut.close();
2590 if fRc:
2591 return 0;
2592
2593 return 1;
2594
2595
if __name__ == '__main__':
    # Instantiate the generator and hand its exit status straight to the shell.
    oGenerator = IEMThreadedGenerator();
    sys.exit(oGenerator.main(sys.argv));
2598
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette