VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 102368

Last change on this file since 102368 was 102365, checked in by vboxsync, 17 months ago

VMM/IEM: Fixed bug in threaded recompiler where we'd emit the CheckMode calls with the *OLD* fExec value. bugref:10371

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 128.8 KB
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 102365 2023-11-28 14:19:35Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 102365 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the thread functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; #| g_kdTypeInfo; - requires 3.9
90g_kdTypeInfo2.update(g_kdTypeInfo);
91
92def getTypeBitCount(sType):
93 """
94 Translate a type to size in bits
95 """
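# E.g. 'uint32_t' -> 32, 'RTUINT128U' -> 128; pointer types ('*' or a 'P' prefix) -> 64.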
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
103
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
131
132class ThreadedParamRef(object):
133 """
134 A parameter reference for a threaded function.
135 """
136
137 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
138 ## The name / reference in the original code.
139 self.sOrgRef = sOrgRef;
140 ## Normalized name to deal with spaces in macro invocations and such.
141 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
142 ## Indicates that sOrgRef may not match the parameter.
143 self.fCustomRef = sStdRef is not None;
144 ## The type (typically derived).
145 self.sType = sType;
146 ## The statement making the reference.
147 self.oStmt = oStmt;
148 ## The parameter containing the references. None if implicit.
149 self.iParam = iParam;
150 ## The offset in the parameter of the reference.
151 self.offParam = offParam;
152
153 ## The variable name in the threaded function.
154 self.sNewName = 'x';
155 ## The parameter this reference is packed into.
156 self.iNewParam = 99;
157 ## The bit offset in iNewParam.
158 self.offNewParam = 1024;
159
160
161class ThreadedFunctionVariation(object):
162 """ Threaded function variation. """
163
164 ## @name Variations.
165 ## These variations will match translation block selection/distinctions as well.
166 ## @{
167 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
168 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
169 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
170 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
171 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
172 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
173 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
174 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
175 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
176 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
177 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
178 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
179 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
180 ksVariation_64 = '_64'; ##< 64-bit mode code.
181 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
182 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
183 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
184 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
185 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
186 kasVariations = (
187 ksVariation_Default,
188 ksVariation_16,
189 ksVariation_16f,
190 ksVariation_16_Addr32,
191 ksVariation_16f_Addr32,
192 ksVariation_16_Pre386,
193 ksVariation_16f_Pre386,
194 ksVariation_32,
195 ksVariation_32f,
196 ksVariation_32_Flat,
197 ksVariation_32f_Flat,
198 ksVariation_32_Addr16,
199 ksVariation_32f_Addr16,
200 ksVariation_64,
201 ksVariation_64f,
202 ksVariation_64_FsGs,
203 ksVariation_64f_FsGs,
204 ksVariation_64_Addr32,
205 ksVariation_64f_Addr32,
206 );
207 kasVariationsWithoutAddress = (
208 ksVariation_16,
209 ksVariation_16f,
210 ksVariation_16_Pre386,
211 ksVariation_16f_Pre386,
212 ksVariation_32,
213 ksVariation_32f,
214 ksVariation_64,
215 ksVariation_64f,
216 );
217 kasVariationsWithoutAddressNot286 = (
218 ksVariation_16,
219 ksVariation_16f,
220 ksVariation_32,
221 ksVariation_32f,
222 ksVariation_64,
223 ksVariation_64f,
224 );
225 kasVariationsWithoutAddressNot286Not64 = (
226 ksVariation_16,
227 ksVariation_16f,
228 ksVariation_32,
229 ksVariation_32f,
230 );
231 kasVariationsWithoutAddressNot64 = (
232 ksVariation_16,
233 ksVariation_16f,
234 ksVariation_16_Pre386,
235 ksVariation_16f_Pre386,
236 ksVariation_32,
237 ksVariation_32f,
238 );
239 kasVariationsWithoutAddressOnly64 = (
240 ksVariation_64,
241 ksVariation_64f,
242 );
243 kasVariationsWithAddress = (
244 ksVariation_16,
245 ksVariation_16f,
246 ksVariation_16_Addr32,
247 ksVariation_16f_Addr32,
248 ksVariation_16_Pre386,
249 ksVariation_16f_Pre386,
250 ksVariation_32,
251 ksVariation_32f,
252 ksVariation_32_Flat,
253 ksVariation_32f_Flat,
254 ksVariation_32_Addr16,
255 ksVariation_32f_Addr16,
256 ksVariation_64,
257 ksVariation_64f,
258 ksVariation_64_FsGs,
259 ksVariation_64f_FsGs,
260 ksVariation_64_Addr32,
261 ksVariation_64f_Addr32,
262 );
263 kasVariationsWithAddressNot286 = (
264 ksVariation_16,
265 ksVariation_16f,
266 ksVariation_16_Addr32,
267 ksVariation_16f_Addr32,
268 ksVariation_32,
269 ksVariation_32f,
270 ksVariation_32_Flat,
271 ksVariation_32f_Flat,
272 ksVariation_32_Addr16,
273 ksVariation_32f_Addr16,
274 ksVariation_64,
275 ksVariation_64f,
276 ksVariation_64_FsGs,
277 ksVariation_64f_FsGs,
278 ksVariation_64_Addr32,
279 ksVariation_64f_Addr32,
280 );
281 kasVariationsWithAddressNot286Not64 = (
282 ksVariation_16,
283 ksVariation_16f,
284 ksVariation_16_Addr32,
285 ksVariation_16f_Addr32,
286 ksVariation_32,
287 ksVariation_32f,
288 ksVariation_32_Flat,
289 ksVariation_32f_Flat,
290 ksVariation_32_Addr16,
291 ksVariation_32f_Addr16,
292 );
293 kasVariationsWithAddressNot64 = (
294 ksVariation_16,
295 ksVariation_16f,
296 ksVariation_16_Addr32,
297 ksVariation_16f_Addr32,
298 ksVariation_16_Pre386,
299 ksVariation_16f_Pre386,
300 ksVariation_32,
301 ksVariation_32f,
302 ksVariation_32_Flat,
303 ksVariation_32f_Flat,
304 ksVariation_32_Addr16,
305 ksVariation_32f_Addr16,
306 );
307 kasVariationsWithAddressOnly64 = (
308 ksVariation_64,
309 ksVariation_64f,
310 ksVariation_64_FsGs,
311 ksVariation_64f_FsGs,
312 ksVariation_64_Addr32,
313 ksVariation_64f_Addr32,
314 );
315 kasVariationsOnlyPre386 = (
316 ksVariation_16_Pre386,
317 ksVariation_16f_Pre386,
318 );
319 kasVariationsEmitOrder = (
320 ksVariation_Default,
321 ksVariation_64,
322 ksVariation_64f,
323 ksVariation_64_FsGs,
324 ksVariation_64f_FsGs,
325 ksVariation_32_Flat,
326 ksVariation_32f_Flat,
327 ksVariation_32,
328 ksVariation_32f,
329 ksVariation_16,
330 ksVariation_16f,
331 ksVariation_16_Addr32,
332 ksVariation_16f_Addr32,
333 ksVariation_16_Pre386,
334 ksVariation_16f_Pre386,
335 ksVariation_32_Addr16,
336 ksVariation_32f_Addr16,
337 ksVariation_64_Addr32,
338 ksVariation_64f_Addr32,
339 );
340 kdVariationNames = {
341 ksVariation_Default: 'defer-to-cimpl',
342 ksVariation_16: '16-bit',
343 ksVariation_16f: '16-bit w/ eflag checking and clearing',
344 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
345 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
346 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
347 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
348 ksVariation_32: '32-bit',
349 ksVariation_32f: '32-bit w/ eflag checking and clearing',
350 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
351 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
352 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
353 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
354 ksVariation_64: '64-bit',
355 ksVariation_64f: '64-bit w/ eflag checking and clearing',
356 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
357 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
358 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
359 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
360 };
361 kdVariationsWithEflagsCheckingAndClearing = {
362 ksVariation_16f: True,
363 ksVariation_16f_Addr32: True,
364 ksVariation_16f_Pre386: True,
365 ksVariation_32f: True,
366 ksVariation_32f_Flat: True,
367 ksVariation_32f_Addr16: True,
368 ksVariation_64f: True,
369 ksVariation_64f_FsGs: True,
370 ksVariation_64f_Addr32: True,
371 };
372 kdVariationsWithFlatAddress = {
373 ksVariation_32_Flat: True,
374 ksVariation_32f_Flat: True,
375 ksVariation_64: True,
376 ksVariation_64f: True,
377 };
378 kdVariationsWithFlatAddr16 = {
379 ksVariation_16: True,
380 ksVariation_16f: True,
381 ksVariation_16_Pre386: True,
382 ksVariation_16f_Pre386: True,
383 ksVariation_32_Addr16: True,
384 ksVariation_32f_Addr16: True,
385 };
386 kdVariationsWithFlatAddr32No64 = {
387 ksVariation_16_Addr32: True,
388 ksVariation_16f_Addr32: True,
389 ksVariation_32: True,
390 ksVariation_32f: True,
391 ksVariation_32_Flat: True,
392 ksVariation_32f_Flat: True,
393 };
394 ## @}
395
396 ## IEM_CIMPL_F_XXX flags that we know.
397 ## The value indicates whether it terminates the TB or not. The goal is to
398 ## improve the recompiler so all but END_TB will be False.
399 ##
400 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
401 kdCImplFlags = {
402 'IEM_CIMPL_F_MODE': False,
403 'IEM_CIMPL_F_BRANCH_DIRECT': False,
404 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
405 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
406 'IEM_CIMPL_F_BRANCH_FAR': True,
407 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
408 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
409 'IEM_CIMPL_F_BRANCH_STACK': False,
410 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
411 'IEM_CIMPL_F_RFLAGS': False,
412 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
413 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
414 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
415 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
416 'IEM_CIMPL_F_STATUS_FLAGS': False,
417 'IEM_CIMPL_F_VMEXIT': False,
418 'IEM_CIMPL_F_FPU': False,
419 'IEM_CIMPL_F_REP': False,
420 'IEM_CIMPL_F_IO': False,
421 'IEM_CIMPL_F_END_TB': True,
422 'IEM_CIMPL_F_XCPT': True,
423 'IEM_CIMPL_F_CALLS_CIMPL': False,
424 'IEM_CIMPL_F_CALLS_AIMPL': False,
425 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
426 };
427
428 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
429 self.oParent = oThreadedFunction # type: ThreadedFunction
430 ##< ksVariation_Xxxx.
431 self.sVariation = sVariation
432
433 ## Threaded function parameter references.
434 self.aoParamRefs = [] # type: List[ThreadedParamRef]
435 ## Unique parameter references.
436 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
437 ## Minimum number of parameters to the threaded function.
438 self.cMinParams = 0;
439
440 ## List/tree of statements for the threaded function.
441 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
442
443 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
444 self.iEnumValue = -1;
445
446 ## Native recompilation details for this variation.
447 self.oNativeRecomp = None;
448
449 def getIndexName(self):
450 sName = self.oParent.oMcBlock.sFunction;
451 if sName.startswith('iemOp_'):
452 sName = sName[len('iemOp_'):];
453 if self.oParent.oMcBlock.iInFunction == 0:
454 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
455 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
456
457 def getThreadedFunctionName(self):
458 sName = self.oParent.oMcBlock.sFunction;
459 if sName.startswith('iemOp_'):
460 sName = sName[len('iemOp_'):];
461 if self.oParent.oMcBlock.iInFunction == 0:
462 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
463 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
464
465 def getNativeFunctionName(self):
466 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
467
468 def getShortName(self):
469 sName = self.oParent.oMcBlock.sFunction;
470 if sName.startswith('iemOp_'):
471 sName = sName[len('iemOp_'):];
472 if self.oParent.oMcBlock.iInFunction == 0:
473 return '%s%s' % ( sName, self.sVariation, );
474 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
475
476 def isWithFlagsCheckingAndClearingVariation(self):
477 """
478 Checks if this is a variation that checks and clears EFLAGS.
479 """
480 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
481
482 #
483 # Analysis and code morphing.
484 #
485
486 def raiseProblem(self, sMessage):
487 """ Raises a problem. """
488 self.oParent.raiseProblem(sMessage);
489
490 def warning(self, sMessage):
491 """ Emits a warning. """
492 self.oParent.warning(sMessage);
493
494 def analyzeReferenceToType(self, sRef):
495 """
496 Translates a variable or structure reference to a type.
497 Returns type name.
498 Raises exception if unable to figure it out.
499 """
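# E.g. 'u32Disp' -> 'uint32_t', 'bRm' -> 'uint8_t', 'fFlag' -> 'bool',
# 'GCPtrEff' -> 'uint64_t', and 'pVCpu->iem.s.iEffSeg' -> 'uint8_t' (via g_kdIemFieldToType).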
500 ch0 = sRef[0];
501 if ch0 == 'u':
502 if sRef.startswith('u32'):
503 return 'uint32_t';
504 if sRef.startswith('u8') or sRef == 'uReg':
505 return 'uint8_t';
506 if sRef.startswith('u64'):
507 return 'uint64_t';
508 if sRef.startswith('u16'):
509 return 'uint16_t';
510 elif ch0 == 'b':
511 return 'uint8_t';
512 elif ch0 == 'f':
513 return 'bool';
514 elif ch0 == 'i':
515 if sRef.startswith('i8'):
516 return 'int8_t';
517 if sRef.startswith('i16'):
518 return 'int16_t';
519 if sRef.startswith('i32'):
520 return 'int32_t';
521 if sRef.startswith('i64'):
522 return 'int64_t';
523 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
524 return 'uint8_t';
525 elif ch0 == 'p':
526 if sRef.find('-') < 0:
527 return 'uintptr_t';
528 if sRef.startswith('pVCpu->iem.s.'):
529 sField = sRef[len('pVCpu->iem.s.') : ];
530 if sField in g_kdIemFieldToType:
531 if g_kdIemFieldToType[sField][0]:
532 return g_kdIemFieldToType[sField][0];
533 elif ch0 == 'G' and sRef.startswith('GCPtr'):
534 return 'uint64_t';
535 elif ch0 == 'e':
536 if sRef == 'enmEffOpSize':
537 return 'IEMMODE';
538 elif ch0 == 'o':
539 if sRef.startswith('off32'):
540 return 'uint32_t';
541 elif sRef == 'cbFrame': # enter
542 return 'uint16_t';
543 elif sRef == 'cShift': ## @todo risky
544 return 'uint8_t';
545
546 self.raiseProblem('Unknown reference: %s' % (sRef,));
547 return None; # Shut up pylint 2.16.2.
548
549 def analyzeCallToType(self, sFnRef):
550 """
551 Determines the type of an indirect function call.
552 """
553 assert sFnRef[0] == 'p';
554
555 #
556 # Simple?
557 #
558 if sFnRef.find('-') < 0:
559 oDecoderFunction = self.oParent.oMcBlock.oFunction;
560
561 # Try the argument list of the function definition macro invocation first.
562 iArg = 2;
563 while iArg < len(oDecoderFunction.asDefArgs):
564 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
565 return oDecoderFunction.asDefArgs[iArg - 1];
566 iArg += 1;
567
568 # Then check for a line that includes the word and looks like a variable declaration.
569 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
570 for sLine in oDecoderFunction.asLines:
571 oMatch = oRe.match(sLine);
572 if oMatch:
573 if not oMatch.group(1).startswith('const'):
574 return oMatch.group(1);
575 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
576
577 #
578 # Deal with the pImpl->pfnXxx:
579 #
580 elif sFnRef.startswith('pImpl->pfn'):
581 sMember = sFnRef[len('pImpl->') : ];
582 sBaseType = self.analyzeCallToType('pImpl');
583 offBits = sMember.rfind('U') + 1;
584 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
585 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
586 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
587 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
588 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
589 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
590 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
591 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
592 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
593 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
594
595 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
596
597 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
598 return None; # Shut up pylint 2.16.2.
599
600 def analyze8BitGRegStmt(self, oStmt):
601 """
602 Gets the 8-bit general purpose register access details of the given statement.
603 ASSUMES the statement is one accessing an 8-bit GREG.
604 """
605 idxReg = 0;
606 if ( oStmt.sName.find('_FETCH_') > 0
607 or oStmt.sName.find('_REF_') > 0
608 or oStmt.sName.find('_TO_LOCAL') > 0):
609 idxReg = 1;
610
611 sRegRef = oStmt.asParams[idxReg];
612 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
613 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
614 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
615 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
616 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
617 else:
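# Without a REX prefix, 8-bit register indexes 4-7 refer to the legacy
# high-byte registers (AH, CH, DH, BH); the +12 below remaps those so the
# threaded function can tell them apart from SPL/BPL/SIL/DIL (REX present).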
618 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
619
620 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
621 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
622 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
623 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
624 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
625 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
626 else:
627 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
628 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
629 sStdRef = 'bOther8Ex';
630
631 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
632 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
633 return (idxReg, sOrgExpr, sStdRef);
634
635
636 ## Maps memory related MCs to info for FLAT conversion.
637 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
638 ## segmentation checking for every memory access. Only applied to accesses
639 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment,
640 ## the latter (CS) is just to keep things simple (we could safely fetch via
641 ## it, but only in 64-bit mode could we safely write via it, IIRC).
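## E.g. the entry ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ) means parameter #1 (the
## iEffSeg argument) is dropped and the MC renamed to the FLAT variant; an
## index of -1 means there is no segment parameter to drop (see
## analyzeMorphStmtForThreaded).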
642 kdMemMcToFlatInfo = {
643 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
644 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
645 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
646 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
647 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
648 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
649 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
650 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
651 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
652 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
653 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
654 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
655 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
656 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
657 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
658 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
659 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
660 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
661 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
662 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
663 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
664 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
665 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
666 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
667 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
668 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
669 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
670 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
671 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
672 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
673 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
674 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
675 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
676 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
677 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
678 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
679 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
680 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
681 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
682 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
683 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
684 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
685 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
686 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
687 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
688 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
689 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
690 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
691 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
692 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
693 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
694 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
695 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
696 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
697 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
698 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
699 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
700 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
701 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
702 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
703 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
704 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
705 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
706 'IEM_MC_MEM_MAP': ( 2, 'IEM_MC_MEM_FLAT_MAP' ),
707 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
708 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
709 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
710 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
711 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
712 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
713 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
714 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
715 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
716 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
717 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
718 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
719 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
720 };
721
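## Maps stack push/pop MCs to their (flat 32-bit, 64-bit) variants; the second
## element is picked for the plain 64-bit variations (_64/_64f), the first for
## the flat 32-bit ones (see analyzeMorphStmtForThreaded).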
722 kdMemMcToFlatInfoStack = {
723 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
724 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
725 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
726 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
727 'IEM_MC_POP_U16': ( 'IEM_MC_FLAT32_POP_U16', 'IEM_MC_FLAT64_POP_U16', ),
728 'IEM_MC_POP_U32': ( 'IEM_MC_FLAT32_POP_U32', 'IEM_MC_POP_U32', ),
729 'IEM_MC_POP_U64': ( 'IEM_MC_POP_U64', 'IEM_MC_FLAT64_POP_U64', ),
730 };
731
732 kdThreadedCalcRmEffAddrMcByVariation = {
733 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
734 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
735 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
736 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
737 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
738 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
739 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
740 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
741 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
742 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
743 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
744 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
745 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
746 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
747 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
748 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
749 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
750 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
751 };
752
753 def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
754 """
755 Transforms (copy) the statements into those for the threaded function.
756
757 Returns list/tree of statements (aoStmts is not modified) and the new
758 iParamRef value.
759 """
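# E.g. in the '_64' variation, IEM_MC_ADVANCE_RIP_AND_FINISH() is morphed
# roughly into IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(cbInstrP).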
760 #
761 # We'll be traversing aoParamRefs in parallel to the statements, so we
762 # must match the traversal in analyzeFindThreadedParamRefs exactly.
763 #
764 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
765 aoThreadedStmts = [];
766 for oStmt in aoStmts:
767 # Skip C++ statements that are purely related to decoding.
768 if not oStmt.isCppStmt() or not oStmt.fDecode:
769 # Copy the statement. Make a deep copy to make sure we've got our own
770 # copies of all instance variables, even if a bit overkill at the moment.
771 oNewStmt = copy.deepcopy(oStmt);
772 aoThreadedStmts.append(oNewStmt);
773 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
774
775 # If the statement has parameter references, process the relevant parameters.
776 # We grab the references relevant to this statement and apply them in reverse order.
777 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
778 iParamRefFirst = iParamRef;
779 while True:
780 iParamRef += 1;
781 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
782 break;
783
784 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
785 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
786 oCurRef = self.aoParamRefs[iCurRef];
787 if oCurRef.iParam is not None:
788 assert oCurRef.oStmt == oStmt;
789 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
790 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
791 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
792 or oCurRef.fCustomRef), \
793 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
794 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
795 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
796 + oCurRef.sNewName \
797 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
798
799 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
800 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
801 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
802 assert len(oNewStmt.asParams) == 3;
803
804 if self.sVariation in self.kdVariationsWithFlatAddr16:
805 oNewStmt.asParams = [
806 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
807 ];
808 else:
809 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
810 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
811 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
812
813 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
814 oNewStmt.asParams = [
815 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
816 ];
817 else:
818 oNewStmt.asParams = [
819 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
820 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
821 ];
822 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
823 elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
824 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):
825 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
826 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
827 and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
828 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
829 oNewStmt.sName += '_THREADED';
830 if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
831 oNewStmt.sName += '_PC64';
832 elif self.sVariation in (self.ksVariation_64f, self.ksVariation_64f_FsGs, self.ksVariation_64f_Addr32):
833 oNewStmt.sName += '_PC64_WITH_FLAGS';
834 elif self.sVariation == self.ksVariation_16_Pre386:
835 oNewStmt.sName += '_PC16';
836 elif self.sVariation == self.ksVariation_16f_Pre386:
837 oNewStmt.sName += '_PC16_WITH_FLAGS';
838 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
839 assert self.sVariation != self.ksVariation_Default;
840 oNewStmt.sName += '_PC32';
841 else:
842 oNewStmt.sName += '_PC32_WITH_FLAGS';
843
844 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
845 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
846 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
847 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
848 oNewStmt.sName += '_THREADED';
849
850 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
851 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
852 oNewStmt.sName += '_THREADED';
853 oNewStmt.idxFn += 1;
854 oNewStmt.idxParams += 1;
855 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
856
857 # ... and in FLAT modes we must morph memory accesses into FLAT accesses ...
858 elif ( self.sVariation in self.kdVariationsWithFlatAddress
859 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
860 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
861 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
862 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
863 if idxEffSeg != -1:
864 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
865 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
866 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
867 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
868 oNewStmt.asParams.pop(idxEffSeg);
869 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
870
871 # ... PUSH and POP also need flat variants, but these differ a little.
872 elif ( self.sVariation in self.kdVariationsWithFlatAddress
873 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
874 or oNewStmt.sName.startswith('IEM_MC_POP'))):
875 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in (self.ksVariation_64,
876 self.ksVariation_64f,))];
877
878
879 # Process branches of conditionals recursively.
880 if isinstance(oStmt, iai.McStmtCond):
881 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
882 if oStmt.aoElseBranch:
883 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);
884
885 return (aoThreadedStmts, iParamRef);
886
887
888 def analyzeConsolidateThreadedParamRefs(self):
889 """
890 Consolidate threaded function parameter references into a dictionary
891 with lists of the references to each variable/field.
892 """
893 # Gather unique parameters.
894 self.dParamRefs = {};
895 for oRef in self.aoParamRefs:
896 if oRef.sStdRef not in self.dParamRefs:
897 self.dParamRefs[oRef.sStdRef] = [oRef,];
898 else:
899 self.dParamRefs[oRef.sStdRef].append(oRef);
900
901 # Generate names for them for use in the threaded function.
902 dParamNames = {};
903 for sName, aoRefs in self.dParamRefs.items():
904 # Morph the reference expression into a name.
905 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
906 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
907 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
908 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
909 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
910 elif sName.find('.') >= 0 or sName.find('->') >= 0:
911 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
912 else:
913 sName += 'P';
914
915 # Ensure it's unique.
916 if sName in dParamNames:
917 for i in range(10):
918 if sName + str(i) not in dParamNames:
919 sName += str(i);
920 break;
921 dParamNames[sName] = True;
922
923 # Update all the references.
924 for oRef in aoRefs:
925 oRef.sNewName = sName;
926
927 # Organize them by size too, for the purpose of optimizing them.
928 dBySize = {} # type: Dict[int, List[str]]
929 for sStdRef, aoRefs in self.dParamRefs.items():
930 if aoRefs[0].sType[0] != 'P':
931 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
932 assert(cBits <= 64);
933 else:
934 cBits = 64;
935
936 if cBits not in dBySize:
937 dBySize[cBits] = [sStdRef,]
938 else:
939 dBySize[cBits].append(sStdRef);
940
941 # Pack the parameters as best as we can, starting with the largest ones
942 # and ASSUMING a 64-bit parameter size.
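# E.g. references of 32, 32, 16 and 8 bits pack as: param 0 = the two 32-bit
# refs (bit offsets 0 and 32), param 1 = the 16-bit and 8-bit refs (offsets 0
# and 16), giving cMinParams = 2.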
943 self.cMinParams = 0;
944 offNewParam = 0;
945 for cBits in sorted(dBySize.keys(), reverse = True):
946 for sStdRef in dBySize[cBits]:
947 if offNewParam == 0 or offNewParam + cBits > 64:
948 self.cMinParams += 1;
949 offNewParam = cBits;
950 else:
951 offNewParam += cBits;
952 assert(offNewParam <= 64);
953
954 for oRef in self.dParamRefs[sStdRef]:
955 oRef.iNewParam = self.cMinParams - 1;
956 oRef.offNewParam = offNewParam - cBits;
957
958 # Currently there are a few that require 4 parameters; list these so we can figure out why:
959 if self.cMinParams >= 4:
960 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
961 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
962
963 return True;
964
965 ksHexDigits = '0123456789abcdefABCDEF';
966
967 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
968 """
969 Scans the statements for things that have to be passed on to the threaded
970 function (populates self.aoParamRefs).
971 """
972 for oStmt in aoStmts:
973 # Some statements we can skip altogether.
974 if isinstance(oStmt, iai.McCppPreProc):
975 continue;
976 if oStmt.isCppStmt() and oStmt.fDecode:
977 continue;
978 if oStmt.sName in ('IEM_MC_BEGIN',):
979 continue;
980
981 if isinstance(oStmt, iai.McStmtVar):
982 if oStmt.sValue is None:
983 continue;
984 aiSkipParams = { 0: True, 1: True, 3: True };
985 else:
986 aiSkipParams = {};
987
988 # Several statements have implicit parameters and some have different parameters.
989 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
990 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
991 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
992 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
993 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
994 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
995
996 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
997 and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
998 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
999
1000 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1001 # This is being pretty presumptive about bRm always being the RM byte...
1002 assert len(oStmt.asParams) == 3;
1003 assert oStmt.asParams[1] == 'bRm';
1004
1005 if self.sVariation in self.kdVariationsWithFlatAddr16:
1006 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1007 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1008 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1009 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1010 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1011 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1012 'uint8_t', oStmt, sStdRef = 'bSib'));
1013 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1014 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1015 else:
1016 assert self.sVariation in self.kasVariationsWithAddressOnly64;
1017 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1018 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1019 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1020 'uint8_t', oStmt, sStdRef = 'bSib'));
1021 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1022 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1023 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1024 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1025 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1026
1027 # 8-bit register accesses needs to have their index argument reworked to take REX into account.
1028 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1029 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1030 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
1031 aiSkipParams[idxReg] = True; # Skip the parameter below.
1032
1033 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1034 if ( self.sVariation in self.kdVariationsWithFlatAddress
1035 and oStmt.sName in self.kdMemMcToFlatInfo
1036 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1037 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1038
1039 # Inspect the target of calls to see if we need to pass down a
1040 # function pointer or function table pointer for it to work.
1041 if isinstance(oStmt, iai.McStmtCall):
1042 if oStmt.sFn[0] == 'p':
1043 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1044 elif ( oStmt.sFn[0] != 'i'
1045 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1046 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1047 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1048 aiSkipParams[oStmt.idxFn] = True;
1049
1050 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1051 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1052 assert oStmt.idxFn == 2;
1053 aiSkipParams[0] = True;
1054
1055
1056 # Check all the parameters for bogus references.
1057 for iParam, sParam in enumerate(oStmt.asParams):
1058 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1059 # The parameter may contain a C expression, so we have to try to
1060 # extract the relevant bits, i.e. variables and fields, while
1061 # ignoring operators and parentheses.
1062 offParam = 0;
1063 while offParam < len(sParam):
1064 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1065 ch = sParam[offParam];
1066 if ch.isalpha() or ch == '_':
1067 offStart = offParam;
1068 offParam += 1;
1069 while offParam < len(sParam):
1070 ch = sParam[offParam];
1071 if not ch.isalnum() and ch != '_' and ch != '.':
1072 if ch != '-' or sParam[offParam + 1] != '>':
1073 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1074 if ( ch == '('
1075 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1076 offParam += len('(pVM)->') - 1;
1077 else:
1078 break;
1079 offParam += 1;
1080 offParam += 1;
1081 sRef = sParam[offStart : offParam];
1082
1083 # For register references, we pass the full register indexes instead, as macros
1084 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg and the
1085 # threaded function will be more efficient if we just pass the register index
1086 # as a 4-bit param.
1087 if ( sRef.startswith('IEM_GET_MODRM')
1088 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
1089 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1090 if sParam[offParam] != '(':
1091 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1092 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1093 if asMacroParams is None:
1094 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1095 offParam = offCloseParam + 1;
1096 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1097 oStmt, iParam, offStart));
1098
1099 # We can skip known variables.
1100 elif sRef in self.oParent.dVariables:
1101 pass;
1102
1103 # Skip certain macro invocations.
1104 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1105 'IEM_GET_GUEST_CPU_FEATURES',
1106 'IEM_IS_GUEST_CPU_AMD',
1107 'IEM_IS_16BIT_CODE',
1108 'IEM_IS_32BIT_CODE',
1109 'IEM_IS_64BIT_CODE',
1110 ):
1111 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1112 if sParam[offParam] != '(':
1113 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1114 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1115 if asMacroParams is None:
1116 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1117 offParam = offCloseParam + 1;
1118
1119 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1120 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1121 'IEM_IS_16BIT_CODE',
1122 'IEM_IS_32BIT_CODE',
1123 'IEM_IS_64BIT_CODE',
1124 ):
1125 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1126 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1127 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1128 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1129 offParam += 1;
1130
1131 # Skip constants, globals, types (casts), sizeof and macros.
1132 elif ( sRef.startswith('IEM_OP_PRF_')
1133 or sRef.startswith('IEM_ACCESS_')
1134 or sRef.startswith('IEMINT_')
1135 or sRef.startswith('X86_GREG_')
1136 or sRef.startswith('X86_SREG_')
1137 or sRef.startswith('X86_EFL_')
1138 or sRef.startswith('X86_FSW_')
1139 or sRef.startswith('X86_FCW_')
1140 or sRef.startswith('X86_XCPT_')
1141 or sRef.startswith('IEMMODE_')
1142 or sRef.startswith('IEM_F_')
1143 or sRef.startswith('IEM_CIMPL_F_')
1144 or sRef.startswith('g_')
1145 or sRef.startswith('iemAImpl_')
1146 or sRef.startswith('kIemNativeGstReg_')
1147 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1148 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1149 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1150 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1151 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1152 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1153 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1154 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1155 'NIL_RTGCPTR',) ):
1156 pass;
1157
1158 # Whatever remains is presumably a variable (non-field) or a decoder
1159 # field in IEMCPU, which will need to be parameterized.
1160 elif ( ( '.' not in sRef
1161 and '-' not in sRef
1162 and sRef not in ('pVCpu', ) )
1163 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1164 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1165 oStmt, iParam, offStart));
1166 # Number.
1167 elif ch.isdigit():
1168 if ( ch == '0'
1169 and offParam + 2 <= len(sParam)
1170 and sParam[offParam + 1] in 'xX'
1171 and sParam[offParam + 2] in self.ksHexDigits ):
1172 offParam += 2;
1173 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1174 offParam += 1;
1175 else:
1176 while offParam < len(sParam) and sParam[offParam].isdigit():
1177 offParam += 1;
1178 # Comment?
1179 elif ( ch == '/'
1180 and offParam + 4 <= len(sParam)
1181 and sParam[offParam + 1] == '*'):
1182 offParam += 2;
1183 offNext = sParam.find('*/', offParam);
1184 if offNext < offParam:
1185 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1186 offParam = offNext + 2;
1187 # Whatever else.
1188 else:
1189 offParam += 1;
1190
1191 # Traverse the branches of conditionals.
1192 if isinstance(oStmt, iai.McStmtCond):
1193 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1194 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1195 return True;
1196
1197 def analyzeVariation(self, aoStmts):
1198 """
1199 2nd part of the analysis, done on each variation.
1200
1201 The variations may differ in parameter requirements and will end up with
1202 slightly different MC sequences. Thus this is done on each individually.
1203
1204 Returns dummy True - raises exception on trouble.
1205 """
1206 # Now scan the code for variables and field references that need to
1207 # be passed to the threaded function because they are related to the
1208 # instruction decoding.
1209 self.analyzeFindThreadedParamRefs(aoStmts);
1210 self.analyzeConsolidateThreadedParamRefs();
1211
1212 # Morph the statement stream for the block into what we'll be using in the threaded function.
1213 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1214 if iParamRef != len(self.aoParamRefs):
1215 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1216
1217 return True;
1218
1219 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1220 """
1221 Produces generic C++ statements that emit a call to the threaded function
1222 variation and any subsequent checks that may be necessary after that.
1223
1224 The sCallVarNm is for emitting calls where the function index comes from a variable instead of getIndexName().
1225 """
1226 aoStmts = [
1227 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1228 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1229 cchIndent = cchIndent), # Scope and a hook for various stuff.
1230 ];
1231
1232 # The call to the threaded function.
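# Each 64-bit argument is assembled by OR'ing together the original reference
# expressions, each cast to uint64_t and shifted to its offNewParam position,
# e.g. (illustrative) '(uint64_t)(bRm) | ((uint64_t)(u32Disp) << 8)'.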
1233 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1234 for iParam in range(self.cMinParams):
1235 asFrags = [];
1236 for aoRefs in self.dParamRefs.values():
1237 oRef = aoRefs[0];
1238 if oRef.iNewParam == iParam:
1239 sCast = '(uint64_t)'
1240 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1241 sCast = '(uint64_t)(u' + oRef.sType + ')';
1242 if oRef.offNewParam == 0:
1243 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1244 else:
1245 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1246 assert asFrags;
1247 asCallArgs.append(' | '.join(asFrags));
1248
1249 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1250
1251 # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
1252 # emit this mode check from the compilation loop. On the
1253 # plus side, this means we eliminate an unnecessary call at the
1254 # end of the TB. :-)
1255 ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1256 ## mask and maybe emit additional checks.
1257 #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1258 # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1259 # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1260 # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1261 # cchIndent = cchIndent));
1262
1263 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1264 if not sCImplFlags:
1265 sCImplFlags = '0'
1266 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1267
1268 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1269 # indicates we should do so.
1270 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1271 asEndTbFlags = [];
1272 asTbBranchedFlags = [];
1273 for sFlag in self.oParent.dsCImplFlags:
1274 if self.kdCImplFlags[sFlag] is True:
1275 asEndTbFlags.append(sFlag);
1276 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1277 asTbBranchedFlags.append(sFlag);
1278 if asTbBranchedFlags:
1279 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1280 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1281 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1282 if asEndTbFlags:
1283 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1284 cchIndent = cchIndent));
1285
1286 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1287 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1288
1289 return aoStmts;
1290
1291
1292class ThreadedFunction(object):
1293 """
1294 A threaded function.
1295 """
1296
1297 def __init__(self, oMcBlock: iai.McBlock) -> None:
1298 self.oMcBlock = oMcBlock # type: iai.McBlock
1299 # The remaining fields are only useful after analyze() has been called:
1300 ## Variations for this block. There is at least one.
1301 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1302 ## Variation dictionary containing the same as aoVariations.
1303 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1304 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1305 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1306 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1307 ## and those determined by analyzeCodeOperation().
1308 self.dsCImplFlags = {} # type: Dict[str, bool]
1309
1310 @staticmethod
1311 def dummyInstance():
1312 """ Gets a dummy instance. """
1313 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1314 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1315
1316 def hasWithFlagsCheckingAndClearingVariation(self):
1317 """
1318 Check if there is one or more with flags checking and clearing
1319 variations for this threaded function.
1320 """
1321 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1322 if sVarWithFlags in self.dVariations:
1323 return True;
1324 return False;
1325
1326 #
1327 # Analysis and code morphing.
1328 #
1329
1330 def raiseProblem(self, sMessage):
1331 """ Raises a problem. """
1332 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1333
1334 def warning(self, sMessage):
1335 """ Emits a warning. """
1336 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1337
1338 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1339 """ Scans the statements for MC variables and call arguments. """
1340 for oStmt in aoStmts:
1341 if isinstance(oStmt, iai.McStmtVar):
1342 if oStmt.sVarName in self.dVariables:
1343 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1344                self.dVariables[oStmt.sVarName] = oStmt;
1345
1346 # There shouldn't be any variables or arguments declared inside if/
1347 # else blocks, but scan them too to be on the safe side.
1348 if isinstance(oStmt, iai.McStmtCond):
1349 cBefore = len(self.dVariables);
1350 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1351 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1352 if len(self.dVariables) != cBefore:
1353 raise Exception('Variables/arguments defined in conditional branches!');
1354 return True;
1355
1356 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
1357 """
1358        Analyzes the code, looking for clues as to additional side-effects.
1359
1360 Currently this is simply looking for branching and adding the relevant
1361 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1362 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1363 """
1364 for oStmt in aoStmts:
1365            # Set IEM_CIMPL_F_BRANCH_XXX if we see any branching MCs.
1366 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1367 assert not fSeenConditional;
1368 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1369 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1370 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1371 if fSeenConditional:
1372 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1373
1374 # Check for CIMPL and AIMPL calls.
1375 if oStmt.sName.startswith('IEM_MC_CALL_'):
1376 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1377 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1378 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1379 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1380 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1381 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1382 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1383 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1384 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1385 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1386 else:
1387 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1388
1389 # Process branches of conditionals recursively.
1390 if isinstance(oStmt, iai.McStmtCond):
1391 self.analyzeCodeOperation(oStmt.aoIfBranch, True);
1392 if oStmt.aoElseBranch:
1393 self.analyzeCodeOperation(oStmt.aoElseBranch, True);
1394
1395 return True;
1396
1397 def analyze(self):
1398 """
1399 Analyzes the code, identifying the number of parameters it requires and such.
1400
1401 Returns dummy True - raises exception on trouble.
1402 """
1403
1404 # Check the block for errors before we proceed (will decode it).
1405 asErrors = self.oMcBlock.check();
1406 if asErrors:
1407 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
1408 for sError in asErrors]));
1409
1410 # Decode the block into a list/tree of McStmt objects.
1411 aoStmts = self.oMcBlock.decode();
1412
1413 # Scan the statements for local variables and call arguments (self.dVariables).
1414 self.analyzeFindVariablesAndCallArgs(aoStmts);
1415
1416 # Scan the code for IEM_CIMPL_F_ and other clues.
1417 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
1418 self.analyzeCodeOperation(aoStmts);
1419 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
1420 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
1421 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
1422 self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');
1423
1424 # Create variations as needed.
1425 if iai.McStmt.findStmtByNames(aoStmts,
1426 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
1427 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
1428 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
1429 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
1430 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
1431
1432 elif iai.McStmt.findStmtByNames(aoStmts, {'IEM_MC_CALC_RM_EFF_ADDR' : True,}):
1433 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1434 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
1435 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1436 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
1437 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1438 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
1439 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1440 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
1441 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1442 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1443 else:
1444 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
1445 else:
1446 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1447 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
1448 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1449 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
1450 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1451 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
1452 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1453 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
1454 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1455 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1456 else:
1457 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
1458
1459 if not iai.McStmt.findStmtByNames(aoStmts,
1460 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
1461 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
1462 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
1463 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
1464 }):
1465 asVariations = [sVariation for sVariation in asVariations
1466 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
1467
1468 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
1469
1470 # Dictionary variant of the list.
1471 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
1472
1473 # Continue the analysis on each variation.
1474 for oVariation in self.aoVariations:
1475 oVariation.analyzeVariation(aoStmts);
1476
1477 return True;
1478
1479 ## Used by emitThreadedCallStmts.
1480 kdVariationsWithNeedForPrefixCheck = {
1481 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
1482 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
1483 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
1484 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
1485 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
1486 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
1487 ThreadedFunctionVariation.ksVariation_32_Flat: True,
1488 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
1489 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
1490 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
1491 };
1492
1493 def emitThreadedCallStmts(self):
1494 """
1495        Worker for morphInputCode that returns a list of statements that emit
1496        the call to the threaded functions for the block.
1497 """
1498 # Special case for only default variation:
1499 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
1500 return self.aoVariations[0].emitThreadedCallStmts(0);
1501
1502 #
1503 # Case statement sub-class.
1504 #
1505 dByVari = self.dVariations;
1506 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
1507 class Case:
1508 def __init__(self, sCond, sVarNm = None):
1509 self.sCond = sCond;
1510 self.sVarNm = sVarNm;
1511 self.oVar = dByVari[sVarNm] if sVarNm else None;
1512 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
1513
1514 def toCode(self):
1515 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1516 if self.aoBody:
1517 aoStmts.extend(self.aoBody);
1518 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
1519 return aoStmts;
1520
1521 def toFunctionAssignment(self):
1522 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1523 if self.aoBody:
1524 aoStmts.extend([
1525 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
1526 iai.McCppGeneric('break;', cchIndent = 8),
1527 ]);
1528 return aoStmts;
1529
1530 def isSame(self, oThat):
1531 if not self.aoBody: # fall thru always matches.
1532 return True;
1533 if len(self.aoBody) != len(oThat.aoBody):
1534 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
1535 return False;
1536 for iStmt, oStmt in enumerate(self.aoBody):
1537 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
1538 assert isinstance(oStmt, iai.McCppGeneric);
1539 assert not isinstance(oStmt, iai.McStmtCond);
1540 if isinstance(oStmt, iai.McStmtCond):
1541 return False;
1542 if oStmt.sName != oThatStmt.sName:
1543 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
1544 return False;
1545 if len(oStmt.asParams) != len(oThatStmt.asParams):
1546 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
1547 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
1548 return False;
1549 for iParam, sParam in enumerate(oStmt.asParams):
1550 if ( sParam != oThatStmt.asParams[iParam]
1551 and ( iParam != 1
1552 or not isinstance(oStmt, iai.McCppCall)
1553 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
1554 or sParam != self.oVar.getIndexName()
1555 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
1556 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
1557 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
1558 return False;
1559 return True;
1560
1561 #
1562 # Determine what we're switch on.
1563 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
1564 #
1565 fSimple = True;
1566 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
1567 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
1568 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
1569            # Accesses via FS, GS and CS go thru the non-FLAT functions. (CS
1570            # is not writable in 32-bit mode (at least), so we take the penalty
1571            # path for any access via it; simpler this way.)
1572 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
1573 fSimple = False; # threaded functions.
1574 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1575 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
1576 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
1577
1578 #
1579 # Generate the case statements.
1580 #
1581 # pylintx: disable=x
1582 aoCases = [];
1583 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
1584 assert not fSimple;
1585 aoCases.extend([
1586 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
1587 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
1588 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
1589 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
1590 ]);
1591 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
1592 aoCases.extend([
1593 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
1594 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
1595 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
1596 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
1597 ]);
1598 elif ThrdFnVar.ksVariation_64 in dByVari:
1599 assert fSimple;
1600 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
1601 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
1602 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
1603
1604 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
1605 assert not fSimple;
1606 aoCases.extend([
1607 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
1608 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
1609 Case('IEMMODE_32BIT | 16', None), # fall thru
1610 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1611 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
1612 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
1613 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
1614 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
1615 ]);
1616 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
1617 aoCases.extend([
1618 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
1619 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
1620 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
1621 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1622 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
1623 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
1624 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
1625 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
1626 ]);
1627 elif ThrdFnVar.ksVariation_32 in dByVari:
1628 assert fSimple;
1629 aoCases.extend([
1630 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1631 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1632 ]);
1633 if ThrdFnVar.ksVariation_32f in dByVari:
1634 aoCases.extend([
1635 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1636 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1637 ]);
1638
1639 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
1640 assert not fSimple;
1641 aoCases.extend([
1642 Case('IEMMODE_16BIT | 16', None), # fall thru
1643 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
1644 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
1645 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
1646 ]);
1647 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
1648 aoCases.extend([
1649 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
1650 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
1651 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
1652 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
1653 ]);
1654 elif ThrdFnVar.ksVariation_16 in dByVari:
1655 assert fSimple;
1656 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
1657 if ThrdFnVar.ksVariation_16f in dByVari:
1658 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
1659
1660 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
1661 if not fSimple:
1662 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
1663 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
1664 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
1665 if not fSimple:
1666 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
1667 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
1668
1669 #
1670 # If the case bodies are all the same, except for the function called,
1671 # we can reduce the code size and hopefully compile time.
1672 #
1673 iFirstCaseWithBody = 0;
1674 while not aoCases[iFirstCaseWithBody].aoBody:
1675            iFirstCaseWithBody += 1;
1676        fAllSameCases = True;
1677 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
1678 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
1679 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
1680 if fAllSameCases:
1681 aoStmts = [
1682 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
1683 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1684 iai.McCppGeneric('{'),
1685 ];
1686 for oCase in aoCases:
1687 aoStmts.extend(oCase.toFunctionAssignment());
1688 aoStmts.extend([
1689 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
1690 iai.McCppGeneric('}'),
1691 ]);
1692 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
1693
1694 else:
1695 #
1696 # Generate the generic switch statement.
1697 #
1698 aoStmts = [
1699 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1700 iai.McCppGeneric('{'),
1701 ];
1702 for oCase in aoCases:
1703 aoStmts.extend(oCase.toCode());
1704 aoStmts.extend([
1705 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
1706 iai.McCppGeneric('}'),
1707 ]);
1708
1709 return aoStmts;
1710
1711 def morphInputCode(self, aoStmts, fCallEmitted = False, cDepth = 0):
1712 """
1713 Adjusts (& copies) the statements for the input/decoder so it will emit
1714 calls to the right threaded functions for each block.
1715
1716 Returns list/tree of statements (aoStmts is not modified) and updated
1717 fCallEmitted status.
1718 """
1719 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
1720 aoDecoderStmts = [];
1721
1722 for oStmt in aoStmts:
1723 # Copy the statement. Make a deep copy to make sure we've got our own
1724 # copies of all instance variables, even if a bit overkill at the moment.
1725 oNewStmt = copy.deepcopy(oStmt);
1726 aoDecoderStmts.append(oNewStmt);
1727 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
1728 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
1729 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
1730
1731 # If we haven't emitted the threaded function call yet, look for
1732            # statements which it would naturally follow or precede.
1733 if not fCallEmitted:
1734 if not oStmt.isCppStmt():
1735 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
1736 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
1737 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
1738 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
1739 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
1740 aoDecoderStmts.pop();
1741 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1742 aoDecoderStmts.append(oNewStmt);
1743 fCallEmitted = True;
1744 elif ( oStmt.fDecode
1745 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
1746 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
1747 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1748 fCallEmitted = True;
1749
1750 # Process branches of conditionals recursively.
1751 if isinstance(oStmt, iai.McStmtCond):
1752 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fCallEmitted, cDepth + 1);
1753 if oStmt.aoElseBranch:
1754 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fCallEmitted, cDepth + 1);
1755 else:
1756 fCallEmitted2 = False;
1757 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
1758
1759 if not fCallEmitted and cDepth == 0:
1760 self.raiseProblem('Unable to insert call to threaded function.');
1761
1762 return (aoDecoderStmts, fCallEmitted);
1763
1764
1765 def generateInputCode(self):
1766 """
1767 Modifies the input code.
1768 """
1769 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
1770
1771 if len(self.oMcBlock.aoStmts) == 1:
1772 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
1773 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
1774 if self.dsCImplFlags:
1775 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
1776 else:
1777 sCode += '0;\n';
1778 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1779 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1780 sIndent = ' ' * (min(cchIndent, 2) - 2);
1781 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
1782 return sCode;
1783
1784 # IEM_MC_BEGIN/END block
1785 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
1786 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1787 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1788
1789# Short alias for ThreadedFunctionVariation.
1790ThrdFnVar = ThreadedFunctionVariation;
1791
1792
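# Illustration only -- a hypothetical sketch (not part of the generator) of how
# ThreadedFunction.emitThreadedCallStmts() above composes the value it switches on
# when selecting a variation.  Bits 0-2 come straight from fExec, ASSUMING that
# (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7 (asserted by
# generateModifiedInput() below); 8, 16 and 32 are the penalty bits added for
# address-size mismatches, FS/GS/CS segment use and eflags-checking variants.
def _exampleVariationSwitchValue(fExecModeBits, fAddrModeDiffers, fSregFsGsCs, fNeedEflagsCheck):
    uSwitchValue  = fExecModeBits & 7;                  # IEMMODE + flat/pre-386 bit.
    uSwitchValue |= 8  if fAddrModeDiffers else 0;      # Address-size prefix in effect.
    uSwitchValue |= 16 if fSregFsGsCs      else 0;      # Access via FS, GS or CS.
    uSwitchValue |= 32 if fNeedEflagsCheck else 0;      # Eflags checking + clearing variant.
    return uSwitchValue;
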
1793class IEMThreadedGenerator(object):
1794 """
1795 The threaded code generator & annotator.
1796 """
1797
1798 def __init__(self):
1799 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
1800 self.oOptions = None # type: argparse.Namespace
1801 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
1802        self.aidxFirstFunctions = []    # type: List[int] ##< Runs parallel to aoParsers giving the index of the first function.
1803
1804 #
1805 # Processing.
1806 #
1807
1808 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
1809 """
1810 Process the input files.
1811 """
1812
1813 # Parse the files.
1814 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
1815
1816 # Create threaded functions for the MC blocks.
1817 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
1818
1819 # Analyze the threaded functions.
1820 dRawParamCounts = {};
1821 dMinParamCounts = {};
1822 for oThreadedFunction in self.aoThreadedFuncs:
1823 oThreadedFunction.analyze();
1824 for oVariation in oThreadedFunction.aoVariations:
1825 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
1826 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
1827 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
1828 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
1829 print('debug: %s params: %4s raw, %4s min'
1830 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
1831 file = sys.stderr);
1832
1833 # Populate aidxFirstFunctions. This is ASSUMING that
1834 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
1835 iThreadedFunction = 0;
1836 oThreadedFunction = self.getThreadedFunctionByIndex(0);
1837 self.aidxFirstFunctions = [];
1838 for oParser in self.aoParsers:
1839 self.aidxFirstFunctions.append(iThreadedFunction);
1840
1841 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
1842 iThreadedFunction += 1;
1843 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
1844
1845 # Analyze the threaded functions and their variations for native recompilation.
1846 if fNativeRecompilerEnabled:
1847 ian.displayStatistics(self.aoThreadedFuncs, sHostArch);
1848
1849 # Gather arguments + variable statistics for the MC blocks.
1850 cMaxArgs = 0;
1851 cMaxVars = 0;
1852 cMaxVarsAndArgs = 0;
1853 cbMaxArgs = 0;
1854 cbMaxVars = 0;
1855 cbMaxVarsAndArgs = 0;
1856 for oThreadedFunction in self.aoThreadedFuncs:
1857 if oThreadedFunction.oMcBlock.cLocals >= 0:
1858 # Counts.
1859 assert oThreadedFunction.oMcBlock.cArgs >= 0;
1860 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
1861 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
1862 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
1863 if cMaxVarsAndArgs > 9:
1864 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
1865 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
1866 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
1867 # Calc stack allocation size:
1868 cbArgs = 0;
1869 for oArg in oThreadedFunction.oMcBlock.aoArgs:
1870 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
1871 cbVars = 0;
1872 for oVar in oThreadedFunction.oMcBlock.aoLocals:
1873 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
1874 cbMaxVars = max(cbMaxVars, cbVars);
1875 cbMaxArgs = max(cbMaxArgs, cbArgs);
1876 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
1877 if cbMaxVarsAndArgs >= 0xc0:
1878 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
1879 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
1880
1881 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
1882 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
1883
1884 return True;
1885
1886 #
1887 # Output
1888 #
1889
1890 def generateLicenseHeader(self):
1891 """
1892 Returns the lines for a license header.
1893 """
1894 return [
1895 '/*',
1896 ' * Autogenerated by $Id: IEMAllThrdPython.py 102365 2023-11-28 14:19:35Z vboxsync $ ',
1897 ' * Do not edit!',
1898 ' */',
1899 '',
1900 '/*',
1901 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
1902 ' *',
1903 ' * This file is part of VirtualBox base platform packages, as',
1904 ' * available from https://www.virtualbox.org.',
1905 ' *',
1906 ' * This program is free software; you can redistribute it and/or',
1907 ' * modify it under the terms of the GNU General Public License',
1908 ' * as published by the Free Software Foundation, in version 3 of the',
1909 ' * License.',
1910 ' *',
1911 ' * This program is distributed in the hope that it will be useful, but',
1912 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
1913 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
1914 ' * General Public License for more details.',
1915 ' *',
1916 ' * You should have received a copy of the GNU General Public License',
1917 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
1918 ' *',
1919 ' * The contents of this file may alternatively be used under the terms',
1920 ' * of the Common Development and Distribution License Version 1.0',
1921 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
1922 ' * in the VirtualBox distribution, in which case the provisions of the',
1923 ' * CDDL are applicable instead of those of the GPL.',
1924 ' *',
1925 ' * You may elect to license modified versions of this file under the',
1926 ' * terms and conditions of either the GPL or the CDDL or both.',
1927 ' *',
1928 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
1929 ' */',
1930 '',
1931 '',
1932 '',
1933 ];
1934
1935 ## List of built-in threaded functions with user argument counts and
1936    ## whether each has a native recompiler implementation.
1937 katBltIns = (
1938 ( 'DeferToCImpl0', 2, True ),
1939 ( 'CheckIrq', 0, True ),
1940 ( 'CheckMode', 1, True ),
1941 ( 'CheckHwInstrBps', 0, False ),
1942 ( 'CheckCsLim', 1, False ),
1943
1944 ( 'CheckCsLimAndOpcodes', 3, False ),
1945 ( 'CheckOpcodes', 3, False ),
1946 ( 'CheckOpcodesConsiderCsLim', 3, False ),
1947
1948 ( 'CheckCsLimAndPcAndOpcodes', 3, False ),
1949 ( 'CheckPcAndOpcodes', 3, False ),
1950 ( 'CheckPcAndOpcodesConsiderCsLim', 3, False ),
1951
1952 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, False ),
1953 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, False ),
1954 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, False ),
1955
1956 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, False ),
1957 ( 'CheckOpcodesLoadingTlb', 3, False ),
1958 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, False ),
1959
1960 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, False ),
1961 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, False ),
1962 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, False ),
1963
1964 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, False ),
1965 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, False ),
1966 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, False ),
1967 );
1968
1969 def generateThreadedFunctionsHeader(self, oOut):
1970 """
1971 Generates the threaded functions header file.
1972 Returns success indicator.
1973 """
1974
1975 asLines = self.generateLicenseHeader();
1976
1977 # Generate the threaded function table indexes.
1978 asLines += [
1979 'typedef enum IEMTHREADEDFUNCS',
1980 '{',
1981 ' kIemThreadedFunc_Invalid = 0,',
1982 '',
1983 ' /*',
1984 ' * Predefined',
1985 ' */',
1986 ];
1987 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
1988
1989 iThreadedFunction = 1 + len(self.katBltIns);
1990 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
1991 asLines += [
1992 '',
1993 ' /*',
1994 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
1995 ' */',
1996 ];
1997 for oThreadedFunction in self.aoThreadedFuncs:
1998 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
1999 if oVariation:
2000 iThreadedFunction += 1;
2001 oVariation.iEnumValue = iThreadedFunction;
2002 asLines.append(' ' + oVariation.getIndexName() + ',');
2003 asLines += [
2004 ' kIemThreadedFunc_End',
2005 '} IEMTHREADEDFUNCS;',
2006 '',
2007 ];
2008
2009 # Prototype the function table.
2010 asLines += [
2011 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2012 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2013 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2014 '#endif',
2015 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2016 ];
2017
2018 oOut.write('\n'.join(asLines));
2019 return True;
2020
2021 ksBitsToIntMask = {
2022 1: "UINT64_C(0x1)",
2023 2: "UINT64_C(0x3)",
2024 4: "UINT64_C(0xf)",
2025 8: "UINT64_C(0xff)",
2026 16: "UINT64_C(0xffff)",
2027 32: "UINT64_C(0xffffffff)",
2028 };
2029
2030 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2031 """
2032 Outputs code for unpacking parameters.
2033 This is shared by the threaded and native code generators.
2034 """
2035 aasVars = [];
2036 for aoRefs in oVariation.dParamRefs.values():
2037 oRef = aoRefs[0];
2038 if oRef.sType[0] != 'P':
2039 cBits = g_kdTypeInfo[oRef.sType][0];
2040 sType = g_kdTypeInfo[oRef.sType][2];
2041 else:
2042 cBits = 64;
2043 sType = oRef.sType;
2044
2045 sTypeDecl = sType + ' const';
2046
2047 if cBits == 64:
2048 assert oRef.offNewParam == 0;
2049 if sType == 'uint64_t':
2050 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2051 else:
2052 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2053 elif oRef.offNewParam == 0:
2054 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2055 else:
2056 sUnpack = '(%s)((%s >> %s) & %s);' \
2057 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2058
2059 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2060
2061 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2062 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2063 acchVars = [0, 0, 0, 0, 0];
2064 for asVar in aasVars:
2065 for iCol, sStr in enumerate(asVar):
2066 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2067 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2068 for asVar in sorted(aasVars):
2069 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2070 return True;
2071
2072 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2073 def generateThreadedFunctionsSource(self, oOut):
2074 """
2075 Generates the threaded functions source file.
2076 Returns success indicator.
2077 """
2078
2079 asLines = self.generateLicenseHeader();
2080 oOut.write('\n'.join(asLines));
2081
2082 #
2083 # Emit the function definitions.
2084 #
2085 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2086 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2087 oOut.write( '\n'
2088 + '\n'
2089 + '\n'
2090 + '\n'
2091 + '/*' + '*' * 128 + '\n'
2092 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2093 + '*' * 128 + '*/\n');
2094
2095 for oThreadedFunction in self.aoThreadedFuncs:
2096 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2097 if oVariation:
2098 oMcBlock = oThreadedFunction.oMcBlock;
2099
2100 # Function header
2101 oOut.write( '\n'
2102 + '\n'
2103 + '/**\n'
2104 + ' * #%u: %s at line %s offset %s in %s%s\n'
2105 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2106 os.path.split(oMcBlock.sSrcFile)[1],
2107 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2108 + ' */\n'
2109 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2110 + '{\n');
2111
2112 # Unpack parameters.
2113 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2114
2115 # RT_NOREF for unused parameters.
2116 if oVariation.cMinParams < g_kcThreadedParams:
2117 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
2118
2119 # Now for the actual statements.
2120 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2121
2122 oOut.write('}\n');
2123
2124
2125 #
2126 # Generate the output tables in parallel.
2127 #
2128 asFuncTable = [
2129 '/**',
2130 ' * Function pointer table.',
2131 ' */',
2132 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2133 '{',
2134 ' /*Invalid*/ NULL,',
2135 ];
2136 asNameTable = [
2137 '/**',
2138 ' * Function name table.',
2139 ' */',
2140 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2141 '{',
2142 ' "Invalid",',
2143 ];
2144 asArgCntTab = [
2145 '/**',
2146 ' * Argument count table.',
2147 ' */',
2148 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2149 '{',
2150 ' 0, /*Invalid*/',
2151 ];
2152 aasTables = (asFuncTable, asNameTable, asArgCntTab,);
2153
2154 for asTable in aasTables:
2155 asTable.extend((
2156 '',
2157 ' /*',
2158 ' * Predefined.',
2159 ' */',
2160 ));
2161 for sFuncNm, cArgs, _ in self.katBltIns:
2162 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2163 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2164 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2165
2166 iThreadedFunction = 1 + len(self.katBltIns);
2167 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2168 for asTable in aasTables:
2169 asTable.extend((
2170 '',
2171 ' /*',
2172 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
2173 ' */',
2174 ));
2175 for oThreadedFunction in self.aoThreadedFuncs:
2176 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2177 if oVariation:
2178 iThreadedFunction += 1;
2179 assert oVariation.iEnumValue == iThreadedFunction;
2180 sName = oVariation.getThreadedFunctionName();
2181 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
2182 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
2183 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
2184
2185 for asTable in aasTables:
2186 asTable.append('};');
2187
2188 #
2189 # Output the tables.
2190 #
2191 oOut.write( '\n'
2192 + '\n');
2193 oOut.write('\n'.join(asFuncTable));
2194 oOut.write( '\n'
2195 + '\n'
2196 + '\n'
2197 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
2198 oOut.write('\n'.join(asNameTable));
2199 oOut.write( '\n'
2200 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
2201 + '\n'
2202 + '\n');
2203 oOut.write('\n'.join(asArgCntTab));
2204 oOut.write('\n');
2205
2206 return True;
2207
2208 def generateNativeFunctionsHeader(self, oOut):
2209 """
2210 Generates the native recompiler functions header file.
2211 Returns success indicator.
2212 """
2213 if not self.oOptions.fNativeRecompilerEnabled:
2214 return True;
2215
2216 asLines = self.generateLicenseHeader();
2217
2218 # Prototype the function table.
2219 asLines += [
2220 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2221 '',
2222 ];
2223
2224 oOut.write('\n'.join(asLines));
2225 return True;
2226
2227 def generateNativeFunctionsSource(self, oOut):
2228 """
2229 Generates the native recompiler functions source file.
2230 Returns success indicator.
2231 """
2232 if not self.oOptions.fNativeRecompilerEnabled:
2233 return True;
2234
2235 #
2236 # The file header.
2237 #
2238 oOut.write('\n'.join(self.generateLicenseHeader()));
2239
2240 #
2241 # Emit the functions.
2242 #
2243 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2244 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2245 oOut.write( '\n'
2246 + '\n'
2247 + '\n'
2248 + '\n'
2249 + '/*' + '*' * 128 + '\n'
2250 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2251 + '*' * 128 + '*/\n');
2252
2253 for oThreadedFunction in self.aoThreadedFuncs:
2254 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2255 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2256 oMcBlock = oThreadedFunction.oMcBlock;
2257
2258 # Function header
2259 oOut.write( '\n'
2260 + '\n'
2261 + '/**\n'
2262 + ' * #%u: %s at line %s offset %s in %s%s\n'
2263 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2264 os.path.split(oMcBlock.sSrcFile)[1],
2265 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2266 + ' */\n'
2267 + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
2268 + '{\n');
2269
2270 # Unpack parameters.
2271 self.generateFunctionParameterUnpacking(oVariation, oOut,
2272 ('pCallEntry->auParams[0]',
2273 'pCallEntry->auParams[1]',
2274 'pCallEntry->auParams[2]',));
2275
2276 # Now for the actual statements.
2277 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2278
2279 oOut.write('}\n');
2280
2281 #
2282 # Output the function table.
2283 #
2284 oOut.write( '\n'
2285 + '\n'
2286 + '/*\n'
2287 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2288 + ' */\n'
2289 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
2290 + '{\n'
2291 + ' /*Invalid*/ NULL,'
2292 + '\n'
2293 + ' /*\n'
2294 + ' * Predefined.\n'
2295 + ' */\n'
2296 );
2297 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2298 if fHaveRecompFunc:
2299 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
2300 else:
2301 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
2302
2303 iThreadedFunction = 1 + len(self.katBltIns);
2304 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2305 oOut.write( ' /*\n'
2306 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2307 + ' */\n');
2308 for oThreadedFunction in self.aoThreadedFuncs:
2309 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2310 if oVariation:
2311 iThreadedFunction += 1;
2312 assert oVariation.iEnumValue == iThreadedFunction;
2313 sName = oVariation.getNativeFunctionName();
2314 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2315 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2316 else:
2317 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2318
2319 oOut.write( '};\n'
2320 + '\n');
2321 return True;
2322
2323
2324 def getThreadedFunctionByIndex(self, idx):
2325 """
2326 Returns a ThreadedFunction object for the given index. If the index is
2327 out of bounds, a dummy is returned.
2328 """
2329 if idx < len(self.aoThreadedFuncs):
2330 return self.aoThreadedFuncs[idx];
2331 return ThreadedFunction.dummyInstance();
2332
2333 def generateModifiedInput(self, oOut, idxFile):
2334 """
2335 Generates the combined modified input source/header file.
2336 Returns success indicator.
2337 """
2338 #
2339 # File header and assert assumptions.
2340 #
2341 oOut.write('\n'.join(self.generateLicenseHeader()));
2342 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
2343
2344 #
2345 # Iterate all parsers (input files) and output the ones related to the
2346 # file set given by idxFile.
2347 #
2348 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
2349 # Is this included in the file set?
2350 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
2351 fInclude = -1;
2352 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
2353 if sSrcBaseFile == aoInfo[0].lower():
2354 fInclude = aoInfo[2] in (-1, idxFile);
2355 break;
2356 if fInclude is not True:
2357 assert fInclude is False;
2358 continue;
2359
2360 # Output it.
2361 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
2362
2363 iThreadedFunction = self.aidxFirstFunctions[idxParser];
2364 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2365 iLine = 0;
2366 while iLine < len(oParser.asLines):
2367 sLine = oParser.asLines[iLine];
2368 iLine += 1; # iBeginLine and iEndLine are 1-based.
2369
2370 # Can we pass it thru?
2371 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
2372 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
2373 oOut.write(sLine);
2374 #
2375 # Single MC block. Just extract it and insert the replacement.
2376 #
2377 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
2378 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
2379 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
2380 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
2381 sModified = oThreadedFunction.generateInputCode().strip();
2382 oOut.write(sModified);
2383
2384 iLine = oThreadedFunction.oMcBlock.iEndLine;
2385 sLine = oParser.asLines[iLine - 1];
2386 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
2387 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
2388 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
2389 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
2390
2391 # Advance
2392 iThreadedFunction += 1;
2393 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2394 #
2395                # Macro expansion line that has sublines and may contain multiple MC blocks.
2396 #
2397 else:
2398 offLine = 0;
2399 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
2400 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
2401
2402 sModified = oThreadedFunction.generateInputCode().strip();
2403 assert ( sModified.startswith('IEM_MC_BEGIN')
2404 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
2405 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
2406 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
2407 ), 'sModified="%s"' % (sModified,);
2408 oOut.write(sModified);
2409
2410 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
2411
2412 # Advance
2413 iThreadedFunction += 1;
2414 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2415
2416 # Last line segment.
2417 if offLine < len(sLine):
2418 oOut.write(sLine[offLine : ]);
2419
2420 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
2421
2422 return True;
2423
2424 def generateModifiedInput1(self, oOut):
2425 """
2426 Generates the combined modified input source/header file, part 1.
2427 Returns success indicator.
2428 """
2429 return self.generateModifiedInput(oOut, 1);
2430
2431 def generateModifiedInput2(self, oOut):
2432 """
2433 Generates the combined modified input source/header file, part 2.
2434 Returns success indicator.
2435 """
2436 return self.generateModifiedInput(oOut, 2);
2437
2438 def generateModifiedInput3(self, oOut):
2439 """
2440 Generates the combined modified input source/header file, part 3.
2441 Returns success indicator.
2442 """
2443 return self.generateModifiedInput(oOut, 3);
2444
2445 def generateModifiedInput4(self, oOut):
2446 """
2447 Generates the combined modified input source/header file, part 4.
2448 Returns success indicator.
2449 """
2450 return self.generateModifiedInput(oOut, 4);
2451
2452
2453 #
2454 # Main
2455 #
2456
2457 def main(self, asArgs):
2458 """
2459 C-like main function.
2460 Returns exit code.
2461 """
2462
2463 #
2464 # Parse arguments
2465 #
2466 sScriptDir = os.path.dirname(__file__);
2467 oParser = argparse.ArgumentParser(add_help = False);
2468 oParser.add_argument('asInFiles',
2469 metavar = 'input.cpp.h',
2470 nargs = '*',
2471 default = [os.path.join(sScriptDir, aoInfo[0])
2472 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2473 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2474 oParser.add_argument('--host-arch',
2475 metavar = 'arch',
2476 dest = 'sHostArch',
2477 action = 'store',
2478 default = None,
2479 help = 'The host architecture.');
2480
2481 oParser.add_argument('--out-thrd-funcs-hdr',
2482 metavar = 'file-thrd-funcs.h',
2483 dest = 'sOutFileThrdFuncsHdr',
2484 action = 'store',
2485 default = '-',
2486 help = 'The output header file for the threaded functions.');
2487 oParser.add_argument('--out-thrd-funcs-cpp',
2488 metavar = 'file-thrd-funcs.cpp',
2489 dest = 'sOutFileThrdFuncsCpp',
2490 action = 'store',
2491 default = '-',
2492 help = 'The output C++ file for the threaded functions.');
2493 oParser.add_argument('--out-n8ve-funcs-hdr',
2494 metavar = 'file-n8tv-funcs.h',
2495 dest = 'sOutFileN8veFuncsHdr',
2496 action = 'store',
2497 default = '-',
2498 help = 'The output header file for the native recompiler functions.');
2499 oParser.add_argument('--out-n8ve-funcs-cpp',
2500 metavar = 'file-n8tv-funcs.cpp',
2501 dest = 'sOutFileN8veFuncsCpp',
2502 action = 'store',
2503 default = '-',
2504 help = 'The output C++ file for the native recompiler functions.');
2505 oParser.add_argument('--native',
2506 dest = 'fNativeRecompilerEnabled',
2507 action = 'store_true',
2508 default = False,
2509 help = 'Enables generating the files related to native recompilation.');
2510 oParser.add_argument('--out-mod-input1',
2511 metavar = 'file-instr.cpp.h',
2512 dest = 'sOutFileModInput1',
2513 action = 'store',
2514 default = '-',
2515 help = 'The output C++/header file for modified input instruction files part 1.');
2516 oParser.add_argument('--out-mod-input2',
2517 metavar = 'file-instr.cpp.h',
2518 dest = 'sOutFileModInput2',
2519 action = 'store',
2520 default = '-',
2521 help = 'The output C++/header file for modified input instruction files part 2.');
2522 oParser.add_argument('--out-mod-input3',
2523 metavar = 'file-instr.cpp.h',
2524 dest = 'sOutFileModInput3',
2525 action = 'store',
2526 default = '-',
2527 help = 'The output C++/header file for modified input instruction files part 3.');
2528 oParser.add_argument('--out-mod-input4',
2529 metavar = 'file-instr.cpp.h',
2530 dest = 'sOutFileModInput4',
2531 action = 'store',
2532 default = '-',
2533 help = 'The output C++/header file for modified input instruction files part 4.');
2534 oParser.add_argument('--help', '-h', '-?',
2535 action = 'help',
2536 help = 'Display help and exit.');
2537 oParser.add_argument('--version', '-V',
2538 action = 'version',
2539 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
2540 % (__version__.split()[1], iai.__version__.split()[1],),
2541 help = 'Displays the version/revision of the script and exit.');
2542 self.oOptions = oParser.parse_args(asArgs[1:]);
2543 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
2544
2545 #
2546 # Process the instructions specified in the IEM sources.
2547 #
2548 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
2549 #
2550 # Generate the output files.
2551 #
2552 aaoOutputFiles = (
2553 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
2554 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
2555 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
2556 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
2557 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
2558 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
2559 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
2560 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
2561 );
2562 fRc = True;
2563 for sOutFile, fnGenMethod in aaoOutputFiles:
2564 if sOutFile == '-':
2565 fRc = fnGenMethod(sys.stdout) and fRc;
2566 else:
2567 try:
2568 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
2569 except Exception as oXcpt:
2570                        print('error! Failed to open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
2571 return 1;
2572 fRc = fnGenMethod(oOut) and fRc;
2573 oOut.close();
2574 if fRc:
2575 return 0;
2576
2577 return 1;
2578
2579
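# Illustration only -- a hypothetical helper showing how the generator could be
# driven programmatically with the same options main() accepts on the command
# line.  The output file names below are made up; '--native' would be added when
# the native recompiler files are wanted as well.
def _exampleStandaloneInvocation():
    asArgs = [ 'IEMAllThrdPython.py',
               '--host-arch',          'amd64',
               '--out-thrd-funcs-hdr', 'IEMThreadedFunctions.h',
               '--out-thrd-funcs-cpp', 'IEMThreadedFunctions.cpp',
               '--out-mod-input1',     'IEMAllInstThreaded1.cpp.h',
               '--out-mod-input2',     'IEMAllInstThreaded2.cpp.h',
               '--out-mod-input3',     'IEMAllInstThreaded3.cpp.h',
               '--out-mod-input4',     'IEMAllInstThreaded4.cpp.h', ];
    return IEMThreadedGenerator().main(asArgs);
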
2580if __name__ == '__main__':
2581 sys.exit(IEMThreadedGenerator().main(sys.argv));
2582