VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 102073

Last change on this file since 102073 was 102072, checked in by vboxsync, 15 months ago

VMM/IEM: A little python cleanup. bugref:10371

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 128.5 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 102072 2023-11-12 23:12:13Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 102072 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
if sys.version_info[0] >= 3:
    # Python 3 dropped the 'long' type; alias it to 'int' so code written for
    # the Python 2 era keeps working.
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;
55
## Basic parameter types that may be passed to threaded functions.
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type      )
    'int8_t':       (    8,    True, 'int8_t',   ),
    'int16_t':      (   16,    True, 'int16_t',  ),
    'int32_t':      (   32,    True, 'int32_t',  ),
    'int64_t':      (   64,    True, 'int64_t',  ),
    'uint4_t':      (    4,   False, 'uint8_t',  ),
    'uint8_t':      (    8,   False, 'uint8_t',  ),
    'uint16_t':     (   16,   False, 'uint16_t', ),
    'uint32_t':     (   32,   False, 'uint32_t', ),
    'uint64_t':     (   64,   False, 'uint64_t', ),
    'uintptr_t':    (   64,   False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
    'bool':         (    1,   False, 'bool',     ),
    'IEMMODE':      (    2,   False, 'IEMMODE',  ),
};

## Extended type table - only for getTypeBitCount and variables.
g_kdTypeInfo2 = {
    'RTFLOAT32U':       (        32, False, 'RTFLOAT32U',      ),
    'RTFLOAT64U':       (        64, False, 'RTFLOAT64U',      ),
    'RTUINT64U':        (        64, False, 'RTUINT64U',       ),
    'RTGCPTR':          (        64, False, 'RTGCPTR',         ),
    'RTPBCD80U':        (        80, False, 'RTPBCD80U',       ),
    'RTFLOAT80U':       (        80, False, 'RTFLOAT80U',      ),
    'IEMFPURESULT':     (     80+16, False, 'IEMFPURESULT',    ),
    'IEMFPURESULTTWO':  (  80+16+80, False, 'IEMFPURESULTTWO', ),
    'RTUINT128U':       (       128, False, 'RTUINT128U',      ),
    'X86XMMREG':        (       128, False, 'X86XMMREG',       ),
    'IEMSSERESULT':     (    128+32, False, 'IEMSSERESULT',    ),
    'IEMMEDIAF2XMMSRC': (       256, False, 'IEMMEDIAF2XMMSRC',),
    'RTUINT256U':       (       256, False, 'RTUINT256U',      ),
    'IEMPCMPISTRXSRC':  (       256, False, 'IEMPCMPISTRXSRC', ),
    'IEMPCMPESTRXSRC':  (       384, False, 'IEMPCMPESTRXSRC', ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);

def getTypeBitCount(sType):
    """
    Translates a type to its size in bits.

    Types containing '*' or starting with 'P' are taken to be pointers and
    assumed to be 64-bit host pointers.  Unknown types are complained about
    and treated as 64-bit.
    """
    oInfo = g_kdTypeInfo2.get(sType);   # single lookup instead of 'in' + index.
    if oInfo is not None:
        return oInfo[0];
    if '*' in sType or sType[0] == 'P':
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    # Report on stderr so the complaint cannot end up in generated output.
    print('error: Unknown type: %s' % (sType,), file = sys.stderr);
    return 64;
103
## Maps IEMCPU field names (referenced as pVCpu->iem.s.xxx) to the type used
## when passing the field as a threaded function parameter.  Fields mapped to
## None are decoder-internal state and illegal to reference.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte':     ( None, ),
    'cbInstrBuf':           ( None, ),
    'pbInstrBuf':           ( None, ),
    'uInstrBufPc':          ( None, ),
    'cbInstrBufTotal':      ( None, ),
    'offCurInstrStart':     ( None, ),
    'cbOpcode':             ( None, ),
    'offOpcode':            ( None, ),
    'offModRm':             ( None, ),
    # Okay ones.
    'fPrefixes':            ( 'uint32_t', ),
    'uRexReg':              ( 'uint8_t', ),
    'uRexB':                ( 'uint8_t', ),
    'uRexIndex':            ( 'uint8_t', ),
    'iEffSeg':              ( 'uint8_t', ),
    'enmEffOpSize':         ( 'IEMMODE', ),
    'enmDefAddrMode':       ( 'IEMMODE', ),
    'enmEffAddrMode':       ( 'IEMMODE', ),
    'enmDefOpSize':         ( 'IEMMODE', ),
    'idxPrefix':            ( 'uint8_t', ),
    'uVex3rdReg':           ( 'uint8_t', ),
    'uVexLength':           ( 'uint8_t', ),
    'fEvexStuff':           ( 'uint8_t', ),
    'uFpuOpcode':           ( 'uint16_t', ),
};
131
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef    = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        self.sStdRef    = sStdRef if sStdRef else ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef = sStdRef is not None;
        ## The type (typically derived).
        self.sType      = sType;
        ## The statement making the reference.
        self.oStmt      = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam     = iParam;
        ## The offset in the parameter of the reference.
        self.offParam   = offParam;

        ## The variable name in the threaded function.
        self.sNewName    = 'x';
        ## The threaded function parameter this reference is packed into.
        self.iNewParam   = 99;
        ## The bit offset in iNewParam.
        self.offNewParam = 1024;
160
161class ThreadedFunctionVariation(object):
162 """ Threaded function variation. """
163
    ## @name Variations.
    ## These variations will match translation block selection/distinctions as well.
    ## @{
    ksVariation_Default     = '';               ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
    ksVariation_16          = '_16';            ##< 16-bit mode code (386+).
    ksVariation_16f         = '_16f';           ##< 16-bit mode code (386+), check+clear eflags.
    ksVariation_16_Addr32   = '_16_Addr32';     ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
    ksVariation_16f_Addr32  = '_16f_Addr32';    ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
    ksVariation_16_Pre386   = '_16_Pre386';     ##< 16-bit mode code, pre-386 CPU target.
    ksVariation_16f_Pre386  = '_16f_Pre386';    ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
    ksVariation_32          = '_32';            ##< 32-bit mode code (386+).
    ksVariation_32f         = '_32f';           ##< 32-bit mode code (386+), check+clear eflags.
    ksVariation_32_Flat     = '_32_Flat';       ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
    ksVariation_32f_Flat    = '_32f_Flat';      ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
    ksVariation_32_Addr16   = '_32_Addr16';     ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    ksVariation_32f_Addr16  = '_32f_Addr16';    ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64          = '_64';            ##< 64-bit mode code.
    ksVariation_64f         = '_64f';           ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_FsGs     = '_64_FsGs';       ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs    = '_64f_FsGs';      ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32   = '_64_Addr32';     ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32  = '_64f_Addr32';    ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
    ## All the variations.
    kasVariations           = (
        ksVariation_Default,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for MC blocks without any effective address calculation.
    kasVariationsWithoutAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Ditto, but excluding the pre-386 (286) target.
    kasVariationsWithoutAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Ditto, but excluding both the pre-386 target and 64-bit mode.
    kasVariationsWithoutAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
    );
    ## Ditto, but excluding 64-bit mode only.
    kasVariationsWithoutAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
    );
    ## Ditto, but 64-bit mode only.
    kasVariationsWithoutAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variations for MC blocks with effective address calculation.
    kasVariationsWithAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Ditto, but excluding the pre-386 (286) target.
    kasVariationsWithAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Ditto, but excluding both the pre-386 target and 64-bit mode.
    kasVariationsWithAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Ditto, but excluding 64-bit mode only.
    kasVariationsWithAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Ditto, but 64-bit mode only.
    kasVariationsWithAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for the pre-386 target only.
    kasVariationsOnlyPre386 = (
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
    );
    ## The order the variations are emitted in (most frequent modes first).
    kasVariationsEmitOrder = (
        ksVariation_Default,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Human readable descriptions of each variation.
    kdVariationNames = {
        ksVariation_Default:    'defer-to-cimpl',
        ksVariation_16:         '16-bit',
        ksVariation_16f:        '16-bit w/ eflag checking and clearing',
        ksVariation_16_Addr32:  '16-bit w/ address prefix (Addr32)',
        ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
        ksVariation_16_Pre386:  '16-bit on pre-386 CPU',
        ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
        ksVariation_32:         '32-bit',
        ksVariation_32f:        '32-bit w/ eflag checking and clearing',
        ksVariation_32_Flat:    '32-bit flat and wide open CS, SS, DS and ES',
        ksVariation_32f_Flat:   '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
        ksVariation_32_Addr16:  '32-bit w/ address prefix (Addr16)',
        ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
        ksVariation_64:         '64-bit',
        ksVariation_64f:        '64-bit w/ eflag checking and clearing',
        ksVariation_64_FsGs:    '64-bit with memory accessed via FS or GS',
        ksVariation_64f_FsGs:   '64-bit with memory accessed via FS or GS and eflag checking and clearing',
        ksVariation_64_Addr32:  '64-bit w/ address prefix (Addr32)',
        ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
    };
    ## The variations that check and clear EFLAGS (the '_xxf' ones).
    kdVariationsWithEflagsCheckingAndClearing = {
        ksVariation_16f: True,
        ksVariation_16f_Addr32: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32f: True,
        ksVariation_32f_Flat: True,
        ksVariation_32f_Addr16: True,
        ksVariation_64f: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## The variations with flat memory addressing.
    kdVariationsWithFlatAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
    };
    ## The variations using 16-bit effective address calculation.
    kdVariationsWithFlatAddr16 = {
        ksVariation_16: True,
        ksVariation_16f: True,
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32_Addr16: True,
        ksVariation_32f_Addr16: True,
    };
    ## The variations using 32-bit effective address calculation (excluding 64-bit mode).
    kdVariationsWithFlatAddr32No64 = {
        ksVariation_16_Addr32: True,
        ksVariation_16f_Addr32: True,
        ksVariation_32: True,
        ksVariation_32f: True,
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
    };
    ## @}
395
    ## IEM_CIMPL_F_XXX flags that we know.
    ## The value indicates whether it terminates the TB or not. The goal is to
    ## improve the recompiler so all but END_TB will be False.
    ##
    ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
    kdCImplFlags = {
        'IEM_CIMPL_F_MODE':                         False,
        'IEM_CIMPL_F_BRANCH_DIRECT':                False,
        'IEM_CIMPL_F_BRANCH_INDIRECT':              False,
        'IEM_CIMPL_F_BRANCH_RELATIVE':              False,
        'IEM_CIMPL_F_BRANCH_FAR':                   True,
        'IEM_CIMPL_F_BRANCH_CONDITIONAL':           False,
        # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
        'IEM_CIMPL_F_BRANCH_STACK':                 False,
        'IEM_CIMPL_F_BRANCH_STACK_FAR':             False,
        'IEM_CIMPL_F_RFLAGS':                       False,
        'IEM_CIMPL_F_INHIBIT_SHADOW':               False,
        'IEM_CIMPL_F_CHECK_IRQ_AFTER':              False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE':             False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER':   False,  # (ignore)
        'IEM_CIMPL_F_STATUS_FLAGS':                 False,
        'IEM_CIMPL_F_VMEXIT':                       False,
        'IEM_CIMPL_F_FPU':                          False,
        'IEM_CIMPL_F_REP':                          False,
        'IEM_CIMPL_F_IO':                           False,
        'IEM_CIMPL_F_END_TB':                       True,
        'IEM_CIMPL_F_XCPT':                         True,
        'IEM_CIMPL_F_CALLS_CIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE':     False,
    };
427
    def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
        """
        Instantiates the given variation of a threaded function.
        """
        ## The ThreadedFunction this is a variation of.
        self.oParent = oThreadedFunction # type: ThreadedFunction
        ##< ksVariation_Xxxx.
        self.sVariation = sVariation

        ## Threaded function parameter references.
        self.aoParamRefs = [] # type: List[ThreadedParamRef]
        ## Unique parameter references.
        self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
        ## Minimum number of parameters to the threaded function.
        self.cMinParams = 0;

        ## List/tree of statements for the threaded function.
        self.aoStmtsForThreadedFunction = [] # type: List[McStmt]

        ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
        self.iEnumValue = -1;

        ## Native recompilation details for this variation.
        self.oNativeRecomp = None;
448
449 def getIndexName(self):
450 sName = self.oParent.oMcBlock.sFunction;
451 if sName.startswith('iemOp_'):
452 sName = sName[len('iemOp_'):];
453 if self.oParent.oMcBlock.iInFunction == 0:
454 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
455 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
456
457 def getThreadedFunctionName(self):
458 sName = self.oParent.oMcBlock.sFunction;
459 if sName.startswith('iemOp_'):
460 sName = sName[len('iemOp_'):];
461 if self.oParent.oMcBlock.iInFunction == 0:
462 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
463 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
464
465 def getNativeFunctionName(self):
466 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
467
468 def getShortName(self):
469 sName = self.oParent.oMcBlock.sFunction;
470 if sName.startswith('iemOp_'):
471 sName = sName[len('iemOp_'):];
472 if self.oParent.oMcBlock.iInFunction == 0:
473 return '%s%s' % ( sName, self.sVariation, );
474 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
475
476 def isWithFlagsCheckingAndClearingVariation(self):
477 """
478 Checks if this is a variation that checks and clears EFLAGS.
479 """
480 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
481
482 #
483 # Analysis and code morphing.
484 #
485
    def raiseProblem(self, sMessage):
        """ Raises a problem.  Delegates to the parent ThreadedFunction; judging
            by the 'shut up pylint' returns in callers, it does not return normally. """
        self.oParent.raiseProblem(sMessage);

    def warning(self, sMessage):
        """ Emits a warning.  Delegates to the parent ThreadedFunction. """
        self.oParent.warning(sMessage);
493
    def analyzeReferenceToType(self, sRef):
        """
        Translates a variable or structure reference to a type.
        Returns type name.
        Raises exception (via raiseProblem) if unable to figure it out.

        Relies on the hungarian prefix conventions used by the decoder sources:
        u8/u16/u32/u64, b, f, i8/../i64, p, GCPtr, enm, off32, cbFrame, cShift.
        NOTE: the order of the prefix checks matters (e.g. 'uReg' and 'iReg').
        """
        ch0 = sRef[0];
        if ch0 == 'u':
            if sRef.startswith('u32'):
                return 'uint32_t';
            if sRef.startswith('u8') or sRef == 'uReg':
                return 'uint8_t';
            if sRef.startswith('u64'):
                return 'uint64_t';
            if sRef.startswith('u16'):
                return 'uint16_t';
        elif ch0 == 'b':
            return 'uint8_t';
        elif ch0 == 'f':
            return 'bool';
        elif ch0 == 'i':
            if sRef.startswith('i8'):
                return 'int8_t';
            if sRef.startswith('i16'):
                return 'int16_t';
            if sRef.startswith('i32'):
                return 'int32_t';
            if sRef.startswith('i64'):
                return 'int64_t';
            # Register indexes are passed as bytes.
            if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
                return 'uint8_t';
        elif ch0 == 'p':
            # Plain pointer variable (no '->' member access): host pointer.
            if sRef.find('-') < 0:
                return 'uintptr_t';
            # IEMCPU field reference - look up the type in g_kdIemFieldToType
            # (falls through to raiseProblem for illegal/unknown fields).
            if sRef.startswith('pVCpu->iem.s.'):
                sField = sRef[len('pVCpu->iem.s.') : ];
                if sField in g_kdIemFieldToType:
                    if g_kdIemFieldToType[sField][0]:
                        return g_kdIemFieldToType[sField][0];
        elif ch0 == 'G' and sRef.startswith('GCPtr'):
            return 'uint64_t';
        elif ch0 == 'e':
            if sRef == 'enmEffOpSize':
                return 'IEMMODE';
        elif ch0 == 'o':
            if sRef.startswith('off32'):
                return 'uint32_t';
        elif sRef == 'cbFrame':  # enter
            return 'uint16_t';
        elif sRef == 'cShift': ## @todo risky
            return 'uint8_t';

        self.raiseProblem('Unknown reference: %s' % (sRef,));
        return None; # Shut up pylint 2.16.2.
548
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        Handles two cases: a plain function pointer variable, resolved via the
        decoder function's definition-macro argument list or a declaration line
        in its body; and 'pImpl->pfnXxx' style member references, resolved from
        the pImpl type.  Raises (via raiseProblem) on failure.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first.
            # The type argument precedes the name argument, hence iArg - 1 on a match.
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable declaration.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # Convert 'const IEMOPXXX *' into the 'PCIEMOPXXX' typedef form.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx:
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            # The member name ends with a 'U<bits>' suffix; carry it over to the
            # function pointer typedef name.
            offBits   = sMember.rfind('U') + 1;
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
599
600 def analyze8BitGRegStmt(self, oStmt):
601 """
602 Gets the 8-bit general purpose register access details of the given statement.
603 ASSUMES the statement is one accessing an 8-bit GREG.
604 """
605 idxReg = 0;
606 if ( oStmt.sName.find('_FETCH_') > 0
607 or oStmt.sName.find('_REF_') > 0
608 or oStmt.sName.find('_TO_LOCAL') > 0):
609 idxReg = 1;
610
611 sRegRef = oStmt.asParams[idxReg];
612 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
613 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
614 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
615 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
616 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
617 else:
618 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
619
620 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
621 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
622 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
623 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
624 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
625 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
626 else:
627 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
628 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
629 sStdRef = 'bOther8Ex';
630
631 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
632 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
633 return (idxReg, sOrgExpr, sStdRef);
634
635
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ##
    ## Each entry is (integer, FLAT MC name).  The integer is presumably the
    ## index of the segment-register parameter dropped when going flat (1 for
    ## fetches, 0 for stores, 2/3 for maps and combined MCs) - confirm against
    ## the statement morphing code before relying on it.
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                (  1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':              (  1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':              (  1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':               (  1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':          (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':               (  1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':               (  1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':          (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':               (  1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':               (  1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':          (  1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':    (  1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':               (  1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':               (  1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':               (  1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':               (  1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':               (  1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':              (  1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':    (  1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':               (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':         (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':     (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':           (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':           (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':              (  1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':    (  1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':               (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':         (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':     (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':         (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':        (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':             (  2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM':     (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':           (  3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':           (  3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
            (  2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
            (  2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8':                (  0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':               (  0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':               (  0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':               (  0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':          (  0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':         (  0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':         (  0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':         (  0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':              (  0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':    (  0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':              (  0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':    (  0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP':                     (  2, 'IEM_MC_MEM_FLAT_MAP' ),
        'IEM_MC_MEM_MAP_U8_RW':               (  2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':               (  2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':               (  2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_RW':              (  2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':              (  2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':              (  2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_RW':              (  2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':              (  2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':              (  2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_RW':              (  2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':              (  2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':              (  2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_EX':                  (  3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
721
    ## Maps stack push/pop MCs to their flat replacements.
    ## Each entry holds two MC names; index 0 appears to be used for flat
    ## 32-bit and index 1 for 64-bit mode (note that some slots keep the
    ## original MC name) - confirm against the statement morphing code.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':                    ( 'IEM_MC_FLAT32_PUSH_U16',      'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':                    ( 'IEM_MC_FLAT32_PUSH_U32',      'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':                    ( 'IEM_MC_PUSH_U64',             'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':               ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_U16':                     ( 'IEM_MC_FLAT32_POP_U16',       'IEM_MC_FLAT64_POP_U16', ),
        'IEM_MC_POP_U32':                     ( 'IEM_MC_FLAT32_POP_U32',       'IEM_MC_POP_U32', ),
        'IEM_MC_POP_U64':                     ( 'IEM_MC_POP_U64',              'IEM_MC_FLAT64_POP_U64', ),
    };
731
    ## Maps each variation to the threaded MC that replaces
    ## IEM_MC_CALC_RM_EFF_ADDR (see analyzeMorphStmtForThreaded).
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
752
    def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Rewrites decode-dependent parameter expressions to the packed threaded
        parameter names (sNewName) collected by analyzeFindThreadedParamRefs,
        and morphs the variation-sensitive MC statement names into their
        *_THREADED / *_FLAT / *_PCxx counterparts.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that are purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so the recorded offParam offsets stay valid while we splice in the
                # (differently sized) new names.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    # Advance iParamRef past all references belonging to this statement.
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            # Sanity: the original reference text must still be at the recorded offset
                            # (unless this is a custom/synthesized reference).
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        # 16-bit addressing only needs bRm and the 16-bit displacement.
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            # 64-bit form also takes the extended ModRM byte and the instruction length.
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):
                    oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    # S8 jumps also need the effective operand size, except on pre-386 where it is fixed.
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
                        and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in (self.ksVariation_64f, self.ksVariation_64f_FsGs, self.ksVariation_64f_Addr32):
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation == self.ksVariation_16_Pre386:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation == self.ksVariation_16f_Pre386:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    # The threaded forms take cbInstr as an extra leading parameter, shifting
                    # the function/parameter indexes by one.
                    oNewStmt.idxFn += 1;
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    # kdMemMcToFlatInfo maps MC name -> (index of the iEffSeg parameter or -1, flat MC name).
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    # Index 0 picks the 32-bit flat form, index 1 the 64-bit one.
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in (self.ksVariation_64,
                                                                                                         self.ksVariation_64f,))];


                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);

        return (aoThreadedStmts, iParamRef);
886
887
888 def analyzeConsolidateThreadedParamRefs(self):
889 """
890 Consolidate threaded function parameter references into a dictionary
891 with lists of the references to each variable/field.
892 """
893 # Gather unique parameters.
894 self.dParamRefs = {};
895 for oRef in self.aoParamRefs:
896 if oRef.sStdRef not in self.dParamRefs:
897 self.dParamRefs[oRef.sStdRef] = [oRef,];
898 else:
899 self.dParamRefs[oRef.sStdRef].append(oRef);
900
901 # Generate names for them for use in the threaded function.
902 dParamNames = {};
903 for sName, aoRefs in self.dParamRefs.items():
904 # Morph the reference expression into a name.
905 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
906 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
907 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
908 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
909 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
910 elif sName.find('.') >= 0 or sName.find('->') >= 0:
911 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
912 else:
913 sName += 'P';
914
915 # Ensure it's unique.
916 if sName in dParamNames:
917 for i in range(10):
918 if sName + str(i) not in dParamNames:
919 sName += str(i);
920 break;
921 dParamNames[sName] = True;
922
923 # Update all the references.
924 for oRef in aoRefs:
925 oRef.sNewName = sName;
926
927 # Organize them by size too for the purpose of optimize them.
928 dBySize = {} # type: Dict[str, str]
929 for sStdRef, aoRefs in self.dParamRefs.items():
930 if aoRefs[0].sType[0] != 'P':
931 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
932 assert(cBits <= 64);
933 else:
934 cBits = 64;
935
936 if cBits not in dBySize:
937 dBySize[cBits] = [sStdRef,]
938 else:
939 dBySize[cBits].append(sStdRef);
940
941 # Pack the parameters as best as we can, starting with the largest ones
942 # and ASSUMING a 64-bit parameter size.
943 self.cMinParams = 0;
944 offNewParam = 0;
945 for cBits in sorted(dBySize.keys(), reverse = True):
946 for sStdRef in dBySize[cBits]:
947 if offNewParam == 0 or offNewParam + cBits > 64:
948 self.cMinParams += 1;
949 offNewParam = cBits;
950 else:
951 offNewParam += cBits;
952 assert(offNewParam <= 64);
953
954 for oRef in self.dParamRefs[sStdRef]:
955 oRef.iNewParam = self.cMinParams - 1;
956 oRef.offNewParam = offNewParam - cBits;
957
958 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
959 if self.cMinParams >= 4:
960 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
961 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
962
963 return True;
964
965 ksHexDigits = '0123456789abcdefABCDEF';
966
    def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
        """
        Scans the statements for things that have to be passed on to the threaded
        function (populates self.aoParamRefs).

        Anything in a statement parameter that depends on decoder state -
        local variables, IEMCPU decoder fields, IEM_GET_MODRM_* style macro
        invocations - gets a ThreadedParamRef entry so it can later be packed
        into the generic uint64_t parameters of the threaded function.  The
        traversal order here must match analyzeMorphStmtForThreaded exactly.
        """
        for oStmt in aoStmts:
            # Some statements we can skip altogether.
            if isinstance(oStmt, iai.McCppPreProc):
                continue;
            if oStmt.isCppStmt() and oStmt.fDecode:
                continue;
            if oStmt.sName in ('IEM_MC_BEGIN',):
                continue;

            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sValue is None:
                    continue;
                # Only the value expression of a variable declaration can hold decode
                # references; skip the type (0) and name (1) operands.  Param 3 is
                # presumably the iArg operand of IEM_MC_ARG_CONST - TODO confirm.
                aiSkipParams = { 0: True, 1: True, 3: True };
            else:
                aiSkipParams = {};

            # Several statements have implicit parameters and some have different parameters.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                               'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
                               'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
                               'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
                               'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
                # These all take the instruction length as an implicit (4-bit) parameter.
                self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));

            # S8 jumps also need the effective operand size, except on pre-386 variations.
            if (    oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
                and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
                self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));

            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                # This is being pretty presumptive about bRm always being the RM byte...
                assert len(oStmt.asParams) == 3;
                assert oStmt.asParams[1] == 'bRm';

                # The set of implicit parameters depends on the addressing form; this must
                # mirror the parameter substitution in analyzeMorphStmtForThreaded.
                if self.sVariation in self.kdVariationsWithFlatAddr16:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
                                                             'uint16_t', oStmt, sStdRef = 'u16Disp'));
                elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
                    assert self.sVariation in self.kasVariationsWithAddressOnly64;
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
                                                             'uint4_t', oStmt, sStdRef = 'cbInstr'));
                    aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.

            # 8-bit register accesses needs to have their index argument reworked to take REX into account.
            if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
                (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in self.kdVariationsWithFlatAddress
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
            # function pointer or function table pointer for it to work.
            if isinstance(oStmt, iai.McStmtCall):
                if oStmt.sFn[0] == 'p':
                    self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
                elif (    oStmt.sFn[0] != 'i'
                      and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
                      and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
                    self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
                aiSkipParams[oStmt.idxFn] = True;

                # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    assert oStmt.idxFn == 2;
                    aiSkipParams[0] = True;


            # Check all the parameters for bogus references.
            for iParam, sParam in enumerate(oStmt.asParams):
                if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
                    # The parameter may contain a C expression, so we have to try
                    # extract the relevant bits, i.e. variables and fields while
                    # ignoring operators and parentheses.
                    offParam = 0;
                    while offParam < len(sParam):
                        # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
                        ch = sParam[offParam];
                        if ch.isalpha() or ch == '_':
                            offStart = offParam;
                            offParam += 1;
                            while offParam < len(sParam):
                                ch = sParam[offParam];
                                if not ch.isalnum() and ch != '_' and ch != '.':
                                    if ch != '-' or sParam[offParam + 1] != '>':
                                        # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
                                        if (    ch == '('
                                            and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
                                            offParam += len('(pVM)->') - 1;
                                        else:
                                            break;
                                    offParam += 1;      # Skips the '>' of '->' (or the ')' of the CTX_SUFF hack).
                                offParam += 1;
                            sRef = sParam[offStart : offParam];

                            # For register references, we pass the full register indexes instead as macros
                            # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
                            # threaded function will be more efficient if we just pass the register index
                            # as a 4-bit param.
                            if (   sRef.startswith('IEM_GET_MODRM')
                                or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;
                                # The whole macro invocation (incl. arguments) becomes the reference text.
                                self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
                                                                         oStmt, iParam, offStart));

                            # We can skip known variables.
                            elif sRef in self.oParent.dVariables:
                                pass;

                            # Skip certain macro invocations.
                            elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
                                          'IEM_GET_GUEST_CPU_FEATURES',
                                          'IEM_IS_GUEST_CPU_AMD',
                                          'IEM_IS_16BIT_CODE',
                                          'IEM_IS_32BIT_CODE',
                                          'IEM_IS_64BIT_CODE',
                                          ):
                                # Skip the macro's parenthesized argument list.
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;

                                # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
                                if sRef not in ('IEM_IS_GUEST_CPU_AMD',
                                                'IEM_IS_16BIT_CODE',
                                                'IEM_IS_32BIT_CODE',
                                                'IEM_IS_64BIT_CODE',
                                                ):
                                    offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                    if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
                                        offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
                                        while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
                                            offParam += 1;

                            # Skip constants, globals, types (casts), sizeof and macros.
                            elif (   sRef.startswith('IEM_OP_PRF_')
                                  or sRef.startswith('IEM_ACCESS_')
                                  or sRef.startswith('IEMINT_')
                                  or sRef.startswith('X86_GREG_')
                                  or sRef.startswith('X86_SREG_')
                                  or sRef.startswith('X86_EFL_')
                                  or sRef.startswith('X86_FSW_')
                                  or sRef.startswith('X86_FCW_')
                                  or sRef.startswith('X86_XCPT_')
                                  or sRef.startswith('IEMMODE_')
                                  or sRef.startswith('IEM_F_')
                                  or sRef.startswith('IEM_CIMPL_F_')
                                  or sRef.startswith('g_')
                                  or sRef.startswith('iemAImpl_')
                                  or sRef.startswith('kIemNativeGstReg_')
                                  or sRef in ( 'int8_t',    'int16_t',    'int32_t',    'int64_t',
                                               'INT8_C',    'INT16_C',    'INT32_C',    'INT64_C',
                                               'UINT8_C',   'UINT16_C',   'UINT32_C',   'UINT64_C',
                                               'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
                                               'INT8_MAX',  'INT16_MAX',  'INT32_MAX',  'INT64_MAX',
                                               'INT8_MIN',  'INT16_MIN',  'INT32_MIN',  'INT64_MIN',
                                               'sizeof',    'NOREF',      'RT_NOREF',   'IEMMODE_64BIT',
                                               'RT_BIT_32', 'RT_BIT_64',  'true',       'false',
                                               'NIL_RTGCPTR',) ):
                                pass;

                            # Skip certain macro invocations.
                            # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
                            elif (   (    '.' not in sRef
                                      and '-' not in sRef
                                      and sRef not in ('pVCpu', ) )
                                  or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
                                self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
                                                                         oStmt, iParam, offStart));
                        # Number.
                        elif ch.isdigit():
                            if (    ch == '0'
                                and offParam + 2 <= len(sParam)
                                and sParam[offParam + 1] in 'xX'
                                and sParam[offParam + 2] in self.ksHexDigits ):
                                # Hex literal: skip the 0x prefix and all hex digits.
                                offParam += 2;
                                while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
                                    offParam += 1;
                            else:
                                while offParam < len(sParam) and sParam[offParam].isdigit():
                                    offParam += 1;
                        # Comment?
                        elif (    ch == '/'
                              and offParam + 4 <= len(sParam)
                              and sParam[offParam + 1] == '*'):
                            offParam += 2;
                            offNext = sParam.find('*/', offParam);
                            if offNext < offParam:
                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
                            offParam = offNext + 2;
                        # Whatever else.
                        else:
                            offParam += 1;

            # Traverse the branches of conditionals.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
                self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
        return True;
1196
1197 def analyzeVariation(self, aoStmts):
1198 """
1199 2nd part of the analysis, done on each variation.
1200
1201 The variations may differ in parameter requirements and will end up with
1202 slightly different MC sequences. Thus this is done on each individually.
1203
1204 Returns dummy True - raises exception on trouble.
1205 """
1206 # Now scan the code for variables and field references that needs to
1207 # be passed to the threaded function because they are related to the
1208 # instruction decoding.
1209 self.analyzeFindThreadedParamRefs(aoStmts);
1210 self.analyzeConsolidateThreadedParamRefs();
1211
1212 # Morph the statement stream for the block into what we'll be using in the threaded function.
1213 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1214 if iParamRef != len(self.aoParamRefs):
1215 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1216
1217 return True;
1218
1219 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1220 """
1221 Produces generic C++ statments that emits a call to the thread function
1222 variation and any subsequent checks that may be necessary after that.
1223
1224 The sCallVarNm is for emitting
1225 """
1226 aoStmts = [
1227 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1228 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1229 cchIndent = cchIndent), # Scope and a hook for various stuff.
1230 ];
1231
1232 # The call to the threaded function.
1233 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1234 for iParam in range(self.cMinParams):
1235 asFrags = [];
1236 for aoRefs in self.dParamRefs.values():
1237 oRef = aoRefs[0];
1238 if oRef.iNewParam == iParam:
1239 sCast = '(uint64_t)'
1240 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
1241 sCast = '(uint64_t)(u' + oRef.sType + ')';
1242 if oRef.offNewParam == 0:
1243 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1244 else:
1245 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1246 assert asFrags;
1247 asCallArgs.append(' | '.join(asFrags));
1248
1249 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1250
1251 # For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1252 # mask and maybe emit additional checks.
1253 if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1254 or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1255 or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1256 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1257 cchIndent = cchIndent));
1258
1259 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1260 if not sCImplFlags:
1261 sCImplFlags = '0'
1262 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1263
1264 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1265 # indicates we should do so.
1266 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1267 asEndTbFlags = [];
1268 asTbBranchedFlags = [];
1269 for sFlag in self.oParent.dsCImplFlags:
1270 if self.kdCImplFlags[sFlag] is True:
1271 asEndTbFlags.append(sFlag);
1272 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1273 asTbBranchedFlags.append(sFlag);
1274 if asTbBranchedFlags:
1275 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1276 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1277 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1278 if asEndTbFlags:
1279 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1280 cchIndent = cchIndent));
1281
1282 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1283 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1284
1285 return aoStmts;
1286
1287
1288class ThreadedFunction(object):
1289 """
1290 A threaded function.
1291 """
1292
    def __init__(self, oMcBlock: iai.McBlock) -> None:
        """ Instantiates a threaded function for the given MC block; the real work is deferred to analyze(). """
        ## The MC block this threaded function is generated from.
        self.oMcBlock       = oMcBlock      # type: iai.McBlock
        # The remaining fields are only useful after analyze() has been called:
        ## Variations for this block. There is at least one.
        self.aoVariations   = []            # type: List[ThreadedFunctionVariation]
        ## Variation dictionary containing the same as aoVariations.
        self.dVariations    = {}            # type: Dict[str, ThreadedFunctionVariation]
        ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
        self.dVariables     = {}            # type: Dict[str, iai.McStmtVar]
        ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
        ## and those determined by analyzeCodeOperation().
        self.dsCImplFlags   = {}            # type: Dict[str, bool]
1305
    @staticmethod
    def dummyInstance():
        """ Gets a dummy instance. """
        # Deliberately bogus source/line values ('null', 999999999) so any use
        # of the dummy instance stands out in diagnostics.
        return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
                                            iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1311
1312 def hasWithFlagsCheckingAndClearingVariation(self):
1313 """
1314 Check if there is one or more with flags checking and clearing
1315 variations for this threaded function.
1316 """
1317 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1318 if sVarWithFlags in self.dVariations:
1319 return True;
1320 return False;
1321
1322 #
1323 # Analysis and code morphing.
1324 #
1325
1326 def raiseProblem(self, sMessage):
1327 """ Raises a problem. """
1328 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1329
1330 def warning(self, sMessage):
1331 """ Emits a warning. """
1332 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1333
1334 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1335 """ Scans the statements for MC variables and call arguments. """
1336 for oStmt in aoStmts:
1337 if isinstance(oStmt, iai.McStmtVar):
1338 if oStmt.sVarName in self.dVariables:
1339 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1340 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1341
1342 # There shouldn't be any variables or arguments declared inside if/
1343 # else blocks, but scan them too to be on the safe side.
1344 if isinstance(oStmt, iai.McStmtCond):
1345 cBefore = len(self.dVariables);
1346 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1347 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1348 if len(self.dVariables) != cBefore:
1349 raise Exception('Variables/arguments defined in conditional branches!');
1350 return True;
1351
1352 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
1353 """
1354 Analyzes the code looking clues as to additional side-effects.
1355
1356 Currently this is simply looking for branching and adding the relevant
1357 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1358 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1359 """
1360 for oStmt in aoStmts:
1361 # Set IEM_IMPL_C_F_BRANCH if we see any branching MCs.
1362 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1363 assert not fSeenConditional;
1364 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1365 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1366 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1367 if fSeenConditional:
1368 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1369
1370 # Check for CIMPL and AIMPL calls.
1371 if oStmt.sName.startswith('IEM_MC_CALL_'):
1372 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1373 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1374 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1375 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1376 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1377 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1378 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1379 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1380 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1381 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1382 else:
1383 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1384
1385 # Process branches of conditionals recursively.
1386 if isinstance(oStmt, iai.McStmtCond):
1387 self.analyzeCodeOperation(oStmt.aoIfBranch, True);
1388 if oStmt.aoElseBranch:
1389 self.analyzeCodeOperation(oStmt.aoElseBranch, True);
1390
1391 return True;
1392
    def analyze(self):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        Decodes the MC block, collects variables and IEM_CIMPL_F_XXX clues,
        selects the set of variations to generate from the IEM_MC_F_XXX block
        flags, and runs the per-variation analysis.

        Returns dummy True - raises exception on trouble.
        """

        # Check the block for errors before we proceed (will decode it).
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        # Decode the block into a list/tree of McStmt objects.
        aoStmts = self.oMcBlock.decode();

        # Scan the statements for local variables and call arguments (self.dVariables).
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        # Scan the code for IEM_CIMPL_F_ and other clues.
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        self.analyzeCodeOperation(aoStmts);
        # The three CALLS flag kinds are mutually exclusive; bool-to-int addition
        # counts how many are present.
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
            self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');

        # Create variations as needed.
        # Blocks that just defer to a C implementation only get the default variation.
        # NOTE(review): only the 0..3 argument DEFER_TO_CIMPL forms are listed here;
        # confirm no 4/5 argument deferrals occur in the decoder sources.
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        # Blocks doing effective address calculation get the with-address variation
        # sets, narrowed by the IEM_MC_F_XXX mode restrictions on the block.
        elif iai.McStmt.findStmtByNames(aoStmts, {'IEM_MC_CALC_RM_EFF_ADDR' : True,}):
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        # All other blocks get the without-address sets, narrowed the same way.
        else:
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Blocks without any RIP-advancing/jumping MC never need the
        # eflags-checking-and-clearing variations, so filter those out.
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                          }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        # Continue the analysis on each variation.
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;
1474
    ## Used by emitThreadedCallStmts.
    ## Variations listed here require the address-size-prefix bit (8) and the
    ## segment-prefix bit (16) to be folded into the dispatching switch value.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs: True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
        ThreadedFunctionVariation.ksVariation_32_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat: True,
        ThreadedFunctionVariation.ksVariation_32f_Flat: True,
        ThreadedFunctionVariation.ksVariation_16_Addr32: True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
1488
    def emitThreadedCallStmts(self):
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        For blocks with only the default variation this is a single call
        sequence; otherwise a C switch statement is emitted that dispatches on
        the execution mode (and prefix state) to the per-variation threaded
        function.
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            return self.aoVariations[0].emitThreadedCallStmts(0);

        #
        # Case statement sub-class.
        #
        dByVari = self.dVariations;
        #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            """One case label in the mode dispatching switch; sVarNm is None for fall-thru labels."""
            def __init__(self, sCond, sVarNm = None):
                self.sCond = sCond;
                self.sVarNm = sVarNm;
                self.oVar = dByVari[sVarNm] if sVarNm else None;
                self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;

            def toCode(self):
                # Case label followed by the full threaded-call body (if any) and a break.
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                # Case label with only an enmFunction assignment (used in shared-body mode).
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                # Compares the emitted bodies statement by statement, treating only the
                # function index constant of IEM_MC2_EMIT_CALL_* invocations as a
                # wildcard (so a shared body + per-case enmFunction is possible).
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    if isinstance(oStmt, iai.McStmtCond):
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        if (   sParam != oThatStmt.asParams[iParam]
                            and (   iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        #
        # Switch value bit layout (see the case labels below):
        #   bits 0-2: fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK).
        #   bit    3: set when the effective address mode differs from the CPU mode.
        #   bit    4: set for accesses via FS/GS/CS (non-flat penalty path).
        #   bit    5: set when the eflags checking & clearing variants are needed.
        #
        fSimple = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS, GS and CS go thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple = False; # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));

        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
                Case('IEMMODE_32BIT | 16', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16', None), # fall thru
                Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));

        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # Shared body: the switch only picks enmFunction, a single call sequence follows it.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
1706
    def morphInputCode(self, aoStmts, fCallEmitted = False, cDepth = 0):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.

        Raises a problem (via self.raiseProblem) at depth 0 if no suitable
        insertion point for the threaded call could be found.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for oStmt in aoStmts:
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            # Patch the flags argument (parameter #3) of IEM_MC_BEGIN with the collected CIMPL flags.
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    # Insert the call just before raise/finish/CIMPL style statements ...
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        aoDecoderStmts.pop();
                        aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;
                    # ... or just after the 'decoding done' helper invocations.
                    elif (    oStmt.fDecode
                          and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                               or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                        aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fCallEmitted, cDepth + 1);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fCallEmitted, cDepth + 1);
                else:
                    fCallEmitted2 = False;
                # Only consider the call emitted if it was done so on both branches (or before the conditional).
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
1759
1760
    def generateInputCode(self):
        """
        Modifies the input code.

        Returns the replacement text for the MC block: the morphed decoder
        statements with the threaded function call(s) inserted (see
        morphInputCode), rendered at the block's rounded-up indent level.
        """
        cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;

        if len(self.oMcBlock.aoStmts) == 1:
            # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
            sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
            if self.dsCImplFlags:
                sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
            else:
                sCode += '0;\n';
            sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
                                                  cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
            # NOTE(review): min(cchIndent, 2) - 2 is always <= 0, so sIndent is always the
            # empty string and the braces end up in column 0 -- was max() intended? TODO confirm.
            sIndent = ' ' * (min(cchIndent, 2) - 2);
            sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
            return sCode;

        # IEM_MC_BEGIN/END block
        assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
        return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
                                            cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1784
# Short alias for ThreadedFunctionVariation; used to keep the case table
# lines in ThreadedFunction.emitThreadedCallStmts reasonably short.
ThrdFnVar = ThreadedFunctionVariation;
1787
1788
1789class IEMThreadedGenerator(object):
1790 """
1791 The threaded code generator & annotator.
1792 """
1793
1794 def __init__(self):
1795 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
1796 self.oOptions = None # type: argparse.Namespace
1797 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
1798 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
1799
1800 #
1801 # Processing.
1802 #
1803
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the input files, creates and analyzes a ThreadedFunction for
        each MC block, populates aidxFirstFunctions, and prints parameter /
        stack usage statistics to stderr.  Returns success indicator.

        Raises Exception if a block exceeds the variable/argument count or
        stack size limits of the threaded function scheme.
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyze();
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction = 0;
        oThreadedFunction = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            # Note: getThreadedFunctionByIndex returns a dummy when out of bounds,
            #       whose source file won't match, so this loop terminates cleanly.
            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.displayStatistics(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs = 0;
        cMaxVars = 0;
        cMaxVarsAndArgs = 0;
        cbMaxArgs = 0;
        cbMaxVars = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.cLocals >= 0:
                # Counts.
                assert oThreadedFunction.oMcBlock.cArgs >= 0;
                cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
                cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
                cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
                # Calc stack allocation size:
                # Each argument/variable is rounded up to a whole number of 64-bit slots (8 bytes).
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars = max(cbMaxVars, cbVars);
                cbMaxArgs = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        return True;
1881
1882 #
1883 # Output
1884 #
1885
1886 def generateLicenseHeader(self):
1887 """
1888 Returns the lines for a license header.
1889 """
1890 return [
1891 '/*',
1892 ' * Autogenerated by $Id: IEMAllThrdPython.py 102072 2023-11-12 23:12:13Z vboxsync $ ',
1893 ' * Do not edit!',
1894 ' */',
1895 '',
1896 '/*',
1897 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
1898 ' *',
1899 ' * This file is part of VirtualBox base platform packages, as',
1900 ' * available from https://www.virtualbox.org.',
1901 ' *',
1902 ' * This program is free software; you can redistribute it and/or',
1903 ' * modify it under the terms of the GNU General Public License',
1904 ' * as published by the Free Software Foundation, in version 3 of the',
1905 ' * License.',
1906 ' *',
1907 ' * This program is distributed in the hope that it will be useful, but',
1908 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
1909 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
1910 ' * General Public License for more details.',
1911 ' *',
1912 ' * You should have received a copy of the GNU General Public License',
1913 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
1914 ' *',
1915 ' * The contents of this file may alternatively be used under the terms',
1916 ' * of the Common Development and Distribution License Version 1.0',
1917 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
1918 ' * in the VirtualBox distribution, in which case the provisions of the',
1919 ' * CDDL are applicable instead of those of the GPL.',
1920 ' *',
1921 ' * You may elect to license modified versions of this file under the',
1922 ' * terms and conditions of either the GPL or the CDDL or both.',
1923 ' *',
1924 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
1925 ' */',
1926 '',
1927 '',
1928 '',
1929 ];
1930
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry: (name suffix, number of used arguments, fHaveNativeRecompiler).
    katBltIns = (
        ( 'DeferToCImpl0', 2, True ),
        ( 'CheckIrq', 0, True ),
        ( 'CheckMode', 1, True ),
        ( 'CheckHwInstrBps', 0, False ),
        ( 'CheckCsLim', 1, False ),

        ( 'CheckCsLimAndOpcodes', 3, False ),
        ( 'CheckOpcodes', 3, False ),
        ( 'CheckOpcodesConsiderCsLim', 3, False ),

        ( 'CheckCsLimAndPcAndOpcodes', 3, False ),
        ( 'CheckPcAndOpcodes', 3, False ),
        ( 'CheckPcAndOpcodesConsiderCsLim', 3, False ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, False ),
        ( 'CheckOpcodesAcrossPageLoadingTlb', 3, False ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, False ),

        ( 'CheckCsLimAndOpcodesLoadingTlb', 3, False ),
        ( 'CheckOpcodesLoadingTlb', 3, False ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, False ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, False ),
        ( 'CheckOpcodesOnNextPageLoadingTlb', 2, False ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, False ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, False ),
        ( 'CheckOpcodesOnNewPageLoadingTlb', 2, False ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, False ),
    );
1964
    def generateThreadedFunctionsHeader(self, oOut):
        """
        Generates the threaded functions header file.
        Returns success indicator.

        Side effect: assigns oVariation.iEnumValue for every variation; the
        source/table generators below assert the same numbering later on.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            '    kIemThreadedFunc_Invalid = 0,',
            '',
            '    /*',
            '     * Predefined',
            '     */',
        ];
        asLines += ['    kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        # The built-ins occupy entries 1..len(katBltIns); variations follow in emit order.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                '    /*',
                '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                '     */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    oVariation.iEnumValue = iThreadedFunction;
                    asLines.append('    ' + oVariation.getIndexName() + ',');
        asLines += [
            '    kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
        ];

        oOut.write('\n'.join(asLines));
        return True;
2016
    ## Maps a sub-64 bit width to the C mask expression used when unpacking a
    ## value packed into one of the 64-bit threaded call parameters.
    ksBitsToIntMask = {
        1: "UINT64_C(0x1)",
        2: "UINT64_C(0x3)",
        4: "UINT64_C(0xf)",
        8: "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2025
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        asParams gives the C expressions for the (up to) three 64-bit call
        parameters the values were packed into.  Each referenced parameter is
        declared as a const local, extracted via shift & mask according to its
        iNewParam/offNewParam packing position.  Returns True.
        """
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef = aoRefs[0];
            # Pointer types ('P' prefix) are passed as whole 64-bit values; other
            # types get their bit count and C declaration from g_kdTypeInfo.
            if oRef.sType[0] != 'P':
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            if cBits == 64:
                # Full 64-bit values must sit at offset zero; cast unless already uint64_t.
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                sUnpack = '(%s)((%s >> %s) & %s);' \
                        % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # Column 0 is only used as sort key (parameter index + bit offset).
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
        # Measure the columns so the output lines up nicely.
        acchVars = [0, 0, 0, 0, 0];
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = '    %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
        return True;
2067
    ## C parameter names of the generic threaded function signature
    ## (one per g_kcThreadedParams 64-bit parameter).
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut):
        """
        Generates the threaded functions source file.
        Returns success indicator.

        Emits one static IEM_DECL_IEMTHREADEDFUNC_DEF function per variation
        (grouped by variation, matching the enum numbering assigned by
        generateThreadedFunctionsHeader), followed by the function pointer,
        name and argument count tables.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                                % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                   os.path.split(oMcBlock.sSrcFile)[1],
                                   ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write('    RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    /*Invalid*/ NULL,',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    "Invalid",',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            '    0, /*Invalid*/',
        ];
        aasTables = (asFuncTable, asNameTable, asArgCntTab,);

        for asTable in aasTables:
            asTable.extend((
                '',
                '    /*',
                '     * Predefined.',
                '     */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append('    iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asNameTable.append('    "BltIn_%s",' % (sFuncNm,));
            asArgCntTab.append('    %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    '    /*',
                    '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    '     */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Must match the numbering assigned by generateThreadedFunctionsHeader.
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append('    /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append('    /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append('    /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write( '\n'
                  + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write( '\n'
                  + '\n'
                  + '\n'
                  + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                  + '\n'
                  + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write('\n');

        return True;
2203
2204 def generateNativeFunctionsHeader(self, oOut):
2205 """
2206 Generates the native recompiler functions header file.
2207 Returns success indicator.
2208 """
2209 if not self.oOptions.fNativeRecompilerEnabled:
2210 return True;
2211
2212 asLines = self.generateLicenseHeader();
2213
2214 # Prototype the function table.
2215 asLines += [
2216 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2217 '',
2218 ];
2219
2220 oOut.write('\n'.join(asLines));
2221 return True;
2222
    def generateNativeFunctionsSource(self, oOut):
        """
        Generates the native recompiler functions source file.
        Returns success indicator.

        No-op (success) unless the native recompiler is enabled.  Emits one
        IEM_DECL_IEMNATIVERECOMPFUNC_DEF function per recompilable variation,
        then g_apfnIemNativeRecompileFunctions with NULL entries for the
        non-recompilable ones.
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # Only variations with a recompilable oNativeRecomp get a function body;
        # the rest are represented by NULL in the table at the end.
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                                % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                   os.path.split(oMcBlock.sSrcFile)[1],
                                   ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write( '\n'
                  + '\n'
                  + '/*\n'
                  + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                  + ' */\n'
                  + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                  + '{\n'
                  + '    /*Invalid*/ NULL,'
                  + '\n'
                  + '    /*\n'
                  + '     * Predefined.\n'
                  + '     */\n'
                  );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write('    iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write( '    /*\n'
                      + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                      + '     */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Must match the numbering assigned by generateThreadedFunctionsHeader.
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getNativeFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write( '};\n'
                  + '\n');
        return True;
2318
2319
2320 def getThreadedFunctionByIndex(self, idx):
2321 """
2322 Returns a ThreadedFunction object for the given index. If the index is
2323 out of bounds, a dummy is returned.
2324 """
2325 if idx < len(self.aoThreadedFuncs):
2326 return self.aoThreadedFuncs[idx];
2327 return ThreadedFunction.dummyInstance();
2328
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.
        Returns success indicator.

        Copies each input parser's source lines to oOut, replacing every MC
        block with the code produced by ThreadedFunction.generateInputCode().
        Only parsers whose file belongs to the file set idxFile (per
        iai.g_aaoAllInstrFilesAndDefaultMapAndSet) are emitted.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?  fInclude is a tri-state:
            # -1 = file not found in the table, True/False = in/out of the set.
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    # aoInfo[2] == -1 means "include in every file set".
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                # Every input file must be listed in the table; -1 here means it wasn't.
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the source lines and the threaded functions for this parser
            # in lock-step; aidxFirstFunctions gives the first function index.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine = 0;
            while iLine < len(oParser.asLines):
                sLine = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block. Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    # Sanity: the begin line holds exactly one IEM_MC_ invocation
                    # (IEM_MC_F_ flag macros don't count), unless it came from a
                    # partial macro expansion.
                    assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    # Emit the text before the block, then the replacement code.
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the block's end line and emit the tail after it.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    # Several blocks may begin (and end) on this one physical
                    # line; splice in each replacement at its byte offset.
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        # The replacement must look like one of the known code
                        # shapes, otherwise the splicing assumptions are wrong.
                        assert ( sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
2419
2420 def generateModifiedInput1(self, oOut):
2421 """
2422 Generates the combined modified input source/header file, part 1.
2423 Returns success indicator.
2424 """
2425 return self.generateModifiedInput(oOut, 1);
2426
2427 def generateModifiedInput2(self, oOut):
2428 """
2429 Generates the combined modified input source/header file, part 2.
2430 Returns success indicator.
2431 """
2432 return self.generateModifiedInput(oOut, 2);
2433
2434 def generateModifiedInput3(self, oOut):
2435 """
2436 Generates the combined modified input source/header file, part 3.
2437 Returns success indicator.
2438 """
2439 return self.generateModifiedInput(oOut, 3);
2440
2441 def generateModifiedInput4(self, oOut):
2442 """
2443 Generates the combined modified input source/header file, part 4.
2444 Returns success indicator.
2445 """
2446 return self.generateModifiedInput(oOut, 4);
2447
2448
2449 #
2450 # Main
2451 #
2452
2453 def main(self, asArgs):
2454 """
2455 C-like main function.
2456 Returns exit code.
2457 """
2458
2459 #
2460 # Parse arguments
2461 #
2462 sScriptDir = os.path.dirname(__file__);
2463 oParser = argparse.ArgumentParser(add_help = False);
2464 oParser.add_argument('asInFiles',
2465 metavar = 'input.cpp.h',
2466 nargs = '*',
2467 default = [os.path.join(sScriptDir, aoInfo[0])
2468 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2469 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2470 oParser.add_argument('--host-arch',
2471 metavar = 'arch',
2472 dest = 'sHostArch',
2473 action = 'store',
2474 default = None,
2475 help = 'The host architecture.');
2476
2477 oParser.add_argument('--out-thrd-funcs-hdr',
2478 metavar = 'file-thrd-funcs.h',
2479 dest = 'sOutFileThrdFuncsHdr',
2480 action = 'store',
2481 default = '-',
2482 help = 'The output header file for the threaded functions.');
2483 oParser.add_argument('--out-thrd-funcs-cpp',
2484 metavar = 'file-thrd-funcs.cpp',
2485 dest = 'sOutFileThrdFuncsCpp',
2486 action = 'store',
2487 default = '-',
2488 help = 'The output C++ file for the threaded functions.');
2489 oParser.add_argument('--out-n8ve-funcs-hdr',
2490 metavar = 'file-n8tv-funcs.h',
2491 dest = 'sOutFileN8veFuncsHdr',
2492 action = 'store',
2493 default = '-',
2494 help = 'The output header file for the native recompiler functions.');
2495 oParser.add_argument('--out-n8ve-funcs-cpp',
2496 metavar = 'file-n8tv-funcs.cpp',
2497 dest = 'sOutFileN8veFuncsCpp',
2498 action = 'store',
2499 default = '-',
2500 help = 'The output C++ file for the native recompiler functions.');
2501 oParser.add_argument('--native',
2502 dest = 'fNativeRecompilerEnabled',
2503 action = 'store_true',
2504 default = False,
2505 help = 'Enables generating the files related to native recompilation.');
2506 oParser.add_argument('--out-mod-input1',
2507 metavar = 'file-instr.cpp.h',
2508 dest = 'sOutFileModInput1',
2509 action = 'store',
2510 default = '-',
2511 help = 'The output C++/header file for modified input instruction files part 1.');
2512 oParser.add_argument('--out-mod-input2',
2513 metavar = 'file-instr.cpp.h',
2514 dest = 'sOutFileModInput2',
2515 action = 'store',
2516 default = '-',
2517 help = 'The output C++/header file for modified input instruction files part 2.');
2518 oParser.add_argument('--out-mod-input3',
2519 metavar = 'file-instr.cpp.h',
2520 dest = 'sOutFileModInput3',
2521 action = 'store',
2522 default = '-',
2523 help = 'The output C++/header file for modified input instruction files part 3.');
2524 oParser.add_argument('--out-mod-input4',
2525 metavar = 'file-instr.cpp.h',
2526 dest = 'sOutFileModInput4',
2527 action = 'store',
2528 default = '-',
2529 help = 'The output C++/header file for modified input instruction files part 4.');
2530 oParser.add_argument('--help', '-h', '-?',
2531 action = 'help',
2532 help = 'Display help and exit.');
2533 oParser.add_argument('--version', '-V',
2534 action = 'version',
2535 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
2536 % (__version__.split()[1], iai.__version__.split()[1],),
2537 help = 'Displays the version/revision of the script and exit.');
2538 self.oOptions = oParser.parse_args(asArgs[1:]);
2539 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
2540
2541 #
2542 # Process the instructions specified in the IEM sources.
2543 #
2544 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
2545 #
2546 # Generate the output files.
2547 #
2548 aaoOutputFiles = (
2549 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
2550 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
2551 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
2552 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
2553 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
2554 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
2555 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
2556 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
2557 );
2558 fRc = True;
2559 for sOutFile, fnGenMethod in aaoOutputFiles:
2560 if sOutFile == '-':
2561 fRc = fnGenMethod(sys.stdout) and fRc;
2562 else:
2563 try:
2564 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
2565 except Exception as oXcpt:
2566 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
2567 return 1;
2568 fRc = fnGenMethod(oOut) and fRc;
2569 oOut.close();
2570 if fRc:
2571 return 0;
2572
2573 return 1;
2574
2575
if __name__ == '__main__':
    # Run the generator on the command line arguments and propagate its status.
    rcExit = IEMThreadedGenerator().main(sys.argv);
    sys.exit(rcExit);
2578
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette