VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 101704

Last change on this file since 101704 was 101704, checked in by vboxsync, 18 months ago

VMM/IEM: Native translation for IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS, IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS and IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS. bugref:10371

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 128.4 KB
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 101704 2023-11-01 23:47:07Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 101704 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the thread functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; #| g_kdTypeInfo; - requires 3.9
90g_kdTypeInfo2.update(g_kdTypeInfo);
91
92def getTypeBitCount(sType):
93 """
94 Translate a type to size in bits
95 """
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
103
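# Illustrative getTypeBitCount() results, derived from the tables above (the
# 'PVMCPU' name below is only an example of a pointer-looking type):
#   getTypeBitCount('uint32_t')    # -> 32  (g_kdTypeInfo)
#   getTypeBitCount('RTUINT128U')  # -> 128 (g_kdTypeInfo2)
#   getTypeBitCount('PVMCPU')      # -> 64  (starts with 'P', assumed pointer)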
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
131
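# Illustrative lookup: analyzeReferenceToType() below maps a reference such as
# 'pVCpu->iem.s.uFpuOpcode' to 'uint16_t' via this table, while the fields
# marked ( None, ) are treated as illegal and end up in raiseProblem().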
132class ThreadedParamRef(object):
133 """
134 A parameter reference for a threaded function.
135 """
136
137 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
138 ## The name / reference in the original code.
139 self.sOrgRef = sOrgRef;
140 ## Normalized name to deal with spaces in macro invocations and such.
141 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
142 ## Indicates that sOrgRef may not match the parameter.
143 self.fCustomRef = sStdRef is not None;
144 ## The type (typically derived).
145 self.sType = sType;
146 ## The statement making the reference.
147 self.oStmt = oStmt;
148 ## The parameter containing the references. None if implicit.
149 self.iParam = iParam;
150 ## The offset in the parameter of the reference.
151 self.offParam = offParam;
152
153 ## The variable name in the threaded function.
154 self.sNewName = 'x';
155 ## The parameter this is packed into.
156 self.iNewParam = 99;
157 ## The bit offset in iNewParam.
158 self.offNewParam = 1024
159
160
161class ThreadedFunctionVariation(object):
162 """ Threaded function variation. """
163
164 ## @name Variations.
165 ## These variations will match translation block selection/distinctions as well.
166 ## @{
167 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
168 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
169 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
170 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
171 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
172 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
173 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
174 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
175 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
176 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
177 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
178 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
179 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
180 ksVariation_64 = '_64'; ##< 64-bit mode code.
181 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
182 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
183 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
184 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
185 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
186 kasVariations = (
187 ksVariation_Default,
188 ksVariation_16,
189 ksVariation_16f,
190 ksVariation_16_Addr32,
191 ksVariation_16f_Addr32,
192 ksVariation_16_Pre386,
193 ksVariation_16f_Pre386,
194 ksVariation_32,
195 ksVariation_32f,
196 ksVariation_32_Flat,
197 ksVariation_32f_Flat,
198 ksVariation_32_Addr16,
199 ksVariation_32f_Addr16,
200 ksVariation_64,
201 ksVariation_64f,
202 ksVariation_64_FsGs,
203 ksVariation_64f_FsGs,
204 ksVariation_64_Addr32,
205 ksVariation_64f_Addr32,
206 );
207 kasVariationsWithoutAddress = (
208 ksVariation_16,
209 ksVariation_16f,
210 ksVariation_16_Pre386,
211 ksVariation_16f_Pre386,
212 ksVariation_32,
213 ksVariation_32f,
214 ksVariation_64,
215 ksVariation_64f,
216 );
217 kasVariationsWithoutAddressNot286 = (
218 ksVariation_16,
219 ksVariation_16f,
220 ksVariation_32,
221 ksVariation_32f,
222 ksVariation_64,
223 ksVariation_64f,
224 );
225 kasVariationsWithoutAddressNot286Not64 = (
226 ksVariation_16,
227 ksVariation_16f,
228 ksVariation_32,
229 ksVariation_32f,
230 );
231 kasVariationsWithoutAddressNot64 = (
232 ksVariation_16,
233 ksVariation_16f,
234 ksVariation_16_Pre386,
235 ksVariation_16f_Pre386,
236 ksVariation_32,
237 ksVariation_32f,
238 );
239 kasVariationsWithoutAddressOnly64 = (
240 ksVariation_64,
241 ksVariation_64f,
242 );
243 kasVariationsWithAddress = (
244 ksVariation_16,
245 ksVariation_16f,
246 ksVariation_16_Addr32,
247 ksVariation_16f_Addr32,
248 ksVariation_16_Pre386,
249 ksVariation_16f_Pre386,
250 ksVariation_32,
251 ksVariation_32f,
252 ksVariation_32_Flat,
253 ksVariation_32f_Flat,
254 ksVariation_32_Addr16,
255 ksVariation_32f_Addr16,
256 ksVariation_64,
257 ksVariation_64f,
258 ksVariation_64_FsGs,
259 ksVariation_64f_FsGs,
260 ksVariation_64_Addr32,
261 ksVariation_64f_Addr32,
262 );
263 kasVariationsWithAddressNot286 = (
264 ksVariation_16,
265 ksVariation_16f,
266 ksVariation_16_Addr32,
267 ksVariation_16f_Addr32,
268 ksVariation_32,
269 ksVariation_32f,
270 ksVariation_32_Flat,
271 ksVariation_32f_Flat,
272 ksVariation_32_Addr16,
273 ksVariation_32f_Addr16,
274 ksVariation_64,
275 ksVariation_64f,
276 ksVariation_64_FsGs,
277 ksVariation_64f_FsGs,
278 ksVariation_64_Addr32,
279 ksVariation_64f_Addr32,
280 );
281 kasVariationsWithAddressNot286Not64 = (
282 ksVariation_16,
283 ksVariation_16f,
284 ksVariation_16_Addr32,
285 ksVariation_16f_Addr32,
286 ksVariation_32,
287 ksVariation_32f,
288 ksVariation_32_Flat,
289 ksVariation_32f_Flat,
290 ksVariation_32_Addr16,
291 ksVariation_32f_Addr16,
292 );
293 kasVariationsWithAddressNot64 = (
294 ksVariation_16,
295 ksVariation_16f,
296 ksVariation_16_Addr32,
297 ksVariation_16f_Addr32,
298 ksVariation_16_Pre386,
299 ksVariation_16f_Pre386,
300 ksVariation_32,
301 ksVariation_32f,
302 ksVariation_32_Flat,
303 ksVariation_32f_Flat,
304 ksVariation_32_Addr16,
305 ksVariation_32f_Addr16,
306 );
307 kasVariationsWithAddressOnly64 = (
308 ksVariation_64,
309 ksVariation_64f,
310 ksVariation_64_FsGs,
311 ksVariation_64f_FsGs,
312 ksVariation_64_Addr32,
313 ksVariation_64f_Addr32,
314 );
315 kasVariationsOnlyPre386 = (
316 ksVariation_16_Pre386,
317 ksVariation_16f_Pre386,
318 );
319 kasVariationsEmitOrder = (
320 ksVariation_Default,
321 ksVariation_64,
322 ksVariation_64f,
323 ksVariation_64_FsGs,
324 ksVariation_64f_FsGs,
325 ksVariation_32_Flat,
326 ksVariation_32f_Flat,
327 ksVariation_32,
328 ksVariation_32f,
329 ksVariation_16,
330 ksVariation_16f,
331 ksVariation_16_Addr32,
332 ksVariation_16f_Addr32,
333 ksVariation_16_Pre386,
334 ksVariation_16f_Pre386,
335 ksVariation_32_Addr16,
336 ksVariation_32f_Addr16,
337 ksVariation_64_Addr32,
338 ksVariation_64f_Addr32,
339 );
340 kdVariationNames = {
341 ksVariation_Default: 'defer-to-cimpl',
342 ksVariation_16: '16-bit',
343 ksVariation_16f: '16-bit w/ eflag checking and clearing',
344 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
345 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
346 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
347 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
348 ksVariation_32: '32-bit',
349 ksVariation_32f: '32-bit w/ eflag checking and clearing',
350 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
351 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
352 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
353 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
354 ksVariation_64: '64-bit',
355 ksVariation_64f: '64-bit w/ eflag checking and clearing',
356 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
357 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
358 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
359 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
360 };
361 kdVariationsWithEflagsCheckingAndClearing = {
362 ksVariation_16f: True,
363 ksVariation_16f_Addr32: True,
364 ksVariation_16f_Pre386: True,
365 ksVariation_32f: True,
366 ksVariation_32f_Flat: True,
367 ksVariation_32f_Addr16: True,
368 ksVariation_64f: True,
369 ksVariation_64f_FsGs: True,
370 ksVariation_64f_Addr32: True,
371 };
372 kdVariationsWithFlatAddress = {
373 ksVariation_32_Flat: True,
374 ksVariation_32f_Flat: True,
375 ksVariation_64: True,
376 ksVariation_64f: True,
377 };
378 kdVariationsWithFlatAddr16 = {
379 ksVariation_16: True,
380 ksVariation_16f: True,
381 ksVariation_16_Pre386: True,
382 ksVariation_16f_Pre386: True,
383 ksVariation_32_Addr16: True,
384 ksVariation_32f_Addr16: True,
385 };
386 kdVariationsWithFlatAddr32No64 = {
387 ksVariation_16_Addr32: True,
388 ksVariation_16f_Addr32: True,
389 ksVariation_32: True,
390 ksVariation_32f: True,
391 ksVariation_32_Flat: True,
392 ksVariation_32f_Flat: True,
393 };
394 ## @}
395
396 ## IEM_CIMPL_F_XXX flags that we know.
397 ## The value indicates whether it terminates the TB or not. The goal is to
398 ## improve the recompiler so all but END_TB will be False.
399 ##
400 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
401 kdCImplFlags = {
402 'IEM_CIMPL_F_MODE': False,
403 'IEM_CIMPL_F_BRANCH_DIRECT': False,
404 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
405 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
406 'IEM_CIMPL_F_BRANCH_FAR': True,
407 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
408 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
409 'IEM_CIMPL_F_RFLAGS': False,
410 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
411 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
412 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
413 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
414 'IEM_CIMPL_F_STATUS_FLAGS': False,
415 'IEM_CIMPL_F_VMEXIT': False,
416 'IEM_CIMPL_F_FPU': False,
417 'IEM_CIMPL_F_REP': False,
418 'IEM_CIMPL_F_IO': False,
419 'IEM_CIMPL_F_END_TB': True,
420 'IEM_CIMPL_F_XCPT': True,
421 };
422
423 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
424 self.oParent = oThreadedFunction # type: ThreadedFunction
425 ##< ksVariation_Xxxx.
426 self.sVariation = sVariation
427
428 ## Threaded function parameter references.
429 self.aoParamRefs = [] # type: List[ThreadedParamRef]
430 ## Unique parameter references.
431 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
432 ## Minimum number of parameters to the threaded function.
433 self.cMinParams = 0;
434
435 ## List/tree of statements for the threaded function.
436 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
437
438 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
439 self.iEnumValue = -1;
440
441 ## Native recompilation details for this variation.
442 self.oNativeRecomp = None;
443
444 def getIndexName(self):
445 sName = self.oParent.oMcBlock.sFunction;
446 if sName.startswith('iemOp_'):
447 sName = sName[len('iemOp_'):];
448 if self.oParent.oMcBlock.iInFunction == 0:
449 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
450 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
451
452 def getThreadedFunctionName(self):
453 sName = self.oParent.oMcBlock.sFunction;
454 if sName.startswith('iemOp_'):
455 sName = sName[len('iemOp_'):];
456 if self.oParent.oMcBlock.iInFunction == 0:
457 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
458 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
459
460 def getNativeFunctionName(self):
461 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
462
463 def getShortName(self):
464 sName = self.oParent.oMcBlock.sFunction;
465 if sName.startswith('iemOp_'):
466 sName = sName[len('iemOp_'):];
467 if self.oParent.oMcBlock.iInFunction == 0:
468 return '%s%s' % ( sName, self.sVariation, );
469 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
470
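    # Illustrative naming, assuming a decoder function named 'iemOp_add_Eb_Gb'
    # (hypothetical here), iInFunction == 0 and the ksVariation_64 variation:
    #   getIndexName()            -> 'kIemThreadedFunc_add_Eb_Gb_64'
    #   getThreadedFunctionName() -> 'iemThreadedFunc_add_Eb_Gb_64'
    #   getNativeFunctionName()   -> 'iemNativeRecompFunc_add_Eb_Gb_64'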
471 #
472 # Analysis and code morphing.
473 #
474
475 def raiseProblem(self, sMessage):
476 """ Raises a problem. """
477 self.oParent.raiseProblem(sMessage);
478
479 def warning(self, sMessage):
480 """ Emits a warning. """
481 self.oParent.warning(sMessage);
482
483 def analyzeReferenceToType(self, sRef):
484 """
485 Translates a variable or structure reference to a type.
486 Returns type name.
487 Raises exception if unable to figure it out.
488 """
489 ch0 = sRef[0];
490 if ch0 == 'u':
491 if sRef.startswith('u32'):
492 return 'uint32_t';
493 if sRef.startswith('u8') or sRef == 'uReg':
494 return 'uint8_t';
495 if sRef.startswith('u64'):
496 return 'uint64_t';
497 if sRef.startswith('u16'):
498 return 'uint16_t';
499 elif ch0 == 'b':
500 return 'uint8_t';
501 elif ch0 == 'f':
502 return 'bool';
503 elif ch0 == 'i':
504 if sRef.startswith('i8'):
505 return 'int8_t';
506 if sRef.startswith('i16'):
507 return 'int16_t';
508 if sRef.startswith('i32'):
509 return 'int32_t';
510 if sRef.startswith('i64'):
511 return 'int64_t';
512 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
513 return 'uint8_t';
514 elif ch0 == 'p':
515 if sRef.find('-') < 0:
516 return 'uintptr_t';
517 if sRef.startswith('pVCpu->iem.s.'):
518 sField = sRef[len('pVCpu->iem.s.') : ];
519 if sField in g_kdIemFieldToType:
520 if g_kdIemFieldToType[sField][0]:
521 return g_kdIemFieldToType[sField][0];
522 elif ch0 == 'G' and sRef.startswith('GCPtr'):
523 return 'uint64_t';
524 elif ch0 == 'e':
525 if sRef == 'enmEffOpSize':
526 return 'IEMMODE';
527 elif ch0 == 'o':
528 if sRef.startswith('off32'):
529 return 'uint32_t';
530 elif sRef == 'cbFrame': # enter
531 return 'uint16_t';
532 elif sRef == 'cShift': ## @todo risky
533 return 'uint8_t';
534
535 self.raiseProblem('Unknown reference: %s' % (sRef,));
536 return None; # Shut up pylint 2.16.2.
537
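    # Example mappings for analyzeReferenceToType(), following the prefix rules
    # below (the reference names themselves are only illustrative):
    #   'u32Disp'  -> 'uint32_t'    'fCarry'   -> 'bool'
    #   'iSegReg'  -> 'uint8_t'     'GCPtrMem' -> 'uint64_t'
    #   'pfnAImpl' -> 'uintptr_t'   (plain pointer, no '->' in the reference)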
538 def analyzeCallToType(self, sFnRef):
539 """
540 Determines the type of an indirect function call.
541 """
542 assert sFnRef[0] == 'p';
543
544 #
545 # Simple?
546 #
547 if sFnRef.find('-') < 0:
548 oDecoderFunction = self.oParent.oMcBlock.oFunction;
549
550 # Try the argument list of the function definition macro invocation first.
551 iArg = 2;
552 while iArg < len(oDecoderFunction.asDefArgs):
553 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
554 return oDecoderFunction.asDefArgs[iArg - 1];
555 iArg += 1;
556
557 # Then check for a line that includes the word and looks like a variable declaration.
558 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
559 for sLine in oDecoderFunction.asLines:
560 oMatch = oRe.match(sLine);
561 if oMatch:
562 if not oMatch.group(1).startswith('const'):
563 return oMatch.group(1);
564 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
565
566 #
567 # Deal with the pImpl->pfnXxx:
568 #
569 elif sFnRef.startswith('pImpl->pfn'):
570 sMember = sFnRef[len('pImpl->') : ];
571 sBaseType = self.analyzeCallToType('pImpl');
572 offBits = sMember.rfind('U') + 1;
573 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
574 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
575 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
576 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
577 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
578 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
579 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
580 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
581 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
582 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
583
584 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
585
586 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
587 return None; # Shut up pylint 2.16.2.
588
589 def analyze8BitGRegStmt(self, oStmt):
590 """
591 Gets the 8-bit general purpose register access details of the given statement.
592 ASSUMES the statement is one accessing an 8-bit GREG.
593 """
594 idxReg = 0;
595 if ( oStmt.sName.find('_FETCH_') > 0
596 or oStmt.sName.find('_REF_') > 0
597 or oStmt.sName.find('_TO_LOCAL') > 0):
598 idxReg = 1;
599
600 sRegRef = oStmt.asParams[idxReg];
601 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
602 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
603 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
604 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
605 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
606 else:
607 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
608
609 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
610 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
611 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
612 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
613 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
614 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
615 else:
616 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
617 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
618 sStdRef = 'bOther8Ex';
619
620 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
621 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
622 return (idxReg, sOrgExpr, sStdRef);
623
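    # Example for analyze8BitGRegStmt(), assuming a (hypothetical here) statement
    # IEM_MC_FETCH_GREG_U8(u8Value, IEM_GET_MODRM_RM(pVCpu, bRm)):
    #   idxReg   = 1                                    (it is a _FETCH_ MC)
    #   sOrgExpr = 'IEM_GET_MODRM_RM_EX8(pVCpu, bRm)'   (REX-aware index macro)
    #   sStdRef  = 'bRmRm8Ex'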
624
625 ## Maps memory related MCs to info for FLAT conversion.
626 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
627 ## segmentation checking for every memory access. Only applied to access
628 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment,
629 ## the latter (CS) is just to keep things simple (we could safely fetch via
630 ## it, but only in 64-bit mode could we safely write via it, IIRC).
631 kdMemMcToFlatInfo = {
632 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
633 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
634 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
635 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
636 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
637 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
638 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
639 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
640 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
641 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
642 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
643 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
644 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
645 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
646 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
647 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
648 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
649 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
650 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
651 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
652 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
653 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
654 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
655 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
656 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
657 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
658 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
659 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
660 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
661 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
662 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
663 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
664 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
665 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
666 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
667 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
668 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
669 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
670 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
671 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
672 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
673 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
674 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
675 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
676 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
677 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
678 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
679 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
680 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
681 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
682 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
683 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
684 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
685 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
686 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
687 'IEM_MC_MEM_MAP': ( 2, 'IEM_MC_MEM_FLAT_MAP' ),
688 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
689 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
690 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
691 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
692 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
693 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
694 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
695 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
696 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
697 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
698 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
699 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
700 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
701 };
702
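    # Illustrative use of the table above in flat variations: a (hypothetical)
    # IEM_MC_FETCH_MEM_U16(u16Dst, iEffSeg, GCPtrEffSrc) drops the iEffSeg
    # parameter (index 1) and becomes IEM_MC_FETCH_MEM_FLAT_U16(u16Dst,
    # GCPtrEffSrc), as done by analyzeMorphStmtForThreaded() below.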
703 kdMemMcToFlatInfoStack = {
704 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
705 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
706 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
707 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
708 'IEM_MC_POP_U16': ( 'IEM_MC_FLAT32_POP_U16', 'IEM_MC_FLAT64_POP_U16', ),
709 'IEM_MC_POP_U32': ( 'IEM_MC_FLAT32_POP_U32', 'IEM_MC_POP_U32', ),
710 'IEM_MC_POP_U64': ( 'IEM_MC_POP_U64', 'IEM_MC_FLAT64_POP_U64', ),
711 };
712
713 kdThreadedCalcRmEffAddrMcByVariation = {
714 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
715 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
716 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
717 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
718 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
719 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
720 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
721 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
722 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
723 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
724 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
725 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
726 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
727 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
728 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
729 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
730 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
731 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
732 };
733
734 def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
735 """
736 Transforms (copies) the statements into those for the threaded function.
737
738 Returns list/tree of statements (aoStmts is not modified) and the new
739 iParamRef value.
740 """
741 #
742 # We'll be traversing aoParamRefs in parallel to the statements, so we
743 # must match the traversal in analyzeFindThreadedParamRefs exactly.
744 #
745 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
746 aoThreadedStmts = [];
747 for oStmt in aoStmts:
748 # Skip C++ statements that are purely related to decoding.
749 if not oStmt.isCppStmt() or not oStmt.fDecode:
750 # Copy the statement. Make a deep copy to make sure we've got our own
751 # copies of all instance variables, even if a bit overkill at the moment.
752 oNewStmt = copy.deepcopy(oStmt);
753 aoThreadedStmts.append(oNewStmt);
754 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
755
756 # If the statement has parameter references, process the relevant parameters.
757 # We grab the references relevant to this statement and apply them in reverse order.
758 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
759 iParamRefFirst = iParamRef;
760 while True:
761 iParamRef += 1;
762 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
763 break;
764
765 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
766 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
767 oCurRef = self.aoParamRefs[iCurRef];
768 if oCurRef.iParam is not None:
769 assert oCurRef.oStmt == oStmt;
770 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
771 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
772 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
773 or oCurRef.fCustomRef), \
774 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
775 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
776 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
777 + oCurRef.sNewName \
778 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
779
780 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
781 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
782 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
783 assert len(oNewStmt.asParams) == 3;
784
785 if self.sVariation in self.kdVariationsWithFlatAddr16:
786 oNewStmt.asParams = [
787 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
788 ];
789 else:
790 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
791 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
792 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
793
794 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
795 oNewStmt.asParams = [
796 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
797 ];
798 else:
799 oNewStmt.asParams = [
800 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
801 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
802 ];
803 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
804 elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
805 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):
806 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
807 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
808 and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
809 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
810 oNewStmt.sName += '_THREADED';
811 if self.sVariation in (self.ksVariation_64, self.ksVariation_64_FsGs, self.ksVariation_64_Addr32):
812 oNewStmt.sName += '_PC64';
813 elif self.sVariation in (self.ksVariation_64f, self.ksVariation_64f_FsGs, self.ksVariation_64f_Addr32):
814 oNewStmt.sName += '_PC64_WITH_FLAGS';
815 elif self.sVariation == self.ksVariation_16_Pre386:
816 oNewStmt.sName += '_PC16';
817 elif self.sVariation == self.ksVariation_16f_Pre386:
818 oNewStmt.sName += '_PC16_WITH_FLAGS';
819 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
820 assert self.sVariation != self.ksVariation_Default;
821 oNewStmt.sName += '_PC32';
822 else:
823 oNewStmt.sName += '_PC32_WITH_FLAGS';
824
825 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
826 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
827 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
828 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
829 oNewStmt.sName += '_THREADED';
830
831 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
832 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
833 oNewStmt.sName += '_THREADED';
834 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
835
836 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
837 elif ( self.sVariation in self.kdVariationsWithFlatAddress
838 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
839 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
840 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
841 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
842 if idxEffSeg != -1:
843 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
844 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
845 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
846 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
847 oNewStmt.asParams.pop(idxEffSeg);
848 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
849
850 # ... PUSH and POP also needs flat variants, but these differ a little.
851 elif ( self.sVariation in self.kdVariationsWithFlatAddress
852 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
853 or oNewStmt.sName.startswith('IEM_MC_POP'))):
854 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in (self.ksVariation_64,
855 self.ksVariation_64f,))];
856
857
858 # Process branches of conditionals recursively.
859 if isinstance(oStmt, iai.McStmtCond):
860 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
861 if oStmt.aoElseBranch:
862 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);
863
864 return (aoThreadedStmts, iParamRef);
865
866
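    # Morph example (illustrative): in the ksVariation_64 variation an
    # IEM_MC_ADVANCE_RIP_AND_FINISH() statement picks up the 'cbInstr' parameter
    # reference and is renamed to IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64;
    # in ksVariation_64f it becomes ..._THREADED_PC64_WITH_FLAGS instead.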
867 def analyzeConsolidateThreadedParamRefs(self):
868 """
869 Consolidate threaded function parameter references into a dictionary
870 with lists of the references to each variable/field.
871 """
872 # Gather unique parameters.
873 self.dParamRefs = {};
874 for oRef in self.aoParamRefs:
875 if oRef.sStdRef not in self.dParamRefs:
876 self.dParamRefs[oRef.sStdRef] = [oRef,];
877 else:
878 self.dParamRefs[oRef.sStdRef].append(oRef);
879
880 # Generate names for them for use in the threaded function.
881 dParamNames = {};
882 for sName, aoRefs in self.dParamRefs.items():
883 # Morph the reference expression into a name.
884 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
885 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
886 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
887 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
888 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
889 elif sName.find('.') >= 0 or sName.find('->') >= 0:
890 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
891 else:
892 sName += 'P';
893
894 # Ensure it's unique.
895 if sName in dParamNames:
896 for i in range(10):
897 if sName + str(i) not in dParamNames:
898 sName += str(i);
899 break;
900 dParamNames[sName] = True;
901
902 # Update all the references.
903 for oRef in aoRefs:
904 oRef.sNewName = sName;
905
906 # Organize them by size too for the purpose of optimizing them.
907 dBySize = {} # type: Dict[int, List[str]]
908 for sStdRef, aoRefs in self.dParamRefs.items():
909 if aoRefs[0].sType[0] != 'P':
910 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
911 assert(cBits <= 64);
912 else:
913 cBits = 64;
914
915 if cBits not in dBySize:
916 dBySize[cBits] = [sStdRef,]
917 else:
918 dBySize[cBits].append(sStdRef);
919
920 # Pack the parameters as best as we can, starting with the largest ones
921 # and ASSUMING a 64-bit parameter size.
922 self.cMinParams = 0;
923 offNewParam = 0;
924 for cBits in sorted(dBySize.keys(), reverse = True):
925 for sStdRef in dBySize[cBits]:
926 if offNewParam == 0 or offNewParam + cBits > 64:
927 self.cMinParams += 1;
928 offNewParam = cBits;
929 else:
930 offNewParam += cBits;
931 assert(offNewParam <= 64);
932
933 for oRef in self.dParamRefs[sStdRef]:
934 oRef.iNewParam = self.cMinParams - 1;
935 oRef.offNewParam = offNewParam - cBits;
936
937 # Currently there are a few that require 4 parameters, list these so we can figure out why:
938 if self.cMinParams >= 4:
939 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
940 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
941
942 return True;
943
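    # Packing example (illustrative): four references of 32, 16, 8 and 4 bits
    # all fit into a single 64-bit parameter; the loop above assigns them
    # iNewParam = 0 with offNewParam 0, 32, 48 and 56 respectively, and
    # cMinParams ends up as 1.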
944 ksHexDigits = '0123456789abcdefABCDEF';
945
946 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
947 """
948 Scans the statements for things that have to be passed on to the threaded
949 function (populates self.aoParamRefs).
950 """
951 for oStmt in aoStmts:
952 # Some statements we can skip altogether.
953 if isinstance(oStmt, iai.McCppPreProc):
954 continue;
955 if oStmt.isCppStmt() and oStmt.fDecode:
956 continue;
957 if oStmt.sName in ('IEM_MC_BEGIN',):
958 continue;
959
960 if isinstance(oStmt, iai.McStmtVar):
961 if oStmt.sConstValue is None:
962 continue;
963 aiSkipParams = { 0: True, 1: True, 3: True };
964 else:
965 aiSkipParams = {};
966
967 # Several statements have implicit parameters and some have different parameters.
968 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
969 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
970 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
971 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
972 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
973 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
974
975 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
976 and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
977 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
978
979 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
980 # This is being pretty presumptive about bRm always being the RM byte...
981 assert len(oStmt.asParams) == 3;
982 assert oStmt.asParams[1] == 'bRm';
983
984 if self.sVariation in self.kdVariationsWithFlatAddr16:
985 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
986 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
987 'uint16_t', oStmt, sStdRef = 'u16Disp'));
988 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
989 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
990 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
991 'uint8_t', oStmt, sStdRef = 'bSib'));
992 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
993 'uint32_t', oStmt, sStdRef = 'u32Disp'));
994 else:
995 assert self.sVariation in self.kasVariationsWithAddressOnly64;
996 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
997 'uint8_t', oStmt, sStdRef = 'bRmEx'));
998 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
999 'uint8_t', oStmt, sStdRef = 'bSib'));
1000 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1001 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1002 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1003 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1004 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1005
1006 # 8-bit register accesses need to have their index argument reworked to take REX into account.
1007 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1008 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1009 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
1010 aiSkipParams[idxReg] = True; # Skip the parameter below.
1011
1012 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1013 if ( self.sVariation in self.kdVariationsWithFlatAddress
1014 and oStmt.sName in self.kdMemMcToFlatInfo
1015 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1016 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1017
1018 # Inspect the target of calls to see if we need to pass down a
1019 # function pointer or function table pointer for it to work.
1020 if isinstance(oStmt, iai.McStmtCall):
1021 if oStmt.sFn[0] == 'p':
1022 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1023 elif ( oStmt.sFn[0] != 'i'
1024 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1025 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1026 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1027 aiSkipParams[oStmt.idxFn] = True;
1028
1029 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1030 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1031 assert oStmt.idxFn == 1;
1032 aiSkipParams[0] = True;
1033
1034
1035 # Check all the parameters for bogus references.
1036 for iParam, sParam in enumerate(oStmt.asParams):
1037 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1038 # The parameter may contain a C expression, so we have to try to
1039 # extract the relevant bits, i.e. variables and fields while
1040 # ignoring operators and parentheses.
1041 offParam = 0;
1042 while offParam < len(sParam):
1043 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1044 ch = sParam[offParam];
1045 if ch.isalpha() or ch == '_':
1046 offStart = offParam;
1047 offParam += 1;
1048 while offParam < len(sParam):
1049 ch = sParam[offParam];
1050 if not ch.isalnum() and ch != '_' and ch != '.':
1051 if ch != '-' or sParam[offParam + 1] != '>':
1052 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1053 if ( ch == '('
1054 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1055 offParam += len('(pVM)->') - 1;
1056 else:
1057 break;
1058 offParam += 1;
1059 offParam += 1;
1060 sRef = sParam[offStart : offParam];
1061
1062 # For register references, we pass the full register index instead, as macros
1063 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg and the
1064 # threaded function will be more efficient if we just pass the register index
1065 # as a 4-bit param.
1066 if ( sRef.startswith('IEM_GET_MODRM')
1067 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
1068 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1069 if sParam[offParam] != '(':
1070 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1071 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1072 if asMacroParams is None:
1073 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1074 offParam = offCloseParam + 1;
1075 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1076 oStmt, iParam, offStart));
1077
1078 # We can skip known variables.
1079 elif sRef in self.oParent.dVariables:
1080 pass;
1081
1082 # Skip certain macro invocations.
1083 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1084 'IEM_GET_GUEST_CPU_FEATURES',
1085 'IEM_IS_GUEST_CPU_AMD',
1086 'IEM_IS_16BIT_CODE',
1087 'IEM_IS_32BIT_CODE',
1088 'IEM_IS_64BIT_CODE',
1089 ):
1090 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1091 if sParam[offParam] != '(':
1092 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1093 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1094 if asMacroParams is None:
1095 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1096 offParam = offCloseParam + 1;
1097
1098 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1099 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1100 'IEM_IS_16BIT_CODE',
1101 'IEM_IS_32BIT_CODE',
1102 'IEM_IS_64BIT_CODE',
1103 ):
1104 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1105 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1106 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1107 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1108 offParam += 1;
1109
1110 # Skip constants, globals, types (casts), sizeof and macros.
1111 elif ( sRef.startswith('IEM_OP_PRF_')
1112 or sRef.startswith('IEM_ACCESS_')
1113 or sRef.startswith('IEMINT_')
1114 or sRef.startswith('X86_GREG_')
1115 or sRef.startswith('X86_SREG_')
1116 or sRef.startswith('X86_EFL_')
1117 or sRef.startswith('X86_FSW_')
1118 or sRef.startswith('X86_FCW_')
1119 or sRef.startswith('X86_XCPT_')
1120 or sRef.startswith('IEMMODE_')
1121 or sRef.startswith('IEM_F_')
1122 or sRef.startswith('IEM_CIMPL_F_')
1123 or sRef.startswith('g_')
1124 or sRef.startswith('iemAImpl_')
1125 or sRef in ( 'int8_t', 'int16_t', 'int32_t',
1126 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1127 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1128 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1129 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1130 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1131 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1132 'RT_BIT_32', 'true', 'false', 'NIL_RTGCPTR',) ):
1133 pass;
1134
1135 # Skip certain macro invocations.
1136 # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1137 elif ( ( '.' not in sRef
1138 and '-' not in sRef
1139 and sRef not in ('pVCpu', ) )
1140 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1141 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1142 oStmt, iParam, offStart));
1143 # Number.
1144 elif ch.isdigit():
1145 if ( ch == '0'
1146 and offParam + 2 <= len(sParam)
1147 and sParam[offParam + 1] in 'xX'
1148 and sParam[offParam + 2] in self.ksHexDigits ):
1149 offParam += 2;
1150 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1151 offParam += 1;
1152 else:
1153 while offParam < len(sParam) and sParam[offParam].isdigit():
1154 offParam += 1;
1155 # Comment?
1156 elif ( ch == '/'
1157 and offParam + 4 <= len(sParam)
1158 and sParam[offParam + 1] == '*'):
1159 offParam += 2;
1160 offNext = sParam.find('*/', offParam);
1161 if offNext < offParam:
1162 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1163 offParam = offNext + 2;
1164 # Whatever else.
1165 else:
1166 offParam += 1;
1167
1168 # Traverse the branches of conditionals.
1169 if isinstance(oStmt, iai.McStmtCond):
1170 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1171 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1172 return True;
1173
1174 def analyzeVariation(self, aoStmts):
1175 """
1176 2nd part of the analysis, done on each variation.
1177
1178 The variations may differ in parameter requirements and will end up with
1179 slightly different MC sequences. Thus this is done on each individually.
1180
1181 Returns dummy True - raises exception on trouble.
1182 """
1183 # Now scan the code for variables and field references that need to
1184 # be passed to the threaded function because they are related to the
1185 # instruction decoding.
1186 self.analyzeFindThreadedParamRefs(aoStmts);
1187 self.analyzeConsolidateThreadedParamRefs();
1188
1189 # Morph the statement stream for the block into what we'll be using in the threaded function.
1190 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1191 if iParamRef != len(self.aoParamRefs):
1192 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1193
1194 return True;
1195
1196 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1197 """
1198 Produces generic C++ statements that emit a call to the threaded function
1199 variation and any subsequent checks that may be necessary after that.
1200
1201 The sCallVarNm, if given, is used instead of getIndexName() as the threaded function index argument.
1202 """
1203 aoStmts = [
1204 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1205 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1206 cchIndent = cchIndent), # Scope and a hook for various stuff.
1207 ];
1208
1209 # The call to the threaded function.
1210 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1211 for iParam in range(self.cMinParams):
1212 asFrags = [];
1213 for aoRefs in self.dParamRefs.values():
1214 oRef = aoRefs[0];
1215 if oRef.iNewParam == iParam:
1216 sCast = '(uint64_t)'
1217 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1218 sCast = '(uint64_t)(u' + oRef.sType + ')';
1219 if oRef.offNewParam == 0:
1220 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1221 else:
1222 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1223 assert asFrags;
1224 asCallArgs.append(' | '.join(asFrags));
1225
1226 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1227
1228 # For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1229 # mask and maybe emit additional checks.
1230 if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1231 or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1232 or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1233 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1234 cchIndent = cchIndent));
1235
1236 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1237 if not sCImplFlags:
1238 sCImplFlags = '0'
1239 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1240
1241 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1242 # indicate we should do so.
1243 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1244 asEndTbFlags = [];
1245 asTbBranchedFlags = [];
1246 for sFlag in self.oParent.dsCImplFlags:
1247 if self.kdCImplFlags[sFlag] is True:
1248 asEndTbFlags.append(sFlag);
1249 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1250 asTbBranchedFlags.append(sFlag);
1251 if asTbBranchedFlags:
1252 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1253 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1254 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1255 if asEndTbFlags:
1256 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1257 cchIndent = cchIndent));
1258
1259 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1260 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1261
1262 return aoStmts;
1263
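    # Shape of the emitted call (illustrative): with, say, a 16-bit displacement
    # packed at bit 0 and cbInstr at bit 16 of parameter 0, the generated line
    # looks roughly like
    #   IEM_MC2_EMIT_CALL_1(kIemThreadedFunc_Xxx,
    #                       (uint64_t)((uint16_t)uEffAddrInfo)
    #                       | ((uint64_t)(IEM_GET_INSTR_LEN(pVCpu)) << 16));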
1264
1265class ThreadedFunction(object):
1266 """
1267 A threaded function.
1268 """
1269
1270 def __init__(self, oMcBlock: iai.McBlock) -> None:
1271 self.oMcBlock = oMcBlock # type: iai.McBlock
1272 # The remaining fields are only useful after analyze() has been called:
1273 ## Variations for this block. There is at least one.
1274 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1275 ## Variation dictionary containing the same as aoVariations.
1276 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1277 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1278 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1279 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1280 ## and those determined by analyzeCodeOperation().
1281 self.dsCImplFlags = {} # type: Dict[str, bool]
1282
1283 @staticmethod
1284 def dummyInstance():
1285 """ Gets a dummy instance. """
1286 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1287 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1288
1289 def raiseProblem(self, sMessage):
1290 """ Raises a problem. """
1291 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1292
1293 def warning(self, sMessage):
1294 """ Emits a warning. """
1295 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1296
1297 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1298 """ Scans the statements for MC variables and call arguments. """
1299 for oStmt in aoStmts:
1300 if isinstance(oStmt, iai.McStmtVar):
1301 if oStmt.sVarName in self.dVariables:
1302 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1303 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1304
1305 # There shouldn't be any variables or arguments declared inside if/
1306 # else blocks, but scan them too to be on the safe side.
1307 if isinstance(oStmt, iai.McStmtCond):
1308 cBefore = len(self.dVariables);
1309 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1310 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1311 if len(self.dVariables) != cBefore:
1312 raise Exception('Variables/arguments defined in conditional branches!');
1313 return True;
1314
1315 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
1316 """
1317 Analyzes the code looking for clues as to additional side-effects.
1318
1319 Currently this is simply looking for branching and adding the relevant
1320 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1321 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1322 """
1323 for oStmt in aoStmts:
1324 # Set IEM_CIMPL_F_BRANCH_XXX if we see any branching MCs.
1325 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1326 assert not fSeenConditional;
1327 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1328 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1329 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1330 if fSeenConditional:
1331 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1332
1333 # Process branches of conditionals recursively.
1334 if isinstance(oStmt, iai.McStmtCond):
1335 self.analyzeCodeOperation(oStmt.aoIfBranch, True);
1336 if oStmt.aoElseBranch:
1337 self.analyzeCodeOperation(oStmt.aoElseBranch, True);
1338
1339 return True;
1340
1341 def analyze(self):
1342 """
1343 Analyzes the code, identifying the number of parameters it requires and such.
1344
1345 Returns dummy True - raises exception on trouble.
1346 """
1347
1348 # Check the block for errors before we proceed (will decode it).
1349 asErrors = self.oMcBlock.check();
1350 if asErrors:
1351 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
1352 for sError in asErrors]));
1353
1354 # Decode the block into a list/tree of McStmt objects.
1355 aoStmts = self.oMcBlock.decode();
1356
1357 # Scan the statements for local variables and call arguments (self.dVariables).
1358 self.analyzeFindVariablesAndCallArgs(aoStmts);
1359
1360 # Scan the code for IEM_CIMPL_F_ and other clues.
1361 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
1362 self.analyzeCodeOperation(aoStmts);
1363
1364 # Create variations as needed.
1365 if iai.McStmt.findStmtByNames(aoStmts,
1366 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
1367 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
1368 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
1369 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
1370 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
1371
1372 elif iai.McStmt.findStmtByNames(aoStmts, {'IEM_MC_CALC_RM_EFF_ADDR' : True,}):
1373 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1374 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
1375 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1376 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
1377 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1378 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
1379 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1380 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
1381 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1382 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1383 else:
1384 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
1385 else:
1386 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1387 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
1388 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1389 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
1390 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1391 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
1392 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1393 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
1394 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1395 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1396 else:
1397 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
1398
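# Blocks that never execute an IEM_MC_*_AND_FINISH statement have no use for the
# eflags checking & clearing variations, so drop those again.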
1399 if not iai.McStmt.findStmtByNames(aoStmts,
1400 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
1401 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
1402 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
1403 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
1404 }):
1405 asVariations = [sVariation for sVariation in asVariations
1406 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
1407
1408 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
1409
1410 # Dictionary variant of the list.
1411 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
1412
1413 # Continue the analysis on each variation.
1414 for oVariation in self.aoVariations:
1415 oVariation.analyzeVariation(aoStmts);
1416
1417 return True;
1418
1419 ## Used by emitThreadedCallStmts.
1420 kdVariationsWithNeedForPrefixCheck = {
1421 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
1422 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
1423 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
1424 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
1425 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
1426 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
1427 ThreadedFunctionVariation.ksVariation_32_Flat: True,
1428 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
1429 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
1430 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
1431 };
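# (These are the variations whose selection depends on the effective address size
#  or on an FS/GS/CS segment prefix; emitThreadedCallStmts below folds extra bits
#  into its switch value whenever any of them are present.)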
1432
1433 def emitThreadedCallStmts(self):
1434 """
1435 Worker for morphInputCode that returns a list of statements that emits
1436 the call to the threaded functions for the block.
1437 """
1438 # Special case for only default variation:
1439 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
1440 return self.aoVariations[0].emitThreadedCallStmts(0);
1441
1442 #
1443 # Case statement sub-class.
1444 #
1445 dByVari = self.dVariations;
1446 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
1447 class Case:
1448 def __init__(self, sCond, sVarNm = None):
1449 self.sCond = sCond;
1450 self.sVarNm = sVarNm;
1451 self.oVar = dByVari[sVarNm] if sVarNm else None;
1452 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
1453
1454 def toCode(self):
1455 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1456 if self.aoBody:
1457 aoStmts.extend(self.aoBody);
1458 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
1459 return aoStmts;
1460
1461 def toFunctionAssignment(self):
1462 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1463 if self.aoBody:
1464 aoStmts.extend([
1465 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
1466 iai.McCppGeneric('break;', cchIndent = 8),
1467 ]);
1468 return aoStmts;
1469
1470 def isSame(self, oThat):
1471 if not self.aoBody: # fall thru always matches.
1472 return True;
1473 if len(self.aoBody) != len(oThat.aoBody):
1474 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
1475 return False;
1476 for iStmt, oStmt in enumerate(self.aoBody):
1477 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
1478 assert isinstance(oStmt, iai.McCppGeneric);
1479 assert not isinstance(oStmt, iai.McStmtCond);
1480 if isinstance(oStmt, iai.McStmtCond):
1481 return False;
1482 if oStmt.sName != oThatStmt.sName:
1483 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
1484 return False;
1485 if len(oStmt.asParams) != len(oThatStmt.asParams):
1486 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
1487 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
1488 return False;
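# Compare the parameters, but let parameter #1 of IEM_MC2_EMIT_CALL_* statements
# (the threaded function index) differ between the two variations.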
1489 for iParam, sParam in enumerate(oStmt.asParams):
1490 if ( sParam != oThatStmt.asParams[iParam]
1491 and ( iParam != 1
1492 or not isinstance(oStmt, iai.McCppCall)
1493 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
1494 or sParam != self.oVar.getIndexName()
1495 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
1496 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
1497 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
1498 return False;
1499 return True;
1500
1501 #
1502 # Determine what we're switching on.
1503 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
1504 #
1505 fSimple = True;
1506 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
1507 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
1508 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
1509 # Accesses via FS, GS and CS go thru non-FLAT functions. (CS
1510 # is not writable in 32-bit mode (at least), thus the penalty mode
1511 # for any accesses via it (simpler this way).)
1512 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
1513 fSimple = False; # threaded functions.
1514 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1515 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
1516 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
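# The resulting switch value layout, when all the optional bits are in play:
#   bits 0-2: fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK), i.e. 0..7;
#   bit 3 (8):  the effective address size differs from the CPU mode;
#   bit 4 (16): the effective segment is FS, GS or CS (non-flat access);
#   bit 5 (32): eflags checking & clearing is called for (RFLAGS/inhibit-shadow or pending breakpoints).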
1517
1518 #
1519 # Generate the case statements.
1520 #
1521 # pylintx: disable=x
1522 aoCases = [];
1523 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
1524 assert not fSimple;
1525 aoCases.extend([
1526 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
1527 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
1528 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
1529 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
1530 ]);
1531 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
1532 aoCases.extend([
1533 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
1534 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
1535 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
1536 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
1537 ]);
1538 elif ThrdFnVar.ksVariation_64 in dByVari:
1539 assert fSimple;
1540 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
1541 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
1542 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
1543
1544 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
1545 assert not fSimple;
1546 aoCases.extend([
1547 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
1548 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
1549 Case('IEMMODE_32BIT | 16', None), # fall thru
1550 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1551 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
1552 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
1553 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
1554 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
1555 ]);
1556 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
1557 aoCases.extend([
1558 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
1559 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
1560 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
1561 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1562 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
1563 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
1564 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
1565 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
1566 ]);
1567 elif ThrdFnVar.ksVariation_32 in dByVari:
1568 assert fSimple;
1569 aoCases.extend([
1570 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1571 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1572 ]);
1573 if ThrdFnVar.ksVariation_32f in dByVari:
1574 aoCases.extend([
1575 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1576 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1577 ]);
1578
1579 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
1580 assert not fSimple;
1581 aoCases.extend([
1582 Case('IEMMODE_16BIT | 16', None), # fall thru
1583 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
1584 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
1585 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
1586 ]);
1587 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
1588 aoCases.extend([
1589 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
1590 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
1591 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
1592 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
1593 ]);
1594 elif ThrdFnVar.ksVariation_16 in dByVari:
1595 assert fSimple;
1596 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
1597 if ThrdFnVar.ksVariation_16f in dByVari:
1598 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
1599
1600 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
1601 if not fSimple:
1602 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
1603 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
1604 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
1605 if not fSimple:
1606 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
1607 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
1608
1609 #
1610 # If the case bodies are all the same, except for the function called,
1611 # we can reduce the code size and hopefully compile time.
1612 #
1613 iFirstCaseWithBody = 0;
1614 while not aoCases[iFirstCaseWithBody].aoBody:
1615 iFirstCaseWithBody += 1;
1616 fAllSameCases = True;
1617 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
1618 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
1619 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
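# When fAllSameCases is true we only switch on the function enum value and then
# emit one shared call sequence; the generated code looks roughly like this:
#     IEMTHREADEDFUNCS enmFunction;
#     switch (<switch-value>) { case <cond>: enmFunction = <function index>; break; ... default: ...RET(); }
#     <shared threaded call statements parameterized by enmFunction>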
1620 if fAllSameCases:
1621 aoStmts = [
1622 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
1623 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1624 iai.McCppGeneric('{'),
1625 ];
1626 for oCase in aoCases:
1627 aoStmts.extend(oCase.toFunctionAssignment());
1628 aoStmts.extend([
1629 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
1630 iai.McCppGeneric('}'),
1631 ]);
1632 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
1633
1634 else:
1635 #
1636 # Generate the generic switch statement.
1637 #
1638 aoStmts = [
1639 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1640 iai.McCppGeneric('{'),
1641 ];
1642 for oCase in aoCases:
1643 aoStmts.extend(oCase.toCode());
1644 aoStmts.extend([
1645 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
1646 iai.McCppGeneric('}'),
1647 ]);
1648
1649 return aoStmts;
1650
1651 def morphInputCode(self, aoStmts, fCallEmitted = False, cDepth = 0):
1652 """
1653 Adjusts (& copies) the statements for the input/decoder so it will emit
1654 calls to the right threaded functions for each block.
1655
1656 Returns list/tree of statements (aoStmts is not modified) and updated
1657 fCallEmitted status.
1658 """
1659 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
1660 aoDecoderStmts = [];
1661
1662 for oStmt in aoStmts:
1663 # Copy the statement. Make a deep copy to make sure we've got our own
1664 # copies of all instance variables, even if a bit overkill at the moment.
1665 oNewStmt = copy.deepcopy(oStmt);
1666 aoDecoderStmts.append(oNewStmt);
1667 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
1668 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
1669 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
1670
1671 # If we haven't emitted the threaded function call yet, look for
1672 # statements which it would naturally follow or precede.
1673 if not fCallEmitted:
1674 if not oStmt.isCppStmt():
1675 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
1676 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
1677 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
1678 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
1679 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
1680 aoDecoderStmts.pop();
1681 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1682 aoDecoderStmts.append(oNewStmt);
1683 fCallEmitted = True;
1684 elif ( oStmt.fDecode
1685 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
1686 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
1687 aoDecoderStmts.extend(self.emitThreadedCallStmts());
1688 fCallEmitted = True;
1689
1690 # Process branches of conditionals recursively.
1691 if isinstance(oStmt, iai.McStmtCond):
1692 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fCallEmitted, cDepth + 1);
1693 if oStmt.aoElseBranch:
1694 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fCallEmitted, cDepth + 1);
1695 else:
1696 fCallEmitted2 = False;
1697 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
1698
1699 if not fCallEmitted and cDepth == 0:
1700 self.raiseProblem('Unable to insert call to threaded function.');
1701
1702 return (aoDecoderStmts, fCallEmitted);
1703
1704
1705 def generateInputCode(self):
1706 """
1707 Modifies the input code.
1708 """
1709 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
1710
1711 if len(self.oMcBlock.aoStmts) == 1:
1712 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
1713 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
1714 if self.dsCImplFlags:
1715 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
1716 else:
1717 sCode += '0;\n';
1718 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1719 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1720 sIndent = ' ' * (min(cchIndent, 2) - 2);
1721 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
1722 return sCode;
1723
1724 # IEM_MC_BEGIN/END block
1725 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
1726 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
1727 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
1728
1729# Short alias for ThreadedFunctionVariation.
1730ThrdFnVar = ThreadedFunctionVariation;
1731
1732
1733class IEMThreadedGenerator(object):
1734 """
1735 The threaded code generator & annotator.
1736 """
1737
1738 def __init__(self):
1739 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
1740 self.oOptions = None # type: argparse.Namespace
1741 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
1742 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers giving the index of the first function.
1743
1744 #
1745 # Processing.
1746 #
1747
1748 def processInputFiles(self, sNativeRecompilerArch = None):
1749 """
1750 Process the input files.
1751 """
1752
1753 # Parse the files.
1754 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles);
1755
1756 # Create threaded functions for the MC blocks.
1757 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
1758
1759 # Analyze the threaded functions.
1760 dRawParamCounts = {};
1761 dMinParamCounts = {};
1762 for oThreadedFunction in self.aoThreadedFuncs:
1763 oThreadedFunction.analyze();
1764 for oVariation in oThreadedFunction.aoVariations:
1765 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
1766 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
1767 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
1768 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
1769 print('debug: %s params: %4s raw, %4s min'
1770 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
1771 file = sys.stderr);
1772
1773 # Populate aidxFirstFunctions. This is ASSUMING that
1774 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
1775 iThreadedFunction = 0;
1776 oThreadedFunction = self.getThreadedFunctionByIndex(0);
1777 self.aidxFirstFunctions = [];
1778 for oParser in self.aoParsers:
1779 self.aidxFirstFunctions.append(iThreadedFunction);
1780
1781 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
1782 iThreadedFunction += 1;
1783 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
1784
1785 # Analyze the threaded functions and their variations for native recompilation.
1786 if sNativeRecompilerArch:
1787 print('todo:', file = sys.stderr);
1788 cTotal = 0;
1789 cNative = 0;
1790 for oThreadedFunction in self.aoThreadedFuncs:
1791 for oVariation in oThreadedFunction.aoVariations:
1792 cTotal += 1;
1793 oVariation.oNativeRecomp = ian.analyzeVariantForNativeRecomp(oVariation, sNativeRecompilerArch);
1794 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
1795 cNative += 1;
1796 print('todo: %.1f%% / %u out of %u threaded function variations are recompilable'
1797 % (cNative * 100.0 / cTotal, cNative, cTotal), file = sys.stderr);
1798 if ian.g_dUnsupportedMcStmtLastOneStats:
1799 asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneStats, reverse = True,
1800 key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneStats[sSortKey]))[:16];
1801 print('todo:', file = sys.stderr);
1802 print('todo: Top %s variations with one unsupported statement dependency:' % (len(asTopKeys),),
1803 file = sys.stderr);
1804 cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
1805 for sKey in asTopKeys:
1806 print('todo: %*s = %s (%s%s)'
1807 % (cchMaxKey, sKey, len(ian.g_dUnsupportedMcStmtLastOneStats[sKey]),
1808 ', '.join([oVar.getShortName() for oVar in ian.g_dUnsupportedMcStmtLastOneStats[sKey][:5]]),
1809 ',...' if len(ian.g_dUnsupportedMcStmtLastOneStats[sKey]) >= 5 else '', )
1810 , file = sys.stderr);
1811
1812 asTopKeys = sorted(ian.g_dUnsupportedMcStmtStats, reverse = True,
1813 key = lambda sSortKey: ian.g_dUnsupportedMcStmtStats[sSortKey])[:16];
1814 print('todo:', file = sys.stderr);
1815 print('todo: Top %d most used unimplemented statements:' % (len(asTopKeys),), file = sys.stderr);
1816 cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
1817 for i in range(0, len(asTopKeys), 2):
1818 print('todo: %*s = %4d %*s = %4d'
1819 % ( cchMaxKey, asTopKeys[i], ian.g_dUnsupportedMcStmtStats[asTopKeys[i]],
1820 cchMaxKey, asTopKeys[i + 1], ian.g_dUnsupportedMcStmtStats[asTopKeys[i + 1]],),
1821 file = sys.stderr);
1822 print('todo:', file = sys.stderr);
1823
1824 if ian.g_dUnsupportedMcStmtLastOneVarStats:
1825 asTopKeys = sorted(ian.g_dUnsupportedMcStmtLastOneVarStats, reverse = True,
1826 key = lambda sSortKey: len(ian.g_dUnsupportedMcStmtLastOneVarStats[sSortKey]))[:10];
1827 print('todo:', file = sys.stderr);
1828 print('todo: Top %s variations with variables and one unsupported statement dependency:' % (len(asTopKeys),),
1829 file = sys.stderr);
1830 cchMaxKey = max([len(sKey) for sKey in asTopKeys]);
1831 for sKey in asTopKeys:
1832 print('todo: %*s = %s (%s%s)'
1833 % (cchMaxKey, sKey, len(ian.g_dUnsupportedMcStmtLastOneVarStats[sKey]),
1834 ', '.join([oVar.getShortName() for oVar in ian.g_dUnsupportedMcStmtLastOneVarStats[sKey][:5]]),
1835 ',...' if len(ian.g_dUnsupportedMcStmtLastOneVarStats[sKey]) >= 5 else '', )
1836 , file = sys.stderr);
1837
1838
1839 # Gather arguments + variable statistics for the MC blocks.
1840 cMaxArgs = 0;
1841 cMaxVars = 0;
1842 cMaxVarsAndArgs = 0;
1843 cbMaxArgs = 0;
1844 cbMaxVars = 0;
1845 cbMaxVarsAndArgs = 0;
1846 for oThreadedFunction in self.aoThreadedFuncs:
1847 if oThreadedFunction.oMcBlock.cLocals >= 0:
1848 # Counts.
1849 assert oThreadedFunction.oMcBlock.cArgs >= 0;
1850 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
1851 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
1852 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
1853 if cMaxVarsAndArgs > 9:
1854 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
1855 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
1856 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
1857 # Calc stack allocation size:
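# (Each argument/variable below is rounded up to whole 64-bit / 8-byte stack slots.)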
1858 cbArgs = 0;
1859 for oArg in oThreadedFunction.oMcBlock.aoArgs:
1860 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
1861 cbVars = 0;
1862 for oVar in oThreadedFunction.oMcBlock.aoLocals:
1863 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
1864 cbMaxVars = max(cbMaxVars, cbVars);
1865 cbMaxArgs = max(cbMaxArgs, cbArgs);
1866 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
1867 if cbMaxVarsAndArgs >= 0xc0:
1868 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
1869 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
1870
1871 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
1872 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
1873
1874 return True;
1875
1876 #
1877 # Output
1878 #
1879
1880 def generateLicenseHeader(self):
1881 """
1882 Returns the lines for a license header.
1883 """
1884 return [
1885 '/*',
1886 ' * Autogenerated by $Id: IEMAllThrdPython.py 101704 2023-11-01 23:47:07Z vboxsync $ ',
1887 ' * Do not edit!',
1888 ' */',
1889 '',
1890 '/*',
1891 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
1892 ' *',
1893 ' * This file is part of VirtualBox base platform packages, as',
1894 ' * available from https://www.virtualbox.org.',
1895 ' *',
1896 ' * This program is free software; you can redistribute it and/or',
1897 ' * modify it under the terms of the GNU General Public License',
1898 ' * as published by the Free Software Foundation, in version 3 of the',
1899 ' * License.',
1900 ' *',
1901 ' * This program is distributed in the hope that it will be useful, but',
1902 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
1903 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
1904 ' * General Public License for more details.',
1905 ' *',
1906 ' * You should have received a copy of the GNU General Public License',
1907 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
1908 ' *',
1909 ' * The contents of this file may alternatively be used under the terms',
1910 ' * of the Common Development and Distribution License Version 1.0',
1911 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
1912 ' * in the VirtualBox distribution, in which case the provisions of the',
1913 ' * CDDL are applicable instead of those of the GPL.',
1914 ' *',
1915 ' * You may elect to license modified versions of this file under the',
1916 ' * terms and conditions of either the GPL or the CDDL or both.',
1917 ' *',
1918 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
1919 ' */',
1920 '',
1921 '',
1922 '',
1923 ];
1924
1925 ## List of built-in threaded functions with user argument counts and
1926 ## whether they have a native recompiler implementation.
1927 katBltIns = (
1928 ( 'DeferToCImpl0', 2, True ),
1929 ( 'CheckIrq', 0, True ),
1930 ( 'CheckMode', 1, True ),
1931 ( 'CheckHwInstrBps', 0, False ),
1932 ( 'CheckCsLim', 1, False ),
1933
1934 ( 'CheckCsLimAndOpcodes', 3, False ),
1935 ( 'CheckOpcodes', 3, False ),
1936 ( 'CheckOpcodesConsiderCsLim', 3, False ),
1937
1938 ( 'CheckCsLimAndPcAndOpcodes', 3, False ),
1939 ( 'CheckPcAndOpcodes', 3, False ),
1940 ( 'CheckPcAndOpcodesConsiderCsLim', 3, False ),
1941
1942 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, False ),
1943 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, False ),
1944 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, False ),
1945
1946 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, False ),
1947 ( 'CheckOpcodesLoadingTlb', 3, False ),
1948 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, False ),
1949
1950 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, False ),
1951 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, False ),
1952 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, False ),
1953
1954 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, False ),
1955 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, False ),
1956 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, False ),
1957 );
1958
1959 def generateThreadedFunctionsHeader(self, oOut):
1960 """
1961 Generates the threaded functions header file.
1962 Returns success indicator.
1963 """
1964
1965 asLines = self.generateLicenseHeader();
1966
1967 # Generate the threaded function table indexes.
1968 asLines += [
1969 'typedef enum IEMTHREADEDFUNCS',
1970 '{',
1971 ' kIemThreadedFunc_Invalid = 0,',
1972 '',
1973 ' /*',
1974 ' * Predefined',
1975 ' */',
1976 ];
1977 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
1978
1979 iThreadedFunction = 1 + len(self.katBltIns);
1980 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
1981 asLines += [
1982 '',
1983 ' /*',
1984 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
1985 ' */',
1986 ];
1987 for oThreadedFunction in self.aoThreadedFuncs:
1988 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
1989 if oVariation:
1990 iThreadedFunction += 1;
1991 oVariation.iEnumValue = iThreadedFunction;
1992 asLines.append(' ' + oVariation.getIndexName() + ',');
1993 asLines += [
1994 ' kIemThreadedFunc_End',
1995 '} IEMTHREADEDFUNCS;',
1996 '',
1997 ];
1998
1999 # Prototype the function table.
2000 asLines += [
2001 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2002 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2003 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2004 '#endif',
2005 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2006 ];
2007
2008 oOut.write('\n'.join(asLines));
2009 return True;
2010
2011 ksBitsToIntMask = {
2012 1: "UINT64_C(0x1)",
2013 2: "UINT64_C(0x3)",
2014 4: "UINT64_C(0xf)",
2015 8: "UINT64_C(0xff)",
2016 16: "UINT64_C(0xffff)",
2017 32: "UINT64_C(0xffffffff)",
2018 };
2019
2020 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2021 """
2022 Outputs code for unpacking parameters.
2023 This is shared by the threaded and native code generators.
2024 """
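# Illustrative output line (the variable name is hypothetical) for an 8-bit value
# packed at bit offset 16 of the first parameter of a threaded function:
#     uint8_t const iEffSeg = (uint8_t)((uParam0 >> 16) & UINT64_C(0xff)); /* iEffSeg - 1 ref */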
2025 aasVars = [];
2026 for aoRefs in oVariation.dParamRefs.values():
2027 oRef = aoRefs[0];
2028 if oRef.sType[0] != 'P':
2029 cBits = g_kdTypeInfo[oRef.sType][0];
2030 sType = g_kdTypeInfo[oRef.sType][2];
2031 else:
2032 cBits = 64;
2033 sType = oRef.sType;
2034
2035 sTypeDecl = sType + ' const';
2036
2037 if cBits == 64:
2038 assert oRef.offNewParam == 0;
2039 if sType == 'uint64_t':
2040 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2041 else:
2042 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2043 elif oRef.offNewParam == 0:
2044 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2045 else:
2046 sUnpack = '(%s)((%s >> %s) & %s);' \
2047 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2048
2049 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2050
2051 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2052 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2053 acchVars = [0, 0, 0, 0, 0];
2054 for asVar in aasVars:
2055 for iCol, sStr in enumerate(asVar):
2056 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2057 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2058 for asVar in sorted(aasVars):
2059 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2060 return True;
2061
2062 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2063 def generateThreadedFunctionsSource(self, oOut):
2064 """
2065 Generates the threaded functions source file.
2066 Returns success indicator.
2067 """
2068
2069 asLines = self.generateLicenseHeader();
2070 oOut.write('\n'.join(asLines));
2071
2072 #
2073 # Emit the function definitions.
2074 #
2075 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2076 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2077 oOut.write( '\n'
2078 + '\n'
2079 + '\n'
2080 + '\n'
2081 + '/*' + '*' * 128 + '\n'
2082 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2083 + '*' * 128 + '*/\n');
2084
2085 for oThreadedFunction in self.aoThreadedFuncs:
2086 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2087 if oVariation:
2088 oMcBlock = oThreadedFunction.oMcBlock;
2089
2090 # Function header
2091 oOut.write( '\n'
2092 + '\n'
2093 + '/**\n'
2094 + ' * #%u: %s at line %s offset %s in %s%s\n'
2095 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2096 os.path.split(oMcBlock.sSrcFile)[1],
2097 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2098 + ' */\n'
2099 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2100 + '{\n');
2101
2102 # Unpack parameters.
2103 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2104
2105 # RT_NOREF for unused parameters.
2106 if oVariation.cMinParams < g_kcThreadedParams:
2107 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
2108
2109 # Now for the actual statements.
2110 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2111
2112 oOut.write('}\n');
2113
2114
2115 #
2116 # Generate the output tables in parallel.
2117 #
2118 asFuncTable = [
2119 '/**',
2120 ' * Function pointer table.',
2121 ' */',
2122 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2123 '{',
2124 ' /*Invalid*/ NULL,',
2125 ];
2126 asNameTable = [
2127 '/**',
2128 ' * Function name table.',
2129 ' */',
2130 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2131 '{',
2132 ' "Invalid",',
2133 ];
2134 asArgCntTab = [
2135 '/**',
2136 ' * Argument count table.',
2137 ' */',
2138 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2139 '{',
2140 ' 0, /*Invalid*/',
2141 ];
2142 aasTables = (asFuncTable, asNameTable, asArgCntTab,);
2143
2144 for asTable in aasTables:
2145 asTable.extend((
2146 '',
2147 ' /*',
2148 ' * Predefined.',
2149 ' */',
2150 ));
2151 for sFuncNm, cArgs, _ in self.katBltIns:
2152 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2153 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2154 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2155
2156 iThreadedFunction = 1 + len(self.katBltIns);
2157 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2158 for asTable in aasTables:
2159 asTable.extend((
2160 '',
2161 ' /*',
2162 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
2163 ' */',
2164 ));
2165 for oThreadedFunction in self.aoThreadedFuncs:
2166 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2167 if oVariation:
2168 iThreadedFunction += 1;
2169 assert oVariation.iEnumValue == iThreadedFunction;
2170 sName = oVariation.getThreadedFunctionName();
2171 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
2172 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
2173 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
2174
2175 for asTable in aasTables:
2176 asTable.append('};');
2177
2178 #
2179 # Output the tables.
2180 #
2181 oOut.write( '\n'
2182 + '\n');
2183 oOut.write('\n'.join(asFuncTable));
2184 oOut.write( '\n'
2185 + '\n'
2186 + '\n'
2187 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
2188 oOut.write('\n'.join(asNameTable));
2189 oOut.write( '\n'
2190 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
2191 + '\n'
2192 + '\n');
2193 oOut.write('\n'.join(asArgCntTab));
2194 oOut.write('\n');
2195
2196 return True;
2197
2198 def generateNativeFunctionsHeader(self, oOut):
2199 """
2200 Generates the native recompiler functions header file.
2201 Returns success indicator.
2202 """
2203 if not self.oOptions.sNativeRecompilerArch:
2204 return True;
2205
2206 asLines = self.generateLicenseHeader();
2207
2208 # Prototype the function table.
2209 asLines += [
2210 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2211 '',
2212 ];
2213
2214 oOut.write('\n'.join(asLines));
2215 return True;
2216
2217 def generateNativeFunctionsSource(self, oOut):
2218 """
2219 Generates the native recompiler functions source file.
2220 Returns success indicator.
2221 """
2222 if not self.oOptions.sNativeRecompilerArch:
2223 return True;
2224
2225 #
2226 # The file header.
2227 #
2228 oOut.write('\n'.join(self.generateLicenseHeader()));
2229
2230 #
2231 # Emit the functions.
2232 #
2233 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2234 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2235 oOut.write( '\n'
2236 + '\n'
2237 + '\n'
2238 + '\n'
2239 + '/*' + '*' * 128 + '\n'
2240 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2241 + '*' * 128 + '*/\n');
2242
2243 for oThreadedFunction in self.aoThreadedFuncs:
2244 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2245 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2246 oMcBlock = oThreadedFunction.oMcBlock;
2247
2248 # Function header
2249 oOut.write( '\n'
2250 + '\n'
2251 + '/**\n'
2252 + ' * #%u: %s at line %s offset %s in %s%s\n'
2253 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2254 os.path.split(oMcBlock.sSrcFile)[1],
2255 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2256 + ' */\n'
2257 + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
2258 + '{\n');
2259
2260 # Unpack parameters.
2261 self.generateFunctionParameterUnpacking(oVariation, oOut,
2262 ('pCallEntry->auParams[0]',
2263 'pCallEntry->auParams[1]',
2264 'pCallEntry->auParams[2]',));
2265
2266 # Now for the actual statements.
2267 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2268
2269 oOut.write('}\n');
2270
2271 #
2272 # Output the function table.
2273 #
2274 oOut.write( '\n'
2275 + '\n'
2276 + '/*\n'
2277 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2278 + ' */\n'
2279 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
2280 + '{\n'
2281 + ' /*Invalid*/ NULL,'
2282 + '\n'
2283 + ' /*\n'
2284 + ' * Predefined.\n'
2285 + ' */\n'
2286 );
2287 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2288 if fHaveRecompFunc:
2289 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,));
2290 else:
2291 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,));
2292
2293 iThreadedFunction = 1 + len(self.katBltIns);
2294 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2295 oOut.write( ' /*\n'
2296 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2297 + ' */\n');
2298 for oThreadedFunction in self.aoThreadedFuncs:
2299 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2300 if oVariation:
2301 iThreadedFunction += 1;
2302 assert oVariation.iEnumValue == iThreadedFunction;
2303 sName = oVariation.getNativeFunctionName();
2304 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2305 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2306 else:
2307 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2308
2309 oOut.write( '};\n'
2310 + '\n');
2311 return True;
2312
2313
2314 def getThreadedFunctionByIndex(self, idx):
2315 """
2316 Returns a ThreadedFunction object for the given index. If the index is
2317 out of bounds, a dummy is returned.
2318 """
2319 if idx < len(self.aoThreadedFuncs):
2320 return self.aoThreadedFuncs[idx];
2321 return ThreadedFunction.dummyInstance();
2322
2323 def generateModifiedInput(self, oOut, idxFile):
2324 """
2325 Generates the combined modified input source/header file.
2326 Returns success indicator.
2327 """
2328 #
2329 # File header and assert assumptions.
2330 #
2331 oOut.write('\n'.join(self.generateLicenseHeader()));
2332 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
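# The AssertCompile above guards the mode-mask layout that emitThreadedCallStmts
# ASSUMES when constructing its switch value.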
2333
2334 #
2335 # Iterate all parsers (input files) and output the ones related to the
2336 # file set given by idxFile.
2337 #
2338 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
2339 # Is this included in the file set?
2340 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
2341 fInclude = -1;
2342 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
2343 if sSrcBaseFile == aoInfo[0].lower():
2344 fInclude = aoInfo[2] in (-1, idxFile);
2345 break;
2346 if fInclude is not True:
2347 assert fInclude is False;
2348 continue;
2349
2350 # Output it.
2351 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
2352
2353 iThreadedFunction = self.aidxFirstFunctions[idxParser];
2354 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2355 iLine = 0;
2356 while iLine < len(oParser.asLines):
2357 sLine = oParser.asLines[iLine];
2358 iLine += 1; # iBeginLine and iEndLine are 1-based.
2359
2360 # Can we pass it thru?
2361 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
2362 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
2363 oOut.write(sLine);
2364 #
2365 # Single MC block. Just extract it and insert the replacement.
2366 #
2367 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
2368 assert sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1, 'sLine="%s"' % (sLine,);
2369 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
2370 sModified = oThreadedFunction.generateInputCode().strip();
2371 oOut.write(sModified);
2372
2373 iLine = oThreadedFunction.oMcBlock.iEndLine;
2374 sLine = oParser.asLines[iLine - 1];
2375 assert sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1 or len(oThreadedFunction.oMcBlock.aoStmts) == 1;
2376 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
2377
2378 # Advance
2379 iThreadedFunction += 1;
2380 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2381 #
2382 # Macro expansion line that has sublines and may contain multiple MC blocks.
2383 #
2384 else:
2385 offLine = 0;
2386 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
2387 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
2388
2389 sModified = oThreadedFunction.generateInputCode().strip();
2390 assert ( sModified.startswith('IEM_MC_BEGIN')
2391 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
2392 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
2393 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
2394 ), 'sModified="%s"' % (sModified,);
2395 oOut.write(sModified);
2396
2397 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
2398
2399 # Advance
2400 iThreadedFunction += 1;
2401 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2402
2403 # Last line segment.
2404 if offLine < len(sLine):
2405 oOut.write(sLine[offLine : ]);
2406
2407 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
2408
2409 return True;
2410
2411 def generateModifiedInput1(self, oOut):
2412 """
2413 Generates the combined modified input source/header file, part 1.
2414 Returns success indicator.
2415 """
2416 return self.generateModifiedInput(oOut, 1);
2417
2418 def generateModifiedInput2(self, oOut):
2419 """
2420 Generates the combined modified input source/header file, part 2.
2421 Returns success indicator.
2422 """
2423 return self.generateModifiedInput(oOut, 2);
2424
2425 def generateModifiedInput3(self, oOut):
2426 """
2427 Generates the combined modified input source/header file, part 3.
2428 Returns success indicator.
2429 """
2430 return self.generateModifiedInput(oOut, 3);
2431
2432 def generateModifiedInput4(self, oOut):
2433 """
2434 Generates the combined modified input source/header file, part 4.
2435 Returns success indicator.
2436 """
2437 return self.generateModifiedInput(oOut, 4);
2438
2439
2440 #
2441 # Main
2442 #
2443
2444 def main(self, asArgs):
2445 """
2446 C-like main function.
2447 Returns exit code.
2448 """
2449
2450 #
2451 # Parse arguments
2452 #
2453 sScriptDir = os.path.dirname(__file__);
2454 oParser = argparse.ArgumentParser(add_help = False);
2455 oParser.add_argument('asInFiles',
2456 metavar = 'input.cpp.h',
2457 nargs = '*',
2458 default = [os.path.join(sScriptDir, aoInfo[0])
2459 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2460 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2461 oParser.add_argument('--out-thrd-funcs-hdr',
2462 metavar = 'file-thrd-funcs.h',
2463 dest = 'sOutFileThrdFuncsHdr',
2464 action = 'store',
2465 default = '-',
2466 help = 'The output header file for the threaded functions.');
2467 oParser.add_argument('--out-thrd-funcs-cpp',
2468 metavar = 'file-thrd-funcs.cpp',
2469 dest = 'sOutFileThrdFuncsCpp',
2470 action = 'store',
2471 default = '-',
2472 help = 'The output C++ file for the threaded functions.');
2473 oParser.add_argument('--out-n8ve-funcs-hdr',
2474 metavar = 'file-n8tv-funcs.h',
2475 dest = 'sOutFileN8veFuncsHdr',
2476 action = 'store',
2477 default = '-',
2478 help = 'The output header file for the native recompiler functions.');
2479 oParser.add_argument('--out-n8ve-funcs-cpp',
2480 metavar = 'file-n8tv-funcs.cpp',
2481 dest = 'sOutFileN8veFuncsCpp',
2482 action = 'store',
2483 default = '-',
2484 help = 'The output C++ file for the native recompiler functions.');
2485 oParser.add_argument('--native-arch',
2486 metavar = 'arch',
2487 dest = 'sNativeRecompilerArch',
2488 action = 'store',
2489 default = None,
2490 help = 'The host architecture for the native recompiler. No default as it enables/disables '
2491 + 'generating the files related to native recompilation.');
2492 oParser.add_argument('--out-mod-input1',
2493 metavar = 'file-instr.cpp.h',
2494 dest = 'sOutFileModInput1',
2495 action = 'store',
2496 default = '-',
2497 help = 'The output C++/header file for modified input instruction files part 1.');
2498 oParser.add_argument('--out-mod-input2',
2499 metavar = 'file-instr.cpp.h',
2500 dest = 'sOutFileModInput2',
2501 action = 'store',
2502 default = '-',
2503 help = 'The output C++/header file for modified input instruction files part 2.');
2504 oParser.add_argument('--out-mod-input3',
2505 metavar = 'file-instr.cpp.h',
2506 dest = 'sOutFileModInput3',
2507 action = 'store',
2508 default = '-',
2509 help = 'The output C++/header file for modified input instruction files part 3.');
2510 oParser.add_argument('--out-mod-input4',
2511 metavar = 'file-instr.cpp.h',
2512 dest = 'sOutFileModInput4',
2513 action = 'store',
2514 default = '-',
2515 help = 'The output C++/header file for modified input instruction files part 4.');
2516 oParser.add_argument('--help', '-h', '-?',
2517 action = 'help',
2518 help = 'Display help and exit.');
2519 oParser.add_argument('--version', '-V',
2520 action = 'version',
2521 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
2522 % (__version__.split()[1], iai.__version__.split()[1],),
2523 help = 'Display the version/revision of the script and exit.');
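# Illustrative invocation (the file names and architecture value are examples only;
# the build system supplies the real ones):
#   IEMAllThrdPython.py --native-arch amd64 --out-thrd-funcs-hdr IEMThreadedFunctions.h \
#       --out-thrd-funcs-cpp IEMThreadedFunctions.cpp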
2524 self.oOptions = oParser.parse_args(asArgs[1:]);
2525 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
2526
2527 #
2528 # Process the instructions specified in the IEM sources.
2529 #
2530 if self.processInputFiles(self.oOptions.sNativeRecompilerArch):
2531 #
2532 # Generate the output files.
2533 #
2534 aaoOutputFiles = (
2535 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
2536 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
2537 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
2538 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
2539 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
2540 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
2541 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
2542 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
2543 );
2544 fRc = True;
2545 for sOutFile, fnGenMethod in aaoOutputFiles:
2546 if sOutFile == '-':
2547 fRc = fnGenMethod(sys.stdout) and fRc;
2548 else:
2549 try:
2550 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
2551 except Exception as oXcpt:
2552 print('error! Failed to open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
2553 return 1;
2554 fRc = fnGenMethod(oOut) and fRc;
2555 oOut.close();
2556 if fRc:
2557 return 0;
2558
2559 return 1;
2560
2561
2562if __name__ == '__main__':
2563 sys.exit(IEMThreadedGenerator().main(sys.argv));
2564