VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 105250

Last change on this file since 105250 was 105235, checked in by vboxsync, 7 months ago

VMM/IEM: Implement vsqrtps,vsqrtpd instruction emulations, bugref:9898

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 191.8 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 105235 2024-07-09 12:30:38Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 105235 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
if sys.version_info[0] >= 3:
    # Python 3 has no 'long' type; alias it to 'int' so code written for the
    # Python 2 era keeps working unmodified.
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;
55
## Type name -> (size in bits, signedness, C type name) info table for the
## basic integer-like types.
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type      )
    'int8_t':       (    8,    True, 'int8_t',    ),
    'int16_t':      (   16,    True, 'int16_t',   ),
    'int32_t':      (   32,    True, 'int32_t',   ),
    'int64_t':      (   64,    True, 'int64_t',   ),
    'uint4_t':      (    4,   False, 'uint8_t',   ),
    'uint8_t':      (    8,   False, 'uint8_t',   ),
    'uint16_t':     (   16,   False, 'uint16_t',  ),
    'uint32_t':     (   32,   False, 'uint32_t',  ),
    'uint64_t':     (   64,   False, 'uint64_t',  ),
    'uintptr_t':    (   64,   False, 'uintptr_t', ), # ASSUMES 64-bit host pointer size.
    'bool':         (    1,   False, 'bool',      ),
    'IEMMODE':      (    2,   False, 'IEMMODE',   ),
};

# Only for getTypeBitCount/variables.
## Extends g_kdTypeInfo with larger aggregate/SIMD types (merged below via
## update() since the dict-union operator would require Python 3.9).
g_kdTypeInfo2 = {
    'RTFLOAT32U':        (       32, False, 'RTFLOAT32U',       ),
    'RTFLOAT64U':        (       64, False, 'RTFLOAT64U',       ),
    'RTUINT64U':         (       64, False, 'RTUINT64U',        ),
    'RTGCPTR':           (       64, False, 'RTGCPTR',          ),
    'RTPBCD80U':         (       80, False, 'RTPBCD80U',        ),
    'RTFLOAT80U':        (       80, False, 'RTFLOAT80U',       ),
    'IEMFPURESULT':      (    80+16, False, 'IEMFPURESULT',     ),
    'IEMFPURESULTTWO':   ( 80+16+80, False, 'IEMFPURESULTTWO',  ),
    'RTUINT128U':        (      128, False, 'RTUINT128U',       ),
    'X86XMMREG':         (      128, False, 'X86XMMREG',        ),
    'X86YMMREG':         (      256, False, 'X86YMMREG',        ),
    'IEMMEDIAF2XMMSRC':  (      256, False, 'IEMMEDIAF2XMMSRC', ),
    'RTUINT256U':        (      256, False, 'RTUINT256U',       ),
    'IEMPCMPISTRXSRC':   (      256, False, 'IEMPCMPISTRXSRC',  ),
    'IEMPCMPESTRXSRC':   (      384, False, 'IEMPCMPESTRXSRC',  ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);
91
def getTypeBitCount(sType):
    """
    Translate a type to size in bits
    """
    aoInfo = g_kdTypeInfo2.get(sType);
    if aoInfo is not None:
        return aoInfo[0];
    # Unknown type: pointers ('*' or the P-prefix typedef convention) are taken
    # to be 64-bit host pointers; anything else is reported but still treated
    # as 64 bits wide.
    if '*' not in sType and sType[0] != 'P':
        print('error: Unknown type: %s' % (sType,));
    return 64;
103
## Maps IEMCPU fields (pVCpu->iem.s.*) to the type used when referencing them.
## Entries with None mark fields that must not be referenced (see the
## 'Illegal ones' group).  Consumed by analyzeReferenceToType.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte':     ( None, ),
    'cbInstrBuf':           ( None, ),
    'pbInstrBuf':           ( None, ),
    'uInstrBufPc':          ( None, ),
    'cbInstrBufTotal':      ( None, ),
    'offCurInstrStart':     ( None, ),
    'cbOpcode':             ( None, ),
    'offOpcode':            ( None, ),
    'offModRm':             ( None, ),
    # Okay ones.
    'fPrefixes':            ( 'uint32_t', ),
    'uRexReg':              ( 'uint8_t', ),
    'uRexB':                ( 'uint8_t', ),
    'uRexIndex':            ( 'uint8_t', ),
    'iEffSeg':              ( 'uint8_t', ),
    'enmEffOpSize':         ( 'IEMMODE', ),
    'enmDefAddrMode':       ( 'IEMMODE', ),
    'enmEffAddrMode':       ( 'IEMMODE', ),
    'enmDefOpSize':         ( 'IEMMODE', ),
    'idxPrefix':            ( 'uint8_t', ),
    'uVex3rdReg':           ( 'uint8_t', ),
    'uVexLength':           ( 'uint8_t', ),
    'fEvexStuff':           ( 'uint8_t', ),
    'uFpuOpcode':           ( 'uint16_t', ),
};
131
## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
## String tokens annotating how a conditional branch finishes.
## @{
g_ksFinishAnnotation_Advance       = 'Advance';
g_ksFinishAnnotation_RelJmp        = 'RelJmp';
g_ksFinishAnnotation_SetJmp        = 'SetJmp';
g_ksFinishAnnotation_RelCall       = 'RelCall';
g_ksFinishAnnotation_IndCall       = 'IndCall';
g_ksFinishAnnotation_DeferToCImpl  = 'DeferToCImpl';
## @}
142
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.

    Tracks a value referenced by the original decoder code and how it ends up
    packed into one of the generic threaded function parameters.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef     = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        if sStdRef:
            self.sStdRef = sStdRef;
        else:
            self.sStdRef = ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter (a custom sStdRef was supplied).
        self.fCustomRef  = sStdRef is not None;
        ## The type (typically derived).
        self.sType       = sType;
        ## The statement making the reference.
        self.oStmt       = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam      = iParam;
        ## The offset in the parameter of the reference.
        self.offParam    = offParam;

        ## The variable name in the threaded function.
        self.sNewName    = 'x';
        ## The threaded function parameter this reference is packed into.
        self.iNewParam   = 99;
        ## The bit offset in iNewParam.
        self.offNewParam = 1024;
171
class ThreadedFunctionVariation(object):
    """ Threaded function variation. """

    ## @name Variations.
    ## These variations will match translation block selection/distinctions as well.
    ## @{
    # pylint: disable=line-too-long
    ksVariation_Default           = '';                  ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
    ksVariation_16                = '_16';               ##< 16-bit mode code (386+).
    ksVariation_16f               = '_16f';              ##< 16-bit mode code (386+), check+clear eflags.
    ksVariation_16_Jmp            = '_16_Jmp';           ##< 16-bit mode code (386+), conditional jump taken.
    ksVariation_16f_Jmp           = '_16f_Jmp';          ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_16_NoJmp          = '_16_NoJmp';         ##< 16-bit mode code (386+), conditional jump not taken.
    ksVariation_16f_NoJmp         = '_16f_NoJmp';        ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_16_Addr32         = '_16_Addr32';        ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
    ksVariation_16f_Addr32        = '_16f_Addr32';       ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
    ksVariation_16_Pre386         = '_16_Pre386';        ##< 16-bit mode code, pre-386 CPU target.
    ksVariation_16f_Pre386        = '_16f_Pre386';       ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
    ksVariation_16_Pre386_Jmp     = '_16_Pre386_Jmp';    ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
    ksVariation_16f_Pre386_Jmp    = '_16f_Pre386_Jmp';   ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
    ksVariation_16_Pre386_NoJmp   = '_16_Pre386_NoJmp';  ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
    ksVariation_16f_Pre386_NoJmp  = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
    ksVariation_32                = '_32';               ##< 32-bit mode code (386+).
    ksVariation_32f               = '_32f';              ##< 32-bit mode code (386+), check+clear eflags.
    ksVariation_32_Jmp            = '_32_Jmp';           ##< 32-bit mode code (386+), conditional jump taken.
    ksVariation_32f_Jmp           = '_32f_Jmp';          ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_32_NoJmp          = '_32_NoJmp';         ##< 32-bit mode code (386+), conditional jump not taken.
    ksVariation_32f_NoJmp         = '_32f_NoJmp';        ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_32_Flat           = '_32_Flat';          ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
    ksVariation_32f_Flat          = '_32f_Flat';         ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
    ksVariation_32_Addr16         = '_32_Addr16';        ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    ksVariation_32f_Addr16        = '_32f_Addr16';       ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64                = '_64';               ##< 64-bit mode code.
    ksVariation_64f               = '_64f';              ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_Jmp            = '_64_Jmp';           ##< 64-bit mode code, conditional jump taken.
    ksVariation_64f_Jmp           = '_64f_Jmp';          ##< 64-bit mode code, check+clear eflags, conditional jump taken.
    ksVariation_64_NoJmp          = '_64_NoJmp';         ##< 64-bit mode code, conditional jump not taken.
    ksVariation_64f_NoJmp         = '_64f_NoJmp';        ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
    ksVariation_64_FsGs           = '_64_FsGs';          ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs          = '_64f_FsGs';         ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32         = '_64_Addr32';        ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32        = '_64f_Addr32';       ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
    # pylint: enable=line-too-long
    ## All the variations.
    kasVariations = (
        ksVariation_Default,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Jmp,
        ksVariation_16f_Jmp,
        ksVariation_16_NoJmp,
        ksVariation_16f_NoJmp,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_16_Pre386_Jmp,
        ksVariation_16f_Pre386_Jmp,
        ksVariation_16_Pre386_NoJmp,
        ksVariation_16f_Pre386_NoJmp,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Jmp,
        ksVariation_32f_Jmp,
        ksVariation_32_NoJmp,
        ksVariation_32f_NoJmp,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_Jmp,
        ksVariation_64f_Jmp,
        ksVariation_64_NoJmp,
        ksVariation_64f_NoJmp,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations without any effective address form (no Addr##/Flat/FsGs entries).
    kasVariationsWithoutAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Same as kasVariationsWithoutAddress, minus the pre-386 entries.
    kasVariationsWithoutAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Same as kasVariationsWithoutAddress, minus pre-386 and 64-bit entries.
    kasVariationsWithoutAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
    );
    ## Same as kasVariationsWithoutAddress, minus the 64-bit entries.
    kasVariationsWithoutAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
    );
    ## The 64-bit address-less variations only.
    kasVariationsWithoutAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variations involving an effective address.
    kasVariationsWithAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Same as kasVariationsWithAddress, minus the pre-386 entries.
    kasVariationsWithAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Same as kasVariationsWithAddress, minus pre-386 and 64-bit entries.
    kasVariationsWithAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Same as kasVariationsWithAddress, minus the 64-bit entries.
    kasVariationsWithAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## The 64-bit addressing variations only.
    kasVariationsWithAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## The pre-386 variations only.
    kasVariationsOnlyPre386 = (
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
    );
    ## The order in which variations are emitted (differs from kasVariations).
    kasVariationsEmitOrder = (
        ksVariation_Default,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_Jmp,
        ksVariation_64f_Jmp,
        ksVariation_64_NoJmp,
        ksVariation_64f_NoJmp,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Jmp,
        ksVariation_32f_Jmp,
        ksVariation_32_NoJmp,
        ksVariation_32f_NoJmp,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Jmp,
        ksVariation_16f_Jmp,
        ksVariation_16_NoJmp,
        ksVariation_16f_NoJmp,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_16_Pre386_Jmp,
        ksVariation_16f_Pre386_Jmp,
        ksVariation_16_Pre386_NoJmp,
        ksVariation_16f_Pre386_NoJmp,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
401 kdVariationNames = {
402 ksVariation_Default: 'defer-to-cimpl',
403 ksVariation_16: '16-bit',
404 ksVariation_16f: '16-bit w/ eflag checking and clearing',
405 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
406 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
407 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
408 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
409 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
410 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
411 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
412 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
413 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
414 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
415 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
416 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
417 ksVariation_32: '32-bit',
418 ksVariation_32f: '32-bit w/ eflag checking and clearing',
419 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
420 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
421 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
422 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
423 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
424 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
425 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
426 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
427 ksVariation_64: '64-bit',
428 ksVariation_64f: '64-bit w/ eflag checking and clearing',
429 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
430 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
431 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
432 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
433 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
434 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
435 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
436 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
437 };
    ## The variations that perform eflags checking and clearing (the '…f' forms).
    kdVariationsWithEflagsCheckingAndClearing = {
        ksVariation_16f: True,
        ksVariation_16f_Jmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Addr32: True,
        ksVariation_16f_Pre386: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f: True,
        ksVariation_32f_Jmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_32f_Flat: True,
        ksVariation_32f_Addr16: True,
        ksVariation_64f: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## The 64-bit variations without eflags checking and clearing.
    kdVariationsOnly64NoFlags = {
        ksVariation_64: True,
        ksVariation_64_Jmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_64_FsGs: True,
        ksVariation_64_Addr32: True,
    };
    ## The 64-bit variations with eflags checking and clearing.
    kdVariationsOnly64WithFlags = {
        ksVariation_64f: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## The pre-386 variations without eflags checking and clearing.
    kdVariationsOnlyPre386NoFlags = {
        ksVariation_16_Pre386: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
    };
    ## The pre-386 variations with eflags checking and clearing.
    kdVariationsOnlyPre386WithFlags = {
        ksVariation_16f_Pre386: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
    };
    ## Variations where general memory accesses can use a flat address space.
    kdVariationsWithFlatAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations where stack accesses can use a flat address space.
    kdVariationsWithFlatStackAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations where stack accesses can use a flat 64-bit address space.
    kdVariationsWithFlat64StackAddress = {
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations using 16-bit effective addressing.
    kdVariationsWithFlatAddr16 = {
        ksVariation_16: True,
        ksVariation_16f: True,
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32_Addr16: True,
        ksVariation_32f_Addr16: True,
    };
    ## Variations using 32-bit effective addressing, excluding 64-bit mode.
    kdVariationsWithFlatAddr32No64 = {
        ksVariation_16_Addr32: True,
        ksVariation_16f_Addr32: True,
        ksVariation_32: True,
        ksVariation_32f: True,
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
    };
    ## The 64-bit addressing variations (same membership as kasVariationsWithAddressOnly64).
    kdVariationsWithAddressOnly64 = {
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## The conditional (Jmp/NoJmp) variations.
    kdVariationsWithConditional = {
        ksVariation_16_Jmp: True,
        ksVariation_16_NoJmp: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_32_Jmp: True,
        ksVariation_32_NoJmp: True,
        ksVariation_64_Jmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_16f_Jmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f_Jmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
    };
    ## The jump-not-taken subset of the conditional variations.
    kdVariationsWithConditionalNoJmp = {
        ksVariation_16_NoJmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_32_NoJmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_64f_NoJmp: True,
    };
    ## All the pre-386 variations.
    kdVariationsOnlyPre386 = {
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
    };
    ## @}
568
    ## IEM_CIMPL_F_XXX flags that we know.
    ## The value indicates whether it terminates the TB or not. The goal is to
    ## improve the recompiler so all but END_TB will be False.
    ## (Currently only BRANCH_FAR, END_TB and XCPT are True.)
    ##
    ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
    kdCImplFlags = {
        'IEM_CIMPL_F_MODE':                         False,
        'IEM_CIMPL_F_BRANCH_DIRECT':                False,
        'IEM_CIMPL_F_BRANCH_INDIRECT':              False,
        'IEM_CIMPL_F_BRANCH_RELATIVE':              False,
        'IEM_CIMPL_F_BRANCH_FAR':                   True,
        'IEM_CIMPL_F_BRANCH_CONDITIONAL':           False,
        # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
        'IEM_CIMPL_F_BRANCH_STACK':                 False,
        'IEM_CIMPL_F_BRANCH_STACK_FAR':             False,
        'IEM_CIMPL_F_RFLAGS':                       False,
        'IEM_CIMPL_F_INHIBIT_SHADOW':               False,
        'IEM_CIMPL_F_CHECK_IRQ_AFTER':              False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE':             False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER':   False, # (ignore)
        'IEM_CIMPL_F_STATUS_FLAGS':                 False,
        'IEM_CIMPL_F_VMEXIT':                       False,
        'IEM_CIMPL_F_FPU':                          False,
        'IEM_CIMPL_F_REP':                          False,
        'IEM_CIMPL_F_IO':                           False,
        'IEM_CIMPL_F_END_TB':                       True,
        'IEM_CIMPL_F_XCPT':                         True,
        'IEM_CIMPL_F_CALLS_CIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE':     False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE':      False,
    };
601
602 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
603 self.oParent = oThreadedFunction # type: ThreadedFunction
604 ##< ksVariation_Xxxx.
605 self.sVariation = sVariation
606
607 ## Threaded function parameter references.
608 self.aoParamRefs = [] # type: List[ThreadedParamRef]
609 ## Unique parameter references.
610 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
611 ## Minimum number of parameters to the threaded function.
612 self.cMinParams = 0;
613
614 ## List/tree of statements for the threaded function.
615 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
616
617 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
618 self.iEnumValue = -1;
619
620 ## Native recompilation details for this variation.
621 self.oNativeRecomp = None;
622
623 def getIndexName(self):
624 sName = self.oParent.oMcBlock.sFunction;
625 if sName.startswith('iemOp_'):
626 sName = sName[len('iemOp_'):];
627 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
628
629 def getThreadedFunctionName(self):
630 sName = self.oParent.oMcBlock.sFunction;
631 if sName.startswith('iemOp_'):
632 sName = sName[len('iemOp_'):];
633 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
634
635 def getNativeFunctionName(self):
636 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
637
638 def getLivenessFunctionName(self):
639 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
640
641 def getShortName(self):
642 sName = self.oParent.oMcBlock.sFunction;
643 if sName.startswith('iemOp_'):
644 sName = sName[len('iemOp_'):];
645 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
646
647 def getThreadedFunctionStatisticsName(self):
648 sName = self.oParent.oMcBlock.sFunction;
649 if sName.startswith('iemOp_'):
650 sName = sName[len('iemOp_'):];
651
652 sVarNm = self.sVariation;
653 if sVarNm:
654 if sVarNm.startswith('_'):
655 sVarNm = sVarNm[1:];
656 if sVarNm.endswith('_Jmp'):
657 sVarNm = sVarNm[:-4];
658 sName += '_Jmp';
659 elif sVarNm.endswith('_NoJmp'):
660 sVarNm = sVarNm[:-6];
661 sName += '_NoJmp';
662 else:
663 sVarNm = 'DeferToCImpl';
664
665 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
666
667 def isWithFlagsCheckingAndClearingVariation(self):
668 """
669 Checks if this is a variation that checks and clears EFLAGS.
670 """
671 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
672
673 #
674 # Analysis and code morphing.
675 #
676
677 def raiseProblem(self, sMessage):
678 """ Raises a problem. """
679 self.oParent.raiseProblem(sMessage);
680
681 def warning(self, sMessage):
682 """ Emits a warning. """
683 self.oParent.warning(sMessage);
684
685 def analyzeReferenceToType(self, sRef):
686 """
687 Translates a variable or structure reference to a type.
688 Returns type name.
689 Raises exception if unable to figure it out.
690 """
691 ch0 = sRef[0];
692 if ch0 == 'u':
693 if sRef.startswith('u32'):
694 return 'uint32_t';
695 if sRef.startswith('u8') or sRef == 'uReg':
696 return 'uint8_t';
697 if sRef.startswith('u64'):
698 return 'uint64_t';
699 if sRef.startswith('u16'):
700 return 'uint16_t';
701 elif ch0 == 'b':
702 return 'uint8_t';
703 elif ch0 == 'f':
704 return 'bool';
705 elif ch0 == 'i':
706 if sRef.startswith('i8'):
707 return 'int8_t';
708 if sRef.startswith('i16'):
709 return 'int16_t';
710 if sRef.startswith('i32'):
711 return 'int32_t';
712 if sRef.startswith('i64'):
713 return 'int64_t';
714 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
715 return 'uint8_t';
716 elif ch0 == 'p':
717 if sRef.find('-') < 0:
718 return 'uintptr_t';
719 if sRef.startswith('pVCpu->iem.s.'):
720 sField = sRef[len('pVCpu->iem.s.') : ];
721 if sField in g_kdIemFieldToType:
722 if g_kdIemFieldToType[sField][0]:
723 return g_kdIemFieldToType[sField][0];
724 elif ch0 == 'G' and sRef.startswith('GCPtr'):
725 return 'uint64_t';
726 elif ch0 == 'e':
727 if sRef == 'enmEffOpSize':
728 return 'IEMMODE';
729 elif ch0 == 'o':
730 if sRef.startswith('off32'):
731 return 'uint32_t';
732 elif sRef == 'cbFrame': # enter
733 return 'uint16_t';
734 elif sRef == 'cShift': ## @todo risky
735 return 'uint8_t';
736
737 self.raiseProblem('Unknown reference: %s' % (sRef,));
738 return None; # Shut up pylint 2.16.2.
739
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        Returns the C type name (string).  Raises (via raiseProblem) when the
        type cannot be established.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first.
            # (In the macro argument list the type precedes the argument name.)
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable declaration.
            # Matches either 'PXXXX name' or 'const IEMOPXXXX *name' style declarations.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # Convert 'const IEMOPXXXX *' into the corresponding 'PCIEMOPXXXX' typedef.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx:
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            offBits   = sMember.rfind('U') + 1;
            # Map the operation table type to the function pointer type, keeping the UXX size suffix.
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPBINTODOSIZES':      return 'PFNIEMAIMPLBINTODOU'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF2':           return 'PFNIEMAIMPLMEDIAF2U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8':    return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
793
794 def analyze8BitGRegStmt(self, oStmt):
795 """
796 Gets the 8-bit general purpose register access details of the given statement.
797 ASSUMES the statement is one accessing an 8-bit GREG.
798 """
799 idxReg = 0;
800 if ( oStmt.sName.find('_FETCH_') > 0
801 or oStmt.sName.find('_REF_') > 0
802 or oStmt.sName.find('_TO_LOCAL') > 0):
803 idxReg = 1;
804
805 sRegRef = oStmt.asParams[idxReg];
806 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
807 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
808 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
809 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
810 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
811 else:
812 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)) ? (%s) : (%s) + 12)' \
813 % (sRegRef, sRegRef, sRegRef,);
814
815 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
816 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
817 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
818 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
819 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
820 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
821 else:
822 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
823 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
824 sStdRef = 'bOther8Ex';
825
826 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
827 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
828 return (idxReg, sOrgExpr, sStdRef);
829
830
831 ## Maps memory related MCs to info for FLAT conversion.
832 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
833 ## segmentation checking for every memory access. Only applied to access
834 ## via ES, DS and SS. FS, GS and CS gets the full segmentation threatment,
835 ## the latter (CS) is just to keep things simple (we could safely fetch via
836 ## it, but only in 64-bit mode could we safely write via it, IIRC).
837 kdMemMcToFlatInfo = {
838 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
839 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
840 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
841 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
842 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
843 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
844 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
845 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
846 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
847 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
848 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
849 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
850 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
851 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
852 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
853 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
854 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
855 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
856 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
857 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
858 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
859 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
860 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
861 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
862 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
863 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
864 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
865 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
866 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
867 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
868 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
869 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
870 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
871 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
872 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
873 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
874 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
875 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
876 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
877 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
878 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
879 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
880 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
881 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
882 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
883 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
884 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
885 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
886 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
887 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
888 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
889 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
890 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
891 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
892 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
893 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
894 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
895 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
896 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
897 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
898 'IEM_MC_STORE_MEM_U128_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
899 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
900 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
901 'IEM_MC_STORE_MEM_U256_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
902 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
903 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
904 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
905 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
906 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
907 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
908 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
909 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
910 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
911 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
912 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
913 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
914 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
915 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
916 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
917 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
918 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
919 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
920 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
921 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
922 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
923 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
924 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
925 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
926 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
927 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
928 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
929 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
930 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
931 };
932
933 kdMemMcToFlatInfoStack = {
934 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
935 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
936 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
937 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
938 'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
939 'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
940 'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
941 };
942
943 kdThreadedCalcRmEffAddrMcByVariation = {
944 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
945 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
946 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
947 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
948 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
949 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
950 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
951 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
952 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
953 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
954 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
955 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
956 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
957 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
958 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
959 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
960 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
961 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
962 };
963
964 def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
965 """
966 Transforms (copy) the statements into those for the threaded function.
967
968 Returns list/tree of statements (aoStmts is not modified) and the new
969 iParamRef value.
970 """
971 #
972 # We'll be traversing aoParamRefs in parallel to the statements, so we
973 # must match the traversal in analyzeFindThreadedParamRefs exactly.
974 #
975 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
976 aoThreadedStmts = [];
977 for oStmt in aoStmts:
978 # Skip C++ statements that is purely related to decoding.
979 if not oStmt.isCppStmt() or not oStmt.fDecode:
980 # Copy the statement. Make a deep copy to make sure we've got our own
981 # copies of all instance variables, even if a bit overkill at the moment.
982 oNewStmt = copy.deepcopy(oStmt);
983 aoThreadedStmts.append(oNewStmt);
984 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
985
986 # If the statement has parameter references, process the relevant parameters.
987 # We grab the references relevant to this statement and apply them in reserve order.
988 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
989 iParamRefFirst = iParamRef;
990 while True:
991 iParamRef += 1;
992 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
993 break;
994
995 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
996 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
997 oCurRef = self.aoParamRefs[iCurRef];
998 if oCurRef.iParam is not None:
999 assert oCurRef.oStmt == oStmt;
1000 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
1001 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
1002 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
1003 or oCurRef.fCustomRef), \
1004 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
1005 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
1006 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
1007 + oCurRef.sNewName \
1008 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
1009
1010 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
1011 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1012 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
1013 assert len(oNewStmt.asParams) == 3;
1014
1015 if self.sVariation in self.kdVariationsWithFlatAddr16:
1016 oNewStmt.asParams = [
1017 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
1018 ];
1019 else:
1020 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
1021 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
1022 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
1023
1024 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
1025 oNewStmt.asParams = [
1026 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
1027 ];
1028 else:
1029 oNewStmt.asParams = [
1030 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
1031 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
1032 ];
1033 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
1034 elif ( oNewStmt.sName
1035 in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
1036 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
1037 'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH',
1038 'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH', 'IEM_MC_REL_CALL_S64_AND_FINISH',
1039 'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH', 'IEM_MC_IND_CALL_U64_AND_FINISH',
1040 'IEM_MC_RETN_AND_FINISH',)):
1041 if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1042 'IEM_MC_SET_RIP_U64_AND_FINISH', ):
1043 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
1044 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
1045 and self.sVariation not in self.kdVariationsOnlyPre386):
1046 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
1047 oNewStmt.sName += '_THREADED';
1048 if self.sVariation in self.kdVariationsOnly64NoFlags:
1049 oNewStmt.sName += '_PC64';
1050 elif self.sVariation in self.kdVariationsOnly64WithFlags:
1051 oNewStmt.sName += '_PC64_WITH_FLAGS';
1052 elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
1053 oNewStmt.sName += '_PC16';
1054 elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
1055 oNewStmt.sName += '_PC16_WITH_FLAGS';
1056 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
1057 assert self.sVariation != self.ksVariation_Default;
1058 oNewStmt.sName += '_PC32';
1059 else:
1060 oNewStmt.sName += '_PC32_WITH_FLAGS';
1061
1062 # This is making the wrong branch of conditionals break out of the TB.
1063 if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
1064 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
1065 sExitTbStatus = 'VINF_SUCCESS';
1066 if self.sVariation in self.kdVariationsWithConditional:
1067 if self.sVariation in self.kdVariationsWithConditionalNoJmp:
1068 if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1069 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1070 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1071 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1072 oNewStmt.asParams.append(sExitTbStatus);
1073
1074 # Insert an MC so we can assert the correctioness of modified flags annotations on IEM_MC_REF_EFLAGS.
1075 if 'IEM_MC_ASSERT_EFLAGS' in dState:
1076 aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
1077 iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
1078 del dState['IEM_MC_ASSERT_EFLAGS'];
1079
1080 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
1081 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
1082 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
1083 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
1084 oNewStmt.sName += '_THREADED';
1085
1086 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
1087 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1088 oNewStmt.sName += '_THREADED';
1089 oNewStmt.idxFn += 1;
1090 oNewStmt.idxParams += 1;
1091 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
1092
1093 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
1094 elif ( self.sVariation in self.kdVariationsWithFlatAddress
1095 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
1096 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
1097 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
1098 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
1099 if idxEffSeg != -1:
1100 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
1101 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
1102 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
1103 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
1104 oNewStmt.asParams.pop(idxEffSeg);
1105 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
1106
1107 # ... PUSH and POP also needs flat variants, but these differ a little.
1108 elif ( self.sVariation in self.kdVariationsWithFlatStackAddress
1109 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
1110 or oNewStmt.sName.startswith('IEM_MC_POP'))):
1111 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
1112 self.kdVariationsWithFlat64StackAddress)];
1113
1114 # Add EFLAGS usage annotations to relevant MCs.
1115 elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS',
1116 'IEM_MC_FETCH_EFLAGS'):
1117 oInstruction = self.oParent.oMcBlock.oInstruction;
1118 oNewStmt.sName += '_EX';
1119 oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle()); # Shall crash and burn if oInstruction is
1120 oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.
1121
1122 # For IEM_MC_REF_EFLAGS we to emit an MC before the ..._FINISH
1123 if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
1124 dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;
1125
1126 # Process branches of conditionals recursively.
1127 if isinstance(oStmt, iai.McStmtCond):
1128 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
1129 iParamRef, iLevel + 1);
1130 if oStmt.aoElseBranch:
1131 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
1132 dState, iParamRef, iLevel + 1);
1133
1134 # Insert an MC so we can assert the correctioness of modified flags annotations
1135 # on IEM_MC_REF_EFLAGS if it goes out of scope.
1136 if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
1137 aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
1138 del dState['IEM_MC_ASSERT_EFLAGS'];
1139
1140 return (aoThreadedStmts, iParamRef);
1141
1142
1143 def analyzeConsolidateThreadedParamRefs(self):
1144 """
1145 Consolidate threaded function parameter references into a dictionary
1146 with lists of the references to each variable/field.
1147 """
1148 # Gather unique parameters.
1149 self.dParamRefs = {};
1150 for oRef in self.aoParamRefs:
1151 if oRef.sStdRef not in self.dParamRefs:
1152 self.dParamRefs[oRef.sStdRef] = [oRef,];
1153 else:
1154 self.dParamRefs[oRef.sStdRef].append(oRef);
1155
1156 # Generate names for them for use in the threaded function.
1157 dParamNames = {};
1158 for sName, aoRefs in self.dParamRefs.items():
1159 # Morph the reference expression into a name.
1160 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1161 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1162 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1163 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1164 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1165 elif sName.startswith('IEM_GET_IMM8_REG'): sName = 'bImm8Reg';
1166 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1167 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1168 else:
1169 sName += 'P';
1170
1171 # Ensure it's unique.
1172 if sName in dParamNames:
1173 for i in range(10):
1174 if sName + str(i) not in dParamNames:
1175 sName += str(i);
1176 break;
1177 dParamNames[sName] = True;
1178
1179 # Update all the references.
1180 for oRef in aoRefs:
1181 oRef.sNewName = sName;
1182
1183 # Organize them by size too for the purpose of optimize them.
1184 dBySize = {} # type: Dict[str, str]
1185 for sStdRef, aoRefs in self.dParamRefs.items():
1186 if aoRefs[0].sType[0] != 'P':
1187 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1188 assert(cBits <= 64);
1189 else:
1190 cBits = 64;
1191
1192 if cBits not in dBySize:
1193 dBySize[cBits] = [sStdRef,]
1194 else:
1195 dBySize[cBits].append(sStdRef);
1196
1197 # Pack the parameters as best as we can, starting with the largest ones
1198 # and ASSUMING a 64-bit parameter size.
1199 self.cMinParams = 0;
1200 offNewParam = 0;
1201 for cBits in sorted(dBySize.keys(), reverse = True):
1202 for sStdRef in dBySize[cBits]:
1203 if offNewParam == 0 or offNewParam + cBits > 64:
1204 self.cMinParams += 1;
1205 offNewParam = cBits;
1206 else:
1207 offNewParam += cBits;
1208 assert(offNewParam <= 64);
1209
1210 for oRef in self.dParamRefs[sStdRef]:
1211 oRef.iNewParam = self.cMinParams - 1;
1212 oRef.offNewParam = offNewParam - cBits;
1213
1214 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
1215 if self.cMinParams >= 4:
1216 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1217 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1218
1219 return True;
1220
1221 ksHexDigits = '0123456789abcdefABCDEF';
1222
1223 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1224 """
1225 Scans the statements for things that have to passed on to the threaded
1226 function (populates self.aoParamRefs).
1227 """
1228 for oStmt in aoStmts:
1229 # Some statements we can skip alltogether.
1230 if isinstance(oStmt, iai.McCppPreProc):
1231 continue;
1232 if oStmt.isCppStmt() and oStmt.fDecode:
1233 continue;
1234 if oStmt.sName in ('IEM_MC_BEGIN',):
1235 continue;
1236
1237 if isinstance(oStmt, iai.McStmtVar):
1238 if oStmt.sValue is None:
1239 continue;
1240 aiSkipParams = { 0: True, 1: True, 3: True };
1241 else:
1242 aiSkipParams = {};
1243
1244 # Several statements have implicit parameters and some have different parameters.
1245 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1246 'IEM_MC_REL_JMP_S32_AND_FINISH',
1247 'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH',
1248 'IEM_MC_REL_CALL_S64_AND_FINISH',
1249 'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH',
1250 'IEM_MC_IND_CALL_U64_AND_FINISH',
1251 'IEM_MC_RETN_AND_FINISH',
1252 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3',
1253 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1254 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1255 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1256 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1257
1258 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
1259 and self.sVariation not in self.kdVariationsOnlyPre386):
1260 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1261
1262 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1263 # This is being pretty presumptive about bRm always being the RM byte...
1264 assert len(oStmt.asParams) == 3;
1265 assert oStmt.asParams[1] == 'bRm';
1266
1267 if self.sVariation in self.kdVariationsWithFlatAddr16:
1268 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1269 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1270 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1271 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1272 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1273 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1274 'uint8_t', oStmt, sStdRef = 'bSib'));
1275 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1276 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1277 else:
1278 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1279 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1280 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1281 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1282 'uint8_t', oStmt, sStdRef = 'bSib'));
1283 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1284 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1285 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1286 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1287 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1288
1289 # 8-bit register accesses needs to have their index argument reworked to take REX into account.
1290 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1291 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1292 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
1293 aiSkipParams[idxReg] = True; # Skip the parameter below.
1294
1295 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1296 if ( self.sVariation in self.kdVariationsWithFlatAddress
1297 and oStmt.sName in self.kdMemMcToFlatInfo
1298 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1299 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1300
1301 # Inspect the target of calls to see if we need to pass down a
1302 # function pointer or function table pointer for it to work.
1303 if isinstance(oStmt, iai.McStmtCall):
1304 if oStmt.sFn[0] == 'p':
1305 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1306 elif ( oStmt.sFn[0] != 'i'
1307 and not oStmt.sFn.startswith('RT_CONCAT3')
1308 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1309 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1310 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1311 aiSkipParams[oStmt.idxFn] = True;
1312
1313 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1314 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1315 assert oStmt.idxFn == 2;
1316 aiSkipParams[0] = True;
1317
1318 # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
1319 if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
1320 aiSkipParams[0] = True;
1321
1322
1323 # Check all the parameters for bogus references.
1324 for iParam, sParam in enumerate(oStmt.asParams):
1325 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1326 # The parameter may contain a C expression, so we have to try
1327 # extract the relevant bits, i.e. variables and fields while
1328 # ignoring operators and parentheses.
1329 offParam = 0;
1330 while offParam < len(sParam):
1331 # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
1332 ch = sParam[offParam];
1333 if ch.isalpha() or ch == '_':
1334 offStart = offParam;
1335 offParam += 1;
1336 while offParam < len(sParam):
1337 ch = sParam[offParam];
1338 if not ch.isalnum() and ch != '_' and ch != '.':
1339 if ch != '-' or sParam[offParam + 1] != '>':
1340 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1341 if ( ch == '('
1342 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1343 offParam += len('(pVM)->') - 1;
1344 else:
1345 break;
1346 offParam += 1;
1347 offParam += 1;
1348 sRef = sParam[offStart : offParam];
1349
1350 # For register references, we pass the full register indexes instead as macros
1351 # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
1352 # threaded function will be more efficient if we just pass the register index
1353 # as a 4-bit param.
1354 if ( sRef.startswith('IEM_GET_MODRM')
1355 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV')
1356 or sRef.startswith('IEM_GET_IMM8_REG') ):
1357 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1358 if sParam[offParam] != '(':
1359 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1360 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1361 if asMacroParams is None:
1362 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1363 offParam = offCloseParam + 1;
1364 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1365 oStmt, iParam, offStart));
1366
1367 # We can skip known variables.
1368 elif sRef in self.oParent.dVariables:
1369 pass;
1370
1371 # Skip certain macro invocations.
1372 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1373 'IEM_GET_GUEST_CPU_FEATURES',
1374 'IEM_IS_GUEST_CPU_AMD',
1375 'IEM_IS_16BIT_CODE',
1376 'IEM_IS_32BIT_CODE',
1377 'IEM_IS_64BIT_CODE',
1378 ):
1379 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1380 if sParam[offParam] != '(':
1381 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1382 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1383 if asMacroParams is None:
1384 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1385 offParam = offCloseParam + 1;
1386
1387 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1388 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1389 'IEM_IS_16BIT_CODE',
1390 'IEM_IS_32BIT_CODE',
1391 'IEM_IS_64BIT_CODE',
1392 ):
1393 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1394 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1395 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1396 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1397 offParam += 1;
1398
1399 # Skip constants, globals, types (casts), sizeof and macros.
1400 elif ( sRef.startswith('IEM_OP_PRF_')
1401 or sRef.startswith('IEM_ACCESS_')
1402 or sRef.startswith('IEMINT_')
1403 or sRef.startswith('X86_GREG_')
1404 or sRef.startswith('X86_SREG_')
1405 or sRef.startswith('X86_EFL_')
1406 or sRef.startswith('X86_FSW_')
1407 or sRef.startswith('X86_FCW_')
1408 or sRef.startswith('X86_XCPT_')
1409 or sRef.startswith('IEMMODE_')
1410 or sRef.startswith('IEM_F_')
1411 or sRef.startswith('IEM_CIMPL_F_')
1412 or sRef.startswith('g_')
1413 or sRef.startswith('iemAImpl_')
1414 or sRef.startswith('kIemNativeGstReg_')
1415 or sRef.startswith('RT_ARCH_VAL_')
1416 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1417 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1418 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
1419 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1420 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1421 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1422 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1423 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1424 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1425 'NIL_RTGCPTR',) ):
1426 pass;
1427
1428 # Skip certain macro invocations.
1429 # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1430 elif ( ( '.' not in sRef
1431 and '-' not in sRef
1432 and sRef not in ('pVCpu', ) )
1433 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1434 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1435 oStmt, iParam, offStart));
1436 # Number.
1437 elif ch.isdigit():
1438 if ( ch == '0'
1439 and offParam + 2 <= len(sParam)
1440 and sParam[offParam + 1] in 'xX'
1441 and sParam[offParam + 2] in self.ksHexDigits ):
1442 offParam += 2;
1443 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1444 offParam += 1;
1445 else:
1446 while offParam < len(sParam) and sParam[offParam].isdigit():
1447 offParam += 1;
1448 # Comment?
1449 elif ( ch == '/'
1450 and offParam + 4 <= len(sParam)
1451 and sParam[offParam + 1] == '*'):
1452 offParam += 2;
1453 offNext = sParam.find('*/', offParam);
1454 if offNext < offParam:
1455 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1456 offParam = offNext + 2;
1457 # Whatever else.
1458 else:
1459 offParam += 1;
1460
1461 # Traverse the branches of conditionals.
1462 if isinstance(oStmt, iai.McStmtCond):
1463 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1464 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1465 return True;
1466
1467 def analyzeVariation(self, aoStmts):
1468 """
1469 2nd part of the analysis, done on each variation.
1470
1471 The variations may differ in parameter requirements and will end up with
1472 slightly different MC sequences. Thus this is done on each individually.
1473
1474 Returns dummy True - raises exception on trouble.
1475 """
1476 # Now scan the code for variables and field references that needs to
1477 # be passed to the threaded function because they are related to the
1478 # instruction decoding.
1479 self.analyzeFindThreadedParamRefs(aoStmts);
1480 self.analyzeConsolidateThreadedParamRefs();
1481
1482 # Morph the statement stream for the block into what we'll be using in the threaded function.
1483 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1484 if iParamRef != len(self.aoParamRefs):
1485 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1486
1487 return True;
1488
    def emitThreadedCallStmtsForVariant(self, cchIndent, fTbLookupTable = False, sCallVarNm = None):
        """
        Produces generic C++ statements that emit a call to the threaded function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm is the name of the variable with the threaded function
        to call.  This is for the case where all the variations have the same
        parameters and only the threaded function number differs.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.

        Returns a list of statement objects (iai.McCppCall / iai.McCppGeneric).
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            # Each 64-bit threaded parameter is assembled by OR'ing together
            # the (shifted) source references packed into it.
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        # Emit the call macro - with or without a TB lookup table entry.
        if fTbLookupTable is False:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,),
                                         asCallArgs, cchIndent = cchIndent));
        else:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_%s' % (len(asCallArgs) - 1,),
                                         ['0' if fTbLookupTable is True else '1',] + asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        # Close the scope, passing the accumulated IEM_CIMPL_F_XXX flags ('0' if none).
        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags      = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        if (   asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        # Force an IRQ check on the next instruction when requested.
        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1572
1573
1574class ThreadedFunction(object):
1575 """
1576 A threaded function.
1577 """
1578
    def __init__(self, oMcBlock: iai.McBlock) -> None:
        """ Wraps the given MC block; the remaining fields are filled in by analyze(). """
        ## The MC block this threaded function is generated from.
        self.oMcBlock       = oMcBlock      # type: iai.McBlock
        # The remaining fields are only useful after analyze() has been called:
        ## Variations for this block. There is at least one.
        self.aoVariations   = []            # type: List[ThreadedFunctionVariation]
        ## Variation dictionary containing the same as aoVariations.
        self.dVariations    = {}            # type: Dict[str, ThreadedFunctionVariation]
        ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
        self.dVariables     = {}            # type: Dict[str, iai.McStmtVar]
        ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
        ## and those determined by analyzeCodeOperation().
        self.dsCImplFlags   = {}            # type: Dict[str, bool]
        ## The unique sub-name for this threaded function.
        self.sSubName       = '';
        # Disabled: would derive a sub-name suffix from the block's position in the function.
        #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
        #    self.sSubName = '_%s' % (oMcBlock.iInFunction);
1595
1596 @staticmethod
1597 def dummyInstance():
1598 """ Gets a dummy instance. """
1599 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1600 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1601
1602 def hasWithFlagsCheckingAndClearingVariation(self):
1603 """
1604 Check if there is one or more with flags checking and clearing
1605 variations for this threaded function.
1606 """
1607 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1608 if sVarWithFlags in self.dVariations:
1609 return True;
1610 return False;
1611
1612 #
1613 # Analysis and code morphing.
1614 #
1615
1616 def raiseProblem(self, sMessage):
1617 """ Raises a problem. """
1618 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1619
1620 def error(self, sMessage, oGenerator):
1621 """ Emits an error via the generator object, causing it to fail. """
1622 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1623
1624 def warning(self, sMessage):
1625 """ Emits a warning. """
1626 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1627
1628 ## Used by analyzeAndAnnotateName for memory MC blocks.
1629 kdAnnotateNameMemStmts = {
1630 'IEM_MC_FETCH_MEM16_U8': '__mem8',
1631 'IEM_MC_FETCH_MEM32_U8': '__mem8',
1632 'IEM_MC_FETCH_MEM_D80': '__mem80',
1633 'IEM_MC_FETCH_MEM_I16': '__mem16',
1634 'IEM_MC_FETCH_MEM_I32': '__mem32',
1635 'IEM_MC_FETCH_MEM_I64': '__mem64',
1636 'IEM_MC_FETCH_MEM_R32': '__mem32',
1637 'IEM_MC_FETCH_MEM_R64': '__mem64',
1638 'IEM_MC_FETCH_MEM_R80': '__mem80',
1639 'IEM_MC_FETCH_MEM_U128': '__mem128',
1640 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': '__mem128',
1641 'IEM_MC_FETCH_MEM_U128_NO_AC': '__mem128',
1642 'IEM_MC_FETCH_MEM_U16': '__mem16',
1643 'IEM_MC_FETCH_MEM_U16_DISP': '__mem16',
1644 'IEM_MC_FETCH_MEM_U16_SX_U32': '__mem16sx32',
1645 'IEM_MC_FETCH_MEM_U16_SX_U64': '__mem16sx64',
1646 'IEM_MC_FETCH_MEM_U16_ZX_U32': '__mem16zx32',
1647 'IEM_MC_FETCH_MEM_U16_ZX_U64': '__mem16zx64',
1648 'IEM_MC_FETCH_MEM_U256': '__mem256',
1649 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': '__mem256',
1650 'IEM_MC_FETCH_MEM_U256_NO_AC': '__mem256',
1651 'IEM_MC_FETCH_MEM_U32': '__mem32',
1652 'IEM_MC_FETCH_MEM_U32_DISP': '__mem32',
1653 'IEM_MC_FETCH_MEM_U32_SX_U64': '__mem32sx64',
1654 'IEM_MC_FETCH_MEM_U32_ZX_U64': '__mem32zx64',
1655 'IEM_MC_FETCH_MEM_U64': '__mem64',
1656 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': '__mem64',
1657 'IEM_MC_FETCH_MEM_U64_DISP': '__mem64',
1658 'IEM_MC_FETCH_MEM_U8': '__mem8',
1659 'IEM_MC_FETCH_MEM_U8_DISP': '__mem8',
1660 'IEM_MC_FETCH_MEM_U8_SX_U16': '__mem8sx16',
1661 'IEM_MC_FETCH_MEM_U8_SX_U32': '__mem8sx32',
1662 'IEM_MC_FETCH_MEM_U8_SX_U64': '__mem8sx64',
1663 'IEM_MC_FETCH_MEM_U8_ZX_U16': '__mem8zx16',
1664 'IEM_MC_FETCH_MEM_U8_ZX_U32': '__mem8zx32',
1665 'IEM_MC_FETCH_MEM_U8_ZX_U64': '__mem8zx64',
1666 'IEM_MC_FETCH_MEM_XMM': '__mem128',
1667 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': '__mem128',
1668 'IEM_MC_FETCH_MEM_XMM_NO_AC': '__mem128',
1669 'IEM_MC_FETCH_MEM_XMM_U32': '__mem32',
1670 'IEM_MC_FETCH_MEM_XMM_U64': '__mem64',
1671 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': '__mem128',
1672 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
1673 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': '__mem32',
1674 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': '__mem64',
1675 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
1676 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',
1677
1678 'IEM_MC_STORE_MEM_I16_CONST_BY_REF': '__mem16',
1679 'IEM_MC_STORE_MEM_I32_CONST_BY_REF': '__mem32',
1680 'IEM_MC_STORE_MEM_I64_CONST_BY_REF': '__mem64',
1681 'IEM_MC_STORE_MEM_I8_CONST_BY_REF': '__mem8',
1682 'IEM_MC_STORE_MEM_INDEF_D80_BY_REF': '__mem80',
1683 'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF': '__mem32',
1684 'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF': '__mem64',
1685 'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF': '__mem80',
1686 'IEM_MC_STORE_MEM_U128': '__mem128',
1687 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': '__mem128',
1688 'IEM_MC_STORE_MEM_U128_NO_AC': '__mem128',
1689 'IEM_MC_STORE_MEM_U16': '__mem16',
1690 'IEM_MC_STORE_MEM_U16_CONST': '__mem16c',
1691 'IEM_MC_STORE_MEM_U256': '__mem256',
1692 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': '__mem256',
1693 'IEM_MC_STORE_MEM_U256_NO_AC': '__mem256',
1694 'IEM_MC_STORE_MEM_U32': '__mem32',
1695 'IEM_MC_STORE_MEM_U32_CONST': '__mem32c',
1696 'IEM_MC_STORE_MEM_U64': '__mem64',
1697 'IEM_MC_STORE_MEM_U64_CONST': '__mem64c',
1698 'IEM_MC_STORE_MEM_U8': '__mem8',
1699 'IEM_MC_STORE_MEM_U8_CONST': '__mem8c',
1700
1701 'IEM_MC_MEM_MAP_D80_WO': '__mem80',
1702 'IEM_MC_MEM_MAP_I16_WO': '__mem16',
1703 'IEM_MC_MEM_MAP_I32_WO': '__mem32',
1704 'IEM_MC_MEM_MAP_I64_WO': '__mem64',
1705 'IEM_MC_MEM_MAP_R32_WO': '__mem32',
1706 'IEM_MC_MEM_MAP_R64_WO': '__mem64',
1707 'IEM_MC_MEM_MAP_R80_WO': '__mem80',
1708 'IEM_MC_MEM_MAP_U128_ATOMIC': '__mem128a',
1709 'IEM_MC_MEM_MAP_U128_RO': '__mem128',
1710 'IEM_MC_MEM_MAP_U128_RW': '__mem128',
1711 'IEM_MC_MEM_MAP_U128_WO': '__mem128',
1712 'IEM_MC_MEM_MAP_U16_ATOMIC': '__mem16a',
1713 'IEM_MC_MEM_MAP_U16_RO': '__mem16',
1714 'IEM_MC_MEM_MAP_U16_RW': '__mem16',
1715 'IEM_MC_MEM_MAP_U16_WO': '__mem16',
1716 'IEM_MC_MEM_MAP_U32_ATOMIC': '__mem32a',
1717 'IEM_MC_MEM_MAP_U32_RO': '__mem32',
1718 'IEM_MC_MEM_MAP_U32_RW': '__mem32',
1719 'IEM_MC_MEM_MAP_U32_WO': '__mem32',
1720 'IEM_MC_MEM_MAP_U64_ATOMIC': '__mem64a',
1721 'IEM_MC_MEM_MAP_U64_RO': '__mem64',
1722 'IEM_MC_MEM_MAP_U64_RW': '__mem64',
1723 'IEM_MC_MEM_MAP_U64_WO': '__mem64',
1724 'IEM_MC_MEM_MAP_U8_ATOMIC': '__mem8a',
1725 'IEM_MC_MEM_MAP_U8_RO': '__mem8',
1726 'IEM_MC_MEM_MAP_U8_RW': '__mem8',
1727 'IEM_MC_MEM_MAP_U8_WO': '__mem8',
1728 };
1729 ## Used by analyzeAndAnnotateName for non-memory MC blocks.
1730 kdAnnotateNameRegStmts = {
1731 'IEM_MC_FETCH_GREG_U8': '__greg8',
1732 'IEM_MC_FETCH_GREG_U8_ZX_U16': '__greg8zx16',
1733 'IEM_MC_FETCH_GREG_U8_ZX_U32': '__greg8zx32',
1734 'IEM_MC_FETCH_GREG_U8_ZX_U64': '__greg8zx64',
1735 'IEM_MC_FETCH_GREG_U8_SX_U16': '__greg8sx16',
1736 'IEM_MC_FETCH_GREG_U8_SX_U32': '__greg8sx32',
1737 'IEM_MC_FETCH_GREG_U8_SX_U64': '__greg8sx64',
1738 'IEM_MC_FETCH_GREG_U16': '__greg16',
1739 'IEM_MC_FETCH_GREG_U16_ZX_U32': '__greg16zx32',
1740 'IEM_MC_FETCH_GREG_U16_ZX_U64': '__greg16zx64',
1741 'IEM_MC_FETCH_GREG_U16_SX_U32': '__greg16sx32',
1742 'IEM_MC_FETCH_GREG_U16_SX_U64': '__greg16sx64',
1743 'IEM_MC_FETCH_GREG_U32': '__greg32',
1744 'IEM_MC_FETCH_GREG_U32_ZX_U64': '__greg32zx64',
1745 'IEM_MC_FETCH_GREG_U32_SX_U64': '__greg32sx64',
1746 'IEM_MC_FETCH_GREG_U64': '__greg64',
1747 'IEM_MC_FETCH_GREG_U64_ZX_U64': '__greg64zx64',
1748 'IEM_MC_FETCH_GREG_PAIR_U32': '__greg32',
1749 'IEM_MC_FETCH_GREG_PAIR_U64': '__greg64',
1750
1751 'IEM_MC_STORE_GREG_U8': '__greg8',
1752 'IEM_MC_STORE_GREG_U16': '__greg16',
1753 'IEM_MC_STORE_GREG_U32': '__greg32',
1754 'IEM_MC_STORE_GREG_U64': '__greg64',
1755 'IEM_MC_STORE_GREG_I64': '__greg64',
1756 'IEM_MC_STORE_GREG_U8_CONST': '__greg8c',
1757 'IEM_MC_STORE_GREG_U16_CONST': '__greg16c',
1758 'IEM_MC_STORE_GREG_U32_CONST': '__greg32c',
1759 'IEM_MC_STORE_GREG_U64_CONST': '__greg64c',
1760 'IEM_MC_STORE_GREG_PAIR_U32': '__greg32',
1761 'IEM_MC_STORE_GREG_PAIR_U64': '__greg64',
1762
1763 'IEM_MC_FETCH_SREG_U16': '__sreg16',
1764 'IEM_MC_FETCH_SREG_ZX_U32': '__sreg32',
1765 'IEM_MC_FETCH_SREG_ZX_U64': '__sreg64',
1766 'IEM_MC_FETCH_SREG_BASE_U64': '__sbase64',
1767 'IEM_MC_FETCH_SREG_BASE_U32': '__sbase32',
1768 'IEM_MC_STORE_SREG_BASE_U64': '__sbase64',
1769 'IEM_MC_STORE_SREG_BASE_U32': '__sbase32',
1770
1771 'IEM_MC_REF_GREG_U8': '__greg8',
1772 'IEM_MC_REF_GREG_U16': '__greg16',
1773 'IEM_MC_REF_GREG_U32': '__greg32',
1774 'IEM_MC_REF_GREG_U64': '__greg64',
1775 'IEM_MC_REF_GREG_U8_CONST': '__greg8',
1776 'IEM_MC_REF_GREG_U16_CONST': '__greg16',
1777 'IEM_MC_REF_GREG_U32_CONST': '__greg32',
1778 'IEM_MC_REF_GREG_U64_CONST': '__greg64',
1779 'IEM_MC_REF_GREG_I32': '__greg32',
1780 'IEM_MC_REF_GREG_I64': '__greg64',
1781 'IEM_MC_REF_GREG_I32_CONST': '__greg32',
1782 'IEM_MC_REF_GREG_I64_CONST': '__greg64',
1783
1784 'IEM_MC_STORE_FPUREG_R80_SRC_REF': '__fpu',
1785 'IEM_MC_REF_FPUREG': '__fpu',
1786
1787 'IEM_MC_FETCH_MREG_U64': '__mreg64',
1788 'IEM_MC_FETCH_MREG_U32': '__mreg32',
1789 'IEM_MC_FETCH_MREG_U16': '__mreg16',
1790 'IEM_MC_FETCH_MREG_U8': '__mreg8',
1791 'IEM_MC_STORE_MREG_U64': '__mreg64',
1792 'IEM_MC_STORE_MREG_U32': '__mreg32',
1793 'IEM_MC_STORE_MREG_U16': '__mreg16',
1794 'IEM_MC_STORE_MREG_U8': '__mreg8',
1795 'IEM_MC_STORE_MREG_U32_ZX_U64': '__mreg32zx64',
1796 'IEM_MC_REF_MREG_U64': '__mreg64',
1797 'IEM_MC_REF_MREG_U64_CONST': '__mreg64',
1798 'IEM_MC_REF_MREG_U32_CONST': '__mreg32',
1799
1800 'IEM_MC_CLEAR_XREG_U32_MASK': '__xreg32x4',
1801 'IEM_MC_FETCH_XREG_U128': '__xreg128',
1802 'IEM_MC_FETCH_XREG_XMM': '__xreg128',
1803 'IEM_MC_FETCH_XREG_U64': '__xreg64',
1804 'IEM_MC_FETCH_XREG_U32': '__xreg32',
1805 'IEM_MC_FETCH_XREG_U16': '__xreg16',
1806 'IEM_MC_FETCH_XREG_U8': '__xreg8',
1807 'IEM_MC_FETCH_XREG_PAIR_U128': '__xreg128p',
1808 'IEM_MC_FETCH_XREG_PAIR_XMM': '__xreg128p',
1809 'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
1810 'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',
1811
1812 'IEM_MC_STORE_XREG_U32_U128': '__xreg32',
1813 'IEM_MC_STORE_XREG_U128': '__xreg128',
1814 'IEM_MC_STORE_XREG_XMM': '__xreg128',
1815 'IEM_MC_STORE_XREG_XMM_U32': '__xreg32',
1816 'IEM_MC_STORE_XREG_XMM_U64': '__xreg64',
1817 'IEM_MC_STORE_XREG_U64': '__xreg64',
1818 'IEM_MC_STORE_XREG_U64_ZX_U128': '__xreg64zx128',
1819 'IEM_MC_STORE_XREG_U32': '__xreg32',
1820 'IEM_MC_STORE_XREG_U16': '__xreg16',
1821 'IEM_MC_STORE_XREG_U8': '__xreg8',
1822 'IEM_MC_STORE_XREG_U32_ZX_U128': '__xreg32zx128',
1823 'IEM_MC_STORE_XREG_R32': '__xreg32',
1824 'IEM_MC_STORE_XREG_R64': '__xreg64',
1825 'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX': '__xreg8zx',
1826 'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX': '__xreg16zx',
1827 'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX': '__xreg32zx',
1828 'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX': '__xreg64zx',
1829 'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX': '__xreg128zx',
1830 'IEM_MC_REF_XREG_U128': '__xreg128',
1831 'IEM_MC_REF_XREG_U128_CONST': '__xreg128',
1832 'IEM_MC_REF_XREG_U32_CONST': '__xreg32',
1833 'IEM_MC_REF_XREG_U64_CONST': '__xreg64',
1834 'IEM_MC_REF_XREG_R32_CONST': '__xreg32',
1835 'IEM_MC_REF_XREG_R64_CONST': '__xreg64',
1836 'IEM_MC_REF_XREG_XMM_CONST': '__xreg128',
1837 'IEM_MC_COPY_XREG_U128': '__xreg128',
1838
1839 'IEM_MC_FETCH_YREG_U256': '__yreg256',
1840 'IEM_MC_FETCH_YREG_YMM': '__yreg256',
1841 'IEM_MC_FETCH_YREG_U128': '__yreg128',
1842 'IEM_MC_FETCH_YREG_U64': '__yreg64',
1843 'IEM_MC_FETCH_YREG_U32': '__yreg32',
1844 'IEM_MC_STORE_YREG_U128': '__yreg128',
1845 'IEM_MC_STORE_YREG_U32_ZX_VLMAX': '__yreg32zx',
1846 'IEM_MC_STORE_YREG_U64_ZX_VLMAX': '__yreg64zx',
1847 'IEM_MC_STORE_YREG_U128_ZX_VLMAX': '__yreg128zx',
1848 'IEM_MC_STORE_YREG_U256_ZX_VLMAX': '__yreg256zx',
1849 'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX': '__yreg8',
1850 'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX': '__yreg16',
1851 'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX': '__yreg32',
1852 'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX': '__yreg64',
1853 'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX': '__yreg128',
1854 'IEM_MC_REF_YREG_U128': '__yreg128',
1855 'IEM_MC_REF_YREG_U128_CONST': '__yreg128',
1856 'IEM_MC_REF_YREG_U64_CONST': '__yreg64',
1857 'IEM_MC_COPY_YREG_U256_ZX_VLMAX': '__yreg256zx',
1858 'IEM_MC_COPY_YREG_U128_ZX_VLMAX': '__yreg128zx',
1859 'IEM_MC_COPY_YREG_U64_ZX_VLMAX': '__yreg64zx',
1860 'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX': '__yreg3296',
1861 'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX': '__yreg6464',
1862 'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX': '__yreg64hi64hi',
1863 'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX': '__yreg64lo64lo',
1864 'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
1865 'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
1866 };
    ## Used by analyzeAndAnnotateName for mapping call MC statements to name suffixes.
    kdAnnotateNameCallStmts = {
        'IEM_MC_CALL_CIMPL_0':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_1':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_2':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_3':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_4':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_5':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_6':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_7':                      '__cimpl',
        'IEM_MC_DEFER_TO_CIMPL_0_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_1_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_2_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_3_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_4_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_5_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_6_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_7_RET':              '__cimpl_defer',
        'IEM_MC_CALL_VOID_AIMPL_0':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_1':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_2':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_3':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_4':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_5':                 '__aimpl',
        'IEM_MC_CALL_AIMPL_0':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_1':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_2':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_3':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_4':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_5':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_6':                      '__aimpl_ret',
        'IEM_MC_CALL_VOID_AIMPL_6':                 '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_0':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_1':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_2':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_3':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_4':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_5':                  '__aimpl_fpu',
        'IEM_MC_CALL_MMX_AIMPL_0':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_1':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_2':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_3':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_4':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_5':                  '__aimpl_mmx',
        'IEM_MC_CALL_SSE_AIMPL_0':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_1':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_2':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_3':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_4':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_5':                  '__aimpl_sse',
        'IEM_MC_CALL_AVX_AIMPL_0':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_1':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_2':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_3':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_4':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_5':                  '__aimpl_avx',
    };
1923 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1924 """
1925 Scans the statements and variation lists for clues about the threaded function,
1926 and sets self.sSubName if successfull.
1927 """
1928 # Operand base naming:
1929 dHits = {};
1930 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1931 if cHits > 0:
1932 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1933 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1934 else:
1935 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1936 if cHits > 0:
1937 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1938 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1939 else:
1940 # No op details, try name it by call type...
1941 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1942 if cHits > 0:
1943 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1944 self.sSubName = self.kdAnnotateNameCallStmts[sStmtNm];
1945 return;
1946
1947 # Add call info if any:
1948 dHits = {};
1949 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1950 if cHits > 0:
1951 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1952 sName += self.kdAnnotateNameCallStmts[sStmtNm][1:];
1953
1954 self.sSubName = sName;
1955 return;
1956
1957 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1958 """ Scans the statements for MC variables and call arguments. """
1959 for oStmt in aoStmts:
1960 if isinstance(oStmt, iai.McStmtVar):
1961 if oStmt.sVarName in self.dVariables:
1962 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1963 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1964 elif isinstance(oStmt, iai.McStmtCall) and oStmt.sName.startswith('IEM_MC_CALL_AIMPL_'):
1965 if oStmt.asParams[1] in self.dVariables:
1966 raise Exception('Variable %s is defined more than once!' % (oStmt.asParams[1],));
1967 self.dVariables[oStmt.asParams[1]] = iai.McStmtVar('IEM_MC_LOCAL', oStmt.asParams[0:2],
1968 oStmt.asParams[0], oStmt.asParams[1]);
1969
1970 # There shouldn't be any variables or arguments declared inside if/
1971 # else blocks, but scan them too to be on the safe side.
1972 if isinstance(oStmt, iai.McStmtCond):
1973 #cBefore = len(self.dVariables);
1974 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1975 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1976 #if len(self.dVariables) != cBefore:
1977 # raise Exception('Variables/arguments defined in conditional branches!');
1978 return True;
1979
    ## Maps finishing/return MC statement names to the annotation constant
    ## returned by analyzeCodeOperation().
    kdReturnStmtAnnotations = {
        'IEM_MC_ADVANCE_RIP_AND_FINISH':    g_ksFinishAnnotation_Advance,
        'IEM_MC_REL_JMP_S8_AND_FINISH':     g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S16_AND_FINISH':    g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S32_AND_FINISH':    g_ksFinishAnnotation_RelJmp,
        'IEM_MC_SET_RIP_U16_AND_FINISH':    g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U32_AND_FINISH':    g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U64_AND_FINISH':    g_ksFinishAnnotation_SetJmp,
        'IEM_MC_REL_CALL_S16_AND_FINISH':   g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S32_AND_FINISH':   g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S64_AND_FINISH':   g_ksFinishAnnotation_RelCall,
        'IEM_MC_IND_CALL_U16_AND_FINISH':   g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U32_AND_FINISH':   g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U64_AND_FINISH':   g_ksFinishAnnotation_IndCall,
        'IEM_MC_DEFER_TO_CIMPL_0_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_1_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_2_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_3_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_4_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_5_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_6_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_7_RET':      g_ksFinishAnnotation_DeferToCImpl,
    };
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
        """
        Analyzes the code looking clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        The dEflStmts dictionary is an in/out parameter that collects EFLAGS
        related MC statements (and CIMPL calls advertising RFLAGS/STATUS_FLAGS)
        keyed by statement name; the caller checks it afterwards.

        The fSeenConditional parameter is set when recursing into the branches
        of an IEM_MC_IF_XXX conditional.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        Returns annotation on return style (None when no return statement was seen).
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_IMPL_C_F_BRANCH_XXXX flags if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
            elif oStmt.sName.startswith('IEM_MC_IND_CALL'):
                # Calls and returns also touch the stack and always end the TB.
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK']    = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB']          = True;
            elif oStmt.sName.startswith('IEM_MC_REL_CALL'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK']    = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB']          = True;
            elif oStmt.sName.startswith('IEM_MC_RETN'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK']    = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB']          = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                elif oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements.  At most one per statement list.
            if oStmt.sName in self.kdReturnStmtAnnotations:
                assert sAnnotation is None;
                sAnnotation = self.kdReturnStmtAnnotations[oStmt.sName];

            # Collect MCs working on EFLAGS.  Caller will check this.
            if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS',
                               'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS', 'IEM_MC_ARG_LOCAL_EFLAGS', ):
                dEflStmts[oStmt.sName] = oStmt;
            elif isinstance(oStmt, iai.McStmtCall):
                # CIMPL calls only count when their flags mention RFLAGS/STATUS_FLAGS.
                if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
                                   'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
                    if (   oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
                        or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
                        dEflStmts[oStmt.sName] = oStmt;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);

        return sAnnotation;
2080
2081 def analyzeThreadedFunction(self, oGenerator):
2082 """
2083 Analyzes the code, identifying the number of parameters it requires and such.
2084
2085 Returns dummy True - raises exception on trouble.
2086 """
2087
2088 #
2089 # Decode the block into a list/tree of McStmt objects.
2090 #
2091 aoStmts = self.oMcBlock.decode();
2092
2093 #
2094 # Check the block for errors before we proceed (will decode it).
2095 #
2096 asErrors = self.oMcBlock.check();
2097 if asErrors:
2098 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
2099 for sError in asErrors]));
2100
2101 #
2102 # Scan the statements for local variables and call arguments (self.dVariables).
2103 #
2104 self.analyzeFindVariablesAndCallArgs(aoStmts);
2105
2106 #
2107 # Scan the code for IEM_CIMPL_F_ and other clues.
2108 #
2109 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
2110 dEflStmts = {};
2111 self.analyzeCodeOperation(aoStmts, dEflStmts);
2112 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
2113 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
2114 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags)
2115 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE' in self.dsCImplFlags) > 1):
2116 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE/AIMPL_WITH_XSTATE calls', oGenerator);
2117
2118 #
2119 # Analyse EFLAGS related MCs and @opflmodify and friends.
2120 #
2121 if dEflStmts:
2122 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
2123 if ( oInstruction is None
2124 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
2125 sMcNames = '+'.join(dEflStmts.keys());
2126 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
2127 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
2128 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts or 'IEM_MC_COMMIT_EFLAGS_OPT' in dEflStmts:
2129 if not oInstruction.asFlModify:
2130 if oInstruction.sMnemonic not in [ 'not', ]:
2131 self.error('Uses IEM_MC_COMMIT_EFLAGS[_OPT] but has no flags in @opflmodify!', oGenerator);
2132 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
2133 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
2134 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
2135 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
2136 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
2137 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
2138 if not oInstruction.asFlModify:
2139 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
2140 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
2141 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
2142 if not oInstruction.asFlTest:
2143 if oInstruction.sMnemonic not in [ 'not', ]:
2144 self.error('Expected @opfltest!', oGenerator);
2145 if oInstruction and oInstruction.asFlSet:
2146 for sFlag in oInstruction.asFlSet:
2147 if sFlag not in oInstruction.asFlModify:
2148 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
2149 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2150 if oInstruction and oInstruction.asFlClear:
2151 for sFlag in oInstruction.asFlClear:
2152 if sFlag not in oInstruction.asFlModify:
2153 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
2154 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2155
2156 #
2157 # Create variations as needed.
2158 #
2159 if iai.McStmt.findStmtByNames(aoStmts,
2160 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
2161 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
2162 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
2163 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
2164 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
2165
2166 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
2167 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
2168 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
2169 'IEM_MC_FETCH_MEM_U32' : True,
2170 'IEM_MC_FETCH_MEM_U64' : True,
2171 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
2172 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
2173 'IEM_MC_STORE_MEM_U32' : True,
2174 'IEM_MC_STORE_MEM_U64' : True, }):
2175 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2176 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
2177 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2178 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
2179 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2180 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
2181 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2182 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
2183 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2184 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2185 else:
2186 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
2187 else:
2188 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2189 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
2190 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2191 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
2192 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2193 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
2194 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2195 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
2196 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2197 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2198 else:
2199 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
2200
2201 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2202 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
2203 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
2204 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
2205 asVariationsBase = asVariations;
2206 asVariations = [];
2207 for sVariation in asVariationsBase:
2208 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
2209 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
2210
2211 if not iai.McStmt.findStmtByNames(aoStmts,
2212 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
2213 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2214 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2215 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
2216 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
2217 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
2218 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
2219 'IEM_MC_REL_CALL_S16_AND_FINISH': True,
2220 'IEM_MC_REL_CALL_S32_AND_FINISH': True,
2221 'IEM_MC_REL_CALL_S64_AND_FINISH': True,
2222 'IEM_MC_IND_CALL_U16_AND_FINISH': True,
2223 'IEM_MC_IND_CALL_U32_AND_FINISH': True,
2224 'IEM_MC_IND_CALL_U64_AND_FINISH': True,
2225 'IEM_MC_RETN_AND_FINISH': True,
2226 }):
2227 asVariations = [sVariation for sVariation in asVariations
2228 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
2229
2230 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
2231
2232 # Dictionary variant of the list.
2233 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
2234
2235 #
2236 # Try annotate the threaded function name.
2237 #
2238 self.analyzeAndAnnotateName(aoStmts);
2239
2240 #
2241 # Continue the analysis on each variation.
2242 #
2243 for oVariation in self.aoVariations:
2244 oVariation.analyzeVariation(aoStmts);
2245
2246 return True;
2247
    ## Used by emitThreadedCallStmts.
    ## Set of variation names for which the generated selection switch must also
    ## factor in prefix state: the '| 8' bit (effective address-size differs from
    ## the CPU mode) and the '| 16' bit (effective segment is FS/GS/CS) that
    ## emitThreadedCallStmts adds to the switch value.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs:    True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs:   True,
        ThreadedFunctionVariation.ksVariation_32_Addr16:  True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat:    True,
        ThreadedFunctionVariation.ksVariation_32f_Flat:   True,
        ThreadedFunctionVariation.ksVariation_16_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
2261
    def emitThreadedCallStmts(self, sBranch = None, fTbLookupTable = False): # pylint: disable=too-many-statements
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        The sBranch parameter is used with conditional branches where we'll emit
        different threaded calls depending on whether we're in the jump-taken or
        no-jump code path.  It is None for non-conditional code, 'Jmp' or 'NoJmp'
        otherwise.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.

        Returns a list of iai.McStmt objects.  When multiple variations exist a
        C 'switch' is generated; its value packs the CPU mode in the low bits
        and ORs in 8 (address-size override), 16 (FS/GS/CS effective segment)
        and 32 (pending eflags/breakpoint work) as applicable.
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            assert not sBranch;
            return self.aoVariations[0].emitThreadedCallStmtsForVariant(0, fTbLookupTable);

        #
        # Case statement sub-class.
        #
        dByVari = self.dVariations;
        #fDbg   = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            """ One switch case: condition label + optional threaded-call body (None = fall thru). """
            def __init__(self, sCond, sVarNm = None):
                self.sCond  = sCond;
                self.sVarNm = sVarNm;
                self.oVar   = dByVari[sVarNm] if sVarNm else None;
                self.aoBody = self.oVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sVarNm else None;

            def toCode(self):
                """ Renders the case as full statements (body + break). """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                """ Renders the case as just an enmFunction assignment (collapsed-switch form). """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                """ Checks whether the case body is identical to oThat's, ignoring the function index name. """
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    # (The assert is compiled out with -O, hence the runtime check as well.)
                    if isinstance(oStmt, iai.McStmtCond):
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        # Param #1 of an IEM_MC2_EMIT_CALL_* is the function index and
                        # is allowed to differ; everything else must match exactly.
                        if (    sParam != oThatStmt.asParams[iParam]
                            and (   iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        #
        fSimple      = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple       = False;                                              # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        # 64-bit mode cases:
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_64BIT',             ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16',        ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16',    None), # fall thru
                Case('IEMMODE_64BIT | 8',         ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32',            ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16',       ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16',   None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8',        ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
        elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_64BIT',
                                ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
            if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32',
                                    ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));

        # 32-bit mode cases:
        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',           ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16',      None), # fall thru
                Case('IEMMODE_32BIT | 16',                                            None), # fall thru
                Case('IEMMODE_32BIT',                                                 ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8',       None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8',                                             ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',           ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16',      None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16',                                            None), # fall thru
                    Case('IEMMODE_32BIT | 32',                                                 ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8',       None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8',                                             ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',                                       ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',                                       ThrdFnVar.ksVariation_32f),
                ]);
        elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',
                     ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
            ]);
            if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',
                         ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
                ]);

        # 16-bit mode (286 and later) cases:
        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16',      None), # fall thru
                Case('IEMMODE_16BIT',           ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16',  None), # fall thru
                Case('IEMMODE_16BIT | 8',       ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16',      None), # fall thru
                    Case('IEMMODE_16BIT | 32',           ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16',  None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8',       ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
        elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT',
                                ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
            if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32',
                                    ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));


        # Pre-386 16-bit cases:
        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari:  # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
                                ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
        if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
                                ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # Collapsed form: switch only assigns enmFunction, one shared body follows.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmtsForVariant(0, fTbLookupTable,
                                                                                                       'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
2527
    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        aoStmts is the statement list to morph (deep copies are made, the input
        is not modified).  fIsConditional indicates a conditional-branch block,
        fCallEmitted whether a threaded call was already emitted further up,
        cDepth the recursion depth (0 = top level), and sBranchAnnotation the
        finish annotation (g_ksFinishAnnotation_*) of the enclosing branch.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                # Patch the IEM_CIMPL_F_XXX flags into the IEM_MC_BEGIN statement.
                oNewStmt.asParams[1] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        # Insert the call(s) just before this statement (pop + re-append).
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                        else:
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH':  True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                    elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                        # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                        if not sBranchAnnotation:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        assert fIsConditional;
                        aoDecoderStmts.pop();
                        if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                        elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                              { 'IEM_MC_REL_JMP_S8_AND_FINISH':  1,
                                                                'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                        else:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                elif (    not fIsConditional
                      and oStmt.fDecode
                      and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    # C++ decode-done helper: emit the call right after it.
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                           fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                 fCallEmitted, cDepth + 1,
                                                                                 oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                # Only consider the call emitted if both branches emitted one.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
2613
2614
2615 def generateInputCode(self):
2616 """
2617 Modifies the input code.
2618 """
2619 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2620
2621 if len(self.oMcBlock.aoStmts) == 1:
2622 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2623 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2624 if self.dsCImplFlags:
2625 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2626 else:
2627 sCode += '0;\n';
2628 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2629 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2630 sIndent = ' ' * (min(cchIndent, 2) - 2);
2631 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2632 return sCode;
2633
2634 # IEM_MC_BEGIN/END block
2635 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2636 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2637 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2638 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2639 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2640
# Short alias for ThreadedFunctionVariation, mainly used when referencing the
# many ksVariation_* constants (keeps those long expressions within line limits).
ThrdFnVar = ThreadedFunctionVariation;
2643
2644
2645class IEMThreadedGenerator(object):
2646 """
2647 The threaded code generator & annotator.
2648 """
2649
2650 def __init__(self):
2651 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2652 self.oOptions = None # type: argparse.Namespace
2653 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2654 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
2655 self.cErrors = 0;
2656
2657 #
2658 # Error reporting.
2659 #
2660
2661 def rawError(self, sCompleteMessage):
2662 """ Output a raw error and increment the error counter. """
2663 print(sCompleteMessage, file = sys.stderr);
2664 self.cErrors += 1;
2665 return False;
2666
2667 #
2668 # Processing.
2669 #
2670
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the input files (self.oOptions.asInFiles), creates and analyzes
        a ThreadedFunction for each MC block, settles name-suffix clashes,
        populates self.aidxFirstFunctions, optionally runs the native
        recompiler analysis, and gathers/prints variable & argument statistics.

        Returns True on success, False if any errors were reported (rawError).
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyzeThreadedFunction(self);
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams]      = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Do another pass over the threaded functions to settle the name suffix.
        iThreadedFn = 0;
        while iThreadedFn < len(self.aoThreadedFuncs):
            oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
            assert oFunction;
            iThreadedFnNext = iThreadedFn + 1;
            dSubNames       = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
            while (    iThreadedFnNext < len(self.aoThreadedFuncs)
                   and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
                dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
                iThreadedFnNext += 1;
            # If several blocks within the same decoder function share a sub-name,
            # disambiguate them all with a numeric '_N' suffix.
            if iThreadedFnNext - iThreadedFn > len(dSubNames):
                iSubName = 0;
                while iThreadedFn + iSubName < iThreadedFnNext:
                    self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
                    iSubName += 1;
            iThreadedFn = iThreadedFnNext;

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction       = 0;
        oThreadedFunction       = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.analyzeThreadedFunctionsForNativeRecomp(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs         = 0;
        cMaxVars         = 0;
        cMaxVarsAndArgs  = 0;
        cbMaxArgs        = 0;
        cbMaxVars        = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.aoLocals or oThreadedFunction.oMcBlock.aoArgs:
                # Counts.
                cMaxVars        = max(cMaxVars, len(oThreadedFunction.oMcBlock.aoLocals));
                cMaxArgs        = max(cMaxArgs, len(oThreadedFunction.oMcBlock.aoArgs));
                cMaxVarsAndArgs = max(cMaxVarsAndArgs,
                                      len(oThreadedFunction.oMcBlock.aoLocals) + len(oThreadedFunction.oMcBlock.aoArgs));
                # NOTE(review): the check is '> 9' but the message says 'max 10' - confirm the intended bound.
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       len(oThreadedFunction.oMcBlock.aoLocals), len(oThreadedFunction.oMcBlock.aoArgs),));
                # Calc stack allocation size:
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars        = max(cbMaxVars, cbVars);
                cbMaxArgs        = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        if self.cErrors > 0:
            print('fatal error: %u error%s during processing. Details above.'
                  % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
            return False;
        return True;
2770
2771 #
2772 # Output
2773 #
2774
2775 def generateLicenseHeader(self):
2776 """
2777 Returns the lines for a license header.
2778 """
2779 return [
2780 '/*',
2781 ' * Autogenerated by $Id: IEMAllThrdPython.py 105235 2024-07-09 12:30:38Z vboxsync $ ',
2782 ' * Do not edit!',
2783 ' */',
2784 '',
2785 '/*',
2786 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2787 ' *',
2788 ' * This file is part of VirtualBox base platform packages, as',
2789 ' * available from https://www.virtualbox.org.',
2790 ' *',
2791 ' * This program is free software; you can redistribute it and/or',
2792 ' * modify it under the terms of the GNU General Public License',
2793 ' * as published by the Free Software Foundation, in version 3 of the',
2794 ' * License.',
2795 ' *',
2796 ' * This program is distributed in the hope that it will be useful, but',
2797 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2798 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2799 ' * General Public License for more details.',
2800 ' *',
2801 ' * You should have received a copy of the GNU General Public License',
2802 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2803 ' *',
2804 ' * The contents of this file may alternatively be used under the terms',
2805 ' * of the Common Development and Distribution License Version 1.0',
2806 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2807 ' * in the VirtualBox distribution, in which case the provisions of the',
2808 ' * CDDL are applicable instead of those of the GPL.',
2809 ' *',
2810 ' * You may elect to license modified versions of this file under the',
2811 ' * terms and conditions of either the GPL or the CDDL or both.',
2812 ' *',
2813 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2814 ' */',
2815 '',
2816 '',
2817 '',
2818 ];
2819
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Entry format: (name suffix used for kIemThreadedFunc_BltIn_<name>,
    ##                cUserArgs, fHasNativeRecompilerImpl).
    katBltIns = (
        ( 'Nop',                                                0, True  ),
        ( 'LogCpuState',                                        0, True  ),

        ( 'DeferToCImpl0',                                      2, True  ),
        ( 'CheckIrq',                                           0, True  ),
        ( 'CheckMode',                                          1, True  ),
        ( 'CheckHwInstrBps',                                    0, False ),
        ( 'CheckCsLim',                                         1, True  ),

        ( 'CheckCsLimAndOpcodes',                               3, True  ),
        ( 'CheckOpcodes',                                       3, True  ),
        ( 'CheckOpcodesConsiderCsLim',                          3, True  ),

        ( 'CheckCsLimAndPcAndOpcodes',                          3, True  ),
        ( 'CheckPcAndOpcodes',                                  3, True  ),
        ( 'CheckPcAndOpcodesConsiderCsLim',                     3, True  ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb',           3, True  ),
        ( 'CheckOpcodesAcrossPageLoadingTlb',                   3, True  ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim',      2, True  ),

        ( 'CheckCsLimAndOpcodesLoadingTlb',                     3, True  ),
        ( 'CheckOpcodesLoadingTlb',                             3, True  ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim',                3, True  ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb',           2, True  ),
        ( 'CheckOpcodesOnNextPageLoadingTlb',                   2, True  ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim',      2, True  ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb',            2, True  ),
        ( 'CheckOpcodesOnNewPageLoadingTlb',                    2, True  ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim',       2, True  ),
    );
2856
    def generateThreadedFunctionsHeader(self, oOut, _):
        """
        Generates the threaded functions header file.
        Returns success indicator.

        Writes the IEMTHREADEDFUNCS enum (built-in functions first, then one
        entry per function variation in kasVariationsEmitOrder order) plus the
        extern declarations of the function/arg-count/name/stats tables to
        oOut.  As a side effect, each emitted variation gets its enum value
        stored in oVariation.iEnumValue.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            '    kIemThreadedFunc_Invalid = 0,',
            '',
            '    /*',
            '     * Predefined',
            '     */',
        ];
        asLines += ['    kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                '    /*',
                '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                '     */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    # Record the enum value on the variation for later table generation.
                    iThreadedFunction      += 1;
                    oVariation.iEnumValue   = iThreadedFunction;
                    asLines.append('    ' + oVariation.getIndexName() + ',');
        asLines += [
            '    kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            '#if defined(IN_RING3)',
            'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
            '#endif',
        ];

        oOut.write('\n'.join(asLines));
        return True;
2911
    ## Maps a parameter bit width to the C mask expression used when unpacking
    ## fields out of the packed 64-bit parameters (see
    ## generateFunctionParameterUnpacking).
    ksBitsToIntMask = {
        1:  "UINT64_C(0x1)",
        2:  "UINT64_C(0x3)",
        4:  "UINT64_C(0xf)",
        8:  "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2920
2921 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams, uNoRefLevel = 0):
2922 """
2923 Outputs code for unpacking parameters.
2924 This is shared by the threaded and native code generators.
2925 """
2926 aasVars = [];
2927 for aoRefs in oVariation.dParamRefs.values():
2928 oRef = aoRefs[0];
2929 if oRef.sType[0] != 'P':
2930 cBits = g_kdTypeInfo[oRef.sType][0];
2931 sType = g_kdTypeInfo[oRef.sType][2];
2932 else:
2933 cBits = 64;
2934 sType = oRef.sType;
2935
2936 sTypeDecl = sType + ' const';
2937
2938 if cBits == 64:
2939 assert oRef.offNewParam == 0;
2940 if sType == 'uint64_t':
2941 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2942 else:
2943 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2944 elif oRef.offNewParam == 0:
2945 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2946 else:
2947 sUnpack = '(%s)((%s >> %s) & %s);' \
2948 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2949
2950 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2951
2952 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2953 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2954 acchVars = [0, 0, 0, 0, 0];
2955 for asVar in aasVars:
2956 for iCol, sStr in enumerate(asVar):
2957 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2958 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2959 for asVar in sorted(aasVars):
2960 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2961
2962 if uNoRefLevel > 0 and aasVars:
2963 if uNoRefLevel > 1:
2964 # level 2: Everything. This is used by liveness.
2965 oOut.write(' ');
2966 for asVar in sorted(aasVars):
2967 oOut.write(' RT_NOREF_PV(%s);' % (asVar[2],));
2968 oOut.write('\n');
2969 else:
2970 # level 1: Only pfnXxxx variables. This is used by native.
2971 for asVar in sorted(aasVars):
2972 if asVar[2].startswith('pfn'):
2973 oOut.write(' RT_NOREF_PV(%s);\n' % (asVar[2],));
2974 return True;
2975
    ## Names of the generic uint64_t parameters taken by each threaded function
    ## (count matches g_kcThreadedParams).
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut, _):
        """
        Generates the threaded functions source file.
        Returns success indicator.

        Note! The emission order (variation-major, then function) must match
              generateThreadedFunctionsHeader, which assigned
              oVariation.iEnumValue; this is asserted when building the tables.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Banner separating the variations from one another.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write('    RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    /*Invalid*/ NULL,',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            '    0, /*Invalid*/',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    "Invalid",',
        ];
        asStatTable = [
            '/**',
            ' * Function statistics name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
            '{',
            '    NULL,',
        ];
        aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);

        # Predefined (built-in) entries first, mirroring the enum layout.
        for asTable in aasTables:
            asTable.extend((
                '',
                '    /*',
                '     * Predefined.',
                '     */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append('    iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asArgCntTab.append('    %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
            asNameTable.append('    "BltIn_%s",' % (sFuncNm,));
            asStatTable.append('    "BltIn/%s",' % (sFuncNm,));

        # One entry per variation instance, asserting the enum value assigned
        # by generateThreadedFunctionsHeader stays in sync with our counting.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    '    /*',
                    '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    '     */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append('    /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append('    /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append('    /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
                    asStatTable.append('    "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write(  '\n'
                   + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write(  '\n'
                   + '\n'
                   + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write(  '\n'
                   + '\n'
                   + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                   + '\n'
                   + '\n'
                   + '#if defined(IN_RING3)\n');
        oOut.write('\n'.join(asStatTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 */\n');

        return True;
3126
3127 def generateNativeFunctionsHeader(self, oOut, _):
3128 """
3129 Generates the native recompiler functions header file.
3130 Returns success indicator.
3131 """
3132 if not self.oOptions.fNativeRecompilerEnabled:
3133 return True;
3134
3135 asLines = self.generateLicenseHeader();
3136
3137 # Prototype the function table.
3138 asLines += [
3139 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
3140 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
3141 '',
3142 ];
3143
3144 # Emit indicators as to which of the builtin functions have a native
3145 # recompiler function and which not. (We only really need this for
3146 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
3147 for atBltIn in self.katBltIns:
3148 if atBltIn[1]:
3149 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
3150 else:
3151 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
3152
3153 # Emit prototypes for the builtin functions we use in tables.
3154 asLines += [
3155 '',
3156 '/* Prototypes for built-in functions used in the above tables. */',
3157 ];
3158 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3159 if fHaveRecompFunc:
3160 asLines += [
3161 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3162 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3163 ];
3164
3165 # Emit prototypes for table function.
3166 asLines += [
3167 '',
3168 '#ifdef IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES'
3169 ]
3170 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3171 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3172 asLines += [
3173 '',
3174 '/* Variation: ' + sVarName + ' */',
3175 ];
3176 for oThreadedFunction in self.aoThreadedFuncs:
3177 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3178 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3179 asLines.append('IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(' + oVariation.getNativeFunctionName() + ');');
3180 asLines += [
3181 '',
3182 '#endif /* IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES */',
3183 ]
3184
3185 oOut.write('\n'.join(asLines));
3186 return True;
3187
    def generateNativeFunctionsSource(self, oOut, idxPart):
        """
        Generates the native recompiler functions source file.
        Returns success indicator.

        The output is split over cParts (4) files; idxPart selects which slice
        of the variation list goes into this call's file.  The function
        pointer table is only emitted into part 0.
        """
        cParts = 4;
        assert(idxPart in range(cParts));
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # The files are split up by threaded variation as that's the simplest way to
        # do it, even if the distribution isn't entirely even (ksVariation_Default
        # only has the defer to cimpl bits and the pre-386 variants will naturally
        # have fewer instructions).
        #
        cVariationsPerFile = len(ThreadedFunctionVariation.kasVariationsEmitOrder) // cParts;
        idxFirstVar        = idxPart * cVariationsPerFile;
        idxEndVar          = idxFirstVar + cVariationsPerFile;
        if idxPart + 1 >= cParts:
            # Last part also takes any remainder from the integer division.
            idxEndVar      = len(ThreadedFunctionVariation.kasVariationsEmitOrder);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder[idxFirstVar:idxEndVar]:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 1);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table if this is the first file.
        # Non-recompilable variations get NULL entries so the table stays
        # parallel to g_apfnIemThreadedFunctions.
        #
        if idxPart == 0:
            oOut.write(  '\n'
                       + '\n'
                       + '/*\n'
                       + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                       + ' */\n'
                       + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                       + '{\n'
                       + '    /*Invalid*/ NULL,'
                       + '\n'
                       + '    /*\n'
                       + '     * Predefined.\n'
                       + '     */\n'
                       );
            for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
                if fHaveRecompFunc:
                    oOut.write('    iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
                else:
                    oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

            iThreadedFunction = 1 + len(self.katBltIns);
            for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
                oOut.write(  '    /*\n'
                           + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                           + '     */\n');
                for oThreadedFunction in self.aoThreadedFuncs:
                    oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                    if oVariation:
                        iThreadedFunction += 1;
                        assert oVariation.iEnumValue == iThreadedFunction;
                        sName = oVariation.getNativeFunctionName();
                        if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                            oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                        else:
                            oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

            oOut.write(  '};\n');

        oOut.write('\n');
        return True;
3298
    def generateNativeLivenessSource(self, oOut, _):
        """
        Generates the native recompiler liveness analysis functions source file.
        Returns success indicator.

        Structure mirrors generateNativeFunctionsSource, but everything goes
        into a single file and the liveness function/table names are used.
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    # uNoRefLevel=2: RT_NOREF all unpacked variables, as the
                    # liveness code may not reference any of them.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 2);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.  Non-recompilable variations get NULL
        # entries so the table stays parallel to g_apfnIemThreadedFunctions.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + '    /*Invalid*/ NULL,'
                   + '\n'
                   + '    /*\n'
                   + '     * Predefined.\n'
                   + '     */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write('    iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  '    /*\n'
                       + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + '     */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getLivenessFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
3395
3396
3397 def getThreadedFunctionByIndex(self, idx):
3398 """
3399 Returns a ThreadedFunction object for the given index. If the index is
3400 out of bounds, a dummy is returned.
3401 """
3402 if idx < len(self.aoThreadedFuncs):
3403 return self.aoThreadedFuncs[idx];
3404 return ThreadedFunction.dummyInstance();
3405
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.
        Returns success indicator.

        idxFile selects which input files are included: each parser's source is
        emitted when its g_aaoAllInstrFilesAndDefaultMapAndSet entry maps to
        idxFile (or to -1, meaning every file set).
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude     = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                # -1 would mean the file wasn't found in the table at all.
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the input lines, replacing each MC block belonging to this
            # file with its generated replacement code.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine             = 0;
            while iLine < len(oParser.asLines):
                sLine  = oParser.asLines[iLine];
                iLine += 1;                 # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if (   iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block.  Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    assert (   (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the block's last line and emit whatever
                    # follows the end of the block on that line.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert (   sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        assert (   sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
3496
3497
3498 #
3499 # Main
3500 #
3501
3502 def main(self, asArgs):
3503 """
3504 C-like main function.
3505 Returns exit code.
3506 """
3507
3508 #
3509 # Parse arguments
3510 #
3511 sScriptDir = os.path.dirname(__file__);
3512 oParser = argparse.ArgumentParser(add_help = False);
3513 oParser.add_argument('asInFiles',
3514 metavar = 'input.cpp.h',
3515 nargs = '*',
3516 default = [os.path.join(sScriptDir, aoInfo[0])
3517 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3518 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3519 oParser.add_argument('--host-arch',
3520 metavar = 'arch',
3521 dest = 'sHostArch',
3522 action = 'store',
3523 default = None,
3524 help = 'The host architecture.');
3525
3526 oParser.add_argument('--out-thrd-funcs-hdr',
3527 metavar = 'file-thrd-funcs.h',
3528 dest = 'sOutFileThrdFuncsHdr',
3529 action = 'store',
3530 default = '-',
3531 help = 'The output header file for the threaded functions.');
3532 oParser.add_argument('--out-thrd-funcs-cpp',
3533 metavar = 'file-thrd-funcs.cpp',
3534 dest = 'sOutFileThrdFuncsCpp',
3535 action = 'store',
3536 default = '-',
3537 help = 'The output C++ file for the threaded functions.');
3538 oParser.add_argument('--out-n8ve-funcs-hdr',
3539 metavar = 'file-n8tv-funcs.h',
3540 dest = 'sOutFileN8veFuncsHdr',
3541 action = 'store',
3542 default = '-',
3543 help = 'The output header file for the native recompiler functions.');
3544 oParser.add_argument('--out-n8ve-funcs-cpp1',
3545 metavar = 'file-n8tv-funcs1.cpp',
3546 dest = 'sOutFileN8veFuncsCpp1',
3547 action = 'store',
3548 default = '-',
3549 help = 'The output C++ file for the native recompiler functions part 1.');
3550 oParser.add_argument('--out-n8ve-funcs-cpp2',
3551 metavar = 'file-n8ve-funcs2.cpp',
3552 dest = 'sOutFileN8veFuncsCpp2',
3553 action = 'store',
3554 default = '-',
3555 help = 'The output C++ file for the native recompiler functions part 2.');
3556 oParser.add_argument('--out-n8ve-funcs-cpp3',
3557 metavar = 'file-n8ve-funcs3.cpp',
3558 dest = 'sOutFileN8veFuncsCpp3',
3559 action = 'store',
3560 default = '-',
3561 help = 'The output C++ file for the native recompiler functions part 3.');
3562 oParser.add_argument('--out-n8ve-funcs-cpp4',
3563 metavar = 'file-n8ve-funcs4.cpp',
3564 dest = 'sOutFileN8veFuncsCpp4',
3565 action = 'store',
3566 default = '-',
3567 help = 'The output C++ file for the native recompiler functions part 4.');
3568 oParser.add_argument('--out-n8ve-liveness-cpp',
3569 metavar = 'file-n8ve-liveness.cpp',
3570 dest = 'sOutFileN8veLivenessCpp',
3571 action = 'store',
3572 default = '-',
3573 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3574 oParser.add_argument('--native',
3575 dest = 'fNativeRecompilerEnabled',
3576 action = 'store_true',
3577 default = False,
3578 help = 'Enables generating the files related to native recompilation.');
3579 oParser.add_argument('--out-mod-input1',
3580 metavar = 'file-instr.cpp.h',
3581 dest = 'sOutFileModInput1',
3582 action = 'store',
3583 default = '-',
3584 help = 'The output C++/header file for modified input instruction files part 1.');
3585 oParser.add_argument('--out-mod-input2',
3586 metavar = 'file-instr.cpp.h',
3587 dest = 'sOutFileModInput2',
3588 action = 'store',
3589 default = '-',
3590 help = 'The output C++/header file for modified input instruction files part 2.');
3591 oParser.add_argument('--out-mod-input3',
3592 metavar = 'file-instr.cpp.h',
3593 dest = 'sOutFileModInput3',
3594 action = 'store',
3595 default = '-',
3596 help = 'The output C++/header file for modified input instruction files part 3.');
3597 oParser.add_argument('--out-mod-input4',
3598 metavar = 'file-instr.cpp.h',
3599 dest = 'sOutFileModInput4',
3600 action = 'store',
3601 default = '-',
3602 help = 'The output C++/header file for modified input instruction files part 4.');
3603 oParser.add_argument('--help', '-h', '-?',
3604 action = 'help',
3605 help = 'Display help and exit.');
3606 oParser.add_argument('--version', '-V',
3607 action = 'version',
3608 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3609 % (__version__.split()[1], iai.__version__.split()[1],),
3610 help = 'Displays the version/revision of the script and exit.');
3611 self.oOptions = oParser.parse_args(asArgs[1:]);
3612 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3613
3614 if self.oOptions.sHostArch not in ('amd64', 'arm64'):
3615 print('error! Unsupported (or missing) host architecture: %s' % (self.oOptions.sHostArch,), file = sys.stderr);
3616 return 1;
3617
3618 #
3619 # Process the instructions specified in the IEM sources.
3620 #
3621 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3622 #
3623 # Generate the output files.
3624 #
3625 aaoOutputFiles = (
3626 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader, 0, ),
3627 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource, 0, ),
3628 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader, 0, ),
3629 ( self.oOptions.sOutFileN8veFuncsCpp1, self.generateNativeFunctionsSource, 0, ),
3630 ( self.oOptions.sOutFileN8veFuncsCpp2, self.generateNativeFunctionsSource, 1, ),
3631 ( self.oOptions.sOutFileN8veFuncsCpp3, self.generateNativeFunctionsSource, 2, ),
3632 ( self.oOptions.sOutFileN8veFuncsCpp4, self.generateNativeFunctionsSource, 3, ),
3633 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource, 0, ),
3634 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput, 1, ),
3635 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput, 2, ),
3636 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput, 3, ),
3637 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput, 4, ),
3638 );
3639 fRc = True;
3640 for sOutFile, fnGenMethod, iPartNo in aaoOutputFiles:
3641 if sOutFile == '-':
3642 fRc = fnGenMethod(sys.stdout, iPartNo) and fRc;
3643 else:
3644 try:
3645 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3646 except Exception as oXcpt:
3647 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3648 return 1;
3649 fRc = fnGenMethod(oOut, iPartNo) and fRc;
3650 oOut.close();
3651 if fRc:
3652 return 0;
3653
3654 return 1;
3655
3656
# Script entry point: run the generator on the process arguments and use its
# status as the process exit code.
if __name__ == '__main__':
    sys.exit(IEMThreadedGenerator().main(sys.argv));
3659
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette