VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 105278

Last change on this file since 105278 was 105277, checked in by vboxsync, 8 months ago

VMM/IEM: Rework roundps/roundpd which only have two operands instead of three, add vroundps/vroundpd, bugref:9898 [build fix]

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 191.9 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 105277 2024-07-11 17:13:59Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 105277 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
if sys.version_info[0] >= 3:
    # Python 3 dropped the builtin 'long' type; alias it to 'int' so code
    # written with Python 2 compatibility in mind keeps working.
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;
55
## Maps a C type name to (size in bits, signedness, C type used for storage).
## These are the types permitted for threaded function parameters.
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type      )
    'int8_t':       (    8,    True, 'int8_t',   ),
    'int16_t':      (   16,    True, 'int16_t',  ),
    'int32_t':      (   32,    True, 'int32_t',  ),
    'int64_t':      (   64,    True, 'int64_t',  ),
    'uint4_t':      (    4,   False, 'uint8_t',  ),  # 4-bit value carried in a uint8_t.
    'uint8_t':      (    8,   False, 'uint8_t',  ),
    'uint16_t':     (   16,   False, 'uint16_t', ),
    'uint32_t':     (   32,   False, 'uint32_t', ),
    'uint64_t':     (   64,   False, 'uint64_t', ),
    'uintptr_t':    (   64,   False, 'uintptr_t',),  # ASSUMES 64-bit host pointer size.
    'bool':         (    1,   False, 'bool',     ),
    'IEMMODE':      (    2,   False, 'IEMMODE',  ),
};
71
# Only for getTypeBitCount/variables.
# Extends g_kdTypeInfo with larger aggregate/SIMD types that may appear as
# local variables but not as packed threaded function parameters.
g_kdTypeInfo2 = {
    'RTFLOAT32U':        (        32, False, 'RTFLOAT32U',       ),
    'RTFLOAT64U':        (        64, False, 'RTFLOAT64U',       ),
    'RTUINT64U':         (        64, False, 'RTUINT64U',        ),
    'RTGCPTR':           (        64, False, 'RTGCPTR',          ),
    'RTPBCD80U':         (        80, False, 'RTPBCD80U',        ),
    'RTFLOAT80U':        (        80, False, 'RTFLOAT80U',       ),
    'IEMFPURESULT':      (     80+16, False, 'IEMFPURESULT',     ),
    'IEMFPURESULTTWO':   (  80+16+80, False, 'IEMFPURESULTTWO',  ),
    'RTUINT128U':        (       128, False, 'RTUINT128U',       ),
    'X86XMMREG':         (       128, False, 'X86XMMREG',        ),
    'X86YMMREG':         (       256, False, 'X86YMMREG',        ),
    'IEMMEDIAF2XMMSRC':  (       256, False, 'IEMMEDIAF2XMMSRC', ),
    'RTUINT256U':        (       256, False, 'RTUINT256U',       ),
    'IEMPCMPISTRXSRC':   (       256, False, 'IEMPCMPISTRXSRC',  ),
    'IEMPCMPESTRXSRC':   (       384, False, 'IEMPCMPESTRXSRC',  ),
}; #| g_kdTypeInfo; - requires 3.9
# The dict union operator '|' needs Python 3.9, so merge via update() instead.
g_kdTypeInfo2.update(g_kdTypeInfo);
91
def getTypeBitCount(sType):
    """
    Translates a type name to its width in bits.

    Unknown types that look like pointers (contain '*' or start with 'P')
    are assumed to be 64-bit host pointers; any other unknown type is
    reported and defaults to 64 bits as well.
    """
    aoInfo = g_kdTypeInfo2.get(sType);
    if aoInfo is not None:
        return aoInfo[0];
    if '*' in sType or sType[0] == 'P':
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
103
## Maps IEMCPU fields (referenced as 'pVCpu->iem.s.<field>') to a type tuple.
## Entries whose type is None are fields that threaded code must not
## reference; analyzeReferenceToType raises a problem for those.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte':  ( None, ),
    'cbInstrBuf':        ( None, ),
    'pbInstrBuf':        ( None, ),
    'uInstrBufPc':       ( None, ),
    'cbInstrBufTotal':   ( None, ),
    'offCurInstrStart':  ( None, ),
    'cbOpcode':          ( None, ),
    'offOpcode':         ( None, ),
    'offModRm':          ( None, ),
    # Okay ones.
    'fPrefixes':         ( 'uint32_t', ),
    'uRexReg':           ( 'uint8_t', ),
    'uRexB':             ( 'uint8_t', ),
    'uRexIndex':         ( 'uint8_t', ),
    'iEffSeg':           ( 'uint8_t', ),
    'enmEffOpSize':      ( 'IEMMODE', ),
    'enmDefAddrMode':    ( 'IEMMODE', ),
    'enmEffAddrMode':    ( 'IEMMODE', ),
    'enmDefOpSize':      ( 'IEMMODE', ),
    'idxPrefix':         ( 'uint8_t', ),
    'uVex3rdReg':        ( 'uint8_t', ),
    'uVexLength':        ( 'uint8_t', ),
    'fEvexStuff':        ( 'uint8_t', ),
    'uFpuOpcode':        ( 'uint16_t', ),
};
131
## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
## String tags classifying how a conditional branch finishes.
## NOTE(review): the precise semantics of each tag are defined by the analysis
## code using them elsewhere in this file.
## @{
g_ksFinishAnnotation_Advance       = 'Advance';
g_ksFinishAnnotation_RelJmp        = 'RelJmp';
g_ksFinishAnnotation_SetJmp        = 'SetJmp';
g_ksFinishAnnotation_RelCall       = 'RelCall';
g_ksFinishAnnotation_IndCall       = 'IndCall';
g_ksFinishAnnotation_DeferToCImpl  = 'DeferToCImpl';
## @}
141
142
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        """
        Initializes the parameter reference.

        sOrgRef  - The name / reference as it appears in the original code.
        sType    - The (typically derived) type of the reference.
        oStmt    - The statement making the reference.
        iParam   - The statement parameter containing the reference, None if implicit.
        offParam - The offset of the reference within that parameter.
        sStdRef  - Optional custom normalized name; when given, fCustomRef is set
                   to indicate that sOrgRef may not match the parameter.
        """
        ## The name / reference in the original code.
        self.sOrgRef    = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        self.sStdRef    = sStdRef if sStdRef else ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef = sStdRef is not None;
        ## The type (typically derived).
        self.sType      = sType;
        ## The statement making the reference.
        self.oStmt      = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam     = iParam;
        ## The offset in the parameter of the reference.
        self.offParam   = offParam;

        ## The variable name in the threaded function.
        self.sNewName     = 'x';
        ## The threaded function parameter the reference is packed into.
        self.iNewParam    = 99;
        ## The bit offset in iNewParam.
        self.offNewParam  = 1024;   # Fixed: statement terminator added for consistency with the rest of the file.
170
171
172class ThreadedFunctionVariation(object):
173 """ Threaded function variation. """
174
    ## @name Variations.
    ## These variations will match translation block selection/distinctions as well.
    ## @{
    # pylint: disable=line-too-long
    ksVariation_Default          = '';                  ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
    ksVariation_16               = '_16';               ##< 16-bit mode code (386+).
    ksVariation_16f              = '_16f';              ##< 16-bit mode code (386+), check+clear eflags.
    ksVariation_16_Jmp           = '_16_Jmp';           ##< 16-bit mode code (386+), conditional jump taken.
    ksVariation_16f_Jmp          = '_16f_Jmp';          ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_16_NoJmp         = '_16_NoJmp';         ##< 16-bit mode code (386+), conditional jump not taken.
    ksVariation_16f_NoJmp        = '_16f_NoJmp';        ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_16_Addr32        = '_16_Addr32';        ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
    ksVariation_16f_Addr32       = '_16f_Addr32';       ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
    ksVariation_16_Pre386        = '_16_Pre386';        ##< 16-bit mode code, pre-386 CPU target.
    ksVariation_16f_Pre386       = '_16f_Pre386';       ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
    ksVariation_16_Pre386_Jmp    = '_16_Pre386_Jmp';    ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
    ksVariation_16f_Pre386_Jmp   = '_16f_Pre386_Jmp';   ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
    ksVariation_16_Pre386_NoJmp  = '_16_Pre386_NoJmp';  ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
    ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
    ksVariation_32               = '_32';               ##< 32-bit mode code (386+).
    ksVariation_32f              = '_32f';              ##< 32-bit mode code (386+), check+clear eflags.
    ksVariation_32_Jmp           = '_32_Jmp';           ##< 32-bit mode code (386+), conditional jump taken.
    ksVariation_32f_Jmp          = '_32f_Jmp';          ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_32_NoJmp         = '_32_NoJmp';         ##< 32-bit mode code (386+), conditional jump not taken.
    ksVariation_32f_NoJmp        = '_32f_NoJmp';        ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_32_Flat          = '_32_Flat';          ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
    ksVariation_32f_Flat         = '_32f_Flat';         ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
    ksVariation_32_Addr16        = '_32_Addr16';        ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    ksVariation_32f_Addr16       = '_32f_Addr16';       ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64               = '_64';               ##< 64-bit mode code.
    ksVariation_64f              = '_64f';              ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_Jmp           = '_64_Jmp';           ##< 64-bit mode code, conditional jump taken.
    ksVariation_64f_Jmp          = '_64f_Jmp';          ##< 64-bit mode code, check+clear eflags, conditional jump taken.
    ksVariation_64_NoJmp         = '_64_NoJmp';         ##< 64-bit mode code, conditional jump not taken.
    ksVariation_64f_NoJmp        = '_64f_NoJmp';        ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
    ksVariation_64_FsGs          = '_64_FsGs';          ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs         = '_64f_FsGs';         ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32        = '_64_Addr32';        ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32       = '_64f_Addr32';       ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
    # pylint: enable=line-too-long
215 kasVariations = (
216 ksVariation_Default,
217 ksVariation_16,
218 ksVariation_16f,
219 ksVariation_16_Jmp,
220 ksVariation_16f_Jmp,
221 ksVariation_16_NoJmp,
222 ksVariation_16f_NoJmp,
223 ksVariation_16_Addr32,
224 ksVariation_16f_Addr32,
225 ksVariation_16_Pre386,
226 ksVariation_16f_Pre386,
227 ksVariation_16_Pre386_Jmp,
228 ksVariation_16f_Pre386_Jmp,
229 ksVariation_16_Pre386_NoJmp,
230 ksVariation_16f_Pre386_NoJmp,
231 ksVariation_32,
232 ksVariation_32f,
233 ksVariation_32_Jmp,
234 ksVariation_32f_Jmp,
235 ksVariation_32_NoJmp,
236 ksVariation_32f_NoJmp,
237 ksVariation_32_Flat,
238 ksVariation_32f_Flat,
239 ksVariation_32_Addr16,
240 ksVariation_32f_Addr16,
241 ksVariation_64,
242 ksVariation_64f,
243 ksVariation_64_Jmp,
244 ksVariation_64f_Jmp,
245 ksVariation_64_NoJmp,
246 ksVariation_64f_NoJmp,
247 ksVariation_64_FsGs,
248 ksVariation_64f_FsGs,
249 ksVariation_64_Addr32,
250 ksVariation_64f_Addr32,
251 );
252 kasVariationsWithoutAddress = (
253 ksVariation_16,
254 ksVariation_16f,
255 ksVariation_16_Pre386,
256 ksVariation_16f_Pre386,
257 ksVariation_32,
258 ksVariation_32f,
259 ksVariation_64,
260 ksVariation_64f,
261 );
262 kasVariationsWithoutAddressNot286 = (
263 ksVariation_16,
264 ksVariation_16f,
265 ksVariation_32,
266 ksVariation_32f,
267 ksVariation_64,
268 ksVariation_64f,
269 );
270 kasVariationsWithoutAddressNot286Not64 = (
271 ksVariation_16,
272 ksVariation_16f,
273 ksVariation_32,
274 ksVariation_32f,
275 );
276 kasVariationsWithoutAddressNot64 = (
277 ksVariation_16,
278 ksVariation_16f,
279 ksVariation_16_Pre386,
280 ksVariation_16f_Pre386,
281 ksVariation_32,
282 ksVariation_32f,
283 );
284 kasVariationsWithoutAddressOnly64 = (
285 ksVariation_64,
286 ksVariation_64f,
287 );
288 kasVariationsWithAddress = (
289 ksVariation_16,
290 ksVariation_16f,
291 ksVariation_16_Addr32,
292 ksVariation_16f_Addr32,
293 ksVariation_16_Pre386,
294 ksVariation_16f_Pre386,
295 ksVariation_32,
296 ksVariation_32f,
297 ksVariation_32_Flat,
298 ksVariation_32f_Flat,
299 ksVariation_32_Addr16,
300 ksVariation_32f_Addr16,
301 ksVariation_64,
302 ksVariation_64f,
303 ksVariation_64_FsGs,
304 ksVariation_64f_FsGs,
305 ksVariation_64_Addr32,
306 ksVariation_64f_Addr32,
307 );
308 kasVariationsWithAddressNot286 = (
309 ksVariation_16,
310 ksVariation_16f,
311 ksVariation_16_Addr32,
312 ksVariation_16f_Addr32,
313 ksVariation_32,
314 ksVariation_32f,
315 ksVariation_32_Flat,
316 ksVariation_32f_Flat,
317 ksVariation_32_Addr16,
318 ksVariation_32f_Addr16,
319 ksVariation_64,
320 ksVariation_64f,
321 ksVariation_64_FsGs,
322 ksVariation_64f_FsGs,
323 ksVariation_64_Addr32,
324 ksVariation_64f_Addr32,
325 );
326 kasVariationsWithAddressNot286Not64 = (
327 ksVariation_16,
328 ksVariation_16f,
329 ksVariation_16_Addr32,
330 ksVariation_16f_Addr32,
331 ksVariation_32,
332 ksVariation_32f,
333 ksVariation_32_Flat,
334 ksVariation_32f_Flat,
335 ksVariation_32_Addr16,
336 ksVariation_32f_Addr16,
337 );
338 kasVariationsWithAddressNot64 = (
339 ksVariation_16,
340 ksVariation_16f,
341 ksVariation_16_Addr32,
342 ksVariation_16f_Addr32,
343 ksVariation_16_Pre386,
344 ksVariation_16f_Pre386,
345 ksVariation_32,
346 ksVariation_32f,
347 ksVariation_32_Flat,
348 ksVariation_32f_Flat,
349 ksVariation_32_Addr16,
350 ksVariation_32f_Addr16,
351 );
352 kasVariationsWithAddressOnly64 = (
353 ksVariation_64,
354 ksVariation_64f,
355 ksVariation_64_FsGs,
356 ksVariation_64f_FsGs,
357 ksVariation_64_Addr32,
358 ksVariation_64f_Addr32,
359 );
360 kasVariationsOnlyPre386 = (
361 ksVariation_16_Pre386,
362 ksVariation_16f_Pre386,
363 );
364 kasVariationsEmitOrder = (
365 ksVariation_Default,
366 ksVariation_64,
367 ksVariation_64f,
368 ksVariation_64_Jmp,
369 ksVariation_64f_Jmp,
370 ksVariation_64_NoJmp,
371 ksVariation_64f_NoJmp,
372 ksVariation_64_FsGs,
373 ksVariation_64f_FsGs,
374 ksVariation_32_Flat,
375 ksVariation_32f_Flat,
376 ksVariation_32,
377 ksVariation_32f,
378 ksVariation_32_Jmp,
379 ksVariation_32f_Jmp,
380 ksVariation_32_NoJmp,
381 ksVariation_32f_NoJmp,
382 ksVariation_16,
383 ksVariation_16f,
384 ksVariation_16_Jmp,
385 ksVariation_16f_Jmp,
386 ksVariation_16_NoJmp,
387 ksVariation_16f_NoJmp,
388 ksVariation_16_Addr32,
389 ksVariation_16f_Addr32,
390 ksVariation_16_Pre386,
391 ksVariation_16f_Pre386,
392 ksVariation_16_Pre386_Jmp,
393 ksVariation_16f_Pre386_Jmp,
394 ksVariation_16_Pre386_NoJmp,
395 ksVariation_16f_Pre386_NoJmp,
396 ksVariation_32_Addr16,
397 ksVariation_32f_Addr16,
398 ksVariation_64_Addr32,
399 ksVariation_64f_Addr32,
400 );
401 kdVariationNames = {
402 ksVariation_Default: 'defer-to-cimpl',
403 ksVariation_16: '16-bit',
404 ksVariation_16f: '16-bit w/ eflag checking and clearing',
405 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
406 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
407 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
408 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
409 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
410 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
411 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
412 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
413 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
414 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
415 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
416 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
417 ksVariation_32: '32-bit',
418 ksVariation_32f: '32-bit w/ eflag checking and clearing',
419 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
420 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
421 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
422 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
423 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
424 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
425 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
426 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
427 ksVariation_64: '64-bit',
428 ksVariation_64f: '64-bit w/ eflag checking and clearing',
429 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
430 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
431 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
432 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
433 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
434 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
435 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
436 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
437 };
438 kdVariationsWithEflagsCheckingAndClearing = {
439 ksVariation_16f: True,
440 ksVariation_16f_Jmp: True,
441 ksVariation_16f_NoJmp: True,
442 ksVariation_16f_Addr32: True,
443 ksVariation_16f_Pre386: True,
444 ksVariation_16f_Pre386_Jmp: True,
445 ksVariation_16f_Pre386_NoJmp: True,
446 ksVariation_32f: True,
447 ksVariation_32f_Jmp: True,
448 ksVariation_32f_NoJmp: True,
449 ksVariation_32f_Flat: True,
450 ksVariation_32f_Addr16: True,
451 ksVariation_64f: True,
452 ksVariation_64f_Jmp: True,
453 ksVariation_64f_NoJmp: True,
454 ksVariation_64f_FsGs: True,
455 ksVariation_64f_Addr32: True,
456 };
457 kdVariationsOnly64NoFlags = {
458 ksVariation_64: True,
459 ksVariation_64_Jmp: True,
460 ksVariation_64_NoJmp: True,
461 ksVariation_64_FsGs: True,
462 ksVariation_64_Addr32: True,
463 };
464 kdVariationsOnly64WithFlags = {
465 ksVariation_64f: True,
466 ksVariation_64f_Jmp: True,
467 ksVariation_64f_NoJmp: True,
468 ksVariation_64f_FsGs: True,
469 ksVariation_64f_Addr32: True,
470 };
471 kdVariationsOnlyPre386NoFlags = {
472 ksVariation_16_Pre386: True,
473 ksVariation_16_Pre386_Jmp: True,
474 ksVariation_16_Pre386_NoJmp: True,
475 };
476 kdVariationsOnlyPre386WithFlags = {
477 ksVariation_16f_Pre386: True,
478 ksVariation_16f_Pre386_Jmp: True,
479 ksVariation_16f_Pre386_NoJmp: True,
480 };
481 kdVariationsWithFlatAddress = {
482 ksVariation_32_Flat: True,
483 ksVariation_32f_Flat: True,
484 ksVariation_64: True,
485 ksVariation_64f: True,
486 ksVariation_64_Addr32: True,
487 ksVariation_64f_Addr32: True,
488 };
489 kdVariationsWithFlatStackAddress = {
490 ksVariation_32_Flat: True,
491 ksVariation_32f_Flat: True,
492 ksVariation_64: True,
493 ksVariation_64f: True,
494 ksVariation_64_FsGs: True,
495 ksVariation_64f_FsGs: True,
496 ksVariation_64_Addr32: True,
497 ksVariation_64f_Addr32: True,
498 };
499 kdVariationsWithFlat64StackAddress = {
500 ksVariation_64: True,
501 ksVariation_64f: True,
502 ksVariation_64_FsGs: True,
503 ksVariation_64f_FsGs: True,
504 ksVariation_64_Addr32: True,
505 ksVariation_64f_Addr32: True,
506 };
507 kdVariationsWithFlatAddr16 = {
508 ksVariation_16: True,
509 ksVariation_16f: True,
510 ksVariation_16_Pre386: True,
511 ksVariation_16f_Pre386: True,
512 ksVariation_32_Addr16: True,
513 ksVariation_32f_Addr16: True,
514 };
515 kdVariationsWithFlatAddr32No64 = {
516 ksVariation_16_Addr32: True,
517 ksVariation_16f_Addr32: True,
518 ksVariation_32: True,
519 ksVariation_32f: True,
520 ksVariation_32_Flat: True,
521 ksVariation_32f_Flat: True,
522 };
523 kdVariationsWithAddressOnly64 = {
524 ksVariation_64: True,
525 ksVariation_64f: True,
526 ksVariation_64_FsGs: True,
527 ksVariation_64f_FsGs: True,
528 ksVariation_64_Addr32: True,
529 ksVariation_64f_Addr32: True,
530 };
531 kdVariationsWithConditional = {
532 ksVariation_16_Jmp: True,
533 ksVariation_16_NoJmp: True,
534 ksVariation_16_Pre386_Jmp: True,
535 ksVariation_16_Pre386_NoJmp: True,
536 ksVariation_32_Jmp: True,
537 ksVariation_32_NoJmp: True,
538 ksVariation_64_Jmp: True,
539 ksVariation_64_NoJmp: True,
540 ksVariation_16f_Jmp: True,
541 ksVariation_16f_NoJmp: True,
542 ksVariation_16f_Pre386_Jmp: True,
543 ksVariation_16f_Pre386_NoJmp: True,
544 ksVariation_32f_Jmp: True,
545 ksVariation_32f_NoJmp: True,
546 ksVariation_64f_Jmp: True,
547 ksVariation_64f_NoJmp: True,
548 };
549 kdVariationsWithConditionalNoJmp = {
550 ksVariation_16_NoJmp: True,
551 ksVariation_16_Pre386_NoJmp: True,
552 ksVariation_32_NoJmp: True,
553 ksVariation_64_NoJmp: True,
554 ksVariation_16f_NoJmp: True,
555 ksVariation_16f_Pre386_NoJmp: True,
556 ksVariation_32f_NoJmp: True,
557 ksVariation_64f_NoJmp: True,
558 };
559 kdVariationsOnlyPre386 = {
560 ksVariation_16_Pre386: True,
561 ksVariation_16f_Pre386: True,
562 ksVariation_16_Pre386_Jmp: True,
563 ksVariation_16f_Pre386_Jmp: True,
564 ksVariation_16_Pre386_NoJmp: True,
565 ksVariation_16f_Pre386_NoJmp: True,
566 };
567 ## @}
568
569 ## IEM_CIMPL_F_XXX flags that we know.
570 ## The value indicates whether it terminates the TB or not. The goal is to
571 ## improve the recompiler so all but END_TB will be False.
572 ##
573 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
574 kdCImplFlags = {
575 'IEM_CIMPL_F_MODE': False,
576 'IEM_CIMPL_F_BRANCH_DIRECT': False,
577 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
578 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
579 'IEM_CIMPL_F_BRANCH_FAR': True,
580 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
581 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
582 'IEM_CIMPL_F_BRANCH_STACK': False,
583 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
584 'IEM_CIMPL_F_RFLAGS': False,
585 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
586 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
587 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
588 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
589 'IEM_CIMPL_F_STATUS_FLAGS': False,
590 'IEM_CIMPL_F_VMEXIT': False,
591 'IEM_CIMPL_F_FPU': False,
592 'IEM_CIMPL_F_REP': False,
593 'IEM_CIMPL_F_IO': False,
594 'IEM_CIMPL_F_END_TB': True,
595 'IEM_CIMPL_F_XCPT': True,
596 'IEM_CIMPL_F_CALLS_CIMPL': False,
597 'IEM_CIMPL_F_CALLS_AIMPL': False,
598 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
599 'IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE': False,
600 };
601
    def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
        """ Instantiates a variation of the given threaded function. """
        ## The ThreadedFunction this is a variation of.
        self.oParent = oThreadedFunction # type: ThreadedFunction
        ## The ksVariation_Xxxx constant identifying this variation.
        self.sVariation = sVariation

        ## Threaded function parameter references.
        self.aoParamRefs = [] # type: List[ThreadedParamRef]
        ## Unique parameter references.
        ## NOTE(review): presumably keyed by ThreadedParamRef.sStdRef - confirm in the analysis code.
        self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
        ## Minimum number of parameters to the threaded function.
        self.cMinParams = 0;

        ## List/tree of statements for the threaded function.
        self.aoStmtsForThreadedFunction = [] # type: List[McStmt]

        ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
        self.iEnumValue = -1;

        ## Native recompilation details for this variation.
        self.oNativeRecomp = None;
622
623 def getIndexName(self):
624 sName = self.oParent.oMcBlock.sFunction;
625 if sName.startswith('iemOp_'):
626 sName = sName[len('iemOp_'):];
627 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
628
629 def getThreadedFunctionName(self):
630 sName = self.oParent.oMcBlock.sFunction;
631 if sName.startswith('iemOp_'):
632 sName = sName[len('iemOp_'):];
633 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
634
635 def getNativeFunctionName(self):
636 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
637
638 def getLivenessFunctionName(self):
639 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
640
641 def getShortName(self):
642 sName = self.oParent.oMcBlock.sFunction;
643 if sName.startswith('iemOp_'):
644 sName = sName[len('iemOp_'):];
645 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
646
647 def getThreadedFunctionStatisticsName(self):
648 sName = self.oParent.oMcBlock.sFunction;
649 if sName.startswith('iemOp_'):
650 sName = sName[len('iemOp_'):];
651
652 sVarNm = self.sVariation;
653 if sVarNm:
654 if sVarNm.startswith('_'):
655 sVarNm = sVarNm[1:];
656 if sVarNm.endswith('_Jmp'):
657 sVarNm = sVarNm[:-4];
658 sName += '_Jmp';
659 elif sVarNm.endswith('_NoJmp'):
660 sVarNm = sVarNm[:-6];
661 sName += '_NoJmp';
662 else:
663 sVarNm = 'DeferToCImpl';
664
665 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
666
    def isWithFlagsCheckingAndClearingVariation(self):
        """
        Checks if this is a variation that checks and clears EFLAGS
        (i.e. one of the '..f' variations).
        """
        return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
672
673 #
674 # Analysis and code morphing.
675 #
676
    def raiseProblem(self, sMessage):
        """ Raises a problem.  Delegates to the parent ThreadedFunction. """
        self.oParent.raiseProblem(sMessage);
680
    def warning(self, sMessage):
        """ Emits a warning.  Delegates to the parent ThreadedFunction. """
        self.oParent.warning(sMessage);
684
685 def analyzeReferenceToType(self, sRef):
686 """
687 Translates a variable or structure reference to a type.
688 Returns type name.
689 Raises exception if unable to figure it out.
690 """
691 ch0 = sRef[0];
692 if ch0 == 'u':
693 if sRef.startswith('u32'):
694 return 'uint32_t';
695 if sRef.startswith('u8') or sRef == 'uReg':
696 return 'uint8_t';
697 if sRef.startswith('u64'):
698 return 'uint64_t';
699 if sRef.startswith('u16'):
700 return 'uint16_t';
701 elif ch0 == 'b':
702 return 'uint8_t';
703 elif ch0 == 'f':
704 return 'bool';
705 elif ch0 == 'i':
706 if sRef.startswith('i8'):
707 return 'int8_t';
708 if sRef.startswith('i16'):
709 return 'int16_t';
710 if sRef.startswith('i32'):
711 return 'int32_t';
712 if sRef.startswith('i64'):
713 return 'int64_t';
714 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
715 return 'uint8_t';
716 elif ch0 == 'p':
717 if sRef.find('-') < 0:
718 return 'uintptr_t';
719 if sRef.startswith('pVCpu->iem.s.'):
720 sField = sRef[len('pVCpu->iem.s.') : ];
721 if sField in g_kdIemFieldToType:
722 if g_kdIemFieldToType[sField][0]:
723 return g_kdIemFieldToType[sField][0];
724 elif ch0 == 'G' and sRef.startswith('GCPtr'):
725 return 'uint64_t';
726 elif ch0 == 'e':
727 if sRef == 'enmEffOpSize':
728 return 'IEMMODE';
729 elif ch0 == 'o':
730 if sRef.startswith('off32'):
731 return 'uint32_t';
732 elif sRef == 'cbFrame': # enter
733 return 'uint16_t';
734 elif sRef == 'cShift': ## @todo risky
735 return 'uint8_t';
736
737 self.raiseProblem('Unknown reference: %s' % (sRef,));
738 return None; # Shut up pylint 2.16.2.
739
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.
        Returns the C type name of the function pointer; raises a problem via
        raiseProblem if the reference cannot be resolved.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple? A plain name without any '-' (i.e. no '->' dereference).
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first;
            # the type name is the argument just before the matching name.
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable
            # declaration; a 'const IEMOPXXX *' match is rewritten to the 'PCIEMOPXXX' form.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx: resolve the type of pImpl recursively and
        # map the table type to the corresponding worker function pointer type.
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            # Position just after the last 'U' in the member name; the remainder
            # (e.g. '8', '16', ...) is appended to the function pointer type name.
            offBits   = sMember.rfind('U') + 1;
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPBINTODOSIZES':      return 'PFNIEMAIMPLBINTODOU'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF2':           return 'PFNIEMAIMPLMEDIAF2U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF2IMM8':       return 'PFNIEMAIMPLMEDIAF2U'    + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8':    return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
794
    def analyze8BitGRegStmt(self, oStmt):
        """
        Gets the 8-bit general purpose register access details of the given statement.
        ASSUMES the statement is one accessing an 8-bit GREG.

        Returns a (idxReg, sOrgExpr, sStdRef) tuple where idxReg is the statement
        parameter index holding the register reference, sOrgExpr a C expression
        producing the extended register index, and sStdRef a standardized
        reference name for it.
        """
        # For fetch/ref/to-local statements the register reference is the second
        # parameter rather than the first.
        idxReg = 0;
        if (   oStmt.sName.find('_FETCH_') > 0
            or oStmt.sName.find('_REF_') > 0
            or oStmt.sName.find('_TO_LOCAL') > 0):
            idxReg = 1;

        sRegRef = oStmt.asParams[idxReg];
        if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
            # Crude macro-invocation parse: split 'MACRO(a, b)' into its pieces.
            asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
            if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
                self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
            sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
        else:
            # Plain register index: AH..BH (4..7) map to index+12 unless a
            # REX/VEX prefix is present (then they are SPL..DIL).
            sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)) ? (%s) : (%s) + 12)' \
                     % (sRegRef, sRegRef, sRegRef,);

        if sRegRef.find('IEM_GET_MODRM_RM') >= 0:    sStdRef = 'bRmRm8Ex';
        elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
        elif sRegRef == 'X86_GREG_xAX':              sStdRef = 'bGregXAx8Ex';
        elif sRegRef == 'X86_GREG_xCX':              sStdRef = 'bGregXCx8Ex';
        elif sRegRef == 'X86_GREG_xSP':              sStdRef = 'bGregXSp8Ex';
        elif sRegRef == 'iFixedReg':                 sStdRef = 'bFixedReg8Ex';
        else:
            self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
                         % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
            sStdRef = 'bOther8Ex';

        #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
        #      % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
        return (idxReg, sOrgExpr, sStdRef);
830
831
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ## Each value is (idxEffSegParam, sFlatMcName), where idxEffSegParam is the
    ## zero-based index of the effective segment parameter that gets dropped
    ## when morphing to the FLAT form; -1 means the MC has no such parameter
    ## (checked in analyzeFindThreadedParamRefs and analyzeMorphStmtForThreaded).
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':                      ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':                      ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':                  ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':                  ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':                  ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':            ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':                      ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':            ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':             ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':                   ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':                   ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':                      ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':            ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':                       ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':             ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':        ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':      ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':      ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
                                                      ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
                                                      ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8':                        ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':                       ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':                       ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':                       ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':                  ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':                 ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':                 ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':                 ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':                      ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_NO_AC':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':            ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':                      ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_NO_AC':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':            ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP_D80_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
        'IEM_MC_MEM_MAP_I16_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
        'IEM_MC_MEM_MAP_I32_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
        'IEM_MC_MEM_MAP_I64_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
        'IEM_MC_MEM_MAP_R32_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
        'IEM_MC_MEM_MAP_R64_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
        'IEM_MC_MEM_MAP_R80_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
        'IEM_MC_MEM_MAP_U8_ATOMIC':                   ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
        'IEM_MC_MEM_MAP_U8_RW':                       ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':                       ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':                       ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_ATOMIC':                  ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
        'IEM_MC_MEM_MAP_U16_RW':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_ATOMIC':                  ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
        'IEM_MC_MEM_MAP_U32_RW':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_ATOMIC':                  ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
        'IEM_MC_MEM_MAP_U64_RW':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_U128_ATOMIC':                 ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
        'IEM_MC_MEM_MAP_U128_RW':                     ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
        'IEM_MC_MEM_MAP_U128_RO':                     ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
        'IEM_MC_MEM_MAP_U128_WO':                     ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
        'IEM_MC_MEM_MAP_EX':                          ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
933
    ## Maps stack related MCs to their FLAT variants.  Each value is a pair
    ## indexed by whether the variation uses a flat 64-bit stack address:
    ## [0] = flat 32-bit form, [1] = flat 64-bit form (see the lookup in
    ## analyzeMorphStmtForThreaded).  Entries mapping back to the key itself
    ## have no dedicated flat form for that combination - presumably because
    ## the generic MC is already flat there; verify against IEMMc.h.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':                  ( 'IEM_MC_FLAT32_PUSH_U16',         'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':                  ( 'IEM_MC_FLAT32_PUSH_U32',         'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':                  ( 'IEM_MC_PUSH_U64',                'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':             ( 'IEM_MC_FLAT32_PUSH_U32_SREG',    'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16':              ( 'IEM_MC_FLAT32_POP_GREG_U16',     'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32':              ( 'IEM_MC_FLAT32_POP_GREG_U32',     'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64':              ( 'IEM_MC_POP_GREG_U64',            'IEM_MC_FLAT64_POP_GREG_U64', ),
    };
943
    ## Maps each code variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_* MC
    ## that replaces IEM_MC_CALC_RM_EFF_ADDR for it (used by
    ## analyzeMorphStmtForThreaded); keyed by the effective address size of
    ## the variation rather than its operand size.
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
964
    def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Walks aoStmts in parallel with self.aoParamRefs - the traversal must
        match analyzeFindThreadedParamRefs exactly - substituting the packed
        parameter names and morphing the MCs to their _THREADED / _FLAT / _EX
        forms as appropriate for self.sVariation.

        dState carries state across recursive invocations (currently only the
        'IEM_MC_ASSERT_EFLAGS' insertion level) and iLevel is the conditional
        nesting depth of this invocation.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that is purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so earlier offsets in the same parameter string stay valid.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (    oNewStmt.sName
                       in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                           'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                           'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH',
                           'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH', 'IEM_MC_REL_CALL_S64_AND_FINISH',
                           'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH', 'IEM_MC_IND_CALL_U64_AND_FINISH',
                           'IEM_MC_RETN_AND_FINISH',)):
                    # SET_RIP gets an absolute target, so no cbInstr needed there.
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
                        and self.sVariation not in self.kdVariationsOnlyPre386):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    if self.sVariation in self.kdVariationsOnly64NoFlags:
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                    # This is making the wrong branch of conditionals break out of the TB.
                    if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
                        sExitTbStatus = 'VINF_SUCCESS';
                        if self.sVariation in self.kdVariationsWithConditional:
                            if self.sVariation in self.kdVariationsWithConditionalNoJmp:
                                if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                    sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                            elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                        oNewStmt.asParams.append(sExitTbStatus);

                    # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
                    if 'IEM_MC_ASSERT_EFLAGS' in dState:
                        aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
                                               iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
                        del dState['IEM_MC_ASSERT_EFLAGS'];

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    # The prepended cbInstr parameter shifts the function and parameter indexes by one.
                    oNewStmt.idxFn += 1;
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatStackAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
                                                                                     self.kdVariationsWithFlat64StackAddress)];

                # Add EFLAGS usage annotations to relevant MCs.
                elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS',
                                        'IEM_MC_FETCH_EFLAGS'):
                    oInstruction = self.oParent.oMcBlock.oInstruction;
                    oNewStmt.sName += '_EX';
                    oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle());   # Shall crash and burn if oInstruction is
                    oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.

                    # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
                    if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
                        dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;

                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
                                                                                        iParamRef, iLevel + 1);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
                                                                                              dState, iParamRef, iLevel + 1);

        # Insert an MC so we can assert the correctness of modified flags annotations
        # on IEM_MC_REF_EFLAGS if it goes out of scope.
        if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
            aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
            del dState['IEM_MC_ASSERT_EFLAGS'];

        return (aoThreadedStmts, iParamRef);
1142
1143
1144 def analyzeConsolidateThreadedParamRefs(self):
1145 """
1146 Consolidate threaded function parameter references into a dictionary
1147 with lists of the references to each variable/field.
1148 """
1149 # Gather unique parameters.
1150 self.dParamRefs = {};
1151 for oRef in self.aoParamRefs:
1152 if oRef.sStdRef not in self.dParamRefs:
1153 self.dParamRefs[oRef.sStdRef] = [oRef,];
1154 else:
1155 self.dParamRefs[oRef.sStdRef].append(oRef);
1156
1157 # Generate names for them for use in the threaded function.
1158 dParamNames = {};
1159 for sName, aoRefs in self.dParamRefs.items():
1160 # Morph the reference expression into a name.
1161 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1162 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1163 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1164 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1165 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1166 elif sName.startswith('IEM_GET_IMM8_REG'): sName = 'bImm8Reg';
1167 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1168 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1169 else:
1170 sName += 'P';
1171
1172 # Ensure it's unique.
1173 if sName in dParamNames:
1174 for i in range(10):
1175 if sName + str(i) not in dParamNames:
1176 sName += str(i);
1177 break;
1178 dParamNames[sName] = True;
1179
1180 # Update all the references.
1181 for oRef in aoRefs:
1182 oRef.sNewName = sName;
1183
1184 # Organize them by size too for the purpose of optimize them.
1185 dBySize = {} # type: Dict[str, str]
1186 for sStdRef, aoRefs in self.dParamRefs.items():
1187 if aoRefs[0].sType[0] != 'P':
1188 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1189 assert(cBits <= 64);
1190 else:
1191 cBits = 64;
1192
1193 if cBits not in dBySize:
1194 dBySize[cBits] = [sStdRef,]
1195 else:
1196 dBySize[cBits].append(sStdRef);
1197
1198 # Pack the parameters as best as we can, starting with the largest ones
1199 # and ASSUMING a 64-bit parameter size.
1200 self.cMinParams = 0;
1201 offNewParam = 0;
1202 for cBits in sorted(dBySize.keys(), reverse = True):
1203 for sStdRef in dBySize[cBits]:
1204 if offNewParam == 0 or offNewParam + cBits > 64:
1205 self.cMinParams += 1;
1206 offNewParam = cBits;
1207 else:
1208 offNewParam += cBits;
1209 assert(offNewParam <= 64);
1210
1211 for oRef in self.dParamRefs[sStdRef]:
1212 oRef.iNewParam = self.cMinParams - 1;
1213 oRef.offNewParam = offNewParam - cBits;
1214
1215 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
1216 if self.cMinParams >= 4:
1217 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1218 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1219
1220 return True;
1221
    ## Characters that make up a C hexadecimal literal (both letter cases);
    ## used by analyzeFindThreadedParamRefs when skipping over 0x numbers.
    ksHexDigits = '0123456789abcdefABCDEF';
1223
    def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
        """
        Scans the statements for things that have to be passed on to the
        threaded function (populates self.aoParamRefs).

        Adds implicit references (cbInstr, effective operand size, effective
        address bits) for statements that need them and then lexes each
        parameter expression for variables/fields related to instruction
        decoding.  Recurses into conditional branches.
        """
        for oStmt in aoStmts:
            # Some statements we can skip altogether.
            if isinstance(oStmt, iai.McCppPreProc):
                continue;
            if oStmt.isCppStmt() and oStmt.fDecode:
                continue;
            if oStmt.sName in ('IEM_MC_BEGIN',):
                continue;

            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sValue is None:
                    continue;
                # For initialized locals only the value expression (param #2) is
                # inspected; the other parameters are declaration artifacts.
                aiSkipParams = { 0: True, 1: True, 3: True };
            else:
                aiSkipParams = {};

            # Several statements have implicit parameters and some have different parameters.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                               'IEM_MC_REL_JMP_S32_AND_FINISH',
                               'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH',
                               'IEM_MC_REL_CALL_S64_AND_FINISH',
                               'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH',
                               'IEM_MC_IND_CALL_U64_AND_FINISH',
                               'IEM_MC_RETN_AND_FINISH',
                               'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3',
                               'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
                               'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
                               'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
                self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));

            if (    oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
                and self.sVariation not in self.kdVariationsOnlyPre386):
                self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));

            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                # This is being pretty presumptive about bRm always being the RM byte...
                assert len(oStmt.asParams) == 3;
                assert oStmt.asParams[1] == 'bRm';

                if self.sVariation in self.kdVariationsWithFlatAddr16:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
                                                             'uint16_t', oStmt, sStdRef = 'u16Disp'));
                elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
                    assert self.sVariation in self.kdVariationsWithAddressOnly64;
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
                                                             'uint4_t', oStmt, sStdRef = 'cbInstr'));
                    aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.

            # 8-bit register accesses needs to have their index argument reworked to take REX into account.
            if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
                (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in self.kdVariationsWithFlatAddress
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
            # function pointer or function table pointer for it to work.
            if isinstance(oStmt, iai.McStmtCall):
                if oStmt.sFn[0] == 'p':
                    self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
                elif (    oStmt.sFn[0] != 'i'
                      and not oStmt.sFn.startswith('RT_CONCAT3')
                      and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
                      and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
                    self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
                aiSkipParams[oStmt.idxFn] = True;

            # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
            if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                assert oStmt.idxFn == 2;
                aiSkipParams[0] = True;

            # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
            if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
                aiSkipParams[0] = True;


            # Check all the parameters for bogus references.
            for iParam, sParam in enumerate(oStmt.asParams):
                if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
                    # The parameter may contain a C expression, so we have to try
                    # extract the relevant bits, i.e. variables and fields while
                    # ignoring operators and parentheses.
                    offParam = 0;
                    while offParam < len(sParam):
                        # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
                        ch = sParam[offParam];
                        if ch.isalpha() or ch == '_':
                            offStart = offParam;
                            offParam += 1;
                            while offParam < len(sParam):
                                ch = sParam[offParam];
                                if not ch.isalnum() and ch != '_' and ch != '.':
                                    if ch != '-' or sParam[offParam + 1] != '>':
                                        # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
                                        if (    ch == '('
                                            and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
                                            offParam += len('(pVM)->') - 1;
                                        else:
                                            break;
                                    offParam += 1;
                                offParam += 1;
                            sRef = sParam[offStart : offParam];

                            # For register references, we pass the full register indexes instead as macros
                            # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
                            # threaded function will be more efficient if we just pass the register index
                            # as a 4-bit param.
                            if (   sRef.startswith('IEM_GET_MODRM')
                                or sRef.startswith('IEM_GET_EFFECTIVE_VVVV')
                                or sRef.startswith('IEM_GET_IMM8_REG') ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;
                                self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
                                                                         oStmt, iParam, offStart));

                            # We can skip known variables.
                            elif sRef in self.oParent.dVariables:
                                pass;

                            # Skip certain macro invocations.
                            elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
                                          'IEM_GET_GUEST_CPU_FEATURES',
                                          'IEM_IS_GUEST_CPU_AMD',
                                          'IEM_IS_16BIT_CODE',
                                          'IEM_IS_32BIT_CODE',
                                          'IEM_IS_64BIT_CODE',
                                          ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;

                                # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
                                if sRef not in ('IEM_IS_GUEST_CPU_AMD',
                                                'IEM_IS_16BIT_CODE',
                                                'IEM_IS_32BIT_CODE',
                                                'IEM_IS_64BIT_CODE',
                                                ):
                                    offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                    if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
                                        offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
                                        while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
                                            offParam += 1;

                            # Skip constants, globals, types (casts), sizeof and macros.
                            elif (   sRef.startswith('IEM_OP_PRF_')
                                  or sRef.startswith('IEM_ACCESS_')
                                  or sRef.startswith('IEMINT_')
                                  or sRef.startswith('X86_GREG_')
                                  or sRef.startswith('X86_SREG_')
                                  or sRef.startswith('X86_EFL_')
                                  or sRef.startswith('X86_FSW_')
                                  or sRef.startswith('X86_FCW_')
                                  or sRef.startswith('X86_XCPT_')
                                  or sRef.startswith('IEMMODE_')
                                  or sRef.startswith('IEM_F_')
                                  or sRef.startswith('IEM_CIMPL_F_')
                                  or sRef.startswith('g_')
                                  or sRef.startswith('iemAImpl_')
                                  or sRef.startswith('kIemNativeGstReg_')
                                  or sRef.startswith('RT_ARCH_VAL_')
                                  or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
                                               'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
                                               'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
                                               'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
                                               'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
                                               'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
                                               'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
                                               'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
                                               'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
                                               'NIL_RTGCPTR',) ):
                                pass;

                            # Anything else that is a plain variable (no field separator, not
                            # pVCpu itself) or a decoder field in IEMCPU needs to be parameterized.
                            elif (   (    '.' not in sRef
                                      and '-' not in sRef
                                      and sRef not in ('pVCpu', ) )
                                  or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
                                self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
                                                                         oStmt, iParam, offStart));
                        # Number.
                        elif ch.isdigit():
                            if (    ch == '0'
                                and offParam + 2 <= len(sParam)
                                and sParam[offParam + 1] in 'xX'
                                and sParam[offParam + 2] in self.ksHexDigits ):
                                offParam += 2;
                                while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
                                    offParam += 1;
                            else:
                                while offParam < len(sParam) and sParam[offParam].isdigit():
                                    offParam += 1;
                        # Comment?
                        elif (    ch == '/'
                              and offParam + 4 <= len(sParam)
                              and sParam[offParam + 1] == '*'):
                            offParam += 2;
                            offNext = sParam.find('*/', offParam);
                            if offNext < offParam:
                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
                            offParam = offNext + 2;
                        # Whatever else.
                        else:
                            offParam += 1;

            # Traverse the branches of conditionals.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
                self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
        return True;
1467
1468 def analyzeVariation(self, aoStmts):
1469 """
1470 2nd part of the analysis, done on each variation.
1471
1472 The variations may differ in parameter requirements and will end up with
1473 slightly different MC sequences. Thus this is done on each individually.
1474
1475 Returns dummy True - raises exception on trouble.
1476 """
1477 # Now scan the code for variables and field references that needs to
1478 # be passed to the threaded function because they are related to the
1479 # instruction decoding.
1480 self.analyzeFindThreadedParamRefs(aoStmts);
1481 self.analyzeConsolidateThreadedParamRefs();
1482
1483 # Morph the statement stream for the block into what we'll be using in the threaded function.
1484 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1485 if iParamRef != len(self.aoParamRefs):
1486 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1487
1488 return True;
1489
    def emitThreadedCallStmtsForVariant(self, cchIndent, fTbLookupTable = False, sCallVarNm = None):
        """
        Produces generic C++ statements that emit a call to the threaded function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm is the name of the variable with the threaded function
        to call.  This is for the case where all the variations have the same
        parameters and only the threaded function number differs.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.

        Returns a list of iai.McCpp* statement objects (the C++ code to emit).
        """
        # Open the emit scope; the '1'/'0' argument tells the macro whether an
        # IRQ check is needed before the instruction.
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            # Each 64-bit threaded parameter may pack several source references;
            # OR together all fragments assigned to this parameter slot.
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        # Shift into the bit position assigned within the packed parameter.
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        # Plain call, or the TB-lookup-table flavour (extra leading argument:
        # '0' for a small table, '1' for a large one).
        if fTbLookupTable is False:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,),
                                         asCallArgs, cchIndent = cchIndent));
        else:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_%s' % (len(asCallArgs) - 1,),
                                         ['0' if fTbLookupTable is True else '1',] + asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        # Close the emit scope, passing along the accumulated CIMPL flags ('0' when none).
        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags      = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            # kdCImplFlags maps each flag to True when it terminates the TB.
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        # Conditional-jump variations with a no-jump path skip the branched call here.
        if (   asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        # Force an IRQ check right after this instruction when requested.
        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1573
1574
1575class ThreadedFunction(object):
1576 """
1577 A threaded function.
1578 """
1579
1580 def __init__(self, oMcBlock: iai.McBlock) -> None:
1581 self.oMcBlock = oMcBlock # type: iai.McBlock
1582 # The remaining fields are only useful after analyze() has been called:
1583 ## Variations for this block. There is at least one.
1584 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1585 ## Variation dictionary containing the same as aoVariations.
1586 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1587 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1588 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1589 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1590 ## and those determined by analyzeCodeOperation().
1591 self.dsCImplFlags = {} # type: Dict[str, bool]
1592 ## The unique sub-name for this threaded function.
1593 self.sSubName = '';
1594 #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
1595 # self.sSubName = '_%s' % (oMcBlock.iInFunction);
1596
1597 @staticmethod
1598 def dummyInstance():
1599 """ Gets a dummy instance. """
1600 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1601 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1602
1603 def hasWithFlagsCheckingAndClearingVariation(self):
1604 """
1605 Check if there is one or more with flags checking and clearing
1606 variations for this threaded function.
1607 """
1608 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1609 if sVarWithFlags in self.dVariations:
1610 return True;
1611 return False;
1612
1613 #
1614 # Analysis and code morphing.
1615 #
1616
1617 def raiseProblem(self, sMessage):
1618 """ Raises a problem. """
1619 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1620
1621 def error(self, sMessage, oGenerator):
1622 """ Emits an error via the generator object, causing it to fail. """
1623 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1624
1625 def warning(self, sMessage):
1626 """ Emits a warning. """
1627 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1628
    ## Used by analyzeAndAnnotateName for memory MC blocks.
    ## Maps MC statement name -> sub-name fragment.  analyzeAndAnnotateName
    ## picks sorted(hits)[-1], so lexical ordering of the keys doubles as the
    ## priority order (STORE > MEM_MAP > FETCH).
    kdAnnotateNameMemStmts = {
        'IEM_MC_FETCH_MEM16_U8':                    '__mem8',
        'IEM_MC_FETCH_MEM32_U8':                    '__mem8',
        'IEM_MC_FETCH_MEM_D80':                     '__mem80',
        'IEM_MC_FETCH_MEM_I16':                     '__mem16',
        'IEM_MC_FETCH_MEM_I32':                     '__mem32',
        'IEM_MC_FETCH_MEM_I64':                     '__mem64',
        'IEM_MC_FETCH_MEM_R32':                     '__mem32',
        'IEM_MC_FETCH_MEM_R64':                     '__mem64',
        'IEM_MC_FETCH_MEM_R80':                     '__mem80',
        'IEM_MC_FETCH_MEM_U128':                    '__mem128',
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':          '__mem128',
        'IEM_MC_FETCH_MEM_U128_NO_AC':              '__mem128',
        'IEM_MC_FETCH_MEM_U16':                     '__mem16',
        'IEM_MC_FETCH_MEM_U16_DISP':                '__mem16',
        'IEM_MC_FETCH_MEM_U16_SX_U32':              '__mem16sx32',
        'IEM_MC_FETCH_MEM_U16_SX_U64':              '__mem16sx64',
        'IEM_MC_FETCH_MEM_U16_ZX_U32':              '__mem16zx32',
        'IEM_MC_FETCH_MEM_U16_ZX_U64':              '__mem16zx64',
        'IEM_MC_FETCH_MEM_U256':                    '__mem256',
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':          '__mem256',
        'IEM_MC_FETCH_MEM_U256_NO_AC':              '__mem256',
        'IEM_MC_FETCH_MEM_U32':                     '__mem32',
        'IEM_MC_FETCH_MEM_U32_DISP':                '__mem32',
        'IEM_MC_FETCH_MEM_U32_SX_U64':              '__mem32sx64',
        'IEM_MC_FETCH_MEM_U32_ZX_U64':              '__mem32zx64',
        'IEM_MC_FETCH_MEM_U64':                     '__mem64',
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':          '__mem64',
        'IEM_MC_FETCH_MEM_U64_DISP':                '__mem64',
        'IEM_MC_FETCH_MEM_U8':                      '__mem8',
        'IEM_MC_FETCH_MEM_U8_DISP':                 '__mem8',
        'IEM_MC_FETCH_MEM_U8_SX_U16':               '__mem8sx16',
        'IEM_MC_FETCH_MEM_U8_SX_U32':               '__mem8sx32',
        'IEM_MC_FETCH_MEM_U8_SX_U64':               '__mem8sx64',
        'IEM_MC_FETCH_MEM_U8_ZX_U16':               '__mem8zx16',
        'IEM_MC_FETCH_MEM_U8_ZX_U32':               '__mem8zx32',
        'IEM_MC_FETCH_MEM_U8_ZX_U64':               '__mem8zx64',
        'IEM_MC_FETCH_MEM_XMM':                     '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':           '__mem128',
        'IEM_MC_FETCH_MEM_XMM_NO_AC':               '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32':                 '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64':                 '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':      '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':    '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':    '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',

        'IEM_MC_STORE_MEM_I16_CONST_BY_REF':        '__mem16',
        'IEM_MC_STORE_MEM_I32_CONST_BY_REF':        '__mem32',
        'IEM_MC_STORE_MEM_I64_CONST_BY_REF':        '__mem64',
        'IEM_MC_STORE_MEM_I8_CONST_BY_REF':         '__mem8',
        'IEM_MC_STORE_MEM_INDEF_D80_BY_REF':        '__mem80',
        'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF':     '__mem32',
        'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF':     '__mem64',
        'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF':     '__mem80',
        'IEM_MC_STORE_MEM_U128':                    '__mem128',
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':          '__mem128',
        'IEM_MC_STORE_MEM_U128_NO_AC':              '__mem128',
        'IEM_MC_STORE_MEM_U16':                     '__mem16',
        'IEM_MC_STORE_MEM_U16_CONST':               '__mem16c',
        'IEM_MC_STORE_MEM_U256':                    '__mem256',
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':          '__mem256',
        'IEM_MC_STORE_MEM_U256_NO_AC':              '__mem256',
        'IEM_MC_STORE_MEM_U32':                     '__mem32',
        'IEM_MC_STORE_MEM_U32_CONST':               '__mem32c',
        'IEM_MC_STORE_MEM_U64':                     '__mem64',
        'IEM_MC_STORE_MEM_U64_CONST':               '__mem64c',
        'IEM_MC_STORE_MEM_U8':                      '__mem8',
        'IEM_MC_STORE_MEM_U8_CONST':                '__mem8c',

        'IEM_MC_MEM_MAP_D80_WO':                    '__mem80',
        'IEM_MC_MEM_MAP_I16_WO':                    '__mem16',
        'IEM_MC_MEM_MAP_I32_WO':                    '__mem32',
        'IEM_MC_MEM_MAP_I64_WO':                    '__mem64',
        'IEM_MC_MEM_MAP_R32_WO':                    '__mem32',
        'IEM_MC_MEM_MAP_R64_WO':                    '__mem64',
        'IEM_MC_MEM_MAP_R80_WO':                    '__mem80',
        'IEM_MC_MEM_MAP_U128_ATOMIC':               '__mem128a',
        'IEM_MC_MEM_MAP_U128_RO':                   '__mem128',
        'IEM_MC_MEM_MAP_U128_RW':                   '__mem128',
        'IEM_MC_MEM_MAP_U128_WO':                   '__mem128',
        'IEM_MC_MEM_MAP_U16_ATOMIC':                '__mem16a',
        'IEM_MC_MEM_MAP_U16_RO':                    '__mem16',
        'IEM_MC_MEM_MAP_U16_RW':                    '__mem16',
        'IEM_MC_MEM_MAP_U16_WO':                    '__mem16',
        'IEM_MC_MEM_MAP_U32_ATOMIC':                '__mem32a',
        'IEM_MC_MEM_MAP_U32_RO':                    '__mem32',
        'IEM_MC_MEM_MAP_U32_RW':                    '__mem32',
        'IEM_MC_MEM_MAP_U32_WO':                    '__mem32',
        'IEM_MC_MEM_MAP_U64_ATOMIC':                '__mem64a',
        'IEM_MC_MEM_MAP_U64_RO':                    '__mem64',
        'IEM_MC_MEM_MAP_U64_RW':                    '__mem64',
        'IEM_MC_MEM_MAP_U64_WO':                    '__mem64',
        'IEM_MC_MEM_MAP_U8_ATOMIC':                 '__mem8a',
        'IEM_MC_MEM_MAP_U8_RO':                     '__mem8',
        'IEM_MC_MEM_MAP_U8_RW':                     '__mem8',
        'IEM_MC_MEM_MAP_U8_WO':                     '__mem8',
    };
    ## Used by analyzeAndAnnotateName for non-memory MC blocks.
    ## Same sorted(hits)[-1] convention as kdAnnotateNameMemStmts above.
    kdAnnotateNameRegStmts = {
        'IEM_MC_FETCH_GREG_U8':                     '__greg8',
        'IEM_MC_FETCH_GREG_U8_ZX_U16':              '__greg8zx16',
        'IEM_MC_FETCH_GREG_U8_ZX_U32':              '__greg8zx32',
        'IEM_MC_FETCH_GREG_U8_ZX_U64':              '__greg8zx64',
        'IEM_MC_FETCH_GREG_U8_SX_U16':              '__greg8sx16',
        'IEM_MC_FETCH_GREG_U8_SX_U32':              '__greg8sx32',
        'IEM_MC_FETCH_GREG_U8_SX_U64':              '__greg8sx64',
        'IEM_MC_FETCH_GREG_U16':                    '__greg16',
        'IEM_MC_FETCH_GREG_U16_ZX_U32':             '__greg16zx32',
        'IEM_MC_FETCH_GREG_U16_ZX_U64':             '__greg16zx64',
        'IEM_MC_FETCH_GREG_U16_SX_U32':             '__greg16sx32',
        'IEM_MC_FETCH_GREG_U16_SX_U64':             '__greg16sx64',
        'IEM_MC_FETCH_GREG_U32':                    '__greg32',
        'IEM_MC_FETCH_GREG_U32_ZX_U64':             '__greg32zx64',
        'IEM_MC_FETCH_GREG_U32_SX_U64':             '__greg32sx64',
        'IEM_MC_FETCH_GREG_U64':                    '__greg64',
        'IEM_MC_FETCH_GREG_U64_ZX_U64':             '__greg64zx64',
        'IEM_MC_FETCH_GREG_PAIR_U32':               '__greg32',
        'IEM_MC_FETCH_GREG_PAIR_U64':               '__greg64',

        'IEM_MC_STORE_GREG_U8':                     '__greg8',
        'IEM_MC_STORE_GREG_U16':                    '__greg16',
        'IEM_MC_STORE_GREG_U32':                    '__greg32',
        'IEM_MC_STORE_GREG_U64':                    '__greg64',
        'IEM_MC_STORE_GREG_I64':                    '__greg64',
        'IEM_MC_STORE_GREG_U8_CONST':               '__greg8c',
        'IEM_MC_STORE_GREG_U16_CONST':              '__greg16c',
        'IEM_MC_STORE_GREG_U32_CONST':              '__greg32c',
        'IEM_MC_STORE_GREG_U64_CONST':              '__greg64c',
        'IEM_MC_STORE_GREG_PAIR_U32':               '__greg32',
        'IEM_MC_STORE_GREG_PAIR_U64':               '__greg64',

        'IEM_MC_FETCH_SREG_U16':                    '__sreg16',
        'IEM_MC_FETCH_SREG_ZX_U32':                 '__sreg32',
        'IEM_MC_FETCH_SREG_ZX_U64':                 '__sreg64',
        'IEM_MC_FETCH_SREG_BASE_U64':               '__sbase64',
        'IEM_MC_FETCH_SREG_BASE_U32':               '__sbase32',
        'IEM_MC_STORE_SREG_BASE_U64':               '__sbase64',
        'IEM_MC_STORE_SREG_BASE_U32':               '__sbase32',

        'IEM_MC_REF_GREG_U8':                       '__greg8',
        'IEM_MC_REF_GREG_U16':                      '__greg16',
        'IEM_MC_REF_GREG_U32':                      '__greg32',
        'IEM_MC_REF_GREG_U64':                      '__greg64',
        'IEM_MC_REF_GREG_U8_CONST':                 '__greg8',
        'IEM_MC_REF_GREG_U16_CONST':                '__greg16',
        'IEM_MC_REF_GREG_U32_CONST':                '__greg32',
        'IEM_MC_REF_GREG_U64_CONST':                '__greg64',
        'IEM_MC_REF_GREG_I32':                      '__greg32',
        'IEM_MC_REF_GREG_I64':                      '__greg64',
        'IEM_MC_REF_GREG_I32_CONST':                '__greg32',
        'IEM_MC_REF_GREG_I64_CONST':                '__greg64',

        'IEM_MC_STORE_FPUREG_R80_SRC_REF':          '__fpu',
        'IEM_MC_REF_FPUREG':                        '__fpu',

        'IEM_MC_FETCH_MREG_U64':                    '__mreg64',
        'IEM_MC_FETCH_MREG_U32':                    '__mreg32',
        'IEM_MC_FETCH_MREG_U16':                    '__mreg16',
        'IEM_MC_FETCH_MREG_U8':                     '__mreg8',
        'IEM_MC_STORE_MREG_U64':                    '__mreg64',
        'IEM_MC_STORE_MREG_U32':                    '__mreg32',
        'IEM_MC_STORE_MREG_U16':                    '__mreg16',
        'IEM_MC_STORE_MREG_U8':                     '__mreg8',
        'IEM_MC_STORE_MREG_U32_ZX_U64':             '__mreg32zx64',
        'IEM_MC_REF_MREG_U64':                      '__mreg64',
        'IEM_MC_REF_MREG_U64_CONST':                '__mreg64',
        'IEM_MC_REF_MREG_U32_CONST':                '__mreg32',

        'IEM_MC_CLEAR_XREG_U32_MASK':               '__xreg32x4',
        'IEM_MC_FETCH_XREG_U128':                   '__xreg128',
        'IEM_MC_FETCH_XREG_XMM':                    '__xreg128',
        'IEM_MC_FETCH_XREG_U64':                    '__xreg64',
        'IEM_MC_FETCH_XREG_U32':                    '__xreg32',
        'IEM_MC_FETCH_XREG_U16':                    '__xreg16',
        'IEM_MC_FETCH_XREG_U8':                     '__xreg8',
        'IEM_MC_FETCH_XREG_PAIR_U128':              '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_XMM':               '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',

        'IEM_MC_STORE_XREG_U32_U128':               '__xreg32',
        'IEM_MC_STORE_XREG_U128':                   '__xreg128',
        'IEM_MC_STORE_XREG_XMM':                    '__xreg128',
        'IEM_MC_STORE_XREG_XMM_U32':                '__xreg32',
        'IEM_MC_STORE_XREG_XMM_U64':                '__xreg64',
        'IEM_MC_STORE_XREG_U64':                    '__xreg64',
        'IEM_MC_STORE_XREG_U64_ZX_U128':            '__xreg64zx128',
        'IEM_MC_STORE_XREG_U32':                    '__xreg32',
        'IEM_MC_STORE_XREG_U16':                    '__xreg16',
        'IEM_MC_STORE_XREG_U8':                     '__xreg8',
        'IEM_MC_STORE_XREG_U32_ZX_U128':            '__xreg32zx128',
        'IEM_MC_STORE_XREG_R32':                    '__xreg32',
        'IEM_MC_STORE_XREG_R64':                    '__xreg64',
        'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX':        '__xreg8zx',
        'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX':       '__xreg16zx',
        'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX':       '__xreg32zx',
        'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX':       '__xreg64zx',
        'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX':      '__xreg128zx',
        'IEM_MC_REF_XREG_U128':                     '__xreg128',
        'IEM_MC_REF_XREG_U128_CONST':               '__xreg128',
        'IEM_MC_REF_XREG_U32_CONST':                '__xreg32',
        'IEM_MC_REF_XREG_U64_CONST':                '__xreg64',
        'IEM_MC_REF_XREG_R32_CONST':                '__xreg32',
        'IEM_MC_REF_XREG_R64_CONST':                '__xreg64',
        'IEM_MC_REF_XREG_XMM_CONST':                '__xreg128',
        'IEM_MC_COPY_XREG_U128':                    '__xreg128',

        'IEM_MC_FETCH_YREG_U256':                   '__yreg256',
        'IEM_MC_FETCH_YREG_YMM':                    '__yreg256',
        'IEM_MC_FETCH_YREG_U128':                   '__yreg128',
        'IEM_MC_FETCH_YREG_U64':                    '__yreg64',
        'IEM_MC_FETCH_YREG_U32':                    '__yreg32',
        'IEM_MC_STORE_YREG_U128':                   '__yreg128',
        'IEM_MC_STORE_YREG_U32_ZX_VLMAX':           '__yreg32zx',
        'IEM_MC_STORE_YREG_U64_ZX_VLMAX':           '__yreg64zx',
        'IEM_MC_STORE_YREG_U128_ZX_VLMAX':          '__yreg128zx',
        'IEM_MC_STORE_YREG_U256_ZX_VLMAX':          '__yreg256zx',
        'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX':        '__yreg8',
        'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX':       '__yreg16',
        'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX':       '__yreg32',
        'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX':       '__yreg64',
        'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX':      '__yreg128',
        'IEM_MC_REF_YREG_U128':                     '__yreg128',
        'IEM_MC_REF_YREG_U128_CONST':               '__yreg128',
        'IEM_MC_REF_YREG_U64_CONST':                '__yreg64',
        'IEM_MC_COPY_YREG_U256_ZX_VLMAX':           '__yreg256zx',
        'IEM_MC_COPY_YREG_U128_ZX_VLMAX':           '__yreg128zx',
        'IEM_MC_COPY_YREG_U64_ZX_VLMAX':            '__yreg64zx',
        'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX':       '__yreg3296',
        'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX':       '__yreg6464',
        'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX':   '__yreg64hi64hi',
        'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX':   '__yreg64lo64lo',
        'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
        'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
    };
    ## Used by analyzeAndAnnotateName to name the block by call type (when no
    ## operand info is found) or to append the call type to an operand name.
    kdAnnotateNameCallStmts = {
        'IEM_MC_CALL_CIMPL_0':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_1':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_2':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_3':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_4':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_5':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_6':                      '__cimpl',
        'IEM_MC_CALL_CIMPL_7':                      '__cimpl',
        'IEM_MC_DEFER_TO_CIMPL_0_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_1_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_2_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_3_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_4_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_5_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_6_RET':              '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_7_RET':              '__cimpl_defer',
        'IEM_MC_CALL_VOID_AIMPL_0':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_1':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_2':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_3':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_4':                 '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_5':                 '__aimpl',
        'IEM_MC_CALL_AIMPL_0':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_1':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_2':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_3':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_4':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_5':                      '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_6':                      '__aimpl_ret',
        'IEM_MC_CALL_VOID_AIMPL_6':                 '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_0':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_1':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_2':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_3':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_4':                  '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_5':                  '__aimpl_fpu',
        'IEM_MC_CALL_MMX_AIMPL_0':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_1':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_2':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_3':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_4':                  '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_5':                  '__aimpl_mmx',
        'IEM_MC_CALL_SSE_AIMPL_0':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_1':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_2':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_3':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_4':                  '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_5':                  '__aimpl_sse',
        'IEM_MC_CALL_AVX_AIMPL_0':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_1':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_2':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_3':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_4':                  '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_5':                  '__aimpl_avx',
    };
1924 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1925 """
1926 Scans the statements and variation lists for clues about the threaded function,
1927 and sets self.sSubName if successfull.
1928 """
1929 # Operand base naming:
1930 dHits = {};
1931 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1932 if cHits > 0:
1933 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1934 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1935 else:
1936 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1937 if cHits > 0:
1938 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1939 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1940 else:
1941 # No op details, try name it by call type...
1942 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1943 if cHits > 0:
1944 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1945 self.sSubName = self.kdAnnotateNameCallStmts[sStmtNm];
1946 return;
1947
1948 # Add call info if any:
1949 dHits = {};
1950 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1951 if cHits > 0:
1952 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1953 sName += self.kdAnnotateNameCallStmts[sStmtNm][1:];
1954
1955 self.sSubName = sName;
1956 return;
1957
1958 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1959 """ Scans the statements for MC variables and call arguments. """
1960 for oStmt in aoStmts:
1961 if isinstance(oStmt, iai.McStmtVar):
1962 if oStmt.sVarName in self.dVariables:
1963 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1964 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1965 elif isinstance(oStmt, iai.McStmtCall) and oStmt.sName.startswith('IEM_MC_CALL_AIMPL_'):
1966 if oStmt.asParams[1] in self.dVariables:
1967 raise Exception('Variable %s is defined more than once!' % (oStmt.asParams[1],));
1968 self.dVariables[oStmt.asParams[1]] = iai.McStmtVar('IEM_MC_LOCAL', oStmt.asParams[0:2],
1969 oStmt.asParams[0], oStmt.asParams[1]);
1970
1971 # There shouldn't be any variables or arguments declared inside if/
1972 # else blocks, but scan them too to be on the safe side.
1973 if isinstance(oStmt, iai.McStmtCond):
1974 #cBefore = len(self.dVariables);
1975 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1976 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1977 #if len(self.dVariables) != cBefore:
1978 # raise Exception('Variables/arguments defined in conditional branches!');
1979 return True;
1980
    ## Maps the MC statement that ends/finishes an instruction to the
    ## corresponding g_ksFinishAnnotation_XXX value returned by
    ## analyzeCodeOperation().
    kdReturnStmtAnnotations = {
        'IEM_MC_ADVANCE_RIP_AND_FINISH':    g_ksFinishAnnotation_Advance,
        'IEM_MC_REL_JMP_S8_AND_FINISH':     g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S16_AND_FINISH':    g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S32_AND_FINISH':    g_ksFinishAnnotation_RelJmp,
        'IEM_MC_SET_RIP_U16_AND_FINISH':    g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U32_AND_FINISH':    g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U64_AND_FINISH':    g_ksFinishAnnotation_SetJmp,
        'IEM_MC_REL_CALL_S16_AND_FINISH':   g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S32_AND_FINISH':   g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S64_AND_FINISH':   g_ksFinishAnnotation_RelCall,
        'IEM_MC_IND_CALL_U16_AND_FINISH':   g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U32_AND_FINISH':   g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U64_AND_FINISH':   g_ksFinishAnnotation_IndCall,
        'IEM_MC_DEFER_TO_CIMPL_0_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_1_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_2_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_3_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_4_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_5_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_6_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_7_RET':      g_ksFinishAnnotation_DeferToCImpl,
    };
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
        """
        Analyzes the code looking clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        :param aoStmts:          The (branch of the) statement tree to analyze.
        :param dEflStmts:        In/out dict collecting EFLAGS-related statements by name.
        :param fSeenConditional: True when we're inside an if/else branch (set by
                                 the recursive calls below).

        Returns annotation on return style (a g_ksFinishAnnotation_XXX value or None).
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_IMPL_C_F_BRANCH_XXXX flags if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                # A relative jump inside an if/else branch is a conditional branch (Jcc & friends).
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
            elif oStmt.sName.startswith('IEM_MC_IND_CALL'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_CALL'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;
            elif oStmt.sName.startswith('IEM_MC_RETN'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                elif oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements.
            if oStmt.sName in self.kdReturnStmtAnnotations:
                # Only one finishing statement is expected per branch.
                assert sAnnotation is None;
                sAnnotation = self.kdReturnStmtAnnotations[oStmt.sName];

            # Collect MCs working on EFLAGS.  Caller will check this.
            if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS',
                               'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS', 'IEM_MC_ARG_LOCAL_EFLAGS', ):
                dEflStmts[oStmt.sName] = oStmt;
            elif isinstance(oStmt, iai.McStmtCall):
                # CIMPL calls only count as EFLAGS-related when the first parameter
                # advertises IEM_CIMPL_F_RFLAGS or IEM_CIMPL_F_STATUS_FLAGS.
                if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
                                   'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
                    if (    oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
                        or  oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
                        dEflStmts[oStmt.sName] = oStmt;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);

        return sAnnotation;
2081
2082 def analyzeThreadedFunction(self, oGenerator):
2083 """
2084 Analyzes the code, identifying the number of parameters it requires and such.
2085
2086 Returns dummy True - raises exception on trouble.
2087 """
2088
2089 #
2090 # Decode the block into a list/tree of McStmt objects.
2091 #
2092 aoStmts = self.oMcBlock.decode();
2093
2094 #
2095 # Check the block for errors before we proceed (will decode it).
2096 #
2097 asErrors = self.oMcBlock.check();
2098 if asErrors:
2099 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
2100 for sError in asErrors]));
2101
2102 #
2103 # Scan the statements for local variables and call arguments (self.dVariables).
2104 #
2105 self.analyzeFindVariablesAndCallArgs(aoStmts);
2106
2107 #
2108 # Scan the code for IEM_CIMPL_F_ and other clues.
2109 #
2110 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
2111 dEflStmts = {};
2112 self.analyzeCodeOperation(aoStmts, dEflStmts);
2113 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
2114 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
2115 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags)
2116 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE' in self.dsCImplFlags) > 1):
2117 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE/AIMPL_WITH_XSTATE calls', oGenerator);
2118
2119 #
2120 # Analyse EFLAGS related MCs and @opflmodify and friends.
2121 #
2122 if dEflStmts:
2123 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
2124 if ( oInstruction is None
2125 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
2126 sMcNames = '+'.join(dEflStmts.keys());
2127 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
2128 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
2129 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts or 'IEM_MC_COMMIT_EFLAGS_OPT' in dEflStmts:
2130 if not oInstruction.asFlModify:
2131 if oInstruction.sMnemonic not in [ 'not', ]:
2132 self.error('Uses IEM_MC_COMMIT_EFLAGS[_OPT] but has no flags in @opflmodify!', oGenerator);
2133 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
2134 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
2135 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
2136 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
2137 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
2138 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
2139 if not oInstruction.asFlModify:
2140 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
2141 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
2142 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
2143 if not oInstruction.asFlTest:
2144 if oInstruction.sMnemonic not in [ 'not', ]:
2145 self.error('Expected @opfltest!', oGenerator);
2146 if oInstruction and oInstruction.asFlSet:
2147 for sFlag in oInstruction.asFlSet:
2148 if sFlag not in oInstruction.asFlModify:
2149 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
2150 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2151 if oInstruction and oInstruction.asFlClear:
2152 for sFlag in oInstruction.asFlClear:
2153 if sFlag not in oInstruction.asFlModify:
2154 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
2155 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2156
2157 #
2158 # Create variations as needed.
2159 #
2160 if iai.McStmt.findStmtByNames(aoStmts,
2161 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
2162 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
2163 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
2164 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
2165 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
2166
2167 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
2168 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
2169 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
2170 'IEM_MC_FETCH_MEM_U32' : True,
2171 'IEM_MC_FETCH_MEM_U64' : True,
2172 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
2173 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
2174 'IEM_MC_STORE_MEM_U32' : True,
2175 'IEM_MC_STORE_MEM_U64' : True, }):
2176 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2177 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
2178 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2179 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
2180 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2181 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
2182 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2183 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
2184 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2185 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2186 else:
2187 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
2188 else:
2189 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2190 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
2191 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2192 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
2193 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2194 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
2195 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2196 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
2197 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2198 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2199 else:
2200 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
2201
2202 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2203 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
2204 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
2205 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
2206 asVariationsBase = asVariations;
2207 asVariations = [];
2208 for sVariation in asVariationsBase:
2209 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
2210 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
2211
2212 if not iai.McStmt.findStmtByNames(aoStmts,
2213 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
2214 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2215 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2216 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
2217 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
2218 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
2219 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
2220 'IEM_MC_REL_CALL_S16_AND_FINISH': True,
2221 'IEM_MC_REL_CALL_S32_AND_FINISH': True,
2222 'IEM_MC_REL_CALL_S64_AND_FINISH': True,
2223 'IEM_MC_IND_CALL_U16_AND_FINISH': True,
2224 'IEM_MC_IND_CALL_U32_AND_FINISH': True,
2225 'IEM_MC_IND_CALL_U64_AND_FINISH': True,
2226 'IEM_MC_RETN_AND_FINISH': True,
2227 }):
2228 asVariations = [sVariation for sVariation in asVariations
2229 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
2230
2231 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
2232
2233 # Dictionary variant of the list.
2234 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
2235
2236 #
2237 # Try annotate the threaded function name.
2238 #
2239 self.analyzeAndAnnotateName(aoStmts);
2240
2241 #
2242 # Continue the analysis on each variation.
2243 #
2244 for oVariation in self.aoVariations:
2245 oVariation.analyzeVariation(aoStmts);
2246
2247 return True;
2248
    ## Used by emitThreadedCallStmts.
    ## Variations for which the generated dispatch must also look at segment
    ## and address-size prefixes when picking the threaded function; their
    ## presence adds the 8 (addr-size differs) and 16 (FS/GS/CS access) bits
    ## to the switch value built in emitThreadedCallStmts.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs: True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
        ThreadedFunctionVariation.ksVariation_32_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat: True,
        ThreadedFunctionVariation.ksVariation_32f_Flat: True,
        ThreadedFunctionVariation.ksVariation_16_Addr32: True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
2262
    def emitThreadedCallStmts(self, sBranch = None, fTbLookupTable = False): # pylint: disable=too-many-statements
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        The sBranch parameter is used with conditional branches where we'll emit
        different threaded calls depending on whether we're in the jump-taken or
        no-jump code path.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.

        Returns a list of statement objects ready for rendering.
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            assert not sBranch;
            return self.aoVariations[0].emitThreadedCallStmtsForVariant(0, fTbLookupTable);

        #
        # Case statement sub-class.  Each instance is one 'case' label of the
        # generated switch; a None variation name produces an empty body, i.e.
        # a C fall-through to the next case with a body.
        #
        dByVari = self.dVariations;
        #fDbg   = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            def __init__(self, sCond, sVarNm = None):
                self.sCond  = sCond;
                self.sVarNm = sVarNm;
                self.oVar   = dByVari[sVarNm] if sVarNm else None;
                self.aoBody = self.oVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sVarNm else None;

            def toCode(self):
                # Renders 'case X:' + body + 'break;' (or just the label for fall-thru).
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                # Renders 'case X: enmFunction = <index>; break;' for the
                # reduced all-cases-identical form of the switch.
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                # Compares the case bodies, tolerating only a difference in the
                # threaded function index passed to IEM_MC2_EMIT_CALL_* calls.
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    if isinstance(oStmt, iai.McStmtCond):
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        if (    sParam != oThatStmt.asParams[iParam]
                            and (   iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        #
        fSimple = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            # Bit value 8: effective address size differs from the mode default.
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Bit value 16: accesses via FS and GS and CS go thru non-FLAT
            # functions. (CS is not writable in 32-bit mode (at least), thus
            # the penalty mode for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple = False; # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            # Bit value 32: EFLAGS checking & clearing variants are needed.
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
        elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_64BIT',
                                ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
            if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32',
                                    ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));

        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
                Case('IEMMODE_32BIT | 16', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                ]);
        elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',
                     ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
            ]);
            if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',
                         ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16', None), # fall thru
                Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
        elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT',
                                ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
            if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32',
                                    ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));


        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
                                ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
        if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
                                ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # Reduced form: switch only assigns enmFunction, the shared body is
            # emitted once after the switch.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmtsForVariant(0, fTbLookupTable,
                                                                                                      'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
2528
    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        For conditional branch blocks (fIsConditional) separate Jmp/NoJmp calls
        are emitted depending on which *_AND_FINISH statement is encountered;
        sBranchAnnotation carries the branch annotation for the branch being
        recursed into.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.  Raises via raiseProblem when no suitable call
        insertion point is found at depth 0.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                # Inject the accumulated IEM_CIMPL_F_XXX flags into IEM_MC_BEGIN's 2nd parameter.
                oNewStmt.asParams[1] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or precede.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        # Insert the call(s) just before this statement (pop + re-append below).
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                        else:
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                    # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                    if not sBranchAnnotation:
                        self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                    assert fIsConditional;
                    aoDecoderStmts.pop();
                    if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                        assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                        aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                    elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                        assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                          { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
                                                            'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                            'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                        aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                    else:
                        self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                    aoDecoderStmts.append(oNewStmt);
                    fCallEmitted = True;

                elif (    not fIsConditional
                      and oStmt.fDecode
                      and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    # Decoding is done - emit the call right after the helper invocation.
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                          fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                fCallEmitted, cDepth + 1,
                                                                                oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                # Only counts as emitted if *both* branches emitted the call.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
2614
2615
    def generateInputCode(self):
        """
        Modifies the input code.

        Renders the morphed statement list (see morphInputCode) back to C
        source text, indented to match the original MC block.  Single
        statement blocks (IEM_MC_DEFER_TO_CIMPL_X_RET) are additionally
        wrapped in braces and prefixed with a fTbCurInstr assignment.
        """
        # Round the indent up to the next multiple of four.
        cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;

        if len(self.oMcBlock.aoStmts) == 1:
            # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
            sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
            if self.dsCImplFlags:
                sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
            else:
                sCode += '0;\n';
            sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
                                                  cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
            # NOTE(review): min(cchIndent, 2) - 2 is always <= 0, so sIndent is
            # always the empty string; max() would indent the braces by
            # cchIndent - 2 - confirm which was intended before changing.
            sIndent = ' ' * (min(cchIndent, 2) - 2);
            sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
            return sCode;

        # IEM_MC_BEGIN/END block
        assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
        fIsConditional = (    'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
                          and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
        return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
                                            cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2641
# Short alias for ThreadedFunctionVariation, mainly to keep the long case
# tables in ThreadedFunction.emitThreadedCallStmts readable.
ThrdFnVar = ThreadedFunctionVariation;
2644
2645
class IEMThreadedGenerator(object):
    """
    The threaded code generator & annotator.

    Drives the whole process: parses the input files (processInputFiles),
    builds a ThreadedFunction for each MC block found, and writes out the
    generated headers/sources (generateThreadedFunctionsHeader & friends).
    """
2650
2651 def __init__(self):
2652 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2653 self.oOptions = None # type: argparse.Namespace
2654 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2655 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
2656 self.cErrors = 0;
2657
2658 #
2659 # Error reporting.
2660 #
2661
2662 def rawError(self, sCompleteMessage):
2663 """ Output a raw error and increment the error counter. """
2664 print(sCompleteMessage, file = sys.stderr);
2665 self.cErrors += 1;
2666 return False;
2667
2668 #
2669 # Processing.
2670 #
2671
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the input files (self.oOptions.asInFiles), creates and analyzes
        a ThreadedFunction per MC block, settles name suffixes, populates
        aidxFirstFunctions, optionally runs the native recompiler analysis,
        and sanity-checks variable/argument usage.

        Returns True on success, False if any errors were reported.
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions, collecting parameter count histograms
        # (raw reference counts vs. the optimized/minimum counts) as we go.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyzeThreadedFunction(self);
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Do another pass over the threaded functions to settle the name suffix.
        # If several functions from the same MC function share a sub-name, make
        # them unique by appending an index.
        iThreadedFn = 0;
        while iThreadedFn < len(self.aoThreadedFuncs):
            oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
            assert oFunction;
            iThreadedFnNext = iThreadedFn + 1;
            dSubNames = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
            while (    iThreadedFnNext < len(self.aoThreadedFuncs)
                   and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
                dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
                iThreadedFnNext += 1;
            if iThreadedFnNext - iThreadedFn > len(dSubNames):
                iSubName = 0;
                while iThreadedFn + iSubName < iThreadedFnNext:
                    self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
                    iSubName += 1;
            iThreadedFn = iThreadedFnNext;

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        # (Relies on getThreadedFunctionByIndex returning something with a
        # non-matching sSrcFile past the end - presumably a sentinel; confirm.)
        iThreadedFunction = 0;
        oThreadedFunction = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.analyzeThreadedFunctionsForNativeRecomp(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs = 0;
        cMaxVars = 0;
        cMaxVarsAndArgs = 0;
        cbMaxArgs = 0;
        cbMaxVars = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.aoLocals or oThreadedFunction.oMcBlock.aoArgs:
                # Counts.
                cMaxVars = max(cMaxVars, len(oThreadedFunction.oMcBlock.aoLocals));
                cMaxArgs = max(cMaxArgs, len(oThreadedFunction.oMcBlock.aoArgs));
                cMaxVarsAndArgs = max(cMaxVarsAndArgs,
                                      len(oThreadedFunction.oMcBlock.aoLocals) + len(oThreadedFunction.oMcBlock.aoArgs));
                # NOTE(review): triggers at 10 while the message says 'max 10' -
                # possible off-by-one in the message text.
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       len(oThreadedFunction.oMcBlock.aoLocals), len(oThreadedFunction.oMcBlock.aoArgs),));
                # Calc stack allocation size: each var/arg is rounded up to a
                # whole number of 8-byte stack slots.
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars = max(cbMaxVars, cbVars);
                cbMaxArgs = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        if self.cErrors > 0:
            print('fatal error: %u error%s during processing. Details above.'
                  % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
            return False;
        return True;
2771
2772 #
2773 # Output
2774 #
2775
2776 def generateLicenseHeader(self):
2777 """
2778 Returns the lines for a license header.
2779 """
2780 return [
2781 '/*',
2782 ' * Autogenerated by $Id: IEMAllThrdPython.py 105277 2024-07-11 17:13:59Z vboxsync $ ',
2783 ' * Do not edit!',
2784 ' */',
2785 '',
2786 '/*',
2787 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2788 ' *',
2789 ' * This file is part of VirtualBox base platform packages, as',
2790 ' * available from https://www.virtualbox.org.',
2791 ' *',
2792 ' * This program is free software; you can redistribute it and/or',
2793 ' * modify it under the terms of the GNU General Public License',
2794 ' * as published by the Free Software Foundation, in version 3 of the',
2795 ' * License.',
2796 ' *',
2797 ' * This program is distributed in the hope that it will be useful, but',
2798 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2799 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2800 ' * General Public License for more details.',
2801 ' *',
2802 ' * You should have received a copy of the GNU General Public License',
2803 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2804 ' *',
2805 ' * The contents of this file may alternatively be used under the terms',
2806 ' * of the Common Development and Distribution License Version 1.0',
2807 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2808 ' * in the VirtualBox distribution, in which case the provisions of the',
2809 ' * CDDL are applicable instead of those of the GPL.',
2810 ' *',
2811 ' * You may elect to license modified versions of this file under the',
2812 ' * terms and conditions of either the GPL or the CDDL or both.',
2813 ' *',
2814 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2815 ' */',
2816 '',
2817 '',
2818 '',
2819 ];
2820
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry is (name-suffix, cUserArgs, fHaveNativeRecompiler).
    katBltIns = (
        ( 'Nop', 0, True ),
        ( 'LogCpuState', 0, True ),

        ( 'DeferToCImpl0', 2, True ),
        ( 'CheckIrq', 0, True ),
        ( 'CheckMode', 1, True ),
        ( 'CheckHwInstrBps', 0, False ),
        ( 'CheckCsLim', 1, True ),

        ( 'CheckCsLimAndOpcodes', 3, True ),
        ( 'CheckOpcodes', 3, True ),
        ( 'CheckOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
    );
2857
    def generateThreadedFunctionsHeader(self, oOut, _):
        """
        Generates the threaded functions header file.
        Returns success indicator.

        Writes the IEMTHREADEDFUNCS enum (built-ins first, then each variation
        group in emit order) followed by the function table prototypes to
        oOut, and records each variation's enum value in oVariation.iEnumValue.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            ' kIemThreadedFunc_Invalid = 0,',
            '',
            ' /*',
            ' * Predefined',
            ' */',
        ];
        asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        # NOTE(review): iThreadedFunction is pre-incremented before the first
        # assignment below, so the first variation gets 2 + len(katBltIns) -
        # verify this matches what consumers of iEnumValue expect.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                ' /*',
                ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                ' */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    oVariation.iEnumValue = iThreadedFunction;
                    asLines.append(' ' + oVariation.getIndexName() + ',');
        asLines += [
            ' kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            '#if defined(IN_RING3)',
            'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
            '#endif',
        ];

        oOut.write('\n'.join(asLines));
        return True;
2912
    ## Maps a field width in bits to the C expression for a mask covering that
    ## many low bits, used when unpacking sub-fields from the 64-bit parameters.
    ksBitsToIntMask = {
        1: "UINT64_C(0x1)",
        2: "UINT64_C(0x3)",
        4: "UINT64_C(0xf)",
        8: "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2921
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams, uNoRefLevel = 0):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        oVariation supplies dParamRefs describing how the original parameters
        were packed into the 64-bit call parameters; asParams gives the C
        expressions to unpack from.  uNoRefLevel controls RT_NOREF_PV emission:
        0 = none, 1 = only pfnXxxx variables (native recompiler), 2 = all
        variables (liveness analysis).
        Returns success indicator.
        """
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef = aoRefs[0]; # All refs to the same name share one packing; the first will do.
            if oRef.sType[0] != 'P':
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                # Pointer types always occupy a whole 64-bit parameter.
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            if cBits == 64:
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                # Field starts at bit 0, so a plain mask suffices.
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                # Field packed at a non-zero bit offset: shift down, then mask.
                sUnpack = '(%s)((%s >> %s) & %s);' \
                        % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # First column is a 'param:offset' sort key so the declarations
            # come out in packing order below.
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
        acchVars = [0, 0, 0, 0, 0]; # Max width of each column, for alignment.
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = '    %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));

        if uNoRefLevel > 0 and aasVars:
            if uNoRefLevel > 1:
                # level 2: Everything. This is used by liveness.
                oOut.write('    ');
                for asVar in sorted(aasVars):
                    oOut.write(' RT_NOREF_PV(%s);' % (asVar[2],));
                oOut.write('\n');
            else:
                # level 1: Only pfnXxxx variables. This is used by native.
                for asVar in sorted(aasVars):
                    if asVar[2].startswith('pfn'):
                        oOut.write('    RT_NOREF_PV(%s);\n' % (asVar[2],));
        return True;
2976
    ## Names of the (up to three) 64-bit user parameters of a threaded function.
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut, _):
        """
        Generates the threaded functions source file.
        Returns success indicator.

        Emits one static function per instruction variation, followed by the
        function pointer, argument count, name and statistics tables that run
        parallel to the IEMTHREADEDFUNCS enum produced by
        generateThreadedFunctionsHeader.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # 130-column banner box for the variation; the 15 below is the
            # length of the '*   Variation: ' prefix.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write('    RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    /*Invalid*/ NULL,',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            '    0, /*Invalid*/',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    "Invalid",',
        ];
        asStatTable = [
            '/**',
            ' * Function statistics name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
            '{',
            '    NULL,',
        ];
        aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);

        # The built-in entries come first in every table...
        for asTable in aasTables:
            asTable.extend((
                '',
                '    /*',
                '     * Predefined.',
                '     */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append('    iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asArgCntTab.append('    %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
            asNameTable.append('    "BltIn_%s",' % (sFuncNm,));
            asStatTable.append('    "BltIn/%s",' % (sFuncNm,));

        # ... then the variations, in the same order as the enum (asserted below).
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    '    /*',
                    '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    '     */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append('    /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append('    /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append('    /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
                    asStatTable.append('    "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.  Name and statistics tables are ring-3/logging only.
        #
        oOut.write(  '\n'
                   + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write(  '\n'
                   + '\n'
                   + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write(  '\n'
                   + '\n'
                   + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                   + '\n'
                   + '\n'
                   + '#if defined(IN_RING3)\n');
        oOut.write('\n'.join(asStatTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 */\n');

        return True;
3127
3128 def generateNativeFunctionsHeader(self, oOut, _):
3129 """
3130 Generates the native recompiler functions header file.
3131 Returns success indicator.
3132 """
3133 if not self.oOptions.fNativeRecompilerEnabled:
3134 return True;
3135
3136 asLines = self.generateLicenseHeader();
3137
3138 # Prototype the function table.
3139 asLines += [
3140 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
3141 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
3142 '',
3143 ];
3144
3145 # Emit indicators as to which of the builtin functions have a native
3146 # recompiler function and which not. (We only really need this for
3147 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
3148 for atBltIn in self.katBltIns:
3149 if atBltIn[1]:
3150 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
3151 else:
3152 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
3153
3154 # Emit prototypes for the builtin functions we use in tables.
3155 asLines += [
3156 '',
3157 '/* Prototypes for built-in functions used in the above tables. */',
3158 ];
3159 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3160 if fHaveRecompFunc:
3161 asLines += [
3162 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3163 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3164 ];
3165
3166 # Emit prototypes for table function.
3167 asLines += [
3168 '',
3169 '#ifdef IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES'
3170 ]
3171 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3172 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3173 asLines += [
3174 '',
3175 '/* Variation: ' + sVarName + ' */',
3176 ];
3177 for oThreadedFunction in self.aoThreadedFuncs:
3178 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3179 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3180 asLines.append('IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(' + oVariation.getNativeFunctionName() + ');');
3181 asLines += [
3182 '',
3183 '#endif /* IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES */',
3184 ]
3185
3186 oOut.write('\n'.join(asLines));
3187 return True;
3188
    def generateNativeFunctionsSource(self, oOut, idxPart):
        """
        Generates the native recompiler functions source file.
        Returns success indicator.

        The output is split over four files to help compile times; idxPart
        (0..3) selects which slice of the variations goes into this one.  The
        function pointer table is emitted only with part 0.
        """
        cParts = 4;
        assert(idxPart in range(cParts));
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # The files are split up by threaded variation as that's the simplest way to
        # do it, even if the distribution isn't entirely even (ksVariation_Default
        # only has the defer to cimpl bits and the pre-386 variants will naturally
        # have fewer instructions).
        #
        cVariationsPerFile = len(ThreadedFunctionVariation.kasVariationsEmitOrder) // cParts;
        idxFirstVar = idxPart * cVariationsPerFile;
        idxEndVar = idxFirstVar + cVariationsPerFile;
        if idxPart + 1 >= cParts:
            # The last part picks up the remainder of the integer division.
            idxEndVar = len(ThreadedFunctionVariation.kasVariationsEmitOrder);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder[idxFirstVar:idxEndVar]:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # 130-column banner box; 15 is the length of the '*   Variation: ' prefix.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 1);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table if this is the first file.
        #
        if idxPart == 0:
            oOut.write(  '\n'
                       + '\n'
                       + '/*\n'
                       + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                       + ' */\n'
                       + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                       + '{\n'
                       + '    /*Invalid*/ NULL,'
                       + '\n'
                       + '    /*\n'
                       + '     * Predefined.\n'
                       + '     */\n'
                       );
            for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
                if fHaveRecompFunc:
                    oOut.write('    iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
                else:
                    oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

            # The variation entries must line up with the enum values assigned
            # by generateThreadedFunctionsHeader - asserted below.
            iThreadedFunction = 1 + len(self.katBltIns);
            for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
                oOut.write(  '    /*\n'
                           + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                           + '     */\n');
                for oThreadedFunction in self.aoThreadedFuncs:
                    oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                    if oVariation:
                        iThreadedFunction += 1;
                        assert oVariation.iEnumValue == iThreadedFunction;
                        sName = oVariation.getNativeFunctionName();
                        if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                            oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                        else:
                            oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

            oOut.write(  '};\n');

        oOut.write('\n');
        return True;
3299
    def generateNativeLivenessSource(self, oOut, _):
        """
        Generates the native recompiler liveness analysis functions source file.
        Returns success indicator.

        Mirrors generateNativeFunctionsSource, but emits the static liveness
        analysis functions (uNoRefLevel = 2 so every unpacked variable gets an
        RT_NOREF_PV) and the g_apfnIemNativeLivenessFunctions table.
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # 130-column banner box; 15 is the length of the '*   Variation: ' prefix.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 2);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + '    /*Invalid*/ NULL,'
                   + '\n'
                   + '    /*\n'
                   + '     * Predefined.\n'
                   + '     */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write('    iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # The variation entries must line up with the enum values assigned by
        # generateThreadedFunctionsHeader - asserted below.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  '    /*\n'
                       + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + '     */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getLivenessFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
3396
3397
3398 def getThreadedFunctionByIndex(self, idx):
3399 """
3400 Returns a ThreadedFunction object for the given index. If the index is
3401 out of bounds, a dummy is returned.
3402 """
3403 if idx < len(self.aoThreadedFuncs):
3404 return self.aoThreadedFuncs[idx];
3405 return ThreadedFunction.dummyInstance();
3406
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.
        Returns success indicator.

        Copies each input parser's lines verbatim, replacing every IEM_MC
        block with the code produced by the corresponding ThreadedFunction.
        idxFile (1..4) selects which subset of the input files to emit.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude = -1; # -1 = not found in the file table (treated as excluded below).
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    # aoInfo[2] is the file-set number; -1 means 'all sets'.
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the input lines, swapping in the generated replacement each
            # time we reach the begin/end line of the next MC block.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine = 0;
            while iLine < len(oParser.asLines):
                sLine = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if (   iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block. Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    assert (   (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the block's end line and emit its tail.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert (   sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        # Sanity check that the replacement looks like one of
                        # the known generated forms.
                        sModified = oThreadedFunction.generateInputCode().strip();
                        assert (   sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
3497
3498
3499 #
3500 # Main
3501 #
3502
    def main(self, asArgs):
        """
        C-like main function.
        Returns exit code.

        asArgs is the full argv vector (asArgs[0] being the script name).
        Parses the options, processes the input instruction files, and writes
        each requested output file ('-' meaning stdout).
        """

        #
        # Parse arguments
        #
        sScriptDir = os.path.dirname(__file__);
        oParser = argparse.ArgumentParser(add_help = False);
        oParser.add_argument('asInFiles',
                             metavar = 'input.cpp.h',
                             nargs = '*',
                             default = [os.path.join(sScriptDir, aoInfo[0])
                                        for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
                             help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
        oParser.add_argument('--host-arch',
                             metavar = 'arch',
                             dest = 'sHostArch',
                             action = 'store',
                             default = None,
                             help = 'The host architecture.');

        oParser.add_argument('--out-thrd-funcs-hdr',
                             metavar = 'file-thrd-funcs.h',
                             dest = 'sOutFileThrdFuncsHdr',
                             action = 'store',
                             default = '-',
                             help = 'The output header file for the threaded functions.');
        oParser.add_argument('--out-thrd-funcs-cpp',
                             metavar = 'file-thrd-funcs.cpp',
                             dest = 'sOutFileThrdFuncsCpp',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the threaded functions.');
        oParser.add_argument('--out-n8ve-funcs-hdr',
                             metavar = 'file-n8tv-funcs.h',
                             dest = 'sOutFileN8veFuncsHdr',
                             action = 'store',
                             default = '-',
                             help = 'The output header file for the native recompiler functions.');
        oParser.add_argument('--out-n8ve-funcs-cpp1',
                             metavar = 'file-n8tv-funcs1.cpp',
                             dest = 'sOutFileN8veFuncsCpp1',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 1.');
        oParser.add_argument('--out-n8ve-funcs-cpp2',
                             metavar = 'file-n8ve-funcs2.cpp',
                             dest = 'sOutFileN8veFuncsCpp2',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 2.');
        oParser.add_argument('--out-n8ve-funcs-cpp3',
                             metavar = 'file-n8ve-funcs3.cpp',
                             dest = 'sOutFileN8veFuncsCpp3',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 3.');
        oParser.add_argument('--out-n8ve-funcs-cpp4',
                             metavar = 'file-n8ve-funcs4.cpp',
                             dest = 'sOutFileN8veFuncsCpp4',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 4.');
        oParser.add_argument('--out-n8ve-liveness-cpp',
                             metavar = 'file-n8ve-liveness.cpp',
                             dest = 'sOutFileN8veLivenessCpp',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler liveness analysis functions.');
        oParser.add_argument('--native',
                             dest = 'fNativeRecompilerEnabled',
                             action = 'store_true',
                             default = False,
                             help = 'Enables generating the files related to native recompilation.');
        oParser.add_argument('--out-mod-input1',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput1',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 1.');
        oParser.add_argument('--out-mod-input2',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput2',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 2.');
        oParser.add_argument('--out-mod-input3',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput3',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 3.');
        oParser.add_argument('--out-mod-input4',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput4',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 4.');
        oParser.add_argument('--help', '-h', '-?',
                             action = 'help',
                             help = 'Display help and exit.');
        oParser.add_argument('--version', '-V',
                             action = 'version',
                             version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
                                     % (__version__.split()[1], iai.__version__.split()[1],),
                             help = 'Displays the version/revision of the script and exit.');
        self.oOptions = oParser.parse_args(asArgs[1:]);
        print("oOptions=%s" % (self.oOptions,), file = sys.stderr);

        if self.oOptions.sHostArch not in ('amd64', 'arm64'):
            print('error! Unsupported (or missing) host architecture: %s' % (self.oOptions.sHostArch,), file = sys.stderr);
            return 1;

        #
        # Process the instructions specified in the IEM sources.
        #
        if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
            #
            # Generate the output files.
            # Each entry is (output file name, generator method, part number).
            #
            aaoOutputFiles = (
                 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader, 0, ),
                 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource, 0, ),
                 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader, 0, ),
                 ( self.oOptions.sOutFileN8veFuncsCpp1, self.generateNativeFunctionsSource, 0, ),
                 ( self.oOptions.sOutFileN8veFuncsCpp2, self.generateNativeFunctionsSource, 1, ),
                 ( self.oOptions.sOutFileN8veFuncsCpp3, self.generateNativeFunctionsSource, 2, ),
                 ( self.oOptions.sOutFileN8veFuncsCpp4, self.generateNativeFunctionsSource, 3, ),
                 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource, 0, ),
                 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput, 1, ),
                 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput, 2, ),
                 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput, 3, ),
                 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput, 4, ),
            );
            fRc = True;
            for sOutFile, fnGenMethod, iPartNo in aaoOutputFiles:
                if sOutFile == '-':
                    fRc = fnGenMethod(sys.stdout, iPartNo) and fRc;
                else:
                    try:
                        oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
                    except Exception as oXcpt:
                        print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
                        return 1;
                    fRc = fnGenMethod(oOut, iPartNo) and fRc;
                    oOut.close();
            if fRc:
                return 0;

        return 1;
3656
3657
if __name__ == '__main__':
    # Script entry point: run the generator on the command line arguments.
    sys.exit(IEMThreadedGenerator().main(sys.argv));
3660
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette