VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@105655

Last change on this file since 105655 was 105652, checked in by vboxsync, 8 months ago

VMM/IEM: Fix bound instruction emulation when running in the recompiler on ARM, bugref:10741

The bs3-cpu-generated-1 testcase would fail on the bound instruction when running in the recompiler because the
input values were not properly sign-extended to 32 bits on ARM before being passed to iemCImpl_bound_16: the IEM MC block
for bound treated everything as uint16_t. This works with the interpreter because the function parameters are declared
int16_t, so the compiler does the proper sign extension, but with our own recompiler negative values ended up not being sign-extended.
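
As an illustration only (this Python sketch is not part of the change and the helper names are made up), the difference between the two widenings:

    # Hypothetical sketch of zero- vs. sign-extension of a 16-bit operand to 32 bits.
    def widen_unsigned16(value):
        # What treating the operand as uint16_t effectively did in the recompiled code.
        return value & 0xffff

    def widen_signed16(value):
        # What iemCImpl_bound_16 expects: int16_t sign-extended to 32 bits.
        value &= 0xffff
        return (value | 0xffff0000) if (value & 0x8000) else value

    assert widen_unsigned16(0xffff) == 0x0000ffff   # wrong: -1 turns into 65535 (the x2 value shown below)
    assert widen_signed16(0xffff)   == 0xffffffff   # right: -1 stays -1 in 32-bit two's complement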

Create some new IEM MC statements for signed values to make it easier to get this right in the future,
instead of just making iemCImpl_bound_16() take uint16_t and cast the values to int16_t internally.

On a funny side note, lldb prints the correct negative values for the int16_t parameters of iemCImpl_bound_16(), so its output can't be
trusted here; the registers show the values actually passed:

(lldb) register read
General Purpose Registers:

x0 = 0x000000011653c000
x1 = 0x0000000000000004
x2 = 0x000000000000ffff <= Wrong index, should be 0x00000000ffffffff
x3 = 0x000000000000fffe <= Wrong lower bound, should be 0x00000000fffffffe
x4 = 0x0000000000000000 <= Upper bound

[...]
(lldb) stepi
Process 31449 stopped

  • thread #22, name = 'EMT', stop reason = instruction step into

frame #0: 0x0000000132b242e4 VBoxVMM.dylib`::iemCImpl_bound_16(pVCpu=0x000000011653c000, cbInstr='\x04', idxArray=-1, idxLowerBound=-2, idxUpperBound=0) at IEMAllCImpl.cpp:8304:9 [opt]

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 192.3 KB
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 105652 2024-08-12 12:16:36Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 105652 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53 ## Number of generic parameters for the threaded functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'X86YMMREG': ( 256, False, 'X86YMMREG', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'IEMMEDIAF2YMMSRC': ( 512, False, 'IEMMEDIAF2YMMSRC',),
87 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
88 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
89 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
90}; #| g_kdTypeInfo; - requires 3.9
91g_kdTypeInfo2.update(g_kdTypeInfo);
92
93def getTypeBitCount(sType):
94 """
95 Translates a type to its size in bits.
96 """
97 if sType in g_kdTypeInfo2:
98 return g_kdTypeInfo2[sType][0];
99 if '*' in sType or sType[0] == 'P':
100 return 64;
101 #raise Exception('Unknown type: %s' % (sType,));
102 print('error: Unknown type: %s' % (sType,));
103 return 64;
104
105g_kdIemFieldToType = {
106 # Illegal ones:
107 'offInstrNextByte': ( None, ),
108 'cbInstrBuf': ( None, ),
109 'pbInstrBuf': ( None, ),
110 'uInstrBufPc': ( None, ),
111 'cbInstrBufTotal': ( None, ),
112 'offCurInstrStart': ( None, ),
113 'cbOpcode': ( None, ),
114 'offOpcode': ( None, ),
115 'offModRm': ( None, ),
116 # Okay ones.
117 'fPrefixes': ( 'uint32_t', ),
118 'uRexReg': ( 'uint8_t', ),
119 'uRexB': ( 'uint8_t', ),
120 'uRexIndex': ( 'uint8_t', ),
121 'iEffSeg': ( 'uint8_t', ),
122 'enmEffOpSize': ( 'IEMMODE', ),
123 'enmDefAddrMode': ( 'IEMMODE', ),
124 'enmEffAddrMode': ( 'IEMMODE', ),
125 'enmDefOpSize': ( 'IEMMODE', ),
126 'idxPrefix': ( 'uint8_t', ),
127 'uVex3rdReg': ( 'uint8_t', ),
128 'uVexLength': ( 'uint8_t', ),
129 'fEvexStuff': ( 'uint8_t', ),
130 'uFpuOpcode': ( 'uint16_t', ),
131};
132
133## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
134## @{
135g_ksFinishAnnotation_Advance = 'Advance';
136g_ksFinishAnnotation_RelJmp = 'RelJmp';
137g_ksFinishAnnotation_SetJmp = 'SetJmp';
138g_ksFinishAnnotation_RelCall = 'RelCall';
139g_ksFinishAnnotation_IndCall = 'IndCall';
140g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
141## @}
142
143
144class ThreadedParamRef(object):
145 """
146 A parameter reference for a threaded function.
147 """
148
149 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
150 ## The name / reference in the original code.
151 self.sOrgRef = sOrgRef;
152 ## Normalized name to deal with spaces in macro invocations and such.
153 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
154 ## Indicates that sOrgRef may not match the parameter.
155 self.fCustomRef = sStdRef is not None;
156 ## The type (typically derived).
157 self.sType = sType;
158 ## The statement making the reference.
159 self.oStmt = oStmt;
160 ## The parameter containing the references. None if implicit.
161 self.iParam = iParam;
162 ## The offset in the parameter of the reference.
163 self.offParam = offParam;
164
165 ## The variable name in the threaded function.
166 self.sNewName = 'x';
167 ## The parameter this is packed into.
168 self.iNewParam = 99;
169 ## The bit offset in iNewParam.
170 self.offNewParam = 1024
171
172
173class ThreadedFunctionVariation(object):
174 """ Threaded function variation. """
175
176 ## @name Variations.
177 ## These variations will match translation block selection/distinctions as well.
178 ## @{
179 # pylint: disable=line-too-long
180 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
181 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
182 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
183 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
184 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
185 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
186 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
187 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
188 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
189 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
190 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
191 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
192 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
193 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
194 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
195 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
196 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
197 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
198 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
199 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
200 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
201 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
202 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
203 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
204 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
205 ksVariation_64 = '_64'; ##< 64-bit mode code.
206 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
207 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
208 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
209 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
210 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
211 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
212 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
213 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
214 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
215 # pylint: enable=line-too-long
216 kasVariations = (
217 ksVariation_Default,
218 ksVariation_16,
219 ksVariation_16f,
220 ksVariation_16_Jmp,
221 ksVariation_16f_Jmp,
222 ksVariation_16_NoJmp,
223 ksVariation_16f_NoJmp,
224 ksVariation_16_Addr32,
225 ksVariation_16f_Addr32,
226 ksVariation_16_Pre386,
227 ksVariation_16f_Pre386,
228 ksVariation_16_Pre386_Jmp,
229 ksVariation_16f_Pre386_Jmp,
230 ksVariation_16_Pre386_NoJmp,
231 ksVariation_16f_Pre386_NoJmp,
232 ksVariation_32,
233 ksVariation_32f,
234 ksVariation_32_Jmp,
235 ksVariation_32f_Jmp,
236 ksVariation_32_NoJmp,
237 ksVariation_32f_NoJmp,
238 ksVariation_32_Flat,
239 ksVariation_32f_Flat,
240 ksVariation_32_Addr16,
241 ksVariation_32f_Addr16,
242 ksVariation_64,
243 ksVariation_64f,
244 ksVariation_64_Jmp,
245 ksVariation_64f_Jmp,
246 ksVariation_64_NoJmp,
247 ksVariation_64f_NoJmp,
248 ksVariation_64_FsGs,
249 ksVariation_64f_FsGs,
250 ksVariation_64_Addr32,
251 ksVariation_64f_Addr32,
252 );
253 kasVariationsWithoutAddress = (
254 ksVariation_16,
255 ksVariation_16f,
256 ksVariation_16_Pre386,
257 ksVariation_16f_Pre386,
258 ksVariation_32,
259 ksVariation_32f,
260 ksVariation_64,
261 ksVariation_64f,
262 );
263 kasVariationsWithoutAddressNot286 = (
264 ksVariation_16,
265 ksVariation_16f,
266 ksVariation_32,
267 ksVariation_32f,
268 ksVariation_64,
269 ksVariation_64f,
270 );
271 kasVariationsWithoutAddressNot286Not64 = (
272 ksVariation_16,
273 ksVariation_16f,
274 ksVariation_32,
275 ksVariation_32f,
276 );
277 kasVariationsWithoutAddressNot64 = (
278 ksVariation_16,
279 ksVariation_16f,
280 ksVariation_16_Pre386,
281 ksVariation_16f_Pre386,
282 ksVariation_32,
283 ksVariation_32f,
284 );
285 kasVariationsWithoutAddressOnly64 = (
286 ksVariation_64,
287 ksVariation_64f,
288 );
289 kasVariationsWithAddress = (
290 ksVariation_16,
291 ksVariation_16f,
292 ksVariation_16_Addr32,
293 ksVariation_16f_Addr32,
294 ksVariation_16_Pre386,
295 ksVariation_16f_Pre386,
296 ksVariation_32,
297 ksVariation_32f,
298 ksVariation_32_Flat,
299 ksVariation_32f_Flat,
300 ksVariation_32_Addr16,
301 ksVariation_32f_Addr16,
302 ksVariation_64,
303 ksVariation_64f,
304 ksVariation_64_FsGs,
305 ksVariation_64f_FsGs,
306 ksVariation_64_Addr32,
307 ksVariation_64f_Addr32,
308 );
309 kasVariationsWithAddressNot286 = (
310 ksVariation_16,
311 ksVariation_16f,
312 ksVariation_16_Addr32,
313 ksVariation_16f_Addr32,
314 ksVariation_32,
315 ksVariation_32f,
316 ksVariation_32_Flat,
317 ksVariation_32f_Flat,
318 ksVariation_32_Addr16,
319 ksVariation_32f_Addr16,
320 ksVariation_64,
321 ksVariation_64f,
322 ksVariation_64_FsGs,
323 ksVariation_64f_FsGs,
324 ksVariation_64_Addr32,
325 ksVariation_64f_Addr32,
326 );
327 kasVariationsWithAddressNot286Not64 = (
328 ksVariation_16,
329 ksVariation_16f,
330 ksVariation_16_Addr32,
331 ksVariation_16f_Addr32,
332 ksVariation_32,
333 ksVariation_32f,
334 ksVariation_32_Flat,
335 ksVariation_32f_Flat,
336 ksVariation_32_Addr16,
337 ksVariation_32f_Addr16,
338 );
339 kasVariationsWithAddressNot64 = (
340 ksVariation_16,
341 ksVariation_16f,
342 ksVariation_16_Addr32,
343 ksVariation_16f_Addr32,
344 ksVariation_16_Pre386,
345 ksVariation_16f_Pre386,
346 ksVariation_32,
347 ksVariation_32f,
348 ksVariation_32_Flat,
349 ksVariation_32f_Flat,
350 ksVariation_32_Addr16,
351 ksVariation_32f_Addr16,
352 );
353 kasVariationsWithAddressOnly64 = (
354 ksVariation_64,
355 ksVariation_64f,
356 ksVariation_64_FsGs,
357 ksVariation_64f_FsGs,
358 ksVariation_64_Addr32,
359 ksVariation_64f_Addr32,
360 );
361 kasVariationsOnlyPre386 = (
362 ksVariation_16_Pre386,
363 ksVariation_16f_Pre386,
364 );
365 kasVariationsEmitOrder = (
366 ksVariation_Default,
367 ksVariation_64,
368 ksVariation_64f,
369 ksVariation_64_Jmp,
370 ksVariation_64f_Jmp,
371 ksVariation_64_NoJmp,
372 ksVariation_64f_NoJmp,
373 ksVariation_64_FsGs,
374 ksVariation_64f_FsGs,
375 ksVariation_32_Flat,
376 ksVariation_32f_Flat,
377 ksVariation_32,
378 ksVariation_32f,
379 ksVariation_32_Jmp,
380 ksVariation_32f_Jmp,
381 ksVariation_32_NoJmp,
382 ksVariation_32f_NoJmp,
383 ksVariation_16,
384 ksVariation_16f,
385 ksVariation_16_Jmp,
386 ksVariation_16f_Jmp,
387 ksVariation_16_NoJmp,
388 ksVariation_16f_NoJmp,
389 ksVariation_16_Addr32,
390 ksVariation_16f_Addr32,
391 ksVariation_16_Pre386,
392 ksVariation_16f_Pre386,
393 ksVariation_16_Pre386_Jmp,
394 ksVariation_16f_Pre386_Jmp,
395 ksVariation_16_Pre386_NoJmp,
396 ksVariation_16f_Pre386_NoJmp,
397 ksVariation_32_Addr16,
398 ksVariation_32f_Addr16,
399 ksVariation_64_Addr32,
400 ksVariation_64f_Addr32,
401 );
402 kdVariationNames = {
403 ksVariation_Default: 'defer-to-cimpl',
404 ksVariation_16: '16-bit',
405 ksVariation_16f: '16-bit w/ eflag checking and clearing',
406 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
407 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
408 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
409 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
410 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
411 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
412 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
413 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
414 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
415 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
416 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump not taken',
417 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump not taken',
418 ksVariation_32: '32-bit',
419 ksVariation_32f: '32-bit w/ eflag checking and clearing',
420 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
421 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
422 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
423 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
424 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
425 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
426 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
427 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
428 ksVariation_64: '64-bit',
429 ksVariation_64f: '64-bit w/ eflag checking and clearing',
430 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
431 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
432 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
433 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
434 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
435 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
436 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
437 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
438 };
439 kdVariationsWithEflagsCheckingAndClearing = {
440 ksVariation_16f: True,
441 ksVariation_16f_Jmp: True,
442 ksVariation_16f_NoJmp: True,
443 ksVariation_16f_Addr32: True,
444 ksVariation_16f_Pre386: True,
445 ksVariation_16f_Pre386_Jmp: True,
446 ksVariation_16f_Pre386_NoJmp: True,
447 ksVariation_32f: True,
448 ksVariation_32f_Jmp: True,
449 ksVariation_32f_NoJmp: True,
450 ksVariation_32f_Flat: True,
451 ksVariation_32f_Addr16: True,
452 ksVariation_64f: True,
453 ksVariation_64f_Jmp: True,
454 ksVariation_64f_NoJmp: True,
455 ksVariation_64f_FsGs: True,
456 ksVariation_64f_Addr32: True,
457 };
458 kdVariationsOnly64NoFlags = {
459 ksVariation_64: True,
460 ksVariation_64_Jmp: True,
461 ksVariation_64_NoJmp: True,
462 ksVariation_64_FsGs: True,
463 ksVariation_64_Addr32: True,
464 };
465 kdVariationsOnly64WithFlags = {
466 ksVariation_64f: True,
467 ksVariation_64f_Jmp: True,
468 ksVariation_64f_NoJmp: True,
469 ksVariation_64f_FsGs: True,
470 ksVariation_64f_Addr32: True,
471 };
472 kdVariationsOnlyPre386NoFlags = {
473 ksVariation_16_Pre386: True,
474 ksVariation_16_Pre386_Jmp: True,
475 ksVariation_16_Pre386_NoJmp: True,
476 };
477 kdVariationsOnlyPre386WithFlags = {
478 ksVariation_16f_Pre386: True,
479 ksVariation_16f_Pre386_Jmp: True,
480 ksVariation_16f_Pre386_NoJmp: True,
481 };
482 kdVariationsWithFlatAddress = {
483 ksVariation_32_Flat: True,
484 ksVariation_32f_Flat: True,
485 ksVariation_64: True,
486 ksVariation_64f: True,
487 ksVariation_64_Addr32: True,
488 ksVariation_64f_Addr32: True,
489 };
490 kdVariationsWithFlatStackAddress = {
491 ksVariation_32_Flat: True,
492 ksVariation_32f_Flat: True,
493 ksVariation_64: True,
494 ksVariation_64f: True,
495 ksVariation_64_FsGs: True,
496 ksVariation_64f_FsGs: True,
497 ksVariation_64_Addr32: True,
498 ksVariation_64f_Addr32: True,
499 };
500 kdVariationsWithFlat64StackAddress = {
501 ksVariation_64: True,
502 ksVariation_64f: True,
503 ksVariation_64_FsGs: True,
504 ksVariation_64f_FsGs: True,
505 ksVariation_64_Addr32: True,
506 ksVariation_64f_Addr32: True,
507 };
508 kdVariationsWithFlatAddr16 = {
509 ksVariation_16: True,
510 ksVariation_16f: True,
511 ksVariation_16_Pre386: True,
512 ksVariation_16f_Pre386: True,
513 ksVariation_32_Addr16: True,
514 ksVariation_32f_Addr16: True,
515 };
516 kdVariationsWithFlatAddr32No64 = {
517 ksVariation_16_Addr32: True,
518 ksVariation_16f_Addr32: True,
519 ksVariation_32: True,
520 ksVariation_32f: True,
521 ksVariation_32_Flat: True,
522 ksVariation_32f_Flat: True,
523 };
524 kdVariationsWithAddressOnly64 = {
525 ksVariation_64: True,
526 ksVariation_64f: True,
527 ksVariation_64_FsGs: True,
528 ksVariation_64f_FsGs: True,
529 ksVariation_64_Addr32: True,
530 ksVariation_64f_Addr32: True,
531 };
532 kdVariationsWithConditional = {
533 ksVariation_16_Jmp: True,
534 ksVariation_16_NoJmp: True,
535 ksVariation_16_Pre386_Jmp: True,
536 ksVariation_16_Pre386_NoJmp: True,
537 ksVariation_32_Jmp: True,
538 ksVariation_32_NoJmp: True,
539 ksVariation_64_Jmp: True,
540 ksVariation_64_NoJmp: True,
541 ksVariation_16f_Jmp: True,
542 ksVariation_16f_NoJmp: True,
543 ksVariation_16f_Pre386_Jmp: True,
544 ksVariation_16f_Pre386_NoJmp: True,
545 ksVariation_32f_Jmp: True,
546 ksVariation_32f_NoJmp: True,
547 ksVariation_64f_Jmp: True,
548 ksVariation_64f_NoJmp: True,
549 };
550 kdVariationsWithConditionalNoJmp = {
551 ksVariation_16_NoJmp: True,
552 ksVariation_16_Pre386_NoJmp: True,
553 ksVariation_32_NoJmp: True,
554 ksVariation_64_NoJmp: True,
555 ksVariation_16f_NoJmp: True,
556 ksVariation_16f_Pre386_NoJmp: True,
557 ksVariation_32f_NoJmp: True,
558 ksVariation_64f_NoJmp: True,
559 };
560 kdVariationsOnlyPre386 = {
561 ksVariation_16_Pre386: True,
562 ksVariation_16f_Pre386: True,
563 ksVariation_16_Pre386_Jmp: True,
564 ksVariation_16f_Pre386_Jmp: True,
565 ksVariation_16_Pre386_NoJmp: True,
566 ksVariation_16f_Pre386_NoJmp: True,
567 };
568 ## @}
569
570 ## IEM_CIMPL_F_XXX flags that we know.
571 ## The value indicates whether it terminates the TB or not. The goal is to
572 ## improve the recompiler so all but END_TB will be False.
573 ##
574 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
575 kdCImplFlags = {
576 'IEM_CIMPL_F_MODE': False,
577 'IEM_CIMPL_F_BRANCH_DIRECT': False,
578 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
579 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
580 'IEM_CIMPL_F_BRANCH_FAR': True,
581 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
582 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
583 'IEM_CIMPL_F_BRANCH_STACK': False,
584 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
585 'IEM_CIMPL_F_RFLAGS': False,
586 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
587 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
588 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
589 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
590 'IEM_CIMPL_F_STATUS_FLAGS': False,
591 'IEM_CIMPL_F_VMEXIT': False,
592 'IEM_CIMPL_F_FPU': False,
593 'IEM_CIMPL_F_REP': False,
594 'IEM_CIMPL_F_IO': False,
595 'IEM_CIMPL_F_END_TB': True,
596 'IEM_CIMPL_F_XCPT': True,
597 'IEM_CIMPL_F_CALLS_CIMPL': False,
598 'IEM_CIMPL_F_CALLS_AIMPL': False,
599 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
600 'IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE': False,
601 };
602
603 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
604 self.oParent = oThreadedFunction # type: ThreadedFunction
605 ##< ksVariation_Xxxx.
606 self.sVariation = sVariation
607
608 ## Threaded function parameter references.
609 self.aoParamRefs = [] # type: List[ThreadedParamRef]
610 ## Unique parameter references.
611 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
612 ## Minimum number of parameters to the threaded function.
613 self.cMinParams = 0;
614
615 ## List/tree of statements for the threaded function.
616 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
617
618 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
619 self.iEnumValue = -1;
620
621 ## Native recompilation details for this variation.
622 self.oNativeRecomp = None;
623
624 def getIndexName(self):
625 sName = self.oParent.oMcBlock.sFunction;
626 if sName.startswith('iemOp_'):
627 sName = sName[len('iemOp_'):];
628 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
629
630 def getThreadedFunctionName(self):
631 sName = self.oParent.oMcBlock.sFunction;
632 if sName.startswith('iemOp_'):
633 sName = sName[len('iemOp_'):];
634 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
635
636 def getNativeFunctionName(self):
637 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
638
639 def getLivenessFunctionName(self):
640 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
641
642 def getShortName(self):
643 sName = self.oParent.oMcBlock.sFunction;
644 if sName.startswith('iemOp_'):
645 sName = sName[len('iemOp_'):];
646 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
647
648 def getThreadedFunctionStatisticsName(self):
649 sName = self.oParent.oMcBlock.sFunction;
650 if sName.startswith('iemOp_'):
651 sName = sName[len('iemOp_'):];
652
653 sVarNm = self.sVariation;
654 if sVarNm:
655 if sVarNm.startswith('_'):
656 sVarNm = sVarNm[1:];
657 if sVarNm.endswith('_Jmp'):
658 sVarNm = sVarNm[:-4];
659 sName += '_Jmp';
660 elif sVarNm.endswith('_NoJmp'):
661 sVarNm = sVarNm[:-6];
662 sName += '_NoJmp';
663 else:
664 sVarNm = 'DeferToCImpl';
665
666 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
667
668 def isWithFlagsCheckingAndClearingVariation(self):
669 """
670 Checks if this is a variation that checks and clears EFLAGS.
671 """
672 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
673
674 #
675 # Analysis and code morphing.
676 #
677
678 def raiseProblem(self, sMessage):
679 """ Raises a problem. """
680 self.oParent.raiseProblem(sMessage);
681
682 def warning(self, sMessage):
683 """ Emits a warning. """
684 self.oParent.warning(sMessage);
685
686 def analyzeReferenceToType(self, sRef):
687 """
688 Translates a variable or structure reference to a type.
689 Returns type name.
690 Raises exception if unable to figure it out.
691 """
692 ch0 = sRef[0];
693 if ch0 == 'u':
694 if sRef.startswith('u32'):
695 return 'uint32_t';
696 if sRef.startswith('u8') or sRef == 'uReg':
697 return 'uint8_t';
698 if sRef.startswith('u64'):
699 return 'uint64_t';
700 if sRef.startswith('u16'):
701 return 'uint16_t';
702 elif ch0 == 'b':
703 return 'uint8_t';
704 elif ch0 == 'f':
705 return 'bool';
706 elif ch0 == 'i':
707 if sRef.startswith('i8'):
708 return 'int8_t';
709 if sRef.startswith('i16'):
710 return 'int16_t';
711 if sRef.startswith('i32'):
712 return 'int32_t';
713 if sRef.startswith('i64'):
714 return 'int64_t';
715 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
716 return 'uint8_t';
717 elif ch0 == 'p':
718 if sRef.find('-') < 0:
719 return 'uintptr_t';
720 if sRef.startswith('pVCpu->iem.s.'):
721 sField = sRef[len('pVCpu->iem.s.') : ];
722 if sField in g_kdIemFieldToType:
723 if g_kdIemFieldToType[sField][0]:
724 return g_kdIemFieldToType[sField][0];
725 elif ch0 == 'G' and sRef.startswith('GCPtr'):
726 return 'uint64_t';
727 elif ch0 == 'e':
728 if sRef == 'enmEffOpSize':
729 return 'IEMMODE';
730 elif ch0 == 'o':
731 if sRef.startswith('off32'):
732 return 'uint32_t';
733 elif sRef == 'cbFrame': # enter
734 return 'uint16_t';
735 elif sRef == 'cShift': ## @todo risky
736 return 'uint8_t';
737
738 self.raiseProblem('Unknown reference: %s' % (sRef,));
739 return None; # Shut up pylint 2.16.2.
740
741 def analyzeCallToType(self, sFnRef):
742 """
743 Determines the type of an indirect function call.
744 """
745 assert sFnRef[0] == 'p';
746
747 #
748 # Simple?
749 #
750 if sFnRef.find('-') < 0:
751 oDecoderFunction = self.oParent.oMcBlock.oFunction;
752
753 # Try the argument list of the function definition macro invocation first.
754 iArg = 2;
755 while iArg < len(oDecoderFunction.asDefArgs):
756 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
757 return oDecoderFunction.asDefArgs[iArg - 1];
758 iArg += 1;
759
760 # Then check for a line that includes the word and looks like a variable declaration.
761 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
762 for sLine in oDecoderFunction.asLines:
763 oMatch = oRe.match(sLine);
764 if oMatch:
765 if not oMatch.group(1).startswith('const'):
766 return oMatch.group(1);
767 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
768
769 #
770 # Deal with the pImpl->pfnXxx:
771 #
772 elif sFnRef.startswith('pImpl->pfn'):
773 sMember = sFnRef[len('pImpl->') : ];
774 sBaseType = self.analyzeCallToType('pImpl');
775 offBits = sMember.rfind('U') + 1;
776 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
777 if sBaseType == 'PCIEMOPBINTODOSIZES': return 'PFNIEMAIMPLBINTODOU' + sMember[offBits:];
778 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
779 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
780 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
781 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
782 if sBaseType == 'PCIEMOPMEDIAF2': return 'PFNIEMAIMPLMEDIAF2U' + sMember[offBits:];
783 if sBaseType == 'PCIEMOPMEDIAF2IMM8': return 'PFNIEMAIMPLMEDIAF2U' + sMember[offBits:] + 'IMM8';
784 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
785 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
786 if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
787 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
788 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
789 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
790
791 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
792
793 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
794 return None; # Shut up pylint 2.16.2.
795
796 def analyze8BitGRegStmt(self, oStmt):
797 """
798 Gets the 8-bit general purpose register access details of the given statement.
799 ASSUMES the statement is one accessing an 8-bit GREG.
800 """
801 idxReg = 0;
802 if ( oStmt.sName.find('_FETCH_') > 0
803 or oStmt.sName.find('_REF_') > 0
804 or oStmt.sName.find('_TO_LOCAL') > 0):
805 idxReg = 1;
806
807 sRegRef = oStmt.asParams[idxReg];
808 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
809 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
810 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
811 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
812 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
813 else:
814 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)) ? (%s) : (%s) + 12)' \
815 % (sRegRef, sRegRef, sRegRef,);
816
817 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
818 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
819 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
820 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
821 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
822 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
823 else:
824 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
825 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
826 sStdRef = 'bOther8Ex';
827
828 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
829 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
830 return (idxReg, sOrgExpr, sStdRef);
831
832
833 ## Maps memory related MCs to info for FLAT conversion.
834 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
835 ## segmentation checking for every memory access. Only applied to accesses
836 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment;
837 ## the latter (CS) is just to keep things simple (we could safely fetch via
838 ## it, but only in 64-bit mode could we safely write via it, IIRC).
839 kdMemMcToFlatInfo = {
840 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
841 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
842 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
843 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
844 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
845 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
846 'IEM_MC_FETCH_MEM_I16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16_DISP' ),
847 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
848 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
849 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
850 'IEM_MC_FETCH_MEM_I32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32_DISP' ),
851 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
852 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
853 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
854 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
855 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
856 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
857 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
858 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
859 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
860 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
861 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
862 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
863 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
864 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
865 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
866 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
867 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
868 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
869 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
870 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
871 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
872 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
873 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
874 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
875 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
876 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
877 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
878 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
879 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
880 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
881 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
882 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
883 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
884 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
885 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
886 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
887 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
888 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
889 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
890 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
891 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
892 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
893 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX_AND_YREG_YMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX_AND_YREG_YMM' ),
894 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
895 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
896 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
897 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
898 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
899 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
900 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
901 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
902 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
903 'IEM_MC_STORE_MEM_U128_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
904 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
905 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
906 'IEM_MC_STORE_MEM_U256_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
907 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
908 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
909 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
910 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
911 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
912 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
913 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
914 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
915 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
916 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
917 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
918 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
919 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
920 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
921 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
922 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
923 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
924 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
925 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
926 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
927 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
928 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
929 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
930 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
931 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
932 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
933 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
934 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
935 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
936 };
937
938 kdMemMcToFlatInfoStack = {
939 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
940 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
941 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
942 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
943 'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
944 'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
945 'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
946 };
947
948 kdThreadedCalcRmEffAddrMcByVariation = {
949 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
950 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
951 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
952 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
953 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
954 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
955 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
956 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
957 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
958 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
959 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
960 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
961 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
962 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
963 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
964 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
965 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
966 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
967 };
968
969 def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
970 """
971 Transforms (copy) the statements into those for the threaded function.
972
973 Returns list/tree of statements (aoStmts is not modified) and the new
974 iParamRef value.
975 """
976 #
977 # We'll be traversing aoParamRefs in parallel to the statements, so we
978 # must match the traversal in analyzeFindThreadedParamRefs exactly.
979 #
980 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
981 aoThreadedStmts = [];
982 for oStmt in aoStmts:
983 # Skip C++ statements that are purely related to decoding.
984 if not oStmt.isCppStmt() or not oStmt.fDecode:
985 # Copy the statement. Make a deep copy to make sure we've got our own
986 # copies of all instance variables, even if a bit overkill at the moment.
987 oNewStmt = copy.deepcopy(oStmt);
988 aoThreadedStmts.append(oNewStmt);
989 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
990
991 # If the statement has parameter references, process the relevant parameters.
992 # We grab the references relevant to this statement and apply them in reverse order.
993 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
994 iParamRefFirst = iParamRef;
995 while True:
996 iParamRef += 1;
997 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
998 break;
999
1000 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
1001 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
1002 oCurRef = self.aoParamRefs[iCurRef];
1003 if oCurRef.iParam is not None:
1004 assert oCurRef.oStmt == oStmt;
1005 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
1006 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
1007 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
1008 or oCurRef.fCustomRef), \
1009 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
1010 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
1011 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
1012 + oCurRef.sNewName \
1013 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
1014
1015 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
1016 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1017 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
1018 assert len(oNewStmt.asParams) == 3;
1019
1020 if self.sVariation in self.kdVariationsWithFlatAddr16:
1021 oNewStmt.asParams = [
1022 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
1023 ];
1024 else:
1025 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
1026 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
1027 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
1028
1029 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
1030 oNewStmt.asParams = [
1031 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
1032 ];
1033 else:
1034 oNewStmt.asParams = [
1035 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
1036 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
1037 ];
1038 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
1039 elif ( oNewStmt.sName
1040 in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
1041 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
1042 'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH',
1043 'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH', 'IEM_MC_REL_CALL_S64_AND_FINISH',
1044 'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH', 'IEM_MC_IND_CALL_U64_AND_FINISH',
1045 'IEM_MC_RETN_AND_FINISH',)):
1046 if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1047 'IEM_MC_SET_RIP_U64_AND_FINISH', ):
1048 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
1049 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
1050 and self.sVariation not in self.kdVariationsOnlyPre386):
1051 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
1052 oNewStmt.sName += '_THREADED';
1053 if self.sVariation in self.kdVariationsOnly64NoFlags:
1054 oNewStmt.sName += '_PC64';
1055 elif self.sVariation in self.kdVariationsOnly64WithFlags:
1056 oNewStmt.sName += '_PC64_WITH_FLAGS';
1057 elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
1058 oNewStmt.sName += '_PC16';
1059 elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
1060 oNewStmt.sName += '_PC16_WITH_FLAGS';
1061 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
1062 assert self.sVariation != self.ksVariation_Default;
1063 oNewStmt.sName += '_PC32';
1064 else:
1065 oNewStmt.sName += '_PC32_WITH_FLAGS';
1066
1067 # This is making the wrong branch of conditionals break out of the TB.
1068 if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
1069 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
1070 sExitTbStatus = 'VINF_SUCCESS';
1071 if self.sVariation in self.kdVariationsWithConditional:
1072 if self.sVariation in self.kdVariationsWithConditionalNoJmp:
1073 if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1074 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1075 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1076 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1077 oNewStmt.asParams.append(sExitTbStatus);
1078
1079 # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
1080 if 'IEM_MC_ASSERT_EFLAGS' in dState:
1081 aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
1082 iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
1083 del dState['IEM_MC_ASSERT_EFLAGS'];
1084
1085 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
1086 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
1087 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
1088 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
1089 oNewStmt.sName += '_THREADED';
1090
1091 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
1092 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1093 oNewStmt.sName += '_THREADED';
1094 oNewStmt.idxFn += 1;
1095 oNewStmt.idxParams += 1;
1096 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
1097
1098 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
1099 elif ( self.sVariation in self.kdVariationsWithFlatAddress
1100 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
1101 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
1102 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
1103 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
1104 if idxEffSeg != -1:
1105 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
1106 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
1107 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
1108 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
1109 oNewStmt.asParams.pop(idxEffSeg);
1110 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
1111
1112 # ... PUSH and POP also need flat variants, but these differ a little.
1113 elif ( self.sVariation in self.kdVariationsWithFlatStackAddress
1114 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
1115 or oNewStmt.sName.startswith('IEM_MC_POP'))):
1116 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
1117 self.kdVariationsWithFlat64StackAddress)];
1118
1119 # Add EFLAGS usage annotations to relevant MCs.
1120 elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS',
1121 'IEM_MC_FETCH_EFLAGS'):
1122 oInstruction = self.oParent.oMcBlock.oInstruction;
1123 oNewStmt.sName += '_EX';
1124 oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle()); # Shall crash and burn if oInstruction is
1125 oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.
1126
1127 # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
1128 if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
1129 dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;
1130
1131 # Process branches of conditionals recursively.
1132 if isinstance(oStmt, iai.McStmtCond):
1133 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
1134 iParamRef, iLevel + 1);
1135 if oStmt.aoElseBranch:
1136 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
1137 dState, iParamRef, iLevel + 1);
1138
1139 # Insert an MC so we can assert the correctness of modified flags annotations
1140 # on IEM_MC_REF_EFLAGS if it goes out of scope.
1141 if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
1142 aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
1143 del dState['IEM_MC_ASSERT_EFLAGS'];
1144
1145 return (aoThreadedStmts, iParamRef);
1146
1147
1148 def analyzeConsolidateThreadedParamRefs(self):
1149 """
1150 Consolidate threaded function parameter references into a dictionary
1151 with lists of the references to each variable/field.
1152 """
1153 # Gather unique parameters.
1154 self.dParamRefs = {};
1155 for oRef in self.aoParamRefs:
1156 if oRef.sStdRef not in self.dParamRefs:
1157 self.dParamRefs[oRef.sStdRef] = [oRef,];
1158 else:
1159 self.dParamRefs[oRef.sStdRef].append(oRef);
1160
1161 # Generate names for them for use in the threaded function.
1162 dParamNames = {};
1163 for sName, aoRefs in self.dParamRefs.items():
1164 # Morph the reference expression into a name.
1165 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1166 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1167 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1168 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1169 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1170 elif sName.startswith('IEM_GET_IMM8_REG'): sName = 'bImm8Reg';
1171 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1172 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1173 else:
1174 sName += 'P';
1175
1176 # Ensure it's unique.
1177 if sName in dParamNames:
1178 for i in range(10):
1179 if sName + str(i) not in dParamNames:
1180 sName += str(i);
1181 break;
1182 dParamNames[sName] = True;
1183
1184 # Update all the references.
1185 for oRef in aoRefs:
1186 oRef.sNewName = sName;
1187
1188 # Organize them by size too for the purpose of optimizing them.
1189 dBySize = {} # type: Dict[int, List[str]]
1190 for sStdRef, aoRefs in self.dParamRefs.items():
1191 if aoRefs[0].sType[0] != 'P':
1192 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1193 assert(cBits <= 64);
1194 else:
1195 cBits = 64;
1196
1197 if cBits not in dBySize:
1198 dBySize[cBits] = [sStdRef,]
1199 else:
1200 dBySize[cBits].append(sStdRef);
1201
1202 # Pack the parameters as best as we can, starting with the largest ones
1203 # and ASSUMING a 64-bit parameter size.
1204 self.cMinParams = 0;
1205 offNewParam = 0;
1206 for cBits in sorted(dBySize.keys(), reverse = True):
1207 for sStdRef in dBySize[cBits]:
1208 if offNewParam == 0 or offNewParam + cBits > 64:
1209 self.cMinParams += 1;
1210 offNewParam = cBits;
1211 else:
1212 offNewParam += cBits;
1213 assert(offNewParam <= 64);
1214
1215 for oRef in self.dParamRefs[sStdRef]:
1216 oRef.iNewParam = self.cMinParams - 1;
1217 oRef.offNewParam = offNewParam - cBits;
1218
1219 # Currently there are a few that require 4 parameters; list these so we can figure out why:
1220 if self.cMinParams >= 4:
1221 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1222 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1223
1224 return True;
1225
1226 ksHexDigits = '0123456789abcdefABCDEF';
1227
1228 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1229 """
1230 Scans the statements for things that have to be passed on to the threaded
1231 function (populates self.aoParamRefs).
1232 """
1233 for oStmt in aoStmts:
1234 # Some statements we can skip altogether.
1235 if isinstance(oStmt, iai.McCppPreProc):
1236 continue;
1237 if oStmt.isCppStmt() and oStmt.fDecode:
1238 continue;
1239 if oStmt.sName in ('IEM_MC_BEGIN',):
1240 continue;
1241
1242 if isinstance(oStmt, iai.McStmtVar):
1243 if oStmt.sValue is None:
1244 continue;
1245 aiSkipParams = { 0: True, 1: True, 3: True };
1246 else:
1247 aiSkipParams = {};
1248
1249 # Several statements have implicit parameters and some have different parameters.
1250 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1251 'IEM_MC_REL_JMP_S32_AND_FINISH',
1252 'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH',
1253 'IEM_MC_REL_CALL_S64_AND_FINISH',
1254 'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH',
1255 'IEM_MC_IND_CALL_U64_AND_FINISH',
1256 'IEM_MC_RETN_AND_FINISH',
1257 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3',
1258 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1259 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1260 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1261 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1262
1263 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
1264 and self.sVariation not in self.kdVariationsOnlyPre386):
1265 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1266
1267 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1268 # This is being pretty presumptive about bRm always being the RM byte...
1269 assert len(oStmt.asParams) == 3;
1270 assert oStmt.asParams[1] == 'bRm';
1271
1272 if self.sVariation in self.kdVariationsWithFlatAddr16:
1273 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1274 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1275 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1276 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1277 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1278 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1279 'uint8_t', oStmt, sStdRef = 'bSib'));
1280 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1281 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1282 else:
1283 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1284 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1285 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1286 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1287 'uint8_t', oStmt, sStdRef = 'bSib'));
1288 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1289 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1290 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1291 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1292 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1293
1294 # 8-bit register accesses need to have their index argument reworked to take REX into account.
1295 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1296 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1297 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
1298 aiSkipParams[idxReg] = True; # Skip the parameter below.
1299
1300 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1301 if ( self.sVariation in self.kdVariationsWithFlatAddress
1302 and oStmt.sName in self.kdMemMcToFlatInfo
1303 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1304 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1305
1306 # Inspect the target of calls to see if we need to pass down a
1307 # function pointer or function table pointer for it to work.
1308 if isinstance(oStmt, iai.McStmtCall):
1309 if oStmt.sFn[0] == 'p':
1310 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1311 elif ( oStmt.sFn[0] != 'i'
1312 and not oStmt.sFn.startswith('RT_CONCAT3')
1313 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1314 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1315 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1316 aiSkipParams[oStmt.idxFn] = True;
1317
1318 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1319 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1320 assert oStmt.idxFn == 2;
1321 aiSkipParams[0] = True;
1322
1323 # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
1324 if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
1325 aiSkipParams[0] = True;
1326
1327
1328 # Check all the parameters for bogus references.
1329 for iParam, sParam in enumerate(oStmt.asParams):
1330 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1331 # The parameter may contain a C expression, so we have to try
1332 # extract the relevant bits, i.e. variables and fields while
1333 # ignoring operators and parentheses.
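# E.g. for a (hypothetical) parameter like 'pVCpu->iem.s.uRexReg + 1' only the
# 'pVCpu->iem.s.uRexReg' part is picked up as a candidate reference, since '.' and
# '->' chains are scanned as a single reference.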
1334 offParam = 0;
1335 while offParam < len(sParam):
1336 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1337 ch = sParam[offParam];
1338 if ch.isalpha() or ch == '_':
1339 offStart = offParam;
1340 offParam += 1;
1341 while offParam < len(sParam):
1342 ch = sParam[offParam];
1343 if not ch.isalnum() and ch != '_' and ch != '.':
1344 if ch != '-' or sParam[offParam + 1] != '>':
1345 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1346 if ( ch == '('
1347 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1348 offParam += len('(pVM)->') - 1;
1349 else:
1350 break;
1351 offParam += 1;
1352 offParam += 1;
1353 sRef = sParam[offStart : offParam];
1354
1355 # For register references, we pass the full register indexes instead, as macros
1356 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg and the
1357 # threaded function will be more efficient if we just pass the register index
1358 # as a 4-bit param.
1359 if ( sRef.startswith('IEM_GET_MODRM')
1360 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV')
1361 or sRef.startswith('IEM_GET_IMM8_REG') ):
1362 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1363 if sParam[offParam] != '(':
1364 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1365 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1366 if asMacroParams is None:
1367 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1368 offParam = offCloseParam + 1;
1369 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1370 oStmt, iParam, offStart));
1371
1372 # We can skip known variables.
1373 elif sRef in self.oParent.dVariables:
1374 pass;
1375
1376 # Skip certain macro invocations.
1377 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1378 'IEM_GET_GUEST_CPU_FEATURES',
1379 'IEM_IS_GUEST_CPU_AMD',
1380 'IEM_IS_16BIT_CODE',
1381 'IEM_IS_32BIT_CODE',
1382 'IEM_IS_64BIT_CODE',
1383 ):
1384 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1385 if sParam[offParam] != '(':
1386 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1387 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1388 if asMacroParams is None:
1389 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1390 offParam = offCloseParam + 1;
1391
1392 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1393 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1394 'IEM_IS_16BIT_CODE',
1395 'IEM_IS_32BIT_CODE',
1396 'IEM_IS_64BIT_CODE',
1397 ):
1398 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1399 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1400 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1401 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1402 offParam += 1;
1403
1404 # Skip constants, globals, types (casts), sizeof and macros.
1405 elif ( sRef.startswith('IEM_OP_PRF_')
1406 or sRef.startswith('IEM_ACCESS_')
1407 or sRef.startswith('IEMINT_')
1408 or sRef.startswith('X86_GREG_')
1409 or sRef.startswith('X86_SREG_')
1410 or sRef.startswith('X86_EFL_')
1411 or sRef.startswith('X86_FSW_')
1412 or sRef.startswith('X86_FCW_')
1413 or sRef.startswith('X86_XCPT_')
1414 or sRef.startswith('IEMMODE_')
1415 or sRef.startswith('IEM_F_')
1416 or sRef.startswith('IEM_CIMPL_F_')
1417 or sRef.startswith('g_')
1418 or sRef.startswith('iemAImpl_')
1419 or sRef.startswith('kIemNativeGstReg_')
1420 or sRef.startswith('RT_ARCH_VAL_')
1421 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1422 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1423 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
1424 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1425 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1426 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1427 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1428 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1429 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1430 'NIL_RTGCPTR',) ):
1431 pass;
1432
1433 # Otherwise, parameterize it:
1434 # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1435 elif ( ( '.' not in sRef
1436 and '-' not in sRef
1437 and sRef not in ('pVCpu', ) )
1438 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1439 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1440 oStmt, iParam, offStart));
1441 # Number.
1442 elif ch.isdigit():
1443 if ( ch == '0'
1444 and offParam + 2 <= len(sParam)
1445 and sParam[offParam + 1] in 'xX'
1446 and sParam[offParam + 2] in self.ksHexDigits ):
1447 offParam += 2;
1448 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1449 offParam += 1;
1450 else:
1451 while offParam < len(sParam) and sParam[offParam].isdigit():
1452 offParam += 1;
1453 # Comment?
1454 elif ( ch == '/'
1455 and offParam + 4 <= len(sParam)
1456 and sParam[offParam + 1] == '*'):
1457 offParam += 2;
1458 offNext = sParam.find('*/', offParam);
1459 if offNext < offParam:
1460 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1461 offParam = offNext + 2;
1462 # Whatever else.
1463 else:
1464 offParam += 1;
1465
1466 # Traverse the branches of conditionals.
1467 if isinstance(oStmt, iai.McStmtCond):
1468 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1469 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1470 return True;
1471
1472 def analyzeVariation(self, aoStmts):
1473 """
1474 2nd part of the analysis, done on each variation.
1475
1476 The variations may differ in parameter requirements and will end up with
1477 slightly different MC sequences. Thus this is done on each individually.
1478
1479 Returns dummy True - raises exception on trouble.
1480 """
1481 # Now scan the code for variables and field references that need to
1482 # be passed to the threaded function because they are related to the
1483 # instruction decoding.
1484 self.analyzeFindThreadedParamRefs(aoStmts);
1485 self.analyzeConsolidateThreadedParamRefs();
1486
1487 # Morph the statement stream for the block into what we'll be using in the threaded function.
1488 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1489 if iParamRef != len(self.aoParamRefs):
1490 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1491
1492 return True;
1493
1494 def emitThreadedCallStmtsForVariant(self, cchIndent, fTbLookupTable = False, sCallVarNm = None):
1495 """
1496 Produces generic C++ statements that emit a call to the threaded function
1497 variation and any subsequent checks that may be necessary after that.
1498
1499 The sCallVarNm is the name of the variable with the threaded function
1500 to call. This is for the case where all the variations have the same
1501 parameters and only the threaded function number differs.
1502
1503 The fTbLookupTable parameter can either be False, True or whatever else
1504 (like 2) - in the latter case this means a large lookup table.
1505 """
1506 aoStmts = [
1507 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1508 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1509 cchIndent = cchIndent), # Scope and a hook for various stuff.
1510 ];
1511
1512 # The call to the threaded function.
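# Each ready-made argument is an OR of shifted, zero-extended source expressions,
# e.g. something like '(uint64_t)(u16Disp) | ((uint64_t)(bRm) << 16)' (illustrative
# only; the exact fragments depend on the parameter packing done earlier).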
1513 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1514 for iParam in range(self.cMinParams):
1515 asFrags = [];
1516 for aoRefs in self.dParamRefs.values():
1517 oRef = aoRefs[0];
1518 if oRef.iNewParam == iParam:
1519 sCast = '(uint64_t)';
1520 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1521 sCast = '(uint64_t)(u' + oRef.sType + ')';
1522 if oRef.offNewParam == 0:
1523 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1524 else:
1525 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1526 assert asFrags;
1527 asCallArgs.append(' | '.join(asFrags));
1528
1529 if fTbLookupTable is False:
1530 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,),
1531 asCallArgs, cchIndent = cchIndent));
1532 else:
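# The extra leading argument ('0' when fTbLookupTable is plain True, '1' otherwise)
# tells the macro whether a large TB lookup table is wanted (see the docstring note).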
1533 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_%s' % (len(asCallArgs) - 1,),
1534 ['0' if fTbLookupTable is True else '1',] + asCallArgs, cchIndent = cchIndent));
1535
1536 # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
1537 # emit this mode check from the compilation loop. On the
1538 # plus side, this means we eliminate an unnecessary call at
1539 # the end of the TB. :-)
1540 ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1541 ## mask and maybe emit additional checks.
1542 #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1543 # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1544 # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1545 # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1546 # cchIndent = cchIndent));
1547
1548 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1549 if not sCImplFlags:
1550 sCImplFlags = '0';
1551 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1552
1553 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1554 # indicates we should do so.
1555 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1556 asEndTbFlags = [];
1557 asTbBranchedFlags = [];
1558 for sFlag in self.oParent.dsCImplFlags:
1559 if self.kdCImplFlags[sFlag] is True:
1560 asEndTbFlags.append(sFlag);
1561 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1562 asTbBranchedFlags.append(sFlag);
1563 if ( asTbBranchedFlags
1564 and ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
1565 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
1566 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1567 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1568 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1569 if asEndTbFlags:
1570 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1571 cchIndent = cchIndent));
1572
1573 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1574 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1575
1576 return aoStmts;
1577
1578
1579class ThreadedFunction(object):
1580 """
1581 A threaded function.
1582 """
1583
1584 def __init__(self, oMcBlock: iai.McBlock) -> None:
1585 self.oMcBlock = oMcBlock # type: iai.McBlock
1586 # The remaining fields are only useful after analyze() has been called:
1587 ## Variations for this block. There is at least one.
1588 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1589 ## Variation dictionary containing the same as aoVariations.
1590 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1591 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1592 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1593 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1594 ## and those determined by analyzeCodeOperation().
1595 self.dsCImplFlags = {} # type: Dict[str, bool]
1596 ## The unique sub-name for this threaded function.
1597 self.sSubName = '';
1598 #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
1599 # self.sSubName = '_%s' % (oMcBlock.iInFunction);
1600
1601 @staticmethod
1602 def dummyInstance():
1603 """ Gets a dummy instance. """
1604 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1605 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1606
1607 def hasWithFlagsCheckingAndClearingVariation(self):
1608 """
1609 Check if there are one or more eflags checking and clearing
1610 variations for this threaded function.
1611 """
1612 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1613 if sVarWithFlags in self.dVariations:
1614 return True;
1615 return False;
1616
1617 #
1618 # Analysis and code morphing.
1619 #
1620
1621 def raiseProblem(self, sMessage):
1622 """ Raises a problem. """
1623 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1624
1625 def error(self, sMessage, oGenerator):
1626 """ Emits an error via the generator object, causing it to fail. """
1627 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1628
1629 def warning(self, sMessage):
1630 """ Emits a warning. """
1631 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1632
1633 ## Used by analyzeAndAnnotateName for memory MC blocks.
1634 kdAnnotateNameMemStmts = {
1635 'IEM_MC_FETCH_MEM16_U8': '__mem8',
1636 'IEM_MC_FETCH_MEM32_U8': '__mem8',
1637 'IEM_MC_FETCH_MEM_D80': '__mem80',
1638 'IEM_MC_FETCH_MEM_I16': '__mem16',
1639 'IEM_MC_FETCH_MEM_I32': '__mem32',
1640 'IEM_MC_FETCH_MEM_I64': '__mem64',
1641 'IEM_MC_FETCH_MEM_R32': '__mem32',
1642 'IEM_MC_FETCH_MEM_R64': '__mem64',
1643 'IEM_MC_FETCH_MEM_R80': '__mem80',
1644 'IEM_MC_FETCH_MEM_U128': '__mem128',
1645 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': '__mem128',
1646 'IEM_MC_FETCH_MEM_U128_NO_AC': '__mem128',
1647 'IEM_MC_FETCH_MEM_U16': '__mem16',
1648 'IEM_MC_FETCH_MEM_U16_DISP': '__mem16',
1649 'IEM_MC_FETCH_MEM_U16_SX_U32': '__mem16sx32',
1650 'IEM_MC_FETCH_MEM_U16_SX_U64': '__mem16sx64',
1651 'IEM_MC_FETCH_MEM_U16_ZX_U32': '__mem16zx32',
1652 'IEM_MC_FETCH_MEM_U16_ZX_U64': '__mem16zx64',
1653 'IEM_MC_FETCH_MEM_U256': '__mem256',
1654 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': '__mem256',
1655 'IEM_MC_FETCH_MEM_U256_NO_AC': '__mem256',
1656 'IEM_MC_FETCH_MEM_U32': '__mem32',
1657 'IEM_MC_FETCH_MEM_U32_DISP': '__mem32',
1658 'IEM_MC_FETCH_MEM_U32_SX_U64': '__mem32sx64',
1659 'IEM_MC_FETCH_MEM_U32_ZX_U64': '__mem32zx64',
1660 'IEM_MC_FETCH_MEM_U64': '__mem64',
1661 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': '__mem64',
1662 'IEM_MC_FETCH_MEM_U64_DISP': '__mem64',
1663 'IEM_MC_FETCH_MEM_U8': '__mem8',
1664 'IEM_MC_FETCH_MEM_U8_DISP': '__mem8',
1665 'IEM_MC_FETCH_MEM_U8_SX_U16': '__mem8sx16',
1666 'IEM_MC_FETCH_MEM_U8_SX_U32': '__mem8sx32',
1667 'IEM_MC_FETCH_MEM_U8_SX_U64': '__mem8sx64',
1668 'IEM_MC_FETCH_MEM_U8_ZX_U16': '__mem8zx16',
1669 'IEM_MC_FETCH_MEM_U8_ZX_U32': '__mem8zx32',
1670 'IEM_MC_FETCH_MEM_U8_ZX_U64': '__mem8zx64',
1671 'IEM_MC_FETCH_MEM_XMM': '__mem128',
1672 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': '__mem128',
1673 'IEM_MC_FETCH_MEM_XMM_NO_AC': '__mem128',
1674 'IEM_MC_FETCH_MEM_XMM_U32': '__mem32',
1675 'IEM_MC_FETCH_MEM_XMM_U64': '__mem64',
1676 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': '__mem128',
1677 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
1678 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': '__mem32',
1679 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': '__mem64',
1680 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
1681 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',
1682
1683 'IEM_MC_STORE_MEM_I16_CONST_BY_REF': '__mem16',
1684 'IEM_MC_STORE_MEM_I32_CONST_BY_REF': '__mem32',
1685 'IEM_MC_STORE_MEM_I64_CONST_BY_REF': '__mem64',
1686 'IEM_MC_STORE_MEM_I8_CONST_BY_REF': '__mem8',
1687 'IEM_MC_STORE_MEM_INDEF_D80_BY_REF': '__mem80',
1688 'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF': '__mem32',
1689 'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF': '__mem64',
1690 'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF': '__mem80',
1691 'IEM_MC_STORE_MEM_U128': '__mem128',
1692 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': '__mem128',
1693 'IEM_MC_STORE_MEM_U128_NO_AC': '__mem128',
1694 'IEM_MC_STORE_MEM_U16': '__mem16',
1695 'IEM_MC_STORE_MEM_U16_CONST': '__mem16c',
1696 'IEM_MC_STORE_MEM_U256': '__mem256',
1697 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': '__mem256',
1698 'IEM_MC_STORE_MEM_U256_NO_AC': '__mem256',
1699 'IEM_MC_STORE_MEM_U32': '__mem32',
1700 'IEM_MC_STORE_MEM_U32_CONST': '__mem32c',
1701 'IEM_MC_STORE_MEM_U64': '__mem64',
1702 'IEM_MC_STORE_MEM_U64_CONST': '__mem64c',
1703 'IEM_MC_STORE_MEM_U8': '__mem8',
1704 'IEM_MC_STORE_MEM_U8_CONST': '__mem8c',
1705
1706 'IEM_MC_MEM_MAP_D80_WO': '__mem80',
1707 'IEM_MC_MEM_MAP_I16_WO': '__mem16',
1708 'IEM_MC_MEM_MAP_I32_WO': '__mem32',
1709 'IEM_MC_MEM_MAP_I64_WO': '__mem64',
1710 'IEM_MC_MEM_MAP_R32_WO': '__mem32',
1711 'IEM_MC_MEM_MAP_R64_WO': '__mem64',
1712 'IEM_MC_MEM_MAP_R80_WO': '__mem80',
1713 'IEM_MC_MEM_MAP_U128_ATOMIC': '__mem128a',
1714 'IEM_MC_MEM_MAP_U128_RO': '__mem128',
1715 'IEM_MC_MEM_MAP_U128_RW': '__mem128',
1716 'IEM_MC_MEM_MAP_U128_WO': '__mem128',
1717 'IEM_MC_MEM_MAP_U16_ATOMIC': '__mem16a',
1718 'IEM_MC_MEM_MAP_U16_RO': '__mem16',
1719 'IEM_MC_MEM_MAP_U16_RW': '__mem16',
1720 'IEM_MC_MEM_MAP_U16_WO': '__mem16',
1721 'IEM_MC_MEM_MAP_U32_ATOMIC': '__mem32a',
1722 'IEM_MC_MEM_MAP_U32_RO': '__mem32',
1723 'IEM_MC_MEM_MAP_U32_RW': '__mem32',
1724 'IEM_MC_MEM_MAP_U32_WO': '__mem32',
1725 'IEM_MC_MEM_MAP_U64_ATOMIC': '__mem64a',
1726 'IEM_MC_MEM_MAP_U64_RO': '__mem64',
1727 'IEM_MC_MEM_MAP_U64_RW': '__mem64',
1728 'IEM_MC_MEM_MAP_U64_WO': '__mem64',
1729 'IEM_MC_MEM_MAP_U8_ATOMIC': '__mem8a',
1730 'IEM_MC_MEM_MAP_U8_RO': '__mem8',
1731 'IEM_MC_MEM_MAP_U8_RW': '__mem8',
1732 'IEM_MC_MEM_MAP_U8_WO': '__mem8',
1733 };
1734 ## Used by analyzeAndAnnotateName for non-memory MC blocks.
1735 kdAnnotateNameRegStmts = {
1736 'IEM_MC_FETCH_GREG_U8': '__greg8',
1737 'IEM_MC_FETCH_GREG_U8_ZX_U16': '__greg8zx16',
1738 'IEM_MC_FETCH_GREG_U8_ZX_U32': '__greg8zx32',
1739 'IEM_MC_FETCH_GREG_U8_ZX_U64': '__greg8zx64',
1740 'IEM_MC_FETCH_GREG_U8_SX_U16': '__greg8sx16',
1741 'IEM_MC_FETCH_GREG_U8_SX_U32': '__greg8sx32',
1742 'IEM_MC_FETCH_GREG_U8_SX_U64': '__greg8sx64',
1743 'IEM_MC_FETCH_GREG_U16': '__greg16',
1744 'IEM_MC_FETCH_GREG_U16_ZX_U32': '__greg16zx32',
1745 'IEM_MC_FETCH_GREG_U16_ZX_U64': '__greg16zx64',
1746 'IEM_MC_FETCH_GREG_U16_SX_U32': '__greg16sx32',
1747 'IEM_MC_FETCH_GREG_U16_SX_U64': '__greg16sx64',
1748 'IEM_MC_FETCH_GREG_U32': '__greg32',
1749 'IEM_MC_FETCH_GREG_U32_ZX_U64': '__greg32zx64',
1750 'IEM_MC_FETCH_GREG_U32_SX_U64': '__greg32sx64',
1751 'IEM_MC_FETCH_GREG_U64': '__greg64',
1752 'IEM_MC_FETCH_GREG_U64_ZX_U64': '__greg64zx64',
1753 'IEM_MC_FETCH_GREG_PAIR_U32': '__greg32',
1754 'IEM_MC_FETCH_GREG_PAIR_U64': '__greg64',
1755
1756 'IEM_MC_STORE_GREG_U8': '__greg8',
1757 'IEM_MC_STORE_GREG_U16': '__greg16',
1758 'IEM_MC_STORE_GREG_U32': '__greg32',
1759 'IEM_MC_STORE_GREG_U64': '__greg64',
1760 'IEM_MC_STORE_GREG_I64': '__greg64',
1761 'IEM_MC_STORE_GREG_U8_CONST': '__greg8c',
1762 'IEM_MC_STORE_GREG_U16_CONST': '__greg16c',
1763 'IEM_MC_STORE_GREG_U32_CONST': '__greg32c',
1764 'IEM_MC_STORE_GREG_U64_CONST': '__greg64c',
1765 'IEM_MC_STORE_GREG_PAIR_U32': '__greg32',
1766 'IEM_MC_STORE_GREG_PAIR_U64': '__greg64',
1767
1768 'IEM_MC_FETCH_SREG_U16': '__sreg16',
1769 'IEM_MC_FETCH_SREG_ZX_U32': '__sreg32',
1770 'IEM_MC_FETCH_SREG_ZX_U64': '__sreg64',
1771 'IEM_MC_FETCH_SREG_BASE_U64': '__sbase64',
1772 'IEM_MC_FETCH_SREG_BASE_U32': '__sbase32',
1773 'IEM_MC_STORE_SREG_BASE_U64': '__sbase64',
1774 'IEM_MC_STORE_SREG_BASE_U32': '__sbase32',
1775
1776 'IEM_MC_REF_GREG_U8': '__greg8',
1777 'IEM_MC_REF_GREG_U16': '__greg16',
1778 'IEM_MC_REF_GREG_U32': '__greg32',
1779 'IEM_MC_REF_GREG_U64': '__greg64',
1780 'IEM_MC_REF_GREG_U8_CONST': '__greg8',
1781 'IEM_MC_REF_GREG_U16_CONST': '__greg16',
1782 'IEM_MC_REF_GREG_U32_CONST': '__greg32',
1783 'IEM_MC_REF_GREG_U64_CONST': '__greg64',
1784 'IEM_MC_REF_GREG_I32': '__greg32',
1785 'IEM_MC_REF_GREG_I64': '__greg64',
1786 'IEM_MC_REF_GREG_I32_CONST': '__greg32',
1787 'IEM_MC_REF_GREG_I64_CONST': '__greg64',
1788
1789 'IEM_MC_STORE_FPUREG_R80_SRC_REF': '__fpu',
1790 'IEM_MC_REF_FPUREG': '__fpu',
1791
1792 'IEM_MC_FETCH_MREG_U64': '__mreg64',
1793 'IEM_MC_FETCH_MREG_U32': '__mreg32',
1794 'IEM_MC_FETCH_MREG_U16': '__mreg16',
1795 'IEM_MC_FETCH_MREG_U8': '__mreg8',
1796 'IEM_MC_STORE_MREG_U64': '__mreg64',
1797 'IEM_MC_STORE_MREG_U32': '__mreg32',
1798 'IEM_MC_STORE_MREG_U16': '__mreg16',
1799 'IEM_MC_STORE_MREG_U8': '__mreg8',
1800 'IEM_MC_STORE_MREG_U32_ZX_U64': '__mreg32zx64',
1801 'IEM_MC_REF_MREG_U64': '__mreg64',
1802 'IEM_MC_REF_MREG_U64_CONST': '__mreg64',
1803 'IEM_MC_REF_MREG_U32_CONST': '__mreg32',
1804
1805 'IEM_MC_CLEAR_XREG_U32_MASK': '__xreg32x4',
1806 'IEM_MC_FETCH_XREG_U128': '__xreg128',
1807 'IEM_MC_FETCH_XREG_XMM': '__xreg128',
1808 'IEM_MC_FETCH_XREG_U64': '__xreg64',
1809 'IEM_MC_FETCH_XREG_U32': '__xreg32',
1810 'IEM_MC_FETCH_XREG_U16': '__xreg16',
1811 'IEM_MC_FETCH_XREG_U8': '__xreg8',
1812 'IEM_MC_FETCH_XREG_PAIR_U128': '__xreg128p',
1813 'IEM_MC_FETCH_XREG_PAIR_XMM': '__xreg128p',
1814 'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
1815 'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',
1816
1817 'IEM_MC_STORE_XREG_U32_U128': '__xreg32',
1818 'IEM_MC_STORE_XREG_U128': '__xreg128',
1819 'IEM_MC_STORE_XREG_XMM': '__xreg128',
1820 'IEM_MC_STORE_XREG_XMM_U32': '__xreg32',
1821 'IEM_MC_STORE_XREG_XMM_U64': '__xreg64',
1822 'IEM_MC_STORE_XREG_U64': '__xreg64',
1823 'IEM_MC_STORE_XREG_U64_ZX_U128': '__xreg64zx128',
1824 'IEM_MC_STORE_XREG_U32': '__xreg32',
1825 'IEM_MC_STORE_XREG_U16': '__xreg16',
1826 'IEM_MC_STORE_XREG_U8': '__xreg8',
1827 'IEM_MC_STORE_XREG_U32_ZX_U128': '__xreg32zx128',
1828 'IEM_MC_STORE_XREG_R32': '__xreg32',
1829 'IEM_MC_STORE_XREG_R64': '__xreg64',
1830 'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX': '__xreg8zx',
1831 'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX': '__xreg16zx',
1832 'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX': '__xreg32zx',
1833 'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX': '__xreg64zx',
1834 'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX': '__xreg128zx',
1835 'IEM_MC_REF_XREG_U128': '__xreg128',
1836 'IEM_MC_REF_XREG_U128_CONST': '__xreg128',
1837 'IEM_MC_REF_XREG_U32_CONST': '__xreg32',
1838 'IEM_MC_REF_XREG_U64_CONST': '__xreg64',
1839 'IEM_MC_REF_XREG_R32_CONST': '__xreg32',
1840 'IEM_MC_REF_XREG_R64_CONST': '__xreg64',
1841 'IEM_MC_REF_XREG_XMM_CONST': '__xreg128',
1842 'IEM_MC_COPY_XREG_U128': '__xreg128',
1843
1844 'IEM_MC_FETCH_YREG_U256': '__yreg256',
1845 'IEM_MC_FETCH_YREG_YMM': '__yreg256',
1846 'IEM_MC_FETCH_YREG_U128': '__yreg128',
1847 'IEM_MC_FETCH_YREG_U64': '__yreg64',
1848 'IEM_MC_FETCH_YREG_U32': '__yreg32',
1849 'IEM_MC_STORE_YREG_U128': '__yreg128',
1850 'IEM_MC_STORE_YREG_U32_ZX_VLMAX': '__yreg32zx',
1851 'IEM_MC_STORE_YREG_U64_ZX_VLMAX': '__yreg64zx',
1852 'IEM_MC_STORE_YREG_U128_ZX_VLMAX': '__yreg128zx',
1853 'IEM_MC_STORE_YREG_U256_ZX_VLMAX': '__yreg256zx',
1854 'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX': '__yreg8',
1855 'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX': '__yreg16',
1856 'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX': '__yreg32',
1857 'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX': '__yreg64',
1858 'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX': '__yreg128',
1859 'IEM_MC_REF_YREG_U128': '__yreg128',
1860 'IEM_MC_REF_YREG_U128_CONST': '__yreg128',
1861 'IEM_MC_REF_YREG_U64_CONST': '__yreg64',
1862 'IEM_MC_COPY_YREG_U256_ZX_VLMAX': '__yreg256zx',
1863 'IEM_MC_COPY_YREG_U128_ZX_VLMAX': '__yreg128zx',
1864 'IEM_MC_COPY_YREG_U64_ZX_VLMAX': '__yreg64zx',
1865 'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX': '__yreg3296',
1866 'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX': '__yreg6464',
1867 'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX': '__yreg64hi64hi',
1868 'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX': '__yreg64lo64lo',
1869 'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
1870 'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
1871 };
1872 kdAnnotateNameCallStmts = {
1873 'IEM_MC_CALL_CIMPL_0': '__cimpl',
1874 'IEM_MC_CALL_CIMPL_1': '__cimpl',
1875 'IEM_MC_CALL_CIMPL_2': '__cimpl',
1876 'IEM_MC_CALL_CIMPL_3': '__cimpl',
1877 'IEM_MC_CALL_CIMPL_4': '__cimpl',
1878 'IEM_MC_CALL_CIMPL_5': '__cimpl',
1879 'IEM_MC_CALL_CIMPL_6': '__cimpl',
1880 'IEM_MC_CALL_CIMPL_7': '__cimpl',
1881 'IEM_MC_DEFER_TO_CIMPL_0_RET': '__cimpl_defer',
1882 'IEM_MC_DEFER_TO_CIMPL_1_RET': '__cimpl_defer',
1883 'IEM_MC_DEFER_TO_CIMPL_2_RET': '__cimpl_defer',
1884 'IEM_MC_DEFER_TO_CIMPL_3_RET': '__cimpl_defer',
1885 'IEM_MC_DEFER_TO_CIMPL_4_RET': '__cimpl_defer',
1886 'IEM_MC_DEFER_TO_CIMPL_5_RET': '__cimpl_defer',
1887 'IEM_MC_DEFER_TO_CIMPL_6_RET': '__cimpl_defer',
1888 'IEM_MC_DEFER_TO_CIMPL_7_RET': '__cimpl_defer',
1889 'IEM_MC_CALL_VOID_AIMPL_0': '__aimpl',
1890 'IEM_MC_CALL_VOID_AIMPL_1': '__aimpl',
1891 'IEM_MC_CALL_VOID_AIMPL_2': '__aimpl',
1892 'IEM_MC_CALL_VOID_AIMPL_3': '__aimpl',
1893 'IEM_MC_CALL_VOID_AIMPL_4': '__aimpl',
1894 'IEM_MC_CALL_VOID_AIMPL_5': '__aimpl',
1895 'IEM_MC_CALL_AIMPL_0': '__aimpl_ret',
1896 'IEM_MC_CALL_AIMPL_1': '__aimpl_ret',
1897 'IEM_MC_CALL_AIMPL_2': '__aimpl_ret',
1898 'IEM_MC_CALL_AIMPL_3': '__aimpl_ret',
1899 'IEM_MC_CALL_AIMPL_4': '__aimpl_ret',
1900 'IEM_MC_CALL_AIMPL_5': '__aimpl_ret',
1901 'IEM_MC_CALL_AIMPL_6': '__aimpl_ret',
1902 'IEM_MC_CALL_VOID_AIMPL_6': '__aimpl_fpu',
1903 'IEM_MC_CALL_FPU_AIMPL_0': '__aimpl_fpu',
1904 'IEM_MC_CALL_FPU_AIMPL_1': '__aimpl_fpu',
1905 'IEM_MC_CALL_FPU_AIMPL_2': '__aimpl_fpu',
1906 'IEM_MC_CALL_FPU_AIMPL_3': '__aimpl_fpu',
1907 'IEM_MC_CALL_FPU_AIMPL_4': '__aimpl_fpu',
1908 'IEM_MC_CALL_FPU_AIMPL_5': '__aimpl_fpu',
1909 'IEM_MC_CALL_MMX_AIMPL_0': '__aimpl_mmx',
1910 'IEM_MC_CALL_MMX_AIMPL_1': '__aimpl_mmx',
1911 'IEM_MC_CALL_MMX_AIMPL_2': '__aimpl_mmx',
1912 'IEM_MC_CALL_MMX_AIMPL_3': '__aimpl_mmx',
1913 'IEM_MC_CALL_MMX_AIMPL_4': '__aimpl_mmx',
1914 'IEM_MC_CALL_MMX_AIMPL_5': '__aimpl_mmx',
1915 'IEM_MC_CALL_SSE_AIMPL_0': '__aimpl_sse',
1916 'IEM_MC_CALL_SSE_AIMPL_1': '__aimpl_sse',
1917 'IEM_MC_CALL_SSE_AIMPL_2': '__aimpl_sse',
1918 'IEM_MC_CALL_SSE_AIMPL_3': '__aimpl_sse',
1919 'IEM_MC_CALL_SSE_AIMPL_4': '__aimpl_sse',
1920 'IEM_MC_CALL_SSE_AIMPL_5': '__aimpl_sse',
1921 'IEM_MC_CALL_AVX_AIMPL_0': '__aimpl_avx',
1922 'IEM_MC_CALL_AVX_AIMPL_1': '__aimpl_avx',
1923 'IEM_MC_CALL_AVX_AIMPL_2': '__aimpl_avx',
1924 'IEM_MC_CALL_AVX_AIMPL_3': '__aimpl_avx',
1925 'IEM_MC_CALL_AVX_AIMPL_4': '__aimpl_avx',
1926 'IEM_MC_CALL_AVX_AIMPL_5': '__aimpl_avx',
1927 };
1928 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1929 """
1930 Scans the statements and variation lists for clues about the threaded function,
1931 and sets self.sSubName if successful.
1932 """
1933 # Operand base naming:
1934 dHits = {};
1935 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1936 if cHits > 0:
1937 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1938 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1939 else:
1940 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1941 if cHits > 0:
1942 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1943 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1944 else:
1945 # No operand details, try to name it by call type...
1946 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1947 if cHits > 0:
1948 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1949 self.sSubName = self.kdAnnotateNameCallStmts[sStmtNm];
1950 return;
1951
1952 # Add call info if any:
1953 dHits = {};
1954 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1955 if cHits > 0:
1956 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1957 sName += self.kdAnnotateNameCallStmts[sStmtNm][1:];
1958
1959 self.sSubName = sName;
1960 return;
1961
1962 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1963 """ Scans the statements for MC variables and call arguments. """
1964 for oStmt in aoStmts:
1965 if isinstance(oStmt, iai.McStmtVar):
1966 if oStmt.sVarName in self.dVariables:
1967 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1968 self.dVariables[oStmt.sVarName] = oStmt;
1969 elif isinstance(oStmt, iai.McStmtCall) and oStmt.sName.startswith('IEM_MC_CALL_AIMPL_'):
1970 if oStmt.asParams[1] in self.dVariables:
1971 raise Exception('Variable %s is defined more than once!' % (oStmt.asParams[1],));
1972 self.dVariables[oStmt.asParams[1]] = iai.McStmtVar('IEM_MC_LOCAL', oStmt.asParams[0:2],
1973 oStmt.asParams[0], oStmt.asParams[1]);
1974
1975 # There shouldn't be any variables or arguments declared inside if/
1976 # else blocks, but scan them too to be on the safe side.
1977 if isinstance(oStmt, iai.McStmtCond):
1978 #cBefore = len(self.dVariables);
1979 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1980 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1981 #if len(self.dVariables) != cBefore:
1982 # raise Exception('Variables/arguments defined in conditional branches!');
1983 return True;
1984
1985 kdReturnStmtAnnotations = {
1986 'IEM_MC_ADVANCE_RIP_AND_FINISH': g_ksFinishAnnotation_Advance,
1987 'IEM_MC_REL_JMP_S8_AND_FINISH': g_ksFinishAnnotation_RelJmp,
1988 'IEM_MC_REL_JMP_S16_AND_FINISH': g_ksFinishAnnotation_RelJmp,
1989 'IEM_MC_REL_JMP_S32_AND_FINISH': g_ksFinishAnnotation_RelJmp,
1990 'IEM_MC_SET_RIP_U16_AND_FINISH': g_ksFinishAnnotation_SetJmp,
1991 'IEM_MC_SET_RIP_U32_AND_FINISH': g_ksFinishAnnotation_SetJmp,
1992 'IEM_MC_SET_RIP_U64_AND_FINISH': g_ksFinishAnnotation_SetJmp,
1993 'IEM_MC_REL_CALL_S16_AND_FINISH': g_ksFinishAnnotation_RelCall,
1994 'IEM_MC_REL_CALL_S32_AND_FINISH': g_ksFinishAnnotation_RelCall,
1995 'IEM_MC_REL_CALL_S64_AND_FINISH': g_ksFinishAnnotation_RelCall,
1996 'IEM_MC_IND_CALL_U16_AND_FINISH': g_ksFinishAnnotation_IndCall,
1997 'IEM_MC_IND_CALL_U32_AND_FINISH': g_ksFinishAnnotation_IndCall,
1998 'IEM_MC_IND_CALL_U64_AND_FINISH': g_ksFinishAnnotation_IndCall,
1999 'IEM_MC_DEFER_TO_CIMPL_0_RET': g_ksFinishAnnotation_DeferToCImpl,
2000 'IEM_MC_DEFER_TO_CIMPL_1_RET': g_ksFinishAnnotation_DeferToCImpl,
2001 'IEM_MC_DEFER_TO_CIMPL_2_RET': g_ksFinishAnnotation_DeferToCImpl,
2002 'IEM_MC_DEFER_TO_CIMPL_3_RET': g_ksFinishAnnotation_DeferToCImpl,
2003 'IEM_MC_DEFER_TO_CIMPL_4_RET': g_ksFinishAnnotation_DeferToCImpl,
2004 'IEM_MC_DEFER_TO_CIMPL_5_RET': g_ksFinishAnnotation_DeferToCImpl,
2005 'IEM_MC_DEFER_TO_CIMPL_6_RET': g_ksFinishAnnotation_DeferToCImpl,
2006 'IEM_MC_DEFER_TO_CIMPL_7_RET': g_ksFinishAnnotation_DeferToCImpl,
2007 };
2008 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
2009 """
2010 Analyzes the code looking for clues as to additional side-effects.
2011
2012 Currently this is simply looking for branching and adding the relevant
2013 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
2014 dictionary with a copy of self.oMcBlock.dsCImplFlags.
2015
2016 This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.
2017
2018 Returns annotation on return style.
2019 """
2020 sAnnotation = None;
2021 for oStmt in aoStmts:
2022 # Set IEM_CIMPL_F_BRANCH_XXXX flags if we see any branching MCs.
2023 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
2024 assert not fSeenConditional;
2025 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
2026 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
2027 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
2028 if fSeenConditional:
2029 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
2030 elif oStmt.sName.startswith('IEM_MC_IND_CALL'):
2031 assert not fSeenConditional;
2032 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
2033 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
2034 self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;
2035 elif oStmt.sName.startswith('IEM_MC_REL_CALL'):
2036 assert not fSeenConditional;
2037 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
2038 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
2039 self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;
2040 elif oStmt.sName.startswith('IEM_MC_RETN'):
2041 assert not fSeenConditional;
2042 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
2043 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
2044 self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;
2045
2046 # Check for CIMPL and AIMPL calls.
2047 if oStmt.sName.startswith('IEM_MC_CALL_'):
2048 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
2049 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
2050 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
2051 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')):
2052 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
2053 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
2054 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
2055 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
2056 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
2057 elif oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_'):
2058 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE'] = True;
2059 else:
2060 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
2061
2062 # Check for return statements.
2063 if oStmt.sName in self.kdReturnStmtAnnotations:
2064 assert sAnnotation is None;
2065 sAnnotation = self.kdReturnStmtAnnotations[oStmt.sName];
2066
2067 # Collect MCs working on EFLAGS. Caller will check this.
2068 if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS',
2069 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS', 'IEM_MC_ARG_LOCAL_EFLAGS', ):
2070 dEflStmts[oStmt.sName] = oStmt;
2071 elif isinstance(oStmt, iai.McStmtCall):
2072 if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
2073 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
2074 if ( oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
2075 or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
2076 dEflStmts[oStmt.sName] = oStmt;
2077
2078 # Process branches of conditionals recursively.
2079 if isinstance(oStmt, iai.McStmtCond):
2080 oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
2081 if oStmt.aoElseBranch:
2082 oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);
2083
2084 return sAnnotation;
2085
2086 def analyzeThreadedFunction(self, oGenerator):
2087 """
2088 Analyzes the code, identifying the number of parameters it requires and such.
2089
2090 Returns dummy True - raises exception on trouble.
2091 """
2092
2093 #
2094 # Decode the block into a list/tree of McStmt objects.
2095 #
2096 aoStmts = self.oMcBlock.decode();
2097
2098 #
2099 # Check the block for errors before we proceed (will decode it).
2100 #
2101 asErrors = self.oMcBlock.check();
2102 if asErrors:
2103 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
2104 for sError in asErrors]));
2105
2106 #
2107 # Scan the statements for local variables and call arguments (self.dVariables).
2108 #
2109 self.analyzeFindVariablesAndCallArgs(aoStmts);
2110
2111 #
2112 # Scan the code for IEM_CIMPL_F_ and other clues.
2113 #
2114 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
2115 dEflStmts = {};
2116 self.analyzeCodeOperation(aoStmts, dEflStmts);
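# Exactly one of the four calling-convention flags may be present; the booleans below
# are summed as integers, so a total above one means the block mixes call styles.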
2117 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
2118 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
2119 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags)
2120 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE' in self.dsCImplFlags) > 1):
2121 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE/AIMPL_WITH_XSTATE calls', oGenerator);
2122
2123 #
2124 # Analyse EFLAGS related MCs and @opflmodify and friends.
2125 #
2126 if dEflStmts:
2127 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
2128 if ( oInstruction is None
2129 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
2130 sMcNames = '+'.join(dEflStmts.keys());
2131 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
2132 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
2133 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts or 'IEM_MC_COMMIT_EFLAGS_OPT' in dEflStmts:
2134 if not oInstruction.asFlModify:
2135 if oInstruction.sMnemonic not in [ 'not', ]:
2136 self.error('Uses IEM_MC_COMMIT_EFLAGS[_OPT] but has no flags in @opflmodify!', oGenerator);
2137 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
2138 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
2139 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
2140 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
2141 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
2142 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
2143 if not oInstruction.asFlModify:
2144 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
2145 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
2146 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
2147 if not oInstruction.asFlTest:
2148 if oInstruction.sMnemonic not in [ 'not', ]:
2149 self.error('Expected @opfltest!', oGenerator);
2150 if oInstruction and oInstruction.asFlSet:
2151 for sFlag in oInstruction.asFlSet:
2152 if sFlag not in oInstruction.asFlModify:
2153 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
2154 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2155 if oInstruction and oInstruction.asFlClear:
2156 for sFlag in oInstruction.asFlClear:
2157 if sFlag not in oInstruction.asFlModify:
2158 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
2159 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2160
2161 #
2162 # Create variations as needed.
2163 #
2164 if iai.McStmt.findStmtByNames(aoStmts,
2165 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
2166 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
2167 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
2168 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
2169 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
2170
2171 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
2172 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
2173 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
2174 'IEM_MC_FETCH_MEM_U32' : True,
2175 'IEM_MC_FETCH_MEM_U64' : True,
2176 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
2177 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
2178 'IEM_MC_STORE_MEM_U32' : True,
2179 'IEM_MC_STORE_MEM_U64' : True, }):
2180 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2181 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
2182 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2183 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
2184 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2185 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
2186 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2187 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
2188 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2189 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2190 else:
2191 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
2192 else:
2193 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2194 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
2195 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2196 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
2197 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2198 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
2199 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2200 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
2201 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2202 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2203 else:
2204 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
2205
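# Conditional relative branch blocks additionally get a jump-taken ('_Jmp') and a
# not-taken ('_NoJmp') flavour of each of the base variations selected above.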
2206 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2207 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
2208 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
2209 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
2210 asVariationsBase = asVariations;
2211 asVariations = [];
2212 for sVariation in asVariationsBase:
2213 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
2214 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
2215
2216 if not iai.McStmt.findStmtByNames(aoStmts,
2217 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
2218 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2219 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2220 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
2221 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
2222 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
2223 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
2224 'IEM_MC_REL_CALL_S16_AND_FINISH': True,
2225 'IEM_MC_REL_CALL_S32_AND_FINISH': True,
2226 'IEM_MC_REL_CALL_S64_AND_FINISH': True,
2227 'IEM_MC_IND_CALL_U16_AND_FINISH': True,
2228 'IEM_MC_IND_CALL_U32_AND_FINISH': True,
2229 'IEM_MC_IND_CALL_U64_AND_FINISH': True,
2230 'IEM_MC_RETN_AND_FINISH': True,
2231 }):
2232 asVariations = [sVariation for sVariation in asVariations
2233 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
2234
2235 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
2236
2237 # Dictionary variant of the list.
2238 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
2239
2240 #
2241 # Try annotate the threaded function name.
2242 # Try to annotate the threaded function name.
2243 self.analyzeAndAnnotateName(aoStmts);
2244
2245 #
2246 # Continue the analysis on each variation.
2247 #
2248 for oVariation in self.aoVariations:
2249 oVariation.analyzeVariation(aoStmts);
2250
2251 return True;
2252
2253 ## Used by emitThreadedCallStmts.
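## These are the variations where an address-size or segment prefix selects a different
## threaded function, so emitThreadedCallStmts must fold extra bits into its switch value.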
2254 kdVariationsWithNeedForPrefixCheck = {
2255 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
2256 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
2257 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
2258 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
2259 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
2260 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
2261 ThreadedFunctionVariation.ksVariation_32_Flat: True,
2262 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
2263 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
2264 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
2265 };
2266
2267 def emitThreadedCallStmts(self, sBranch = None, fTbLookupTable = False): # pylint: disable=too-many-statements
2268 """
2269 Worker for morphInputCode that returns a list of statements that emits
2270 the call to the threaded functions for the block.
2271
2272 The sBranch parameter is used with conditional branches where we'll emit
2273 different threaded calls depending on whether we're in the jump-taken or
2274 no-jump code path.
2275
2276 The fTbLookupTable parameter can either be False, True or whatever else
2277 (like 2) - in the latter case this means a large lookup table.
2278 """
2279 # Special case for only default variation:
2280 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
2281 assert not sBranch;
2282 return self.aoVariations[0].emitThreadedCallStmtsForVariant(0, fTbLookupTable);
2283
2284 #
2285 # Case statement sub-class.
2286 #
2287 dByVari = self.dVariations;
2288 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
2289 class Case:
2290 def __init__(self, sCond, sVarNm = None):
2291 self.sCond = sCond;
2292 self.sVarNm = sVarNm;
2293 self.oVar = dByVari[sVarNm] if sVarNm else None;
2294 self.aoBody = self.oVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sVarNm else None;
2295
2296 def toCode(self):
2297 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2298 if self.aoBody:
2299 aoStmts.extend(self.aoBody);
2300 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
2301 return aoStmts;
2302
2303 def toFunctionAssignment(self):
2304 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2305 if self.aoBody:
2306 aoStmts.extend([
2307 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
2308 iai.McCppGeneric('break;', cchIndent = 8),
2309 ]);
2310 return aoStmts;
2311
2312 def isSame(self, oThat):
2313 if not self.aoBody: # fall thru always matches.
2314 return True;
2315 if len(self.aoBody) != len(oThat.aoBody):
2316 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
2317 return False;
2318 for iStmt, oStmt in enumerate(self.aoBody):
2319 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
2320 assert isinstance(oStmt, iai.McCppGeneric);
2321 assert not isinstance(oStmt, iai.McStmtCond);
2322 if isinstance(oStmt, iai.McStmtCond):
2323 return False;
2324 if oStmt.sName != oThatStmt.sName:
2325 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
2326 return False;
2327 if len(oStmt.asParams) != len(oThatStmt.asParams):
2328 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
2329 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
2330 return False;
2331 for iParam, sParam in enumerate(oStmt.asParams):
2332 if ( sParam != oThatStmt.asParams[iParam]
2333 and ( iParam != 1
2334 or not isinstance(oStmt, iai.McCppCall)
2335 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
2336 or sParam != self.oVar.getIndexName()
2337 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
2338 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
2339 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
2340 return False;
2341 return True;
2342
2343 #
2344 # Determine what we're switching on.
2345 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
2346 #
2347 fSimple = True;
2348 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
2349 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
2350 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
2351 # Accesses via FS, GS and CS go through non-FLAT functions. (CS
2352 # is not writable in 32-bit mode (at least), thus the penalty mode
2353 # for any accesses via it (simpler this way).)
2354 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
2355 fSimple = False; # threaded functions.
2356 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
2357 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
2358 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
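# The resulting selector thus (roughly) uses bits 0..2 for the CPU/flat mode, bit 3 for
# an address-size prefix, bit 4 for an FS/GS/CS segment prefix and bit 5 for the eflags
# checking/clearing variations - matching the '| 8', '| 16' and '| 32' case values below.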
2359
2360 #
2361 # Generate the case statements.
2362 #
2363 # pylintx: disable=x
2364 aoCases = [];
2365 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
2366 assert not fSimple and not sBranch;
2367 aoCases.extend([
2368 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
2369 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
2370 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
2371 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
2372 ]);
2373 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
2374 aoCases.extend([
2375 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
2376 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
2377 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
2378 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
2379 ]);
2380 elif ThrdFnVar.ksVariation_64 in dByVari:
2381 assert fSimple and not sBranch;
2382 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
2383 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
2384 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
2385 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
2386 assert fSimple and sBranch;
2387 aoCases.append(Case('IEMMODE_64BIT',
2388 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
2389 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
2390 aoCases.append(Case('IEMMODE_64BIT | 32',
2391 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
2392
2393 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
2394 assert not fSimple and not sBranch;
2395 aoCases.extend([
2396 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
2397 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
2398 Case('IEMMODE_32BIT | 16', None), # fall thru
2399 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2400 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
2401 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
2402 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
2403 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
2404 ]);
2405 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
2406 aoCases.extend([
2407 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
2408 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
2409 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
2410 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2411 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
2412 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
2413 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
2414 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
2415 ]);
2416 elif ThrdFnVar.ksVariation_32 in dByVari:
2417 assert fSimple and not sBranch;
2418 aoCases.extend([
2419 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2420 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2421 ]);
2422 if ThrdFnVar.ksVariation_32f in dByVari:
2423 aoCases.extend([
2424 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2425 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2426 ]);
2427 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
2428 assert fSimple and sBranch;
2429 aoCases.extend([
2430 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2431 Case('IEMMODE_32BIT',
2432 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
2433 ]);
2434 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
2435 aoCases.extend([
2436 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2437 Case('IEMMODE_32BIT | 32',
2438 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
2439 ]);
2440
2441 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
2442 assert not fSimple and not sBranch;
2443 aoCases.extend([
2444 Case('IEMMODE_16BIT | 16', None), # fall thru
2445 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
2446 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
2447 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
2448 ]);
2449 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
2450 aoCases.extend([
2451 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
2452 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
2453 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
2454 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
2455 ]);
2456 elif ThrdFnVar.ksVariation_16 in dByVari:
2457 assert fSimple and not sBranch;
2458 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
2459 if ThrdFnVar.ksVariation_16f in dByVari:
2460 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
2461 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
2462 assert fSimple and sBranch;
2463 aoCases.append(Case('IEMMODE_16BIT',
2464 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
2465 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
2466 aoCases.append(Case('IEMMODE_16BIT | 32',
2467 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
2468
2469
2470 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
2471 if not fSimple:
2472 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
2473 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
2474 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
2475 if not fSimple:
2476 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
2477 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
2478
2479 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
2480 assert fSimple and sBranch;
2481 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
2482 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
2483 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
2484 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
2485 assert fSimple and sBranch;
2486 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
2487 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
2488 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
2489
2490 #
2491 # If the case bodies are all the same, except for the function called,
2492 # we can reduce the code size and hopefully compile time.
2493 #
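# A rough sketch of the reduced form this produces (illustrative, not the literal output):
#     IEMTHREADEDFUNCS enmFunction;
#     switch (<sSwitchValue>)
#     {
#         case <mode/flag combo>: enmFunction = kIemThreadedFunc_<variation>; break;
#         ...
#         IEM_NOT_REACHED_DEFAULT_CASE_RET();
#     }
#     <one shared sequence of threaded call statements using enmFunction>
# The generic switch further down instead repeats the full call statements in every case body.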
2494 iFirstCaseWithBody = 0;
2495 while not aoCases[iFirstCaseWithBody].aoBody:
2496 iFirstCaseWithBody += 1;
2497 fAllSameCases = True;
2498 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
2499 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
2500 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
2501 if fAllSameCases:
2502 aoStmts = [
2503 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
2504 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2505 iai.McCppGeneric('{'),
2506 ];
2507 for oCase in aoCases:
2508 aoStmts.extend(oCase.toFunctionAssignment());
2509 aoStmts.extend([
2510 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2511 iai.McCppGeneric('}'),
2512 ]);
2513 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmtsForVariant(0, fTbLookupTable,
2514 'enmFunction'));
2515
2516 else:
2517 #
2518 # Generate the generic switch statement.
2519 #
2520 aoStmts = [
2521 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2522 iai.McCppGeneric('{'),
2523 ];
2524 for oCase in aoCases:
2525 aoStmts.extend(oCase.toCode());
2526 aoStmts.extend([
2527 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2528 iai.McCppGeneric('}'),
2529 ]);
2530
2531 return aoStmts;
2532
2533 def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
2534 """
2535 Adjusts (and copies) the statements for the input/decoder so that they emit
2536 calls to the right threaded functions for each block.
2537
2538 Returns a list/tree of statements (aoStmts is not modified) and the updated
2539 fCallEmitted status.
2540 """
2541 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
2542 aoDecoderStmts = [];
2543
2544 for iStmt, oStmt in enumerate(aoStmts):
2545 # Copy the statement. Make a deep copy to make sure we've got our own
2546 # copies of all instance variables, even if a bit overkill at the moment.
2547 oNewStmt = copy.deepcopy(oStmt);
2548 aoDecoderStmts.append(oNewStmt);
2549 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
2550 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
2551 oNewStmt.asParams[1] = ' | '.join(sorted(self.dsCImplFlags.keys()));
2552
2553 # If we haven't emitted the threaded function call yet, look for
2554 # statements which it would naturally follow or precede.
2555 if not fCallEmitted:
2556 if not oStmt.isCppStmt():
2557 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
2558 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
2559 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
2560 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
2561 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
2562 aoDecoderStmts.pop();
2563 if not fIsConditional:
2564 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2565 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
2566 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
2567 else:
2568 assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2569 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2570 'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
2571 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
2572 aoDecoderStmts.append(oNewStmt);
2573 fCallEmitted = True;
2574
2575 elif iai.g_dMcStmtParsers[oStmt.sName][2]:
2576 # This is for Jmp/NoJmp with loopne and friends, which modify state other than RIP.
2577 if not sBranchAnnotation:
2578 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2579 assert fIsConditional;
2580 aoDecoderStmts.pop();
2581 if sBranchAnnotation == g_ksFinishAnnotation_Advance:
2582 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,});
2583 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
2584 elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
2585 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
2586 { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
2587 'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
2588 'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
2589 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
2590 else:
2591 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2592 aoDecoderStmts.append(oNewStmt);
2593 fCallEmitted = True;
2594
2595 elif ( not fIsConditional
2596 and oStmt.fDecode
2597 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
2598 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
2599 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2600 fCallEmitted = True;
2601
2602 # Process branches of conditionals recursively.
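# Note: the call only counts as emitted for the enclosing scope when both branches
# emitted it; otherwise a later statement still has to emit it (see the 'and' below).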
2603 if isinstance(oStmt, iai.McStmtCond):
2604 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
2605 fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
2606 if oStmt.aoElseBranch:
2607 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
2608 fCallEmitted, cDepth + 1,
2609 oStmt.oElseBranchAnnotation);
2610 else:
2611 fCallEmitted2 = False;
2612 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
2613
2614 if not fCallEmitted and cDepth == 0:
2615 self.raiseProblem('Unable to insert call to threaded function.');
2616
2617 return (aoDecoderStmts, fCallEmitted);
2618
2619
2620 def generateInputCode(self):
2621 """
2622 Modifies the input code.
2623 """
2624 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2625
2626 if len(self.oMcBlock.aoStmts) == 1:
2627 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2628 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2629 if self.dsCImplFlags:
2630 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2631 else:
2632 sCode += '0;\n';
2633 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2634 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2635 sIndent = ' ' * (min(cchIndent, 2) - 2);
2636 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2637 return sCode;
2638
2639 # IEM_MC_BEGIN/END block
2640 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2641 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2642 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2643 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2644 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2645
2646# Short alias for ThreadedFunctionVariation.
2647ThrdFnVar = ThreadedFunctionVariation;
2648
2649
2650class IEMThreadedGenerator(object):
2651 """
2652 The threaded code generator & annotator.
2653 """
2654
2655 def __init__(self):
2656 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2657 self.oOptions = None # type: argparse.Namespace
2658 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2659 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers giving the index of the first function.
2660 self.cErrors = 0;
2661
2662 #
2663 # Error reporting.
2664 #
2665
2666 def rawError(self, sCompleteMessage):
2667 """ Output a raw error and increment the error counter. """
2668 print(sCompleteMessage, file = sys.stderr);
2669 self.cErrors += 1;
2670 return False;
2671
2672 #
2673 # Processing.
2674 #
2675
2676 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
2677 """
2678 Process the input files.
2679 """
2680
2681 # Parse the files.
2682 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
2683
2684 # Create threaded functions for the MC blocks.
2685 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
2686
2687 # Analyze the threaded functions.
2688 dRawParamCounts = {};
2689 dMinParamCounts = {};
2690 for oThreadedFunction in self.aoThreadedFuncs:
2691 oThreadedFunction.analyzeThreadedFunction(self);
2692 for oVariation in oThreadedFunction.aoVariations:
2693 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
2694 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
2695 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
2696 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
2697 print('debug: %s params: %4s raw, %4s min'
2698 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
2699 file = sys.stderr);
2700
2701 # Do another pass over the threaded functions to settle the name suffix.
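# (Illustrative: if several MC blocks inside the same decoder function end up with
#  colliding sub-names, '_0', '_1', ... are appended to keep the names unique.)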
2702 iThreadedFn = 0;
2703 while iThreadedFn < len(self.aoThreadedFuncs):
2704 oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
2705 assert oFunction;
2706 iThreadedFnNext = iThreadedFn + 1;
2707 dSubNames = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
2708 while ( iThreadedFnNext < len(self.aoThreadedFuncs)
2709 and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
2710 dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
2711 iThreadedFnNext += 1;
2712 if iThreadedFnNext - iThreadedFn > len(dSubNames):
2713 iSubName = 0;
2714 while iThreadedFn + iSubName < iThreadedFnNext:
2715 self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
2716 iSubName += 1;
2717 iThreadedFn = iThreadedFnNext;
2718
2719 # Populate aidxFirstFunctions. This is ASSUMING that
2720 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
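# (Illustrative: aidxFirstFunctions[i] is the index of the first threaded function that
#  came from self.aoParsers[i], so parser i owns the range up to the next parser's entry.)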
2721 iThreadedFunction = 0;
2722 oThreadedFunction = self.getThreadedFunctionByIndex(0);
2723 self.aidxFirstFunctions = [];
2724 for oParser in self.aoParsers:
2725 self.aidxFirstFunctions.append(iThreadedFunction);
2726
2727 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
2728 iThreadedFunction += 1;
2729 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2730
2731 # Analyze the threaded functions and their variations for native recompilation.
2732 if fNativeRecompilerEnabled:
2733 ian.analyzeThreadedFunctionsForNativeRecomp(self.aoThreadedFuncs, sHostArch);
2734
2735 # Gather arguments + variable statistics for the MC blocks.
2736 cMaxArgs = 0;
2737 cMaxVars = 0;
2738 cMaxVarsAndArgs = 0;
2739 cbMaxArgs = 0;
2740 cbMaxVars = 0;
2741 cbMaxVarsAndArgs = 0;
2742 for oThreadedFunction in self.aoThreadedFuncs:
2743 if oThreadedFunction.oMcBlock.aoLocals or oThreadedFunction.oMcBlock.aoArgs:
2744 # Counts.
2745 cMaxVars = max(cMaxVars, len(oThreadedFunction.oMcBlock.aoLocals));
2746 cMaxArgs = max(cMaxArgs, len(oThreadedFunction.oMcBlock.aoArgs));
2747 cMaxVarsAndArgs = max(cMaxVarsAndArgs,
2748 len(oThreadedFunction.oMcBlock.aoLocals) + len(oThreadedFunction.oMcBlock.aoArgs));
2749 if cMaxVarsAndArgs > 9:
2750 raise Exception('%s potentially uses too many variables / args: %u, max 9 - %u vars and %u args'
2751 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
2752 len(oThreadedFunction.oMcBlock.aoLocals), len(oThreadedFunction.oMcBlock.aoArgs),));
2753 # Calc stack allocation size:
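# (Each argument/local is rounded up to a whole 64-bit stack slot, i.e. 8 bytes, below.)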
2754 cbArgs = 0;
2755 for oArg in oThreadedFunction.oMcBlock.aoArgs:
2756 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
2757 cbVars = 0;
2758 for oVar in oThreadedFunction.oMcBlock.aoLocals:
2759 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
2760 cbMaxVars = max(cbMaxVars, cbVars);
2761 cbMaxArgs = max(cbMaxArgs, cbArgs);
2762 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
2763 if cbMaxVarsAndArgs >= 0xc0:
2764 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
2765 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
2766
2767 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
2768 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
2769
2770 if self.cErrors > 0:
2771 print('fatal error: %u error%s during processing. Details above.'
2772 % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
2773 return False;
2774 return True;
2775
2776 #
2777 # Output
2778 #
2779
2780 def generateLicenseHeader(self):
2781 """
2782 Returns the lines for a license header.
2783 """
2784 return [
2785 '/*',
2786 ' * Autogenerated by $Id: IEMAllThrdPython.py 105652 2024-08-12 12:16:36Z vboxsync $ ',
2787 ' * Do not edit!',
2788 ' */',
2789 '',
2790 '/*',
2791 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2792 ' *',
2793 ' * This file is part of VirtualBox base platform packages, as',
2794 ' * available from https://www.virtualbox.org.',
2795 ' *',
2796 ' * This program is free software; you can redistribute it and/or',
2797 ' * modify it under the terms of the GNU General Public License',
2798 ' * as published by the Free Software Foundation, in version 3 of the',
2799 ' * License.',
2800 ' *',
2801 ' * This program is distributed in the hope that it will be useful, but',
2802 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2803 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2804 ' * General Public License for more details.',
2805 ' *',
2806 ' * You should have received a copy of the GNU General Public License',
2807 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2808 ' *',
2809 ' * The contents of this file may alternatively be used under the terms',
2810 ' * of the Common Development and Distribution License Version 1.0',
2811 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2812 ' * in the VirtualBox distribution, in which case the provisions of the',
2813 ' * CDDL are applicable instead of those of the GPL.',
2814 ' *',
2815 ' * You may elect to license modified versions of this file under the',
2816 ' * terms and conditions of either the GPL or the CDDL or both.',
2817 ' *',
2818 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2819 ' */',
2820 '',
2821 '',
2822 '',
2823 ];
2824
2825 ## List of built-in threaded functions with user argument counts and
2826 ## whether it has a native recompiler implementation.
2827 katBltIns = (
2828 ( 'Nop', 0, True ),
2829 ( 'LogCpuState', 0, True ),
2830
2831 ( 'DeferToCImpl0', 2, True ),
2832 ( 'CheckIrq', 0, True ),
2833 ( 'CheckMode', 1, True ),
2834 ( 'CheckHwInstrBps', 0, False ),
2835 ( 'CheckCsLim', 1, True ),
2836
2837 ( 'CheckCsLimAndOpcodes', 3, True ),
2838 ( 'CheckOpcodes', 3, True ),
2839 ( 'CheckOpcodesConsiderCsLim', 3, True ),
2840
2841 ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
2842 ( 'CheckPcAndOpcodes', 3, True ),
2843 ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),
2844
2845 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
2846 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
2847 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 3, True ),
2848
2849 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
2850 ( 'CheckOpcodesLoadingTlb', 3, True ),
2851 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),
2852
2853 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
2854 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
2855 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),
2856
2857 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
2858 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
2859 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
2860 );
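# (The entries above correspond to the C functions iemThreadedFunc_BltIn_<Name> and, when
#  the last field is True, iemNativeRecompFunc_BltIn_<Name> / iemNativeLivenessFunc_BltIn_<Name>;
#  the middle field is the number of uParamN arguments used for the argument count table.)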
2861
2862 def generateThreadedFunctionsHeader(self, oOut, _):
2863 """
2864 Generates the threaded functions header file.
2865 Returns success indicator.
2866 """
2867
2868 asLines = self.generateLicenseHeader();
2869
2870 # Generate the threaded function table indexes.
2871 asLines += [
2872 'typedef enum IEMTHREADEDFUNCS',
2873 '{',
2874 ' kIemThreadedFunc_Invalid = 0,',
2875 '',
2876 ' /*',
2877 ' * Predefined',
2878 ' */',
2879 ];
2880 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2881
2882 iThreadedFunction = 1 + len(self.katBltIns);
2883 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2884 asLines += [
2885 '',
2886 ' /*',
2887 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2888 ' */',
2889 ];
2890 for oThreadedFunction in self.aoThreadedFuncs:
2891 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2892 if oVariation:
2893 iThreadedFunction += 1;
2894 oVariation.iEnumValue = iThreadedFunction;
2895 asLines.append(' ' + oVariation.getIndexName() + ',');
2896 asLines += [
2897 ' kIemThreadedFunc_End',
2898 '} IEMTHREADEDFUNCS;',
2899 '',
2900 ];
2901
2902 # Prototype the function table.
2903 asLines += [
2904 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2905 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2906 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2907 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2908 '#endif',
2909 '#if defined(IN_RING3)',
2910 'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
2911 '#endif',
2912 ];
2913
2914 oOut.write('\n'.join(asLines));
2915 return True;
2916
2917 ksBitsToIntMask = {
2918 1: "UINT64_C(0x1)",
2919 2: "UINT64_C(0x3)",
2920 4: "UINT64_C(0xf)",
2921 8: "UINT64_C(0xff)",
2922 16: "UINT64_C(0xffff)",
2923 32: "UINT64_C(0xffffffff)",
2924 };
2925
2926 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams, uNoRefLevel = 0):
2927 """
2928 Outputs code for unpacking parameters.
2929 This is shared by the threaded and native code generators.
2930 """
2931 aasVars = [];
2932 for aoRefs in oVariation.dParamRefs.values():
2933 oRef = aoRefs[0];
2934 if oRef.sType[0] != 'P':
2935 cBits = g_kdTypeInfo[oRef.sType][0];
2936 sType = g_kdTypeInfo[oRef.sType][2];
2937 else:
2938 cBits = 64;
2939 sType = oRef.sType;
2940
2941 sTypeDecl = sType + ' const';
2942
2943 if cBits == 64:
2944 assert oRef.offNewParam == 0;
2945 if sType == 'uint64_t':
2946 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2947 else:
2948 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2949 elif oRef.offNewParam == 0:
2950 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2951 else:
2952 sUnpack = '(%s)((%s >> %s) & %s);' \
2953 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2954
2955 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2956
2957 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2958 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2959 acchVars = [0, 0, 0, 0, 0];
2960 for asVar in aasVars:
2961 for iCol, sStr in enumerate(asVar):
2962 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2963 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2964 for asVar in sorted(aasVars):
2965 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2966
2967 if uNoRefLevel > 0 and aasVars:
2968 if uNoRefLevel > 1:
2969 # level 2: Everything. This is used by liveness.
2970 oOut.write(' ');
2971 for asVar in sorted(aasVars):
2972 oOut.write(' RT_NOREF_PV(%s);' % (asVar[2],));
2973 oOut.write('\n');
2974 else:
2975 # level 1: Only pfnXxxx variables. This is used by native.
2976 for asVar in sorted(aasVars):
2977 if asVar[2].startswith('pfn'):
2978 oOut.write(' RT_NOREF_PV(%s);\n' % (asVar[2],));
2979 return True;
2980
2981 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2982 def generateThreadedFunctionsSource(self, oOut, _):
2983 """
2984 Generates the threaded functions source file.
2985 Returns success indicator.
2986 """
2987
2988 asLines = self.generateLicenseHeader();
2989 oOut.write('\n'.join(asLines));
2990
2991 #
2992 # Emit the function definitions.
2993 #
2994 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2995 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2996 oOut.write( '\n'
2997 + '\n'
2998 + '\n'
2999 + '\n'
3000 + '/*' + '*' * 128 + '\n'
3001 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
3002 + '*' * 128 + '*/\n');
3003
3004 for oThreadedFunction in self.aoThreadedFuncs:
3005 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
3006 if oVariation:
3007 oMcBlock = oThreadedFunction.oMcBlock;
3008
3009 # Function header
3010 oOut.write( '\n'
3011 + '\n'
3012 + '/**\n'
3013 + ' * #%u: %s at line %s offset %s in %s%s\n'
3014 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
3015 os.path.split(oMcBlock.sSrcFile)[1],
3016 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
3017 + ' */\n'
3018 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
3019 + '{\n');
3020
3021 # Unpack parameters.
3022 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
3023
3024 # RT_NOREF for unused parameters.
3025 if oVariation.cMinParams < g_kcThreadedParams:
3026 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
3027
3028 # Now for the actual statements.
3029 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
3030
3031 oOut.write('}\n');
3032
3033
3034 #
3035 # Generate the output tables in parallel.
3036 #
3037 asFuncTable = [
3038 '/**',
3039 ' * Function pointer table.',
3040 ' */',
3041 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
3042 '{',
3043 ' /*Invalid*/ NULL,',
3044 ];
3045 asArgCntTab = [
3046 '/**',
3047 ' * Argument count table.',
3048 ' */',
3049 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
3050 '{',
3051 ' 0, /*Invalid*/',
3052 ];
3053 asNameTable = [
3054 '/**',
3055 ' * Function name table.',
3056 ' */',
3057 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
3058 '{',
3059 ' "Invalid",',
3060 ];
3061 asStatTable = [
3062 '/**',
3063 ' * Function statistics name table.',
3064 ' */',
3065 'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
3066 '{',
3067 ' NULL,',
3068 ];
3069 aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);
3070
3071 for asTable in aasTables:
3072 asTable.extend((
3073 '',
3074 ' /*',
3075 ' * Predefined.',
3076 ' */',
3077 ));
3078 for sFuncNm, cArgs, _ in self.katBltIns:
3079 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
3080 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
3081 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
3082 asStatTable.append(' "BltIn/%s",' % (sFuncNm,));
3083
3084 iThreadedFunction = 1 + len(self.katBltIns);
3085 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3086 for asTable in aasTables:
3087 asTable.extend((
3088 '',
3089 ' /*',
3090 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
3091 ' */',
3092 ));
3093 for oThreadedFunction in self.aoThreadedFuncs:
3094 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
3095 if oVariation:
3096 iThreadedFunction += 1;
3097 assert oVariation.iEnumValue == iThreadedFunction;
3098 sName = oVariation.getThreadedFunctionName();
3099 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
3100 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
3101 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
3102 asStatTable.append(' "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));
3103
3104 for asTable in aasTables:
3105 asTable.append('};');
3106
3107 #
3108 # Output the tables.
3109 #
3110 oOut.write( '\n'
3111 + '\n');
3112 oOut.write('\n'.join(asFuncTable));
3113 oOut.write( '\n'
3114 + '\n'
3115 + '\n');
3116 oOut.write('\n'.join(asArgCntTab));
3117 oOut.write( '\n'
3118 + '\n'
3119 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
3120 oOut.write('\n'.join(asNameTable));
3121 oOut.write( '\n'
3122 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
3123 + '\n'
3124 + '\n'
3125 + '#if defined(IN_RING3)\n');
3126 oOut.write('\n'.join(asStatTable));
3127 oOut.write( '\n'
3128 + '#endif /* IN_RING3 */\n');
3129
3130 return True;
3131
3132 def generateNativeFunctionsHeader(self, oOut, _):
3133 """
3134 Generates the native recompiler functions header file.
3135 Returns success indicator.
3136 """
3137 if not self.oOptions.fNativeRecompilerEnabled:
3138 return True;
3139
3140 asLines = self.generateLicenseHeader();
3141
3142 # Prototype the function table.
3143 asLines += [
3144 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
3145 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
3146 '',
3147 ];
3148
3149 # Emit indicators as to which of the builtin functions have a native
3150 # recompiler function and which not. (We only really need this for
3151 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
3152 for atBltIn in self.katBltIns:
3153 if atBltIn[2]: # the fHaveRecompFunc field
3154 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper());
3155 else:
3156 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper());
3157
3158 # Emit prototypes for the builtin functions we use in tables.
3159 asLines += [
3160 '',
3161 '/* Prototypes for built-in functions used in the above tables. */',
3162 ];
3163 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3164 if fHaveRecompFunc:
3165 asLines += [
3166 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3167 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3168 ];
3169
3170 # Emit prototypes for the table functions.
3171 asLines += [
3172 '',
3173 '#ifdef IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES',
3174 ];
3175 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3176 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3177 asLines += [
3178 '',
3179 '/* Variation: ' + sVarName + ' */',
3180 ];
3181 for oThreadedFunction in self.aoThreadedFuncs:
3182 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3183 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3184 asLines.append('IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(' + oVariation.getNativeFunctionName() + ');');
3185 asLines += [
3186 '',
3187 '#endif /* IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES */',
3188 ];
3189
3190 oOut.write('\n'.join(asLines));
3191 return True;
3192
3193 def generateNativeFunctionsSource(self, oOut, idxPart):
3194 """
3195 Generates the native recompiler functions source file.
3196 Returns success indicator.
3197 """
3198 cParts = 4;
3199 assert idxPart in range(cParts);
3200 if not self.oOptions.fNativeRecompilerEnabled:
3201 return True;
3202
3203 #
3204 # The file header.
3205 #
3206 oOut.write('\n'.join(self.generateLicenseHeader()));
3207
3208 #
3209 # Emit the functions.
3210 #
3211 # The files are split up by threaded variation as that's the simplest way to
3212 # do it, even if the distribution isn't entirely even (ksVariation_Default
3213 # only has the defer to cimpl bits and the pre-386 variants will naturally
3214 # have fewer instructions).
3215 #
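# (Worked example with illustrative counts: with 22 variations and cParts = 4,
#  cVariationsPerFile = 22 // 4 = 5, so parts 0..2 get 5 variations each and the
#  last part picks up the remaining 7.)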
3216 cVariationsPerFile = len(ThreadedFunctionVariation.kasVariationsEmitOrder) // cParts;
3217 idxFirstVar = idxPart * cVariationsPerFile;
3218 idxEndVar = idxFirstVar + cVariationsPerFile;
3219 if idxPart + 1 >= cParts:
3220 idxEndVar = len(ThreadedFunctionVariation.kasVariationsEmitOrder);
3221 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder[idxFirstVar:idxEndVar]:
3222 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3223 oOut.write( '\n'
3224 + '\n'
3225 + '\n'
3226 + '\n'
3227 + '/*' + '*' * 128 + '\n'
3228 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
3229 + '*' * 128 + '*/\n');
3230
3231 for oThreadedFunction in self.aoThreadedFuncs:
3232 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3233 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3234 oMcBlock = oThreadedFunction.oMcBlock;
3235
3236 # Function header
3237 oOut.write( '\n'
3238 + '\n'
3239 + '/**\n'
3240 + ' * #%u: %s at line %s offset %s in %s%s\n'
3241 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
3242 os.path.split(oMcBlock.sSrcFile)[1],
3243 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
3244 + ' */\n'
3245 + 'IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
3246 + '{\n');
3247
3248 # Unpack parameters.
3249 self.generateFunctionParameterUnpacking(oVariation, oOut,
3250 ('pCallEntry->auParams[0]',
3251 'pCallEntry->auParams[1]',
3252 'pCallEntry->auParams[2]',),
3253 uNoRefLevel = 1);
3254
3255 # Now for the actual statements.
3256 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
3257
3258 oOut.write('}\n');
3259
3260 #
3261 # Output the function table if this is the first file.
3262 #
3263 if idxPart == 0:
3264 oOut.write( '\n'
3265 + '\n'
3266 + '/*\n'
3267 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
3268 + ' */\n'
3269 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
3270 + '{\n'
3271 + ' /*Invalid*/ NULL,'
3272 + '\n'
3273 + ' /*\n'
3274 + ' * Predefined.\n'
3275 + ' */\n'
3276 );
3277 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3278 if fHaveRecompFunc:
3279 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
3280 else:
3281 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
3282
3283 iThreadedFunction = 1 + len(self.katBltIns);
3284 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3285 oOut.write( ' /*\n'
3286 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
3287 + ' */\n');
3288 for oThreadedFunction in self.aoThreadedFuncs:
3289 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
3290 if oVariation:
3291 iThreadedFunction += 1;
3292 assert oVariation.iEnumValue == iThreadedFunction;
3293 sName = oVariation.getNativeFunctionName();
3294 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3295 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
3296 else:
3297 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
3298
3299 oOut.write( '};\n');
3300
3301 oOut.write('\n');
3302 return True;
3303
3304 def generateNativeLivenessSource(self, oOut, _):
3305 """
3306 Generates the native recompiler liveness analysis functions source file.
3307 Returns success indicator.
3308 """
3309 if not self.oOptions.fNativeRecompilerEnabled:
3310 return True;
3311
3312 #
3313 # The file header.
3314 #
3315 oOut.write('\n'.join(self.generateLicenseHeader()));
3316
3317 #
3318 # Emit the functions.
3319 #
3320 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3321 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3322 oOut.write( '\n'
3323 + '\n'
3324 + '\n'
3325 + '\n'
3326 + '/*' + '*' * 128 + '\n'
3327 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
3328 + '*' * 128 + '*/\n');
3329
3330 for oThreadedFunction in self.aoThreadedFuncs:
3331 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3332 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3333 oMcBlock = oThreadedFunction.oMcBlock;
3334
3335 # Function header
3336 oOut.write( '\n'
3337 + '\n'
3338 + '/**\n'
3339 + ' * #%u: %s at line %s offset %s in %s%s\n'
3340 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
3341 os.path.split(oMcBlock.sSrcFile)[1],
3342 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
3343 + ' */\n'
3344 + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
3345 + '{\n');
3346
3347 # Unpack parameters.
3348 self.generateFunctionParameterUnpacking(oVariation, oOut,
3349 ('pCallEntry->auParams[0]',
3350 'pCallEntry->auParams[1]',
3351 'pCallEntry->auParams[2]',),
3352 uNoRefLevel = 2);
3353
3354 # Now for the actual statements.
3355 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
3356
3357 oOut.write('}\n');
3358
3359 #
3360 # Output the function table.
3361 #
3362 oOut.write( '\n'
3363 + '\n'
3364 + '/*\n'
3365 + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
3366 + ' */\n'
3367 + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
3368 + '{\n'
3369 + ' /*Invalid*/ NULL,'
3370 + '\n'
3371 + ' /*\n'
3372 + ' * Predefined.\n'
3373 + ' */\n'
3374 );
3375 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3376 if fHaveRecompFunc:
3377 oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
3378 else:
3379 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
3380
3381 iThreadedFunction = 1 + len(self.katBltIns);
3382 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3383 oOut.write( ' /*\n'
3384 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
3385 + ' */\n');
3386 for oThreadedFunction in self.aoThreadedFuncs:
3387 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
3388 if oVariation:
3389 iThreadedFunction += 1;
3390 assert oVariation.iEnumValue == iThreadedFunction;
3391 sName = oVariation.getLivenessFunctionName();
3392 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3393 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
3394 else:
3395 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
3396
3397 oOut.write( '};\n'
3398 + '\n');
3399 return True;
3400
3401
3402 def getThreadedFunctionByIndex(self, idx):
3403 """
3404 Returns a ThreadedFunction object for the given index. If the index is
3405 out of bounds, a dummy is returned.
3406 """
3407 if idx < len(self.aoThreadedFuncs):
3408 return self.aoThreadedFuncs[idx];
3409 return ThreadedFunction.dummyInstance();
3410
3411 def generateModifiedInput(self, oOut, idxFile):
3412 """
3413 Generates the combined modified input source/header file.
3414 Returns success indicator.
3415 """
3416 #
3417 # File header and assert assumptions.
3418 #
3419 oOut.write('\n'.join(self.generateLicenseHeader()));
3420 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
3421
3422 #
3423 # Iterate all parsers (input files) and output the ones related to the
3424 # file set given by idxFile.
3425 #
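# (aoInfo[2] is the file set an instruction file belongs to; -1 means it is included in
#  every output part, otherwise it must equal idxFile to be emitted here.)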
3426 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
3427 # Is this included in the file set?
3428 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
3429 fInclude = -1;
3430 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
3431 if sSrcBaseFile == aoInfo[0].lower():
3432 fInclude = aoInfo[2] in (-1, idxFile);
3433 break;
3434 if fInclude is not True:
3435 assert fInclude is False;
3436 continue;
3437
3438 # Output it.
3439 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
3440
3441 iThreadedFunction = self.aidxFirstFunctions[idxParser];
3442 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
3443 iLine = 0;
3444 while iLine < len(oParser.asLines):
3445 sLine = oParser.asLines[iLine];
3446 iLine += 1; # iBeginLine and iEndLine are 1-based.
3447
3448 # Can we pass it thru?
3449 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
3450 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
3451 oOut.write(sLine);
3452 #
3453 # Single MC block. Just extract it and insert the replacement.
3454 #
3455 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
3456 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
3457 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
3458 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
3459 sModified = oThreadedFunction.generateInputCode().strip();
3460 oOut.write(sModified);
3461
3462 iLine = oThreadedFunction.oMcBlock.iEndLine;
3463 sLine = oParser.asLines[iLine - 1];
3464 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
3465 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
3466 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
3467 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
3468
3469 # Advance
3470 iThreadedFunction += 1;
3471 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
3472 #
3473 # Macro expansion lines that have sublines and may contain multiple MC blocks.
3474 #
3475 else:
3476 offLine = 0;
3477 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
3478 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
3479
3480 sModified = oThreadedFunction.generateInputCode().strip();
3481 assert ( sModified.startswith('IEM_MC_BEGIN')
3482 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
3483 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
3484 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
3485 ), 'sModified="%s"' % (sModified,);
3486 oOut.write(sModified);
3487
3488 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
3489
3490 # Advance
3491 iThreadedFunction += 1;
3492 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
3493
3494 # Last line segment.
3495 if offLine < len(sLine):
3496 oOut.write(sLine[offLine : ]);
3497
3498 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
3499
3500 return True;
3501
3502
3503 #
3504 # Main
3505 #
3506
3507 def main(self, asArgs):
3508 """
3509 C-like main function.
3510 Returns exit code.
3511 """
3512
3513 #
3514 # Parse arguments
3515 #
3516 sScriptDir = os.path.dirname(__file__);
3517 oParser = argparse.ArgumentParser(add_help = False);
3518 oParser.add_argument('asInFiles',
3519 metavar = 'input.cpp.h',
3520 nargs = '*',
3521 default = [os.path.join(sScriptDir, aoInfo[0])
3522 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3523 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3524 oParser.add_argument('--host-arch',
3525 metavar = 'arch',
3526 dest = 'sHostArch',
3527 action = 'store',
3528 default = None,
3529 help = 'The host architecture.');
3530
3531 oParser.add_argument('--out-thrd-funcs-hdr',
3532 metavar = 'file-thrd-funcs.h',
3533 dest = 'sOutFileThrdFuncsHdr',
3534 action = 'store',
3535 default = '-',
3536 help = 'The output header file for the threaded functions.');
3537 oParser.add_argument('--out-thrd-funcs-cpp',
3538 metavar = 'file-thrd-funcs.cpp',
3539 dest = 'sOutFileThrdFuncsCpp',
3540 action = 'store',
3541 default = '-',
3542 help = 'The output C++ file for the threaded functions.');
3543 oParser.add_argument('--out-n8ve-funcs-hdr',
3544 metavar = 'file-n8tv-funcs.h',
3545 dest = 'sOutFileN8veFuncsHdr',
3546 action = 'store',
3547 default = '-',
3548 help = 'The output header file for the native recompiler functions.');
3549 oParser.add_argument('--out-n8ve-funcs-cpp1',
3550 metavar = 'file-n8tv-funcs1.cpp',
3551 dest = 'sOutFileN8veFuncsCpp1',
3552 action = 'store',
3553 default = '-',
3554 help = 'The output C++ file for the native recompiler functions part 1.');
3555 oParser.add_argument('--out-n8ve-funcs-cpp2',
3556 metavar = 'file-n8ve-funcs2.cpp',
3557 dest = 'sOutFileN8veFuncsCpp2',
3558 action = 'store',
3559 default = '-',
3560 help = 'The output C++ file for the native recompiler functions part 2.');
3561 oParser.add_argument('--out-n8ve-funcs-cpp3',
3562 metavar = 'file-n8ve-funcs3.cpp',
3563 dest = 'sOutFileN8veFuncsCpp3',
3564 action = 'store',
3565 default = '-',
3566 help = 'The output C++ file for the native recompiler functions part 3.');
3567 oParser.add_argument('--out-n8ve-funcs-cpp4',
3568 metavar = 'file-n8ve-funcs4.cpp',
3569 dest = 'sOutFileN8veFuncsCpp4',
3570 action = 'store',
3571 default = '-',
3572 help = 'The output C++ file for the native recompiler functions part 4.');
3573 oParser.add_argument('--out-n8ve-liveness-cpp',
3574 metavar = 'file-n8ve-liveness.cpp',
3575 dest = 'sOutFileN8veLivenessCpp',
3576 action = 'store',
3577 default = '-',
3578 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3579 oParser.add_argument('--native',
3580 dest = 'fNativeRecompilerEnabled',
3581 action = 'store_true',
3582 default = False,
3583 help = 'Enables generating the files related to native recompilation.');
3584 oParser.add_argument('--out-mod-input1',
3585 metavar = 'file-instr.cpp.h',
3586 dest = 'sOutFileModInput1',
3587 action = 'store',
3588 default = '-',
3589 help = 'The output C++/header file for modified input instruction files part 1.');
3590 oParser.add_argument('--out-mod-input2',
3591 metavar = 'file-instr.cpp.h',
3592 dest = 'sOutFileModInput2',
3593 action = 'store',
3594 default = '-',
3595 help = 'The output C++/header file for modified input instruction files part 2.');
3596 oParser.add_argument('--out-mod-input3',
3597 metavar = 'file-instr.cpp.h',
3598 dest = 'sOutFileModInput3',
3599 action = 'store',
3600 default = '-',
3601 help = 'The output C++/header file for modified input instruction files part 3.');
3602 oParser.add_argument('--out-mod-input4',
3603 metavar = 'file-instr.cpp.h',
3604 dest = 'sOutFileModInput4',
3605 action = 'store',
3606 default = '-',
3607 help = 'The output C++/header file for modified input instruction files part 4.');
3608 oParser.add_argument('--help', '-h', '-?',
3609 action = 'help',
3610 help = 'Display help and exit.');
3611 oParser.add_argument('--version', '-V',
3612 action = 'version',
3613 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3614 % (__version__.split()[1], iai.__version__.split()[1],),
3615 help = 'Display the version/revision of the script and exit.');
3616 self.oOptions = oParser.parse_args(asArgs[1:]);
3617 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3618
3619 if self.oOptions.sHostArch not in ('amd64', 'arm64'):
3620 print('error! Unsupported (or missing) host architecture: %s' % (self.oOptions.sHostArch,), file = sys.stderr);
3621 return 1;
3622
3623 #
3624 # Process the instructions specified in the IEM sources.
3625 #
3626 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3627 #
3628 # Generate the output files.
3629 #
3630 aaoOutputFiles = (
3631 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader, 0, ),
3632 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource, 0, ),
3633 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader, 0, ),
3634 ( self.oOptions.sOutFileN8veFuncsCpp1, self.generateNativeFunctionsSource, 0, ),
3635 ( self.oOptions.sOutFileN8veFuncsCpp2, self.generateNativeFunctionsSource, 1, ),
3636 ( self.oOptions.sOutFileN8veFuncsCpp3, self.generateNativeFunctionsSource, 2, ),
3637 ( self.oOptions.sOutFileN8veFuncsCpp4, self.generateNativeFunctionsSource, 3, ),
3638 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource, 0, ),
3639 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput, 1, ),
3640 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput, 2, ),
3641 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput, 3, ),
3642 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput, 4, ),
3643 );
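# (Each tuple: output file name option, generator method, and the part/index number that
#  is passed to the method as its second argument below.)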
3644 fRc = True;
3645 for sOutFile, fnGenMethod, iPartNo in aaoOutputFiles:
3646 if sOutFile == '-':
3647 fRc = fnGenMethod(sys.stdout, iPartNo) and fRc;
3648 else:
3649 try:
3650 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3651 except Exception as oXcpt:
3652 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3653 return 1;
3654 fRc = fnGenMethod(oOut, iPartNo) and fRc;
3655 oOut.close();
3656 if fRc:
3657 return 0;
3658
3659 return 1;
3660
3661
3662if __name__ == '__main__':
3663 sys.exit(IEMThreadedGenerator().main(sys.argv));
3664