VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@103612

Last change on this file since 103612 was 103560, checked in by vboxsync, 14 months ago

Try to fix build bugref:9898

1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 103560 2024-02-24 12:59:06Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 103560 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the thread functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; #| g_kdTypeInfo; - requires 3.9
90g_kdTypeInfo2.update(g_kdTypeInfo);
91
92def getTypeBitCount(sType):
93 """
94 Translate a type to size in bits
95 """
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
103
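# Illustrative sketch of the lookup above (example type names only; 'PSOMETYPE' is hypothetical):
#   getTypeBitCount('uint16_t')   -> 16   (from g_kdTypeInfo, merged into g_kdTypeInfo2)
#   getTypeBitCount('RTUINT128U') -> 128  (from g_kdTypeInfo2)
#   getTypeBitCount('PSOMETYPE')  -> 64   (pointer heuristic: '*' in the name or a leading 'P')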
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
131
132## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
133## @{
134g_ksFinishAnnotation_Advance = 'Advance';
135g_ksFinishAnnotation_RelJmp = 'RelJmp';
136g_ksFinishAnnotation_SetJmp = 'SetJmp';
137g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
138## @}
139
140
141class ThreadedParamRef(object):
142 """
143 A parameter reference for a threaded function.
144 """
145
146 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
147 ## The name / reference in the original code.
148 self.sOrgRef = sOrgRef;
149 ## Normalized name to deal with spaces in macro invocations and such.
150 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
151 ## Indicates that sOrgRef may not match the parameter.
152 self.fCustomRef = sStdRef is not None;
153 ## The type (typically derived).
154 self.sType = sType;
155 ## The statement making the reference.
156 self.oStmt = oStmt;
157 ## The parameter containing the references. None if implicit.
158 self.iParam = iParam;
159 ## The offset in the parameter of the reference.
160 self.offParam = offParam;
161
162 ## The variable name in the threaded function.
163 self.sNewName = 'x';
164 ## The threaded function parameter this is packed into.
165 self.iNewParam = 99;
166 ## The bit offset in iNewParam.
167 self.offNewParam = 1024;
168
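# Illustrative sketch of how the analysis code below creates such references, assuming an
# McStmt instance oStmt is at hand (see analyzeFindThreadedParamRefs):
#   oRef = ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt);
# The packer (analyzeConsolidateThreadedParamRefs) later fills in oRef.sNewName, oRef.iNewParam
# and oRef.offNewParam so the value lands in one of the generic 64-bit threaded-function parameters.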
169
170class ThreadedFunctionVariation(object):
171 """ Threaded function variation. """
172
173 ## @name Variations.
174 ## These variations will match translation block selection/distinctions as well.
175 ## @{
176 # pylint: disable=line-too-long
177 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
178 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
179 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
180 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
181 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
182 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
183 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
184 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
185 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
186 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
187 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
188 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
189 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
190 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
191 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
192 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
193 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
194 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
195 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
196 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
197 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
198 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
199 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
200 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
201 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
202 ksVariation_64 = '_64'; ##< 64-bit mode code.
203 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
204 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
205 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
206 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
207 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
208 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
209 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
210 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
211 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
212 # pylint: enable=line-too-long
213 kasVariations = (
214 ksVariation_Default,
215 ksVariation_16,
216 ksVariation_16f,
217 ksVariation_16_Jmp,
218 ksVariation_16f_Jmp,
219 ksVariation_16_NoJmp,
220 ksVariation_16f_NoJmp,
221 ksVariation_16_Addr32,
222 ksVariation_16f_Addr32,
223 ksVariation_16_Pre386,
224 ksVariation_16f_Pre386,
225 ksVariation_16_Pre386_Jmp,
226 ksVariation_16f_Pre386_Jmp,
227 ksVariation_16_Pre386_NoJmp,
228 ksVariation_16f_Pre386_NoJmp,
229 ksVariation_32,
230 ksVariation_32f,
231 ksVariation_32_Jmp,
232 ksVariation_32f_Jmp,
233 ksVariation_32_NoJmp,
234 ksVariation_32f_NoJmp,
235 ksVariation_32_Flat,
236 ksVariation_32f_Flat,
237 ksVariation_32_Addr16,
238 ksVariation_32f_Addr16,
239 ksVariation_64,
240 ksVariation_64f,
241 ksVariation_64_Jmp,
242 ksVariation_64f_Jmp,
243 ksVariation_64_NoJmp,
244 ksVariation_64f_NoJmp,
245 ksVariation_64_FsGs,
246 ksVariation_64f_FsGs,
247 ksVariation_64_Addr32,
248 ksVariation_64f_Addr32,
249 );
250 kasVariationsWithoutAddress = (
251 ksVariation_16,
252 ksVariation_16f,
253 ksVariation_16_Pre386,
254 ksVariation_16f_Pre386,
255 ksVariation_32,
256 ksVariation_32f,
257 ksVariation_64,
258 ksVariation_64f,
259 );
260 kasVariationsWithoutAddressNot286 = (
261 ksVariation_16,
262 ksVariation_16f,
263 ksVariation_32,
264 ksVariation_32f,
265 ksVariation_64,
266 ksVariation_64f,
267 );
268 kasVariationsWithoutAddressNot286Not64 = (
269 ksVariation_16,
270 ksVariation_16f,
271 ksVariation_32,
272 ksVariation_32f,
273 );
274 kasVariationsWithoutAddressNot64 = (
275 ksVariation_16,
276 ksVariation_16f,
277 ksVariation_16_Pre386,
278 ksVariation_16f_Pre386,
279 ksVariation_32,
280 ksVariation_32f,
281 );
282 kasVariationsWithoutAddressOnly64 = (
283 ksVariation_64,
284 ksVariation_64f,
285 );
286 kasVariationsWithAddress = (
287 ksVariation_16,
288 ksVariation_16f,
289 ksVariation_16_Addr32,
290 ksVariation_16f_Addr32,
291 ksVariation_16_Pre386,
292 ksVariation_16f_Pre386,
293 ksVariation_32,
294 ksVariation_32f,
295 ksVariation_32_Flat,
296 ksVariation_32f_Flat,
297 ksVariation_32_Addr16,
298 ksVariation_32f_Addr16,
299 ksVariation_64,
300 ksVariation_64f,
301 ksVariation_64_FsGs,
302 ksVariation_64f_FsGs,
303 ksVariation_64_Addr32,
304 ksVariation_64f_Addr32,
305 );
306 kasVariationsWithAddressNot286 = (
307 ksVariation_16,
308 ksVariation_16f,
309 ksVariation_16_Addr32,
310 ksVariation_16f_Addr32,
311 ksVariation_32,
312 ksVariation_32f,
313 ksVariation_32_Flat,
314 ksVariation_32f_Flat,
315 ksVariation_32_Addr16,
316 ksVariation_32f_Addr16,
317 ksVariation_64,
318 ksVariation_64f,
319 ksVariation_64_FsGs,
320 ksVariation_64f_FsGs,
321 ksVariation_64_Addr32,
322 ksVariation_64f_Addr32,
323 );
324 kasVariationsWithAddressNot286Not64 = (
325 ksVariation_16,
326 ksVariation_16f,
327 ksVariation_16_Addr32,
328 ksVariation_16f_Addr32,
329 ksVariation_32,
330 ksVariation_32f,
331 ksVariation_32_Flat,
332 ksVariation_32f_Flat,
333 ksVariation_32_Addr16,
334 ksVariation_32f_Addr16,
335 );
336 kasVariationsWithAddressNot64 = (
337 ksVariation_16,
338 ksVariation_16f,
339 ksVariation_16_Addr32,
340 ksVariation_16f_Addr32,
341 ksVariation_16_Pre386,
342 ksVariation_16f_Pre386,
343 ksVariation_32,
344 ksVariation_32f,
345 ksVariation_32_Flat,
346 ksVariation_32f_Flat,
347 ksVariation_32_Addr16,
348 ksVariation_32f_Addr16,
349 );
350 kasVariationsWithAddressOnly64 = (
351 ksVariation_64,
352 ksVariation_64f,
353 ksVariation_64_FsGs,
354 ksVariation_64f_FsGs,
355 ksVariation_64_Addr32,
356 ksVariation_64f_Addr32,
357 );
358 kasVariationsOnlyPre386 = (
359 ksVariation_16_Pre386,
360 ksVariation_16f_Pre386,
361 );
362 kasVariationsEmitOrder = (
363 ksVariation_Default,
364 ksVariation_64,
365 ksVariation_64f,
366 ksVariation_64_Jmp,
367 ksVariation_64f_Jmp,
368 ksVariation_64_NoJmp,
369 ksVariation_64f_NoJmp,
370 ksVariation_64_FsGs,
371 ksVariation_64f_FsGs,
372 ksVariation_32_Flat,
373 ksVariation_32f_Flat,
374 ksVariation_32,
375 ksVariation_32f,
376 ksVariation_32_Jmp,
377 ksVariation_32f_Jmp,
378 ksVariation_32_NoJmp,
379 ksVariation_32f_NoJmp,
380 ksVariation_16,
381 ksVariation_16f,
382 ksVariation_16_Jmp,
383 ksVariation_16f_Jmp,
384 ksVariation_16_NoJmp,
385 ksVariation_16f_NoJmp,
386 ksVariation_16_Addr32,
387 ksVariation_16f_Addr32,
388 ksVariation_16_Pre386,
389 ksVariation_16f_Pre386,
390 ksVariation_16_Pre386_Jmp,
391 ksVariation_16f_Pre386_Jmp,
392 ksVariation_16_Pre386_NoJmp,
393 ksVariation_16f_Pre386_NoJmp,
394 ksVariation_32_Addr16,
395 ksVariation_32f_Addr16,
396 ksVariation_64_Addr32,
397 ksVariation_64f_Addr32,
398 );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump not taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump not taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
436 kdVariationsWithEflagsCheckingAndClearing = {
437 ksVariation_16f: True,
438 ksVariation_16f_Jmp: True,
439 ksVariation_16f_NoJmp: True,
440 ksVariation_16f_Addr32: True,
441 ksVariation_16f_Pre386: True,
442 ksVariation_16f_Pre386_Jmp: True,
443 ksVariation_16f_Pre386_NoJmp: True,
444 ksVariation_32f: True,
445 ksVariation_32f_Jmp: True,
446 ksVariation_32f_NoJmp: True,
447 ksVariation_32f_Flat: True,
448 ksVariation_32f_Addr16: True,
449 ksVariation_64f: True,
450 ksVariation_64f_Jmp: True,
451 ksVariation_64f_NoJmp: True,
452 ksVariation_64f_FsGs: True,
453 ksVariation_64f_Addr32: True,
454 };
455 kdVariationsOnly64NoFlags = {
456 ksVariation_64: True,
457 ksVariation_64_Jmp: True,
458 ksVariation_64_NoJmp: True,
459 ksVariation_64_FsGs: True,
460 ksVariation_64_Addr32: True,
461 };
462 kdVariationsOnly64WithFlags = {
463 ksVariation_64f: True,
464 ksVariation_64f_Jmp: True,
465 ksVariation_64f_NoJmp: True,
466 ksVariation_64f_FsGs: True,
467 ksVariation_64f_Addr32: True,
468 };
469 kdVariationsOnlyPre386NoFlags = {
470 ksVariation_16_Pre386: True,
471 ksVariation_16_Pre386_Jmp: True,
472 ksVariation_16_Pre386_NoJmp: True,
473 };
474 kdVariationsOnlyPre386WithFlags = {
475 ksVariation_16f_Pre386: True,
476 ksVariation_16f_Pre386_Jmp: True,
477 ksVariation_16f_Pre386_NoJmp: True,
478 };
479 kdVariationsWithFlatAddress = {
480 ksVariation_32_Flat: True,
481 ksVariation_32f_Flat: True,
482 ksVariation_64: True,
483 ksVariation_64f: True,
484 ksVariation_64_Addr32: True,
485 ksVariation_64f_Addr32: True,
486 };
487 kdVariationsWithFlatStackAddress = {
488 ksVariation_32_Flat: True,
489 ksVariation_32f_Flat: True,
490 ksVariation_64: True,
491 ksVariation_64f: True,
492 ksVariation_64_FsGs: True,
493 ksVariation_64f_FsGs: True,
494 ksVariation_64_Addr32: True,
495 ksVariation_64f_Addr32: True,
496 };
497 kdVariationsWithFlat64StackAddress = {
498 ksVariation_64: True,
499 ksVariation_64f: True,
500 ksVariation_64_FsGs: True,
501 ksVariation_64f_FsGs: True,
502 ksVariation_64_Addr32: True,
503 ksVariation_64f_Addr32: True,
504 };
505 kdVariationsWithFlatAddr16 = {
506 ksVariation_16: True,
507 ksVariation_16f: True,
508 ksVariation_16_Pre386: True,
509 ksVariation_16f_Pre386: True,
510 ksVariation_32_Addr16: True,
511 ksVariation_32f_Addr16: True,
512 };
513 kdVariationsWithFlatAddr32No64 = {
514 ksVariation_16_Addr32: True,
515 ksVariation_16f_Addr32: True,
516 ksVariation_32: True,
517 ksVariation_32f: True,
518 ksVariation_32_Flat: True,
519 ksVariation_32f_Flat: True,
520 };
521 kdVariationsWithAddressOnly64 = {
522 ksVariation_64: True,
523 ksVariation_64f: True,
524 ksVariation_64_FsGs: True,
525 ksVariation_64f_FsGs: True,
526 ksVariation_64_Addr32: True,
527 ksVariation_64f_Addr32: True,
528 };
529 kdVariationsWithConditional = {
530 ksVariation_16_Jmp: True,
531 ksVariation_16_NoJmp: True,
532 ksVariation_16_Pre386_Jmp: True,
533 ksVariation_16_Pre386_NoJmp: True,
534 ksVariation_32_Jmp: True,
535 ksVariation_32_NoJmp: True,
536 ksVariation_64_Jmp: True,
537 ksVariation_64_NoJmp: True,
538 ksVariation_16f_Jmp: True,
539 ksVariation_16f_NoJmp: True,
540 ksVariation_16f_Pre386_Jmp: True,
541 ksVariation_16f_Pre386_NoJmp: True,
542 ksVariation_32f_Jmp: True,
543 ksVariation_32f_NoJmp: True,
544 ksVariation_64f_Jmp: True,
545 ksVariation_64f_NoJmp: True,
546 };
547 kdVariationsWithConditionalNoJmp = {
548 ksVariation_16_NoJmp: True,
549 ksVariation_16_Pre386_NoJmp: True,
550 ksVariation_32_NoJmp: True,
551 ksVariation_64_NoJmp: True,
552 ksVariation_16f_NoJmp: True,
553 ksVariation_16f_Pre386_NoJmp: True,
554 ksVariation_32f_NoJmp: True,
555 ksVariation_64f_NoJmp: True,
556 };
557 kdVariationsOnlyPre386 = {
558 ksVariation_16_Pre386: True,
559 ksVariation_16f_Pre386: True,
560 ksVariation_16_Pre386_Jmp: True,
561 ksVariation_16f_Pre386_Jmp: True,
562 ksVariation_16_Pre386_NoJmp: True,
563 ksVariation_16f_Pre386_NoJmp: True,
564 };
565 ## @}
566
567 ## IEM_CIMPL_F_XXX flags that we know.
568 ## The value indicates whether it terminates the TB or not. The goal is to
569 ## improve the recompiler so all but END_TB will be False.
570 ##
571 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
572 kdCImplFlags = {
573 'IEM_CIMPL_F_MODE': False,
574 'IEM_CIMPL_F_BRANCH_DIRECT': False,
575 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
576 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
577 'IEM_CIMPL_F_BRANCH_FAR': True,
578 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
579 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
580 'IEM_CIMPL_F_BRANCH_STACK': False,
581 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
582 'IEM_CIMPL_F_RFLAGS': False,
583 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
584 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
585 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
586 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
587 'IEM_CIMPL_F_STATUS_FLAGS': False,
588 'IEM_CIMPL_F_VMEXIT': False,
589 'IEM_CIMPL_F_FPU': False,
590 'IEM_CIMPL_F_REP': False,
591 'IEM_CIMPL_F_IO': False,
592 'IEM_CIMPL_F_END_TB': True,
593 'IEM_CIMPL_F_XCPT': True,
594 'IEM_CIMPL_F_CALLS_CIMPL': False,
595 'IEM_CIMPL_F_CALLS_AIMPL': False,
596 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
597 };
598
599 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
600 self.oParent = oThreadedFunction # type: ThreadedFunction
601 ##< ksVariation_Xxxx.
602 self.sVariation = sVariation
603
604 ## Threaded function parameter references.
605 self.aoParamRefs = [] # type: List[ThreadedParamRef]
606 ## Unique parameter references.
607 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
608 ## Minimum number of parameters to the threaded function.
609 self.cMinParams = 0;
610
611 ## List/tree of statements for the threaded function.
612 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
613
614 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
615 self.iEnumValue = -1;
616
617 ## Native recompilation details for this variation.
618 self.oNativeRecomp = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
625
626 def getThreadedFunctionName(self):
627 sName = self.oParent.oMcBlock.sFunction;
628 if sName.startswith('iemOp_'):
629 sName = sName[len('iemOp_'):];
630 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
631
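# Example for the name builders above (assuming a decoder function named iemOp_add_Eb_Gb,
# no sub-name and the '_16' variation): getThreadedFunctionName() yields
# 'iemThreadedFunc_add_Eb_Gb_16' and getIndexName() yields 'kIemThreadedFunc_add_Eb_Gb_16'.
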
632 def getNativeFunctionName(self):
633 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
634
635 def getLivenessFunctionName(self):
636 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
637
638 def getShortName(self):
639 sName = self.oParent.oMcBlock.sFunction;
640 if sName.startswith('iemOp_'):
641 sName = sName[len('iemOp_'):];
642 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
643
644 def getThreadedFunctionStatisticsName(self):
645 sName = self.oParent.oMcBlock.sFunction;
646 if sName.startswith('iemOp_'):
647 sName = sName[len('iemOp_'):];
648
649 sVarNm = self.sVariation;
650 if sVarNm:
651 if sVarNm.startswith('_'):
652 sVarNm = sVarNm[1:];
653 if sVarNm.endswith('_Jmp'):
654 sVarNm = sVarNm[:-4];
655 sName += '_Jmp';
656 elif sVarNm.endswith('_NoJmp'):
657 sVarNm = sVarNm[:-6];
658 sName += '_NoJmp';
659 else:
660 sVarNm = 'DeferToCImpl';
661
662 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
663
664 def isWithFlagsCheckingAndClearingVariation(self):
665 """
666 Checks if this is a variation that checks and clears EFLAGS.
667 """
668 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
669
670 #
671 # Analysis and code morphing.
672 #
673
674 def raiseProblem(self, sMessage):
675 """ Raises a problem. """
676 self.oParent.raiseProblem(sMessage);
677
678 def warning(self, sMessage):
679 """ Emits a warning. """
680 self.oParent.warning(sMessage);
681
682 def analyzeReferenceToType(self, sRef):
683 """
684 Translates a variable or structure reference to a type.
685 Returns type name.
686 Raises exception if unable to figure it out.
687 """
688 ch0 = sRef[0];
689 if ch0 == 'u':
690 if sRef.startswith('u32'):
691 return 'uint32_t';
692 if sRef.startswith('u8') or sRef == 'uReg':
693 return 'uint8_t';
694 if sRef.startswith('u64'):
695 return 'uint64_t';
696 if sRef.startswith('u16'):
697 return 'uint16_t';
698 elif ch0 == 'b':
699 return 'uint8_t';
700 elif ch0 == 'f':
701 return 'bool';
702 elif ch0 == 'i':
703 if sRef.startswith('i8'):
704 return 'int8_t';
705 if sRef.startswith('i16'):
706 return 'int16_t';
707 if sRef.startswith('i32'):
708 return 'int32_t';
709 if sRef.startswith('i64'):
710 return 'int64_t';
711 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
712 return 'uint8_t';
713 elif ch0 == 'p':
714 if sRef.find('-') < 0:
715 return 'uintptr_t';
716 if sRef.startswith('pVCpu->iem.s.'):
717 sField = sRef[len('pVCpu->iem.s.') : ];
718 if sField in g_kdIemFieldToType:
719 if g_kdIemFieldToType[sField][0]:
720 return g_kdIemFieldToType[sField][0];
721 elif ch0 == 'G' and sRef.startswith('GCPtr'):
722 return 'uint64_t';
723 elif ch0 == 'e':
724 if sRef == 'enmEffOpSize':
725 return 'IEMMODE';
726 elif ch0 == 'o':
727 if sRef.startswith('off32'):
728 return 'uint32_t';
729 elif sRef == 'cbFrame': # enter
730 return 'uint16_t';
731 elif sRef == 'cShift': ## @todo risky
732 return 'uint8_t';
733
734 self.raiseProblem('Unknown reference: %s' % (sRef,));
735 return None; # Shut up pylint 2.16.2.
736
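# A few illustrative examples of the Hungarian-prefix heuristic above (reference names
# picked to exercise the individual branches):
#   'u32Disp' -> 'uint32_t'    'bRm'      -> 'uint8_t'     'fRepPrefix' -> 'bool'
#   'i64Val'  -> 'int64_t'     'GCPtrMem' -> 'uint64_t'    'pfnAImpl'   -> 'uintptr_t'
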
737 def analyzeCallToType(self, sFnRef):
738 """
739 Determines the type of an indirect function call.
740 """
741 assert sFnRef[0] == 'p';
742
743 #
744 # Simple?
745 #
746 if sFnRef.find('-') < 0:
747 oDecoderFunction = self.oParent.oMcBlock.oFunction;
748
749 # Try the argument list of the function definition macro invocation first.
750 iArg = 2;
751 while iArg < len(oDecoderFunction.asDefArgs):
752 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
753 return oDecoderFunction.asDefArgs[iArg - 1];
754 iArg += 1;
755
756 # Then check for a line that includes the word and looks like a variable declaration.
757 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
758 for sLine in oDecoderFunction.asLines:
759 oMatch = oRe.match(sLine);
760 if oMatch:
761 if not oMatch.group(1).startswith('const'):
762 return oMatch.group(1);
763 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
764
765 #
766 # Deal with the pImpl->pfnXxx:
767 #
768 elif sFnRef.startswith('pImpl->pfn'):
769 sMember = sFnRef[len('pImpl->') : ];
770 sBaseType = self.analyzeCallToType('pImpl');
771 offBits = sMember.rfind('U') + 1;
772 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
773 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
774 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
775 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
776 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
777 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
778 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
779 if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
780 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
781 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
782 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
783
784 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
785
786 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
787 return None; # Shut up pylint 2.16.2.
788
789 def analyze8BitGRegStmt(self, oStmt):
790 """
791 Gets the 8-bit general purpose register access details of the given statement.
792 ASSUMES the statement is one accessing an 8-bit GREG.
793 """
794 idxReg = 0;
795 if ( oStmt.sName.find('_FETCH_') > 0
796 or oStmt.sName.find('_REF_') > 0
797 or oStmt.sName.find('_TO_LOCAL') > 0):
798 idxReg = 1;
799
800 sRegRef = oStmt.asParams[idxReg];
801 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
802 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
803 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
804 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
805 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
806 else:
807 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
808
809 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
810 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
811 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
812 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
813 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
814 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
815 else:
816 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
817 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
818 sStdRef = 'bOther8Ex';
819
820 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
821 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
822 return (idxReg, sOrgExpr, sStdRef);
823
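# Illustrative example (hypothetical statement and variable name) of what analyze8BitGRegStmt
# returns for IEM_MC_FETCH_GREG_U8(u8Value, IEM_GET_MODRM_RM(pVCpu, bRm)):
#   -> (1, 'IEM_GET_MODRM_RM_EX8(pVCpu, bRm)', 'bRmRm8Ex')
# i.e. the register-index argument (parameter #1) is recomputed via the _EX8 macro so the
# REX prefix is taken into account when the threaded parameter is generated.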
824
825 ## Maps memory related MCs to info for FLAT conversion.
826 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
827 ## segmentation checking for every memory access. Only applied to accesses
828 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment,
829 ## the latter (CS) is just to keep things simple (we could safely fetch via
830 ## it, but only in 64-bit mode could we safely write via it, IIRC).
831 kdMemMcToFlatInfo = {
832 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
833 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
834 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
835 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
836 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
837 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
838 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
839 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
840 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
841 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
842 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
843 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
844 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
845 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
846 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
847 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
848 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
849 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
850 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
851 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
852 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
853 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
854 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
855 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
856 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
857 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
858 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
859 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
860 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
861 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
862 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
863 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
864 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
865 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
866 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
867 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
868 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
869 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
870 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
871 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
872 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
873 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
874 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
875 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
876 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
877 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
878 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
879 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
880 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
881 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
882 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
883 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
884 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
885 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
886 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
887 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
888 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
889 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
890 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
891 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
892 'IEM_MC_STORE_MEM_U128_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
893 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
894 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
895 'IEM_MC_STORE_MEM_U256_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
896 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
897 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
898 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
899 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
900 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
901 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
902 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
903 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
904 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
905 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
906 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
907 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
908 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
909 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
910 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
911 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
912 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
913 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
914 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
915 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
916 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
917 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
918 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
919 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
920 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
921 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
922 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
923 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
924 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
925 };
926
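# Illustrative example of the FLAT conversion driven by the table above (hypothetical
# argument names): in a flat/64-bit variation,
#   IEM_MC_FETCH_MEM_U16(u16Dst, iEffSeg, GCPtrEffSrc)
# becomes roughly
#   IEM_MC_FETCH_MEM_FLAT_U16(u16Dst, GCPtrEffSrc)
# with the effective-segment argument (the index given in the table entry) dropped.
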
927 kdMemMcToFlatInfoStack = {
928 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
929 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
930 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
931 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
932 'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
933 'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
934 'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
935 };
936
937 kdThreadedCalcRmEffAddrMcByVariation = {
938 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
939 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
940 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
941 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
942 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
943 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
944 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
945 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
946 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
947 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
948 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
949 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
950 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
951 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
952 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
953 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
954 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
955 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
956 };
957
958 def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0):
959 """
960 Transforms (copies) the statements into those for the threaded function.
961
962 Returns list/tree of statements (aoStmts is not modified) and the new
963 iParamRef value.
964 """
965 #
966 # We'll be traversing aoParamRefs in parallel to the statements, so we
967 # must match the traversal in analyzeFindThreadedParamRefs exactly.
968 #
969 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
970 aoThreadedStmts = [];
971 for oStmt in aoStmts:
972 # Skip C++ statements that are purely related to decoding.
973 if not oStmt.isCppStmt() or not oStmt.fDecode:
974 # Copy the statement. Make a deep copy to make sure we've got our own
975 # copies of all instance variables, even if a bit overkill at the moment.
976 oNewStmt = copy.deepcopy(oStmt);
977 aoThreadedStmts.append(oNewStmt);
978 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
979
980 # If the statement has parameter references, process the relevant parameters.
981 # We grab the references relevant to this statement and apply them in reverse order.
982 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
983 iParamRefFirst = iParamRef;
984 while True:
985 iParamRef += 1;
986 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
987 break;
988
989 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
990 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
991 oCurRef = self.aoParamRefs[iCurRef];
992 if oCurRef.iParam is not None:
993 assert oCurRef.oStmt == oStmt;
994 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
995 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
996 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
997 or oCurRef.fCustomRef), \
998 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
999 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
1000 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
1001 + oCurRef.sNewName \
1002 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
1003
1004 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
1005 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1006 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
1007 assert len(oNewStmt.asParams) == 3;
1008
1009 if self.sVariation in self.kdVariationsWithFlatAddr16:
1010 oNewStmt.asParams = [
1011 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
1012 ];
1013 else:
1014 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
1015 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
1016 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
1017
1018 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
1019 oNewStmt.asParams = [
1020 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
1021 ];
1022 else:
1023 oNewStmt.asParams = [
1024 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
1025 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
1026 ];
1027 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
1028 elif ( oNewStmt.sName
1029 in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
1030 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
1031 'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
1032 if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1033 'IEM_MC_SET_RIP_U64_AND_FINISH', ):
1034 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
1035 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
1036 and self.sVariation not in self.kdVariationsOnlyPre386):
1037 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
1038 oNewStmt.sName += '_THREADED';
1039 if self.sVariation in self.kdVariationsOnly64NoFlags:
1040 oNewStmt.sName += '_PC64';
1041 elif self.sVariation in self.kdVariationsOnly64WithFlags:
1042 oNewStmt.sName += '_PC64_WITH_FLAGS';
1043 elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
1044 oNewStmt.sName += '_PC16';
1045 elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
1046 oNewStmt.sName += '_PC16_WITH_FLAGS';
1047 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
1048 assert self.sVariation != self.ksVariation_Default;
1049 oNewStmt.sName += '_PC32';
1050 else:
1051 oNewStmt.sName += '_PC32_WITH_FLAGS';
1052
1053 # This makes the 'wrong' branch of conditionals break out of the TB.
1054 if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
1055 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
1056 sExitTbStatus = 'VINF_SUCCESS';
1057 if self.sVariation in self.kdVariationsWithConditional:
1058 if self.sVariation in self.kdVariationsWithConditionalNoJmp:
1059 if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1060 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1061 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1062 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1063 oNewStmt.asParams.append(sExitTbStatus);
1064
1065 # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
1066 if 'IEM_MC_ASSERT_EFLAGS' in dState:
1067 aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
1068 iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
1069
1070 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
1071 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
1072 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
1073 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
1074 oNewStmt.sName += '_THREADED';
1075
1076 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
1077 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1078 oNewStmt.sName += '_THREADED';
1079 oNewStmt.idxFn += 1;
1080 oNewStmt.idxParams += 1;
1081 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
1082
1083 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
1084 elif ( self.sVariation in self.kdVariationsWithFlatAddress
1085 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
1086 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
1087 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
1088 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
1089 if idxEffSeg != -1:
1090 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
1091 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
1092 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
1093 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
1094 oNewStmt.asParams.pop(idxEffSeg);
1095 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
1096
1097 # ... PUSH and POP also need flat variants, but these differ a little.
1098 elif ( self.sVariation in self.kdVariationsWithFlatStackAddress
1099 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
1100 or oNewStmt.sName.startswith('IEM_MC_POP'))):
1101 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
1102 self.kdVariationsWithFlat64StackAddress)];
1103
1104 # Add EFLAGS usage annotations to relevant MCs.
1105 elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS', 'IEM_MC_FETCH_EFLAGS'):
1106 oInstruction = self.oParent.oMcBlock.oInstruction;
1107 oNewStmt.sName += '_EX';
1108 oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle()); # Shall crash and burn if oInstruction is
1109 oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.
1110
1111 # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
1112 if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
1113 dState['IEM_MC_ASSERT_EFLAGS'] = True;
1114
1115 # Process branches of conditionals recursively.
1116 if isinstance(oStmt, iai.McStmtCond):
1117 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState, iParamRef);
1118 if oStmt.aoElseBranch:
1119 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
1120 dState, iParamRef);
1121
1122 return (aoThreadedStmts, iParamRef);
1123
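# Illustrative example of the morphing above for the ksVariation_64 case (the parameter
# name is the generated one; see analyzeConsolidateThreadedParamRefs below):
#   IEM_MC_ADVANCE_RIP_AND_FINISH()
# is rewritten to roughly
#   IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(cbInstrP, VINF_SUCCESS)
# i.e. the instruction length is passed explicitly and an exit-TB status is appended.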
1124
1125 def analyzeConsolidateThreadedParamRefs(self):
1126 """
1127 Consolidate threaded function parameter references into a dictionary
1128 with lists of the references to each variable/field.
1129 """
1130 # Gather unique parameters.
1131 self.dParamRefs = {};
1132 for oRef in self.aoParamRefs:
1133 if oRef.sStdRef not in self.dParamRefs:
1134 self.dParamRefs[oRef.sStdRef] = [oRef,];
1135 else:
1136 self.dParamRefs[oRef.sStdRef].append(oRef);
1137
1138 # Generate names for them for use in the threaded function.
1139 dParamNames = {};
1140 for sName, aoRefs in self.dParamRefs.items():
1141 # Morph the reference expression into a name.
1142 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1143 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1144 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1145 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1146 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1147 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1148 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1149 else:
1150 sName += 'P';
1151
1152 # Ensure it's unique.
1153 if sName in dParamNames:
1154 for i in range(10):
1155 if sName + str(i) not in dParamNames:
1156 sName += str(i);
1157 break;
1158 dParamNames[sName] = True;
1159
1160 # Update all the references.
1161 for oRef in aoRefs:
1162 oRef.sNewName = sName;
1163
1164 # Organize them by size too for the purpose of optimizing them.
1165 dBySize = {} # type: Dict[int, List[str]]
1166 for sStdRef, aoRefs in self.dParamRefs.items():
1167 if aoRefs[0].sType[0] != 'P':
1168 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1169 assert(cBits <= 64);
1170 else:
1171 cBits = 64;
1172
1173 if cBits not in dBySize:
1174 dBySize[cBits] = [sStdRef,]
1175 else:
1176 dBySize[cBits].append(sStdRef);
1177
1178 # Pack the parameters as best as we can, starting with the largest ones
1179 # and ASSUMING a 64-bit parameter size.
1180 self.cMinParams = 0;
1181 offNewParam = 0;
1182 for cBits in sorted(dBySize.keys(), reverse = True):
1183 for sStdRef in dBySize[cBits]:
1184 if offNewParam == 0 or offNewParam + cBits > 64:
1185 self.cMinParams += 1;
1186 offNewParam = cBits;
1187 else:
1188 offNewParam += cBits;
1189 assert(offNewParam <= 64);
1190
1191 for oRef in self.dParamRefs[sStdRef]:
1192 oRef.iNewParam = self.cMinParams - 1;
1193 oRef.offNewParam = offNewParam - cBits;
1194
1195 # Currently there are a few that require 4 parameters; list these so we can figure out why:
1196 if self.cMinParams >= 4:
1197 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1198 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1199
1200 return True;
1201
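# Illustrative packing example (hypothetical sizes): references of 32, 16, 8 and 4 bits are
# packed, largest first, into a single 64-bit parameter (32+16+8+4 = 60 bits), so cMinParams
# ends up as 1 and the references get offNewParam 0, 32, 48 and 56 respectively.
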
1202 ksHexDigits = '0123456789abcdefABCDEF';
1203
1204 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1205 """
1206 Scans the statements for things that have to be passed on to the threaded
1207 function (populates self.aoParamRefs).
1208 """
1209 for oStmt in aoStmts:
1210 # Some statements we can skip altogether.
1211 if isinstance(oStmt, iai.McCppPreProc):
1212 continue;
1213 if oStmt.isCppStmt() and oStmt.fDecode:
1214 continue;
1215 if oStmt.sName in ('IEM_MC_BEGIN',):
1216 continue;
1217
1218 if isinstance(oStmt, iai.McStmtVar):
1219 if oStmt.sValue is None:
1220 continue;
1221 aiSkipParams = { 0: True, 1: True, 3: True };
1222 else:
1223 aiSkipParams = {};
1224
1225 # Several statements have implicit parameters and some have different parameters.
1226 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1227 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
1228 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1229 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1230 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1231 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1232
1233 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
1234 and self.sVariation not in self.kdVariationsOnlyPre386):
1235 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1236
1237 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1238 # This is being pretty presumptive about bRm always being the RM byte...
1239 assert len(oStmt.asParams) == 3;
1240 assert oStmt.asParams[1] == 'bRm';
1241
1242 if self.sVariation in self.kdVariationsWithFlatAddr16:
1243 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1244 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1245 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1246 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1247 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1248 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1249 'uint8_t', oStmt, sStdRef = 'bSib'));
1250 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1251 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1252 else:
1253 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1254 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1255 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1256 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1257 'uint8_t', oStmt, sStdRef = 'bSib'));
1258 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1259 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1260 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1261 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1262 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1263
1264 # 8-bit register accesses need to have their index argument reworked to take REX into account.
1265 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1266 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1267 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
1268 aiSkipParams[idxReg] = True; # Skip the parameter below.
1269
1270 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1271 if ( self.sVariation in self.kdVariationsWithFlatAddress
1272 and oStmt.sName in self.kdMemMcToFlatInfo
1273 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1274 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1275
1276 # Inspect the target of calls to see if we need to pass down a
1277 # function pointer or function table pointer for it to work.
1278 if isinstance(oStmt, iai.McStmtCall):
1279 if oStmt.sFn[0] == 'p':
1280 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1281 elif ( oStmt.sFn[0] != 'i'
1282 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1283 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1284 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1285 aiSkipParams[oStmt.idxFn] = True;
1286
1287 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1288 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1289 assert oStmt.idxFn == 2;
1290 aiSkipParams[0] = True;
1291
1292
1293 # Check all the parameters for bogus references.
1294 for iParam, sParam in enumerate(oStmt.asParams):
1295 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1296 # The parameter may contain a C expression, so we have to try to
1297 # extract the relevant bits, i.e. variables and fields, while
1298 # ignoring operators and parentheses.
1299 offParam = 0;
1300 while offParam < len(sParam):
1301 # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
1302 ch = sParam[offParam];
1303 if ch.isalpha() or ch == '_':
1304 offStart = offParam;
1305 offParam += 1;
1306 while offParam < len(sParam):
1307 ch = sParam[offParam];
1308 if not ch.isalnum() and ch != '_' and ch != '.':
1309 if ch != '-' or sParam[offParam + 1] != '>':
1310 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1311 if ( ch == '('
1312 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1313 offParam += len('(pVM)->') - 1;
1314 else:
1315 break;
1316 offParam += 1;
1317 offParam += 1;
1318 sRef = sParam[offStart : offParam];
1319
1320                            # For register references we pass the full register index instead, as macros
1321                            # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg and the
1322                            # threaded function will be more efficient if we just pass the register index
1323                            # as a 4-bit param.
1324 if ( sRef.startswith('IEM_GET_MODRM')
1325 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
1326 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1327 if sParam[offParam] != '(':
1328 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1329 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1330 if asMacroParams is None:
1331 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1332 offParam = offCloseParam + 1;
1333 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1334 oStmt, iParam, offStart));
1335
1336 # We can skip known variables.
1337 elif sRef in self.oParent.dVariables:
1338 pass;
1339
1340 # Skip certain macro invocations.
1341 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1342 'IEM_GET_GUEST_CPU_FEATURES',
1343 'IEM_IS_GUEST_CPU_AMD',
1344 'IEM_IS_16BIT_CODE',
1345 'IEM_IS_32BIT_CODE',
1346 'IEM_IS_64BIT_CODE',
1347 ):
1348 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1349 if sParam[offParam] != '(':
1350 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1351 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1352 if asMacroParams is None:
1353 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1354 offParam = offCloseParam + 1;
1355
1356 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1357 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1358 'IEM_IS_16BIT_CODE',
1359 'IEM_IS_32BIT_CODE',
1360 'IEM_IS_64BIT_CODE',
1361 ):
1362 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1363 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1364 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1365 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1366 offParam += 1;
1367
1368 # Skip constants, globals, types (casts), sizeof and macros.
1369 elif ( sRef.startswith('IEM_OP_PRF_')
1370 or sRef.startswith('IEM_ACCESS_')
1371 or sRef.startswith('IEMINT_')
1372 or sRef.startswith('X86_GREG_')
1373 or sRef.startswith('X86_SREG_')
1374 or sRef.startswith('X86_EFL_')
1375 or sRef.startswith('X86_FSW_')
1376 or sRef.startswith('X86_FCW_')
1377 or sRef.startswith('X86_XCPT_')
1378 or sRef.startswith('IEMMODE_')
1379 or sRef.startswith('IEM_F_')
1380 or sRef.startswith('IEM_CIMPL_F_')
1381 or sRef.startswith('g_')
1382 or sRef.startswith('iemAImpl_')
1383 or sRef.startswith('kIemNativeGstReg_')
1384 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1385 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1386 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
1387 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1388 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1389 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1390 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1391 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1392 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1393 'NIL_RTGCPTR',) ):
1394 pass;
1395
1396                            # Anything else that looks like a plain variable (non-field reference)
1397                            # or a decoder field in IEMCPU will need to be parameterized.
1398 elif ( ( '.' not in sRef
1399 and '-' not in sRef
1400 and sRef not in ('pVCpu', ) )
1401 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1402 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1403 oStmt, iParam, offStart));
1404 # Number.
1405 elif ch.isdigit():
1406 if ( ch == '0'
1407                                 and offParam + 2 < len(sParam)
1408 and sParam[offParam + 1] in 'xX'
1409 and sParam[offParam + 2] in self.ksHexDigits ):
1410 offParam += 2;
1411 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1412 offParam += 1;
1413 else:
1414 while offParam < len(sParam) and sParam[offParam].isdigit():
1415 offParam += 1;
1416 # Comment?
1417 elif ( ch == '/'
1418 and offParam + 4 <= len(sParam)
1419 and sParam[offParam + 1] == '*'):
1420 offParam += 2;
1421 offNext = sParam.find('*/', offParam);
1422 if offNext < offParam:
1423                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sParam, oStmt.renderCode(),));
1424 offParam = offNext + 2;
1425 # Whatever else.
1426 else:
1427 offParam += 1;
1428
1429 # Traverse the branches of conditionals.
1430 if isinstance(oStmt, iai.McStmtCond):
1431 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1432 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1433 return True;
1434
1435 def analyzeVariation(self, aoStmts):
1436 """
1437 2nd part of the analysis, done on each variation.
1438
1439 The variations may differ in parameter requirements and will end up with
1440 slightly different MC sequences. Thus this is done on each individually.
1441
1442 Returns dummy True - raises exception on trouble.
1443 """
1444        # Now scan the code for variables and field references that need to
1445 # be passed to the threaded function because they are related to the
1446 # instruction decoding.
1447 self.analyzeFindThreadedParamRefs(aoStmts);
1448 self.analyzeConsolidateThreadedParamRefs();
1449
1450 # Morph the statement stream for the block into what we'll be using in the threaded function.
1451 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1452 if iParamRef != len(self.aoParamRefs):
1453 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1454
1455 return True;
1456
1457 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1458 """
1459        Produces generic C++ statements that emit a call to the threaded function
1460        variation and any subsequent checks that may be necessary after that.
1461
1462 The sCallVarNm is the name of the variable with the threaded function
1463 to call. This is for the case where all the variations have the same
1464 parameters and only the threaded function number differs.
1465 """
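            # Illustration only (function/value names are placeholders): for a variation
            # with two packed parameters this typically expands to something like
            #     IEM_MC2_BEGIN_EMIT_CALLS(0);
            #     IEM_MC2_EMIT_CALL_2(<enmThreadedFunction>,
            #                         (uint64_t)(bRm) | ((uint64_t)(cbInstr) << 8),
            #                         (uint64_t)(u32Disp));
            #     IEM_MC2_END_EMIT_CALLS(<cimpl-flags>);
            # where several small source values may share one uint64_t parameter via the
            # offNewParam shifts assigned by analyzeConsolidateThreadedParamRefs.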
1466 aoStmts = [
1467 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1468 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1469 cchIndent = cchIndent), # Scope and a hook for various stuff.
1470 ];
1471
1472 # The call to the threaded function.
1473 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1474 for iParam in range(self.cMinParams):
1475 asFrags = [];
1476 for aoRefs in self.dParamRefs.values():
1477 oRef = aoRefs[0];
1478 if oRef.iNewParam == iParam:
1479                    sCast = '(uint64_t)';
1480                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1481                        sCast = '(uint64_t)(u' + oRef.sType + ')';
1482 if oRef.offNewParam == 0:
1483 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1484 else:
1485 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1486 assert asFrags;
1487 asCallArgs.append(' | '.join(asFrags));
1488
1489 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1490
1491 # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
1492 # emit this mode check from the compilation loop. On the
1493        #             plus side, this means we eliminate an unnecessary call at
1494        #             the end of the TB. :-)
1495 ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1496 ## mask and maybe emit additional checks.
1497 #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1498 # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1499 # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1500 # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1501 # cchIndent = cchIndent));
1502
1503 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1504 if not sCImplFlags:
1505            sCImplFlags = '0';
1506 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1507
1508 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1509 # indicates we should do so.
1510 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1511 asEndTbFlags = [];
1512 asTbBranchedFlags = [];
1513 for sFlag in self.oParent.dsCImplFlags:
1514 if self.kdCImplFlags[sFlag] is True:
1515 asEndTbFlags.append(sFlag);
1516 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1517 asTbBranchedFlags.append(sFlag);
1518 if ( asTbBranchedFlags
1519 and ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
1520 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
1521 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1522 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1523 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1524 if asEndTbFlags:
1525 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1526 cchIndent = cchIndent));
1527
1528 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1529 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1530
1531 return aoStmts;
1532
1533
1534class ThreadedFunction(object):
1535 """
1536 A threaded function.
1537 """
1538
1539 def __init__(self, oMcBlock: iai.McBlock) -> None:
1540 self.oMcBlock = oMcBlock # type: iai.McBlock
1541 # The remaining fields are only useful after analyze() has been called:
1542 ## Variations for this block. There is at least one.
1543 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1544 ## Variation dictionary containing the same as aoVariations.
1545 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1546 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1547 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1548 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1549 ## and those determined by analyzeCodeOperation().
1550 self.dsCImplFlags = {} # type: Dict[str, bool]
1551 ## The unique sub-name for this threaded function.
1552 self.sSubName = '';
1553 #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
1554 # self.sSubName = '_%s' % (oMcBlock.iInFunction);
1555
1556 @staticmethod
1557 def dummyInstance():
1558 """ Gets a dummy instance. """
1559 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1560 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1561
1562 def hasWithFlagsCheckingAndClearingVariation(self):
1563 """
1564        Check if there are one or more variations with EFLAGS checking and
1565        clearing for this threaded function.
1566 """
1567 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1568 if sVarWithFlags in self.dVariations:
1569 return True;
1570 return False;
1571
1572 #
1573 # Analysis and code morphing.
1574 #
1575
1576 def raiseProblem(self, sMessage):
1577 """ Raises a problem. """
1578 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1579
1580 def error(self, sMessage, oGenerator):
1581 """ Emits an error via the generator object, causing it to fail. """
1582 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1583
1584 def warning(self, sMessage):
1585 """ Emits a warning. """
1586 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1587
1588 ## Used by analyzeAndAnnotateName for memory MC blocks.
1589 kdAnnotateNameMemStmts = {
1590 'IEM_MC_FETCH_MEM16_U8': '__mem8',
1591 'IEM_MC_FETCH_MEM32_U8': '__mem8',
1592 'IEM_MC_FETCH_MEM_D80': '__mem80',
1593 'IEM_MC_FETCH_MEM_I16': '__mem16',
1594 'IEM_MC_FETCH_MEM_I32': '__mem32',
1595 'IEM_MC_FETCH_MEM_I64': '__mem64',
1596 'IEM_MC_FETCH_MEM_R32': '__mem32',
1597 'IEM_MC_FETCH_MEM_R64': '__mem64',
1598 'IEM_MC_FETCH_MEM_R80': '__mem80',
1599 'IEM_MC_FETCH_MEM_U128': '__mem128',
1600 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': '__mem128',
1601 'IEM_MC_FETCH_MEM_U128_NO_AC': '__mem128',
1602 'IEM_MC_FETCH_MEM_U16': '__mem16',
1603 'IEM_MC_FETCH_MEM_U16_DISP': '__mem16',
1604 'IEM_MC_FETCH_MEM_U16_SX_U32': '__mem16sx32',
1605 'IEM_MC_FETCH_MEM_U16_SX_U64': '__mem16sx64',
1606 'IEM_MC_FETCH_MEM_U16_ZX_U32': '__mem16zx32',
1607 'IEM_MC_FETCH_MEM_U16_ZX_U64': '__mem16zx64',
1608 'IEM_MC_FETCH_MEM_U256': '__mem256',
1609 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': '__mem256',
1610 'IEM_MC_FETCH_MEM_U256_NO_AC': '__mem256',
1611 'IEM_MC_FETCH_MEM_U32': '__mem32',
1612 'IEM_MC_FETCH_MEM_U32_DISP': '__mem32',
1613 'IEM_MC_FETCH_MEM_U32_SX_U64': '__mem32sx64',
1614 'IEM_MC_FETCH_MEM_U32_ZX_U64': '__mem32zx64',
1615 'IEM_MC_FETCH_MEM_U64': '__mem64',
1616 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': '__mem64',
1617 'IEM_MC_FETCH_MEM_U64_DISP': '__mem64',
1618 'IEM_MC_FETCH_MEM_U8': '__mem8',
1619 'IEM_MC_FETCH_MEM_U8_DISP': '__mem8',
1620 'IEM_MC_FETCH_MEM_U8_SX_U16': '__mem8sx16',
1621 'IEM_MC_FETCH_MEM_U8_SX_U32': '__mem8sx32',
1622 'IEM_MC_FETCH_MEM_U8_SX_U64': '__mem8sx64',
1623 'IEM_MC_FETCH_MEM_U8_ZX_U16': '__mem8zx16',
1624 'IEM_MC_FETCH_MEM_U8_ZX_U32': '__mem8zx32',
1625 'IEM_MC_FETCH_MEM_U8_ZX_U64': '__mem8zx64',
1626 'IEM_MC_FETCH_MEM_XMM': '__mem128',
1627 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': '__mem128',
1628 'IEM_MC_FETCH_MEM_XMM_NO_AC': '__mem128',
1629 'IEM_MC_FETCH_MEM_XMM_U32': '__mem32',
1630 'IEM_MC_FETCH_MEM_XMM_U64': '__mem64',
1631 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': '__mem128',
1632 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
1633 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': '__mem32',
1634 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': '__mem64',
1635 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
1636 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',
1637
1638 'IEM_MC_STORE_MEM_I16_CONST_BY_REF': '__mem16',
1639 'IEM_MC_STORE_MEM_I32_CONST_BY_REF': '__mem32',
1640 'IEM_MC_STORE_MEM_I64_CONST_BY_REF': '__mem64',
1641 'IEM_MC_STORE_MEM_I8_CONST_BY_REF': '__mem8',
1642 'IEM_MC_STORE_MEM_INDEF_D80_BY_REF': '__mem80',
1643 'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF': '__mem32',
1644 'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF': '__mem64',
1645 'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF': '__mem80',
1646 'IEM_MC_STORE_MEM_U128': '__mem128',
1647 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': '__mem128',
1648 'IEM_MC_STORE_MEM_U128_NO_AC': '__mem128',
1649 'IEM_MC_STORE_MEM_U16': '__mem16',
1650 'IEM_MC_STORE_MEM_U16_CONST': '__mem16c',
1651 'IEM_MC_STORE_MEM_U256': '__mem256',
1652 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': '__mem256',
1653 'IEM_MC_STORE_MEM_U256_NO_AC': '__mem256',
1654 'IEM_MC_STORE_MEM_U32': '__mem32',
1655 'IEM_MC_STORE_MEM_U32_CONST': '__mem32c',
1656 'IEM_MC_STORE_MEM_U64': '__mem64',
1657 'IEM_MC_STORE_MEM_U64_CONST': '__mem64c',
1658 'IEM_MC_STORE_MEM_U8': '__mem8',
1659 'IEM_MC_STORE_MEM_U8_CONST': '__mem8c',
1660
1661 'IEM_MC_MEM_MAP_D80_WO': '__mem80',
1662 'IEM_MC_MEM_MAP_I16_WO': '__mem16',
1663 'IEM_MC_MEM_MAP_I32_WO': '__mem32',
1664 'IEM_MC_MEM_MAP_I64_WO': '__mem64',
1665 'IEM_MC_MEM_MAP_R32_WO': '__mem32',
1666 'IEM_MC_MEM_MAP_R64_WO': '__mem64',
1667 'IEM_MC_MEM_MAP_R80_WO': '__mem80',
1668 'IEM_MC_MEM_MAP_U128_ATOMIC': '__mem128a',
1669 'IEM_MC_MEM_MAP_U128_RO': '__mem128',
1670 'IEM_MC_MEM_MAP_U128_RW': '__mem128',
1671 'IEM_MC_MEM_MAP_U128_WO': '__mem128',
1672 'IEM_MC_MEM_MAP_U16_ATOMIC': '__mem16a',
1673 'IEM_MC_MEM_MAP_U16_RO': '__mem16',
1674 'IEM_MC_MEM_MAP_U16_RW': '__mem16',
1675 'IEM_MC_MEM_MAP_U16_WO': '__mem16',
1676 'IEM_MC_MEM_MAP_U32_ATOMIC': '__mem32a',
1677 'IEM_MC_MEM_MAP_U32_RO': '__mem32',
1678 'IEM_MC_MEM_MAP_U32_RW': '__mem32',
1679 'IEM_MC_MEM_MAP_U32_WO': '__mem32',
1680 'IEM_MC_MEM_MAP_U64_ATOMIC': '__mem64a',
1681 'IEM_MC_MEM_MAP_U64_RO': '__mem64',
1682 'IEM_MC_MEM_MAP_U64_RW': '__mem64',
1683 'IEM_MC_MEM_MAP_U64_WO': '__mem64',
1684 'IEM_MC_MEM_MAP_U8_ATOMIC': '__mem8a',
1685 'IEM_MC_MEM_MAP_U8_RO': '__mem8',
1686 'IEM_MC_MEM_MAP_U8_RW': '__mem8',
1687 'IEM_MC_MEM_MAP_U8_WO': '__mem8',
1688 };
1689 ## Used by analyzeAndAnnotateName for non-memory MC blocks.
1690 kdAnnotateNameRegStmts = {
1691 'IEM_MC_FETCH_GREG_U8': '__greg8',
1692 'IEM_MC_FETCH_GREG_U8_ZX_U16': '__greg8zx16',
1693 'IEM_MC_FETCH_GREG_U8_ZX_U32': '__greg8zx32',
1694 'IEM_MC_FETCH_GREG_U8_ZX_U64': '__greg8zx64',
1695 'IEM_MC_FETCH_GREG_U8_SX_U16': '__greg8sx16',
1696 'IEM_MC_FETCH_GREG_U8_SX_U32': '__greg8sx32',
1697 'IEM_MC_FETCH_GREG_U8_SX_U64': '__greg8sx64',
1698 'IEM_MC_FETCH_GREG_U16': '__greg16',
1699 'IEM_MC_FETCH_GREG_U16_ZX_U32': '__greg16zx32',
1700 'IEM_MC_FETCH_GREG_U16_ZX_U64': '__greg16zx64',
1701 'IEM_MC_FETCH_GREG_U16_SX_U32': '__greg16sx32',
1702 'IEM_MC_FETCH_GREG_U16_SX_U64': '__greg16sx64',
1703 'IEM_MC_FETCH_GREG_U32': '__greg32',
1704 'IEM_MC_FETCH_GREG_U32_ZX_U64': '__greg32zx64',
1705 'IEM_MC_FETCH_GREG_U32_SX_U64': '__greg32sx64',
1706 'IEM_MC_FETCH_GREG_U64': '__greg64',
1707 'IEM_MC_FETCH_GREG_U64_ZX_U64': '__greg64zx64',
1708 'IEM_MC_FETCH_GREG_PAIR_U32': '__greg32',
1709 'IEM_MC_FETCH_GREG_PAIR_U64': '__greg64',
1710
1711 'IEM_MC_STORE_GREG_U8': '__greg8',
1712 'IEM_MC_STORE_GREG_U16': '__greg16',
1713 'IEM_MC_STORE_GREG_U32': '__greg32',
1714 'IEM_MC_STORE_GREG_U64': '__greg64',
1715 'IEM_MC_STORE_GREG_I64': '__greg64',
1716 'IEM_MC_STORE_GREG_U8_CONST': '__greg8c',
1717 'IEM_MC_STORE_GREG_U16_CONST': '__greg16c',
1718 'IEM_MC_STORE_GREG_U32_CONST': '__greg32c',
1719 'IEM_MC_STORE_GREG_U64_CONST': '__greg64c',
1720 'IEM_MC_STORE_GREG_PAIR_U32': '__greg32',
1721 'IEM_MC_STORE_GREG_PAIR_U64': '__greg64',
1722
1723 'IEM_MC_FETCH_SREG_U16': '__sreg16',
1724 'IEM_MC_FETCH_SREG_ZX_U32': '__sreg32',
1725 'IEM_MC_FETCH_SREG_ZX_U64': '__sreg64',
1726 'IEM_MC_FETCH_SREG_BASE_U64': '__sbase64',
1727 'IEM_MC_FETCH_SREG_BASE_U32': '__sbase32',
1728 'IEM_MC_STORE_SREG_BASE_U64': '__sbase64',
1729 'IEM_MC_STORE_SREG_BASE_U32': '__sbase32',
1730
1731 'IEM_MC_REF_GREG_U8': '__greg8',
1732 'IEM_MC_REF_GREG_U16': '__greg16',
1733 'IEM_MC_REF_GREG_U32': '__greg32',
1734 'IEM_MC_REF_GREG_U64': '__greg64',
1735 'IEM_MC_REF_GREG_U8_CONST': '__greg8',
1736 'IEM_MC_REF_GREG_U16_CONST': '__greg16',
1737 'IEM_MC_REF_GREG_U32_CONST': '__greg32',
1738 'IEM_MC_REF_GREG_U64_CONST': '__greg64',
1739 'IEM_MC_REF_GREG_I32': '__greg32',
1740 'IEM_MC_REF_GREG_I64': '__greg64',
1741 'IEM_MC_REF_GREG_I32_CONST': '__greg32',
1742 'IEM_MC_REF_GREG_I64_CONST': '__greg64',
1743
1744 'IEM_MC_STORE_FPUREG_R80_SRC_REF': '__fpu',
1745 'IEM_MC_REF_FPUREG': '__fpu',
1746
1747 'IEM_MC_FETCH_MREG_U64': '__mreg64',
1748 'IEM_MC_FETCH_MREG_U32': '__mreg32',
1749 'IEM_MC_STORE_MREG_U64': '__mreg64',
1750 'IEM_MC_STORE_MREG_U32_ZX_U64': '__mreg32zx64',
1751 'IEM_MC_REF_MREG_U64': '__mreg64',
1752 'IEM_MC_REF_MREG_U64_CONST': '__mreg64',
1753 'IEM_MC_REF_MREG_U32_CONST': '__mreg32',
1754
1755 'IEM_MC_CLEAR_XREG_U32_MASK': '__xreg32x4',
1756 'IEM_MC_FETCH_XREG_U128': '__xreg128',
1757 'IEM_MC_FETCH_XREG_XMM': '__xreg128',
1758 'IEM_MC_FETCH_XREG_U64': '__xreg64',
1759 'IEM_MC_FETCH_XREG_U32': '__xreg32',
1760 'IEM_MC_FETCH_XREG_U16': '__xreg16',
1761 'IEM_MC_FETCH_XREG_U8': '__xreg8',
1762 'IEM_MC_FETCH_XREG_PAIR_U128': '__xreg128p',
1763 'IEM_MC_FETCH_XREG_PAIR_XMM': '__xreg128p',
1764 'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
1765 'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',
1766
1767 'IEM_MC_STORE_XREG_U32_U128': '__xreg32',
1768 'IEM_MC_STORE_XREG_U128': '__xreg128',
1769 'IEM_MC_STORE_XREG_XMM': '__xreg128',
1770 'IEM_MC_STORE_XREG_XMM_U32': '__xreg32',
1771 'IEM_MC_STORE_XREG_XMM_U64': '__xreg64',
1772 'IEM_MC_STORE_XREG_U64': '__xreg64',
1773 'IEM_MC_STORE_XREG_U64_ZX_U128': '__xreg64zx128',
1774 'IEM_MC_STORE_XREG_U32': '__xreg32',
1775 'IEM_MC_STORE_XREG_U16': '__xreg16',
1776 'IEM_MC_STORE_XREG_U8': '__xreg8',
1777 'IEM_MC_STORE_XREG_U32_ZX_U128': '__xreg32zx128',
1778 'IEM_MC_STORE_XREG_HI_U64': '__xreg64hi',
1779 'IEM_MC_STORE_XREG_R32': '__xreg32',
1780 'IEM_MC_STORE_XREG_R64': '__xreg64',
1781 'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX': '__xreg8zx',
1782 'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX': '__xreg16zx',
1783 'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX': '__xreg32zx',
1784 'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX': '__xreg64zx',
1785 'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX': '__xreg128zx',
1786 'IEM_MC_REF_XREG_U128': '__xreg128',
1787 'IEM_MC_REF_XREG_U128_CONST': '__xreg128',
1788 'IEM_MC_REF_XREG_U32_CONST': '__xreg32',
1789 'IEM_MC_REF_XREG_U64_CONST': '__xreg64',
1790 'IEM_MC_REF_XREG_R32_CONST': '__xreg32',
1791 'IEM_MC_REF_XREG_R64_CONST': '__xreg64',
1792 'IEM_MC_REF_XREG_XMM_CONST': '__xreg128',
1793 'IEM_MC_COPY_XREG_U128': '__xreg128',
1794
1795 'IEM_MC_FETCH_YREG_U256': '__yreg256',
1796 'IEM_MC_FETCH_YREG_U128': '__yreg128',
1797 'IEM_MC_FETCH_YREG_U64': '__yreg64',
1798 'IEM_MC_FETCH_YREG_2ND_U64': '__yreg64',
1799 'IEM_MC_FETCH_YREG_U32': '__yreg32',
1800 'IEM_MC_STORE_YREG_U128': '__yreg128',
1801 'IEM_MC_STORE_YREG_U32_ZX_VLMAX': '__yreg32zx',
1802 'IEM_MC_STORE_YREG_U64_ZX_VLMAX': '__yreg64zx',
1803 'IEM_MC_STORE_YREG_U128_ZX_VLMAX': '__yreg128zx',
1804 'IEM_MC_STORE_YREG_U256_ZX_VLMAX': '__yreg256zx',
1805 'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX': '__yreg8',
1806 'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX': '__yreg16',
1807 'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX': '__yreg32',
1808 'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX': '__yreg64',
1809 'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX': '__yreg128',
1810 'IEM_MC_REF_YREG_U128': '__yreg128',
1811 'IEM_MC_REF_YREG_U128_CONST': '__yreg128',
1812 'IEM_MC_REF_YREG_U64_CONST': '__yreg64',
1813 'IEM_MC_COPY_YREG_U256_ZX_VLMAX': '__yreg256zx',
1814 'IEM_MC_COPY_YREG_U128_ZX_VLMAX': '__yreg128zx',
1815 'IEM_MC_COPY_YREG_U64_ZX_VLMAX': '__yreg64zx',
1816 'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX': '__yreg3296',
1817 'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX': '__yreg6464',
1818 'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX': '__yreg64hi64hi',
1819 'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX': '__yreg64lo64lo',
1820 'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
1821 'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
1822 };
1823 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1824 """
1825 Scans the statements and variation lists for clues about the threaded function,
1826        and sets self.sSubName if successful.
1827 """
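        # Example (from the tables above): a block containing IEM_MC_STORE_MEM_U16 gets
        # the sub-name '__mem16'; the memory table is consulted before the register one.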
1828 dHits = {};
1829 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1830 if cHits > 0:
1831 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1832 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1833 else:
1834 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1835 if not cHits:
1836 return;
1837 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1838 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1839 self.sSubName = sName;
1840 return;
1841
1842 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1843 """ Scans the statements for MC variables and call arguments. """
1844 for oStmt in aoStmts:
1845 if isinstance(oStmt, iai.McStmtVar):
1846 if oStmt.sVarName in self.dVariables:
1847 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1848                self.dVariables[oStmt.sVarName] = oStmt;
1849
1850 # There shouldn't be any variables or arguments declared inside if/
1851 # else blocks, but scan them too to be on the safe side.
1852 if isinstance(oStmt, iai.McStmtCond):
1853 #cBefore = len(self.dVariables);
1854 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1855 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1856 #if len(self.dVariables) != cBefore:
1857 # raise Exception('Variables/arguments defined in conditional branches!');
1858 return True;
1859
1860 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
1861 """
1862        Analyzes the code, looking for clues as to additional side-effects.
1863
1864 Currently this is simply looking for branching and adding the relevant
1865 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1866 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1867
1868 This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.
1869
1870        Returns an annotation describing the return style.
1871 """
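        # Sketch of the mapping applied below: IEM_MC_SET_RIP* adds
        # IEM_CIMPL_F_BRANCH_INDIRECT, IEM_MC_REL_JMP* adds IEM_CIMPL_F_BRANCH_RELATIVE
        # (plus IEM_CIMPL_F_BRANCH_CONDITIONAL when reached via a conditional branch),
        # and IEM_MC_CALL_* statements add the matching IEM_CIMPL_F_CALLS_* flag.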
1872 sAnnotation = None;
1873 for oStmt in aoStmts:
1874            # Set IEM_CIMPL_F_BRANCH_XXX if we see any branching MCs.
1875 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1876 assert not fSeenConditional;
1877 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1878 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1879 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1880 if fSeenConditional:
1881 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1882
1883 # Check for CIMPL and AIMPL calls.
1884 if oStmt.sName.startswith('IEM_MC_CALL_'):
1885 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1886 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1887 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1888 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1889 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1890 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1891 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1892 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1893 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1894 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1895 else:
1896 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1897
1898 # Check for return statements.
1899 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
1900 assert sAnnotation is None;
1901 sAnnotation = g_ksFinishAnnotation_Advance;
1902 elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1903 'IEM_MC_REL_JMP_S32_AND_FINISH',):
1904 assert sAnnotation is None;
1905 sAnnotation = g_ksFinishAnnotation_RelJmp;
1906 elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1907 'IEM_MC_SET_RIP_U64_AND_FINISH',):
1908 assert sAnnotation is None;
1909 sAnnotation = g_ksFinishAnnotation_SetJmp;
1910 elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1911 assert sAnnotation is None;
1912 sAnnotation = g_ksFinishAnnotation_DeferToCImpl;
1913
1914 # Collect MCs working on EFLAGS. Caller will check this.
1915 if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS',
1916 'IEM_MC_ARG_LOCAL_EFLAGS', ):
1917 dEflStmts[oStmt.sName] = oStmt;
1918 elif isinstance(oStmt, iai.McStmtCall):
1919 if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
1920 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
1921 if ( oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
1922 or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
1923 dEflStmts[oStmt.sName] = oStmt;
1924
1925 # Process branches of conditionals recursively.
1926 if isinstance(oStmt, iai.McStmtCond):
1927 oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
1928 if oStmt.aoElseBranch:
1929 oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);
1930
1931 return sAnnotation;
1932
1933 def analyze(self, oGenerator):
1934 """
1935 Analyzes the code, identifying the number of parameters it requires and such.
1936
1937 Returns dummy True - raises exception on trouble.
1938 """
1939
1940 #
1941 # Decode the block into a list/tree of McStmt objects.
1942 #
1943 aoStmts = self.oMcBlock.decode();
1944
1945 #
1946 # Check the block for errors before we proceed (will decode it).
1947 #
1948 asErrors = self.oMcBlock.check();
1949 if asErrors:
1950 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
1951 for sError in asErrors]));
1952
1953 #
1954 # Scan the statements for local variables and call arguments (self.dVariables).
1955 #
1956 self.analyzeFindVariablesAndCallArgs(aoStmts);
1957
1958 #
1959 # Scan the code for IEM_CIMPL_F_ and other clues.
1960 #
1961 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
1962 dEflStmts = {};
1963 self.analyzeCodeOperation(aoStmts, dEflStmts);
1964 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
1965 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
1966 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
1967 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls', oGenerator);
1968
1969 #
1970 # Analyse EFLAGS related MCs and @opflmodify and friends.
1971 #
1972 if dEflStmts:
1973 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
1974 if ( oInstruction is None
1975 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
1976 sMcNames = '+'.join(dEflStmts.keys());
1977 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
1978 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
1979 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts:
1980 if not oInstruction.asFlModify:
1981 if oInstruction.sMnemonic not in [ 'not', ]:
1982 self.error('Uses IEM_MC_COMMIT_EFLAGS but has no flags in @opflmodify!', oGenerator);
1983 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
1984 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
1985 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
1986 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
1987 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
1988 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
1989 if not oInstruction.asFlModify:
1990 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
1991 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
1992 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
1993 if not oInstruction.asFlTest:
1994 if oInstruction.sMnemonic not in [ 'not', ]:
1995 self.error('Expected @opfltest!', oGenerator);
1996 if oInstruction and oInstruction.asFlSet:
1997 for sFlag in oInstruction.asFlSet:
1998 if sFlag not in oInstruction.asFlModify:
1999 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
2000 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2001 if oInstruction and oInstruction.asFlClear:
2002 for sFlag in oInstruction.asFlClear:
2003 if sFlag not in oInstruction.asFlModify:
2004 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
2005 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2006
2007 #
2008 # Create variations as needed.
2009 #
2010 if iai.McStmt.findStmtByNames(aoStmts,
2011 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
2012 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
2013 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
2014 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
2015 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
2016
2017 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
2018 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
2019 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
2020 'IEM_MC_FETCH_MEM_U32' : True,
2021 'IEM_MC_FETCH_MEM_U64' : True,
2022 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
2023 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
2024 'IEM_MC_STORE_MEM_U32' : True,
2025 'IEM_MC_STORE_MEM_U64' : True, }):
2026 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2027 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
2028 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2029 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
2030 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2031 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
2032 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2033 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
2034 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2035 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2036 else:
2037 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
2038 else:
2039 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2040 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
2041 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2042 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
2043 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2044 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
2045 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2046 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
2047 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2048 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2049 else:
2050 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
2051
2052 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2053 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
2054 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
2055 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
2056 asVariationsBase = asVariations;
2057 asVariations = [];
2058 for sVariation in asVariationsBase:
2059 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
2060 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
2061
2062 if not iai.McStmt.findStmtByNames(aoStmts,
2063 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
2064 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2065 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2066 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
2067 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
2068 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
2069 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
2070 }):
2071 asVariations = [sVariation for sVariation in asVariations
2072 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
2073
2074 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
2075
2076 # Dictionary variant of the list.
2077 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
2078
2079 #
2080 # Try annotate the threaded function name.
2081 #
2082 self.analyzeAndAnnotateName(aoStmts);
2083
2084 #
2085 # Continue the analysis on each variation.
2086 #
2087 for oVariation in self.aoVariations:
2088 oVariation.analyzeVariation(aoStmts);
2089
2090 return True;
2091
2092 ## Used by emitThreadedCallStmts.
2093 kdVariationsWithNeedForPrefixCheck = {
2094 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
2095 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
2096 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
2097 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
2098 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
2099 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
2100 ThreadedFunctionVariation.ksVariation_32_Flat: True,
2101 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
2102 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
2103 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
2104 };
2105
2106 def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
2107 """
2108 Worker for morphInputCode that returns a list of statements that emits
2109 the call to the threaded functions for the block.
2110
2111 The sBranch parameter is used with conditional branches where we'll emit
2112 different threaded calls depending on whether we're in the jump-taken or
2113 no-jump code path.
2114 """
2115 # Special case for only default variation:
2116 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
2117 assert not sBranch;
2118 return self.aoVariations[0].emitThreadedCallStmts(0);
2119
2120 #
2121 # Case statement sub-class.
2122 #
2123 dByVari = self.dVariations;
2124 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
2125 class Case:
2126 def __init__(self, sCond, sVarNm = None):
2127 self.sCond = sCond;
2128 self.sVarNm = sVarNm;
2129 self.oVar = dByVari[sVarNm] if sVarNm else None;
2130 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
2131
2132 def toCode(self):
2133 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2134 if self.aoBody:
2135 aoStmts.extend(self.aoBody);
2136 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
2137 return aoStmts;
2138
2139 def toFunctionAssignment(self):
2140 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2141 if self.aoBody:
2142 aoStmts.extend([
2143 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
2144 iai.McCppGeneric('break;', cchIndent = 8),
2145 ]);
2146 return aoStmts;
2147
2148 def isSame(self, oThat):
2149 if not self.aoBody: # fall thru always matches.
2150 return True;
2151 if len(self.aoBody) != len(oThat.aoBody):
2152 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
2153 return False;
2154 for iStmt, oStmt in enumerate(self.aoBody):
2155 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
2156 assert isinstance(oStmt, iai.McCppGeneric);
2157 assert not isinstance(oStmt, iai.McStmtCond);
2158 if isinstance(oStmt, iai.McStmtCond):
2159 return False;
2160 if oStmt.sName != oThatStmt.sName:
2161 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
2162 return False;
2163 if len(oStmt.asParams) != len(oThatStmt.asParams):
2164 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
2165 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
2166 return False;
2167 for iParam, sParam in enumerate(oStmt.asParams):
2168 if ( sParam != oThatStmt.asParams[iParam]
2169 and ( iParam != 1
2170 or not isinstance(oStmt, iai.McCppCall)
2171 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
2172 or sParam != self.oVar.getIndexName()
2173 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
2174 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
2175 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
2176 return False;
2177 return True;
2178
2179 #
2180        # Determine what we're switching on.
2181 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
2182 #
2183 fSimple = True;
2184 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
2185 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
2186 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
2187            # Accesses via FS, GS and CS go through non-FLAT functions. (CS
2188 # is not writable in 32-bit mode (at least), thus the penalty mode
2189 # for any accesses via it (simpler this way).)
2190 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
2191 fSimple = False; # threaded functions.
2192 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
2193 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
2194 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
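            # Rough summary of the value composed above: bits 0-2 hold the CPU mode and
            # flat/pre-386 state, 8 is ORed in when an address-size prefix makes the
            # effective address mode differ from the CPU mode, 16 when the effective
            # segment is FS/GS/CS (the non-flat penalty case), and 32 when the EFLAGS
            # checking and clearing variations must be selected.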
2195
2196 #
2197 # Generate the case statements.
2198 #
2199 # pylintx: disable=x
2200 aoCases = [];
2201 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
2202 assert not fSimple and not sBranch;
2203 aoCases.extend([
2204 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
2205 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
2206 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
2207 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
2208 ]);
2209 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
2210 aoCases.extend([
2211 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
2212 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
2213 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
2214 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
2215 ]);
2216 elif ThrdFnVar.ksVariation_64 in dByVari:
2217 assert fSimple and not sBranch;
2218 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
2219 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
2220 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
2221 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
2222 assert fSimple and sBranch;
2223 aoCases.append(Case('IEMMODE_64BIT',
2224 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
2225 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
2226 aoCases.append(Case('IEMMODE_64BIT | 32',
2227 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
2228
2229 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
2230 assert not fSimple and not sBranch;
2231 aoCases.extend([
2232 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
2233 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
2234 Case('IEMMODE_32BIT | 16', None), # fall thru
2235 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2236 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
2237 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
2238 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
2239 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
2240 ]);
2241 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
2242 aoCases.extend([
2243 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
2244 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
2245 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
2246 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2247 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
2248 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
2249 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
2250 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
2251 ]);
2252 elif ThrdFnVar.ksVariation_32 in dByVari:
2253 assert fSimple and not sBranch;
2254 aoCases.extend([
2255 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2256 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2257 ]);
2258 if ThrdFnVar.ksVariation_32f in dByVari:
2259 aoCases.extend([
2260 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2261 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2262 ]);
2263 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
2264 assert fSimple and sBranch;
2265 aoCases.extend([
2266 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2267 Case('IEMMODE_32BIT',
2268 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
2269 ]);
2270 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
2271 aoCases.extend([
2272 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2273 Case('IEMMODE_32BIT | 32',
2274 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
2275 ]);
2276
2277 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
2278 assert not fSimple and not sBranch;
2279 aoCases.extend([
2280 Case('IEMMODE_16BIT | 16', None), # fall thru
2281 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
2282 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
2283 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
2284 ]);
2285 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
2286 aoCases.extend([
2287 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
2288 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
2289 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
2290 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
2291 ]);
2292 elif ThrdFnVar.ksVariation_16 in dByVari:
2293 assert fSimple and not sBranch;
2294 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
2295 if ThrdFnVar.ksVariation_16f in dByVari:
2296 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
2297 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
2298 assert fSimple and sBranch;
2299 aoCases.append(Case('IEMMODE_16BIT',
2300 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
2301 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
2302 aoCases.append(Case('IEMMODE_16BIT | 32',
2303 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
2304
2305
2306 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
2307 if not fSimple:
2308 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
2309 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
2310 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
2311 if not fSimple:
2312 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
2313 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
2314
2315 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
2316 assert fSimple and sBranch;
2317 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
2318 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
2319 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
2320 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
2321 assert fSimple and sBranch;
2322 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
2323 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
2324 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
2325
2326 #
2327 # If the case bodies are all the same, except for the function called,
2328 # we can reduce the code size and hopefully compile time.
2329 #
2330 iFirstCaseWithBody = 0;
2331 while not aoCases[iFirstCaseWithBody].aoBody:
2332            iFirstCaseWithBody += 1;
2333        fAllSameCases = True;
2334 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
2335 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
2336 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
2337 if fAllSameCases:
2338 aoStmts = [
2339 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
2340 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2341 iai.McCppGeneric('{'),
2342 ];
2343 for oCase in aoCases:
2344 aoStmts.extend(oCase.toFunctionAssignment());
2345 aoStmts.extend([
2346 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2347 iai.McCppGeneric('}'),
2348 ]);
2349 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
2350
2351 else:
2352 #
2353 # Generate the generic switch statement.
2354 #
2355 aoStmts = [
2356 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2357 iai.McCppGeneric('{'),
2358 ];
2359 for oCase in aoCases:
2360 aoStmts.extend(oCase.toCode());
2361 aoStmts.extend([
2362 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2363 iai.McCppGeneric('}'),
2364 ]);
2365
2366 return aoStmts;
2367
2368 def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
2369 """
2370 Adjusts (& copies) the statements for the input/decoder so it will emit
2371 calls to the right threaded functions for each block.
2372
2373 Returns list/tree of statements (aoStmts is not modified) and updated
2374 fCallEmitted status.
2375 """
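        # Sketch (illustrative): the emitted IEM_MC2_* call sequence normally lands just
        # before the first IEM_MC_MAYBE_RAISE_* / *_AND_FINISH / IEM_MC_CALL_CIMPL_* /
        # IEM_MC_DEFER_TO_CIMPL_* statement, or right after an IEMOP_HLP_DONE_* /
        # IEMOP_HLP_DECODED_* invocation, whichever is encountered first.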
2376 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
2377 aoDecoderStmts = [];
2378
2379 for iStmt, oStmt in enumerate(aoStmts):
2380 # Copy the statement. Make a deep copy to make sure we've got our own
2381 # copies of all instance variables, even if a bit overkill at the moment.
2382 oNewStmt = copy.deepcopy(oStmt);
2383 aoDecoderStmts.append(oNewStmt);
2384 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
2385 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
2386 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
2387
2388 # If we haven't emitted the threaded function call yet, look for
2389            # statements which it would naturally follow or precede.
2390 if not fCallEmitted:
2391 if not oStmt.isCppStmt():
2392 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
2393 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
2394 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
2395 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
2396 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
2397 aoDecoderStmts.pop();
2398 if not fIsConditional:
2399 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2400 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
2401 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2402 else:
2403 assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2404 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2405 'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
2406 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2407 aoDecoderStmts.append(oNewStmt);
2408 fCallEmitted = True;
2409
2410 elif iai.g_dMcStmtParsers[oStmt.sName][2]:
2411                    # This is for Jmp/NoJmp with loopne and friends which modify state other than RIP.
2412 if not sBranchAnnotation:
2413 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2414 assert fIsConditional;
2415 aoDecoderStmts.pop();
2416 if sBranchAnnotation == g_ksFinishAnnotation_Advance:
2417                        assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,});
2418 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2419 elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
2420 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
2421 { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
2422 'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
2423 'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
2424 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2425 else:
2426 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2427 aoDecoderStmts.append(oNewStmt);
2428 fCallEmitted = True;
2429
2430 elif ( not fIsConditional
2431 and oStmt.fDecode
2432 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
2433 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
2434 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2435 fCallEmitted = True;
2436
2437 # Process branches of conditionals recursively.
2438 if isinstance(oStmt, iai.McStmtCond):
2439 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
2440 fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
2441 if oStmt.aoElseBranch:
2442 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
2443 fCallEmitted, cDepth + 1,
2444 oStmt.oElseBranchAnnotation);
2445 else:
2446 fCallEmitted2 = False;
2447 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
2448
2449 if not fCallEmitted and cDepth == 0:
2450 self.raiseProblem('Unable to insert call to threaded function.');
2451
2452 return (aoDecoderStmts, fCallEmitted);
2453
2454
2455 def generateInputCode(self):
2456 """
2457 Modifies the input code.
2458 """
2459 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
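        # (Rounds the indentation up to a multiple of four, e.g. an original indent of 6 becomes 8.)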
2460
2461 if len(self.oMcBlock.aoStmts) == 1:
2462 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2463 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2464 if self.dsCImplFlags:
2465 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2466 else:
2467 sCode += '0;\n';
2468 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2469 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2470 sIndent = ' ' * (min(cchIndent, 2) - 2);
2471 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2472 return sCode;
2473
2474 # IEM_MC_BEGIN/END block
2475 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2476 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2477 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2478 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2479 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2480
2481# Short alias for ThreadedFunctionVariation.
2482ThrdFnVar = ThreadedFunctionVariation;
2483
2484
2485class IEMThreadedGenerator(object):
2486 """
2487 The threaded code generator & annotator.
2488 """
2489
2490 def __init__(self):
2491 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2492 self.oOptions = None # type: argparse.Namespace
2493 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2494        self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers giving the index of the first function.
2495 self.cErrors = 0;
2496
2497 #
2498 # Error reporting.
2499 #
2500
2501 def rawError(self, sCompleteMessage):
2502 """ Output a raw error and increment the error counter. """
2503 print(sCompleteMessage, file = sys.stderr);
2504 self.cErrors += 1;
2505 return False;
2506
2507 #
2508 # Processing.
2509 #
2510
2511 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
2512 """
2513 Process the input files.
2514 """
2515
2516 # Parse the files.
2517 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
2518
2519 # Create threaded functions for the MC blocks.
2520 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
2521
2522 # Analyze the threaded functions.
2523 dRawParamCounts = {};
2524 dMinParamCounts = {};
2525 for oThreadedFunction in self.aoThreadedFuncs:
2526 oThreadedFunction.analyze(self);
2527 for oVariation in oThreadedFunction.aoVariations:
2528 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
2529 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
2530 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
2531 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
2532 print('debug: %s params: %4s raw, %4s min'
2533 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
2534 file = sys.stderr);
2535
2536 # Do another pass over the threaded functions to settle the name suffix.
2537 iThreadedFn = 0;
2538 while iThreadedFn < len(self.aoThreadedFuncs):
2539 oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
2540 assert oFunction;
2541 iThreadedFnNext = iThreadedFn + 1;
2542 dSubNames = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
2543 while ( iThreadedFnNext < len(self.aoThreadedFuncs)
2544 and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
2545 dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
2546 iThreadedFnNext += 1;
2547 if iThreadedFnNext - iThreadedFn > len(dSubNames):
2548 iSubName = 0;
2549 while iThreadedFn + iSubName < iThreadedFnNext:
2550 self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
2551 iSubName += 1;
2552 iThreadedFn = iThreadedFnNext;
2553
2554 # Populate aidxFirstFunctions. This is ASSUMING that
2555 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
2556 iThreadedFunction = 0;
2557 oThreadedFunction = self.getThreadedFunctionByIndex(0);
2558 self.aidxFirstFunctions = [];
2559 for oParser in self.aoParsers:
2560 self.aidxFirstFunctions.append(iThreadedFunction);
2561
2562 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
2563 iThreadedFunction += 1;
2564 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2565
2566 # Analyze the threaded functions and their variations for native recompilation.
2567 if fNativeRecompilerEnabled:
2568 ian.displayStatistics(self.aoThreadedFuncs, sHostArch);
2569
2570 # Gather arguments + variable statistics for the MC blocks.
2571 cMaxArgs = 0;
2572 cMaxVars = 0;
2573 cMaxVarsAndArgs = 0;
2574 cbMaxArgs = 0;
2575 cbMaxVars = 0;
2576 cbMaxVarsAndArgs = 0;
2577 for oThreadedFunction in self.aoThreadedFuncs:
2578 if oThreadedFunction.oMcBlock.cLocals >= 0:
2579 # Counts.
2580 assert oThreadedFunction.oMcBlock.cArgs >= 0;
2581 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
2582 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
2583 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
2584 if cMaxVarsAndArgs > 9:
2585 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
2586 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
2587 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
2588 # Calc stack allocation size:
2589 cbArgs = 0;
2590 for oArg in oThreadedFunction.oMcBlock.aoArgs:
2591 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
2592 cbVars = 0;
2593 for oVar in oThreadedFunction.oMcBlock.aoLocals:
2594 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
2595 cbMaxVars = max(cbMaxVars, cbVars);
2596 cbMaxArgs = max(cbMaxArgs, cbArgs);
2597 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
2598 if cbMaxVarsAndArgs >= 0xc0:
2599 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
2600 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
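        # The (cBits + 63) // 64 * 8 rounding above charges each variable and
        # argument a whole number of 8-byte stack slots; a quick sketch of the
        # bit-width to byte mapping:
        #
        #   >>> [(cBits, (cBits + 63) // 64 * 8) for cBits in (8, 32, 64, 80, 128, 256)]
        #   [(8, 8), (32, 8), (64, 8), (80, 16), (128, 16), (256, 32)]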
2601
2602 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
2603 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
2604
2605 if self.cErrors > 0:
2606 print('fatal error: %u error%s during processing. Details above.'
2607 % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
2608 return False;
2609 return True;
2610
2611 #
2612 # Output
2613 #
2614
2615 def generateLicenseHeader(self):
2616 """
2617 Returns the lines for a license header.
2618 """
2619 return [
2620 '/*',
2621 ' * Autogenerated by $Id: IEMAllThrdPython.py 103560 2024-02-24 12:59:06Z vboxsync $ ',
2622 ' * Do not edit!',
2623 ' */',
2624 '',
2625 '/*',
2626 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2627 ' *',
2628 ' * This file is part of VirtualBox base platform packages, as',
2629 ' * available from https://www.virtualbox.org.',
2630 ' *',
2631 ' * This program is free software; you can redistribute it and/or',
2632 ' * modify it under the terms of the GNU General Public License',
2633 ' * as published by the Free Software Foundation, in version 3 of the',
2634 ' * License.',
2635 ' *',
2636 ' * This program is distributed in the hope that it will be useful, but',
2637 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2638 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2639 ' * General Public License for more details.',
2640 ' *',
2641 ' * You should have received a copy of the GNU General Public License',
2642 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2643 ' *',
2644 ' * The contents of this file may alternatively be used under the terms',
2645 ' * of the Common Development and Distribution License Version 1.0',
2646 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2647 ' * in the VirtualBox distribution, in which case the provisions of the',
2648 ' * CDDL are applicable instead of those of the GPL.',
2649 ' *',
2650 ' * You may elect to license modified versions of this file under the',
2651 ' * terms and conditions of either the GPL or the CDDL or both.',
2652 ' *',
2653 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2654 ' */',
2655 '',
2656 '',
2657 '',
2658 ];
2659
2660 ## List of built-in threaded functions with user argument counts and
2661 ## whether each has a native recompiler implementation.
2662 katBltIns = (
2663 ( 'Nop', 0, True ),
2664 ( 'LogCpuState', 0, True ),
2665
2666 ( 'DeferToCImpl0', 2, True ),
2667 ( 'CheckIrq', 0, True ),
2668 ( 'CheckMode', 1, True ),
2669 ( 'CheckHwInstrBps', 0, False ),
2670 ( 'CheckCsLim', 1, True ),
2671
2672 ( 'CheckCsLimAndOpcodes', 3, True ),
2673 ( 'CheckOpcodes', 3, True ),
2674 ( 'CheckOpcodesConsiderCsLim', 3, True ),
2675
2676 ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
2677 ( 'CheckPcAndOpcodes', 3, True ),
2678 ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),
2679
2680 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
2681 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
2682 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),
2683
2684 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
2685 ( 'CheckOpcodesLoadingTlb', 3, True ),
2686 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),
2687
2688 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
2689 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
2690 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),
2691
2692 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
2693 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
2694 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
2695 );
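    # Each katBltIns entry is (name, cUserArgs, fHasNativeRecompiler) and feeds
    # the parallel tables emitted further down; for example ('CheckMode', 1, True)
    # roughly becomes:
    #
    #   iemThreadedFunc_BltIn_CheckMode,   /* function pointer table  */
    #   1, /*BltIn_CheckMode*/             /* argument count table    */
    #   "BltIn_CheckMode",                 /* name table              */
    #   "BltIn/CheckMode",                 /* statistics name table   */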
2696
2697 def generateThreadedFunctionsHeader(self, oOut):
2698 """
2699 Generates the threaded functions header file.
2700 Returns success indicator.
2701 """
2702
2703 asLines = self.generateLicenseHeader();
2704
2705 # Generate the threaded function table indexes.
2706 asLines += [
2707 'typedef enum IEMTHREADEDFUNCS',
2708 '{',
2709 ' kIemThreadedFunc_Invalid = 0,',
2710 '',
2711 ' /*',
2712 ' * Predefined',
2713 ' */',
2714 ];
2715 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2716
2717 iThreadedFunction = 1 + len(self.katBltIns);
2718 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2719 asLines += [
2720 '',
2721 ' /*',
2722 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2723 ' */',
2724 ];
2725 for oThreadedFunction in self.aoThreadedFuncs:
2726 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2727 if oVariation:
2728 iThreadedFunction += 1;
2729 oVariation.iEnumValue = iThreadedFunction;
2730 asLines.append(' ' + oVariation.getIndexName() + ',');
2731 asLines += [
2732 ' kIemThreadedFunc_End',
2733 '} IEMTHREADEDFUNCS;',
2734 '',
2735 ];
2736
2737 # Prototype the function table.
2738 asLines += [
2739 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2740 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2741 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2742 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2743 '#endif',
2744 '#if defined(IN_RING3)',
2745 'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
2746 '#endif',
2747 ];
2748
2749 oOut.write('\n'.join(asLines));
2750 return True;
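    # The generated header has roughly this shape (the per-variation enum names
    # come from getIndexName() and are only sketched here):
    #
    #   typedef enum IEMTHREADEDFUNCS
    #   {
    #       kIemThreadedFunc_Invalid = 0,
    #       kIemThreadedFunc_BltIn_Nop,
    #       ...
    #       kIemThreadedFunc_End
    #   } IEMTHREADEDFUNCS;
    #   extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];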
2751
2752 ksBitsToIntMask = {
2753 1: "UINT64_C(0x1)",
2754 2: "UINT64_C(0x3)",
2755 4: "UINT64_C(0xf)",
2756 8: "UINT64_C(0xff)",
2757 16: "UINT64_C(0xffff)",
2758 32: "UINT64_C(0xffffffff)",
2759 };
2760
2761 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2762 """
2763 Outputs code for unpacking parameters.
2764 This is shared by the threaded and native code generators.
2765 """
2766 aasVars = [];
2767 for aoRefs in oVariation.dParamRefs.values():
2768 oRef = aoRefs[0];
2769 if oRef.sType[0] != 'P':
2770 cBits = g_kdTypeInfo[oRef.sType][0];
2771 sType = g_kdTypeInfo[oRef.sType][2];
2772 else:
2773 cBits = 64;
2774 sType = oRef.sType;
2775
2776 sTypeDecl = sType + ' const';
2777
2778 if cBits == 64:
2779 assert oRef.offNewParam == 0;
2780 if sType == 'uint64_t':
2781 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2782 else:
2783 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2784 elif oRef.offNewParam == 0:
2785 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2786 else:
2787 sUnpack = '(%s)((%s >> %s) & %s);' \
2788 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2789
2790 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2791
2792 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2793 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2794 acchVars = [0, 0, 0, 0, 0];
2795 for asVar in aasVars:
2796 for iCol, sStr in enumerate(asVar):
2797 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2798 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2799 for asVar in sorted(aasVars):
2800 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2801 return True;
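    # With the threaded parameter names this emits declarations of the form
    # below (the variable names are purely illustrative):
    #
    #       uint8_t const bRm     = (uint8_t)(uParam0 & UINT64_C(0xff));       /* bRm - 1 ref */
    #       IEMMODE const enmMode = (IEMMODE)((uParam0 >> 8) & UINT64_C(0x3)); /* enmMode - 1 ref */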
2802
2803 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2804 def generateThreadedFunctionsSource(self, oOut):
2805 """
2806 Generates the threaded functions source file.
2807 Returns success indicator.
2808 """
2809
2810 asLines = self.generateLicenseHeader();
2811 oOut.write('\n'.join(asLines));
2812
2813 #
2814 # Emit the function definitions.
2815 #
2816 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2817 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2818 oOut.write( '\n'
2819 + '\n'
2820 + '\n'
2821 + '\n'
2822 + '/*' + '*' * 128 + '\n'
2823 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2824 + '*' * 128 + '*/\n');
2825
2826 for oThreadedFunction in self.aoThreadedFuncs:
2827 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2828 if oVariation:
2829 oMcBlock = oThreadedFunction.oMcBlock;
2830
2831 # Function header
2832 oOut.write( '\n'
2833 + '\n'
2834 + '/**\n'
2835 + ' * #%u: %s at line %s offset %s in %s%s\n'
2836 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2837 os.path.split(oMcBlock.sSrcFile)[1],
2838 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2839 + ' */\n'
2840 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2841 + '{\n');
2842
2843 # Unpack parameters.
2844 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2845
2846 # RT_NOREF for unused parameters.
2847 if oVariation.cMinParams < g_kcThreadedParams:
2848 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
2849
2850 # Now for the actual statements.
2851 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2852
2853 oOut.write('}\n');
2854
2855
2856 #
2857 # Generate the output tables in parallel.
2858 #
2859 asFuncTable = [
2860 '/**',
2861 ' * Function pointer table.',
2862 ' */',
2863 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2864 '{',
2865 ' /*Invalid*/ NULL,',
2866 ];
2867 asArgCntTab = [
2868 '/**',
2869 ' * Argument count table.',
2870 ' */',
2871 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2872 '{',
2873 ' 0, /*Invalid*/',
2874 ];
2875 asNameTable = [
2876 '/**',
2877 ' * Function name table.',
2878 ' */',
2879 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2880 '{',
2881 ' "Invalid",',
2882 ];
2883 asStatTable = [
2884 '/**',
2885 ' * Function statistics name table.',
2886 ' */',
2887 'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
2888 '{',
2889 ' NULL,',
2890 ];
2891 aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);
2892
2893 for asTable in aasTables:
2894 asTable.extend((
2895 '',
2896 ' /*',
2897 ' * Predefined.',
2898 ' */',
2899 ));
2900 for sFuncNm, cArgs, _ in self.katBltIns:
2901 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2902 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2903 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2904 asStatTable.append(' "BltIn/%s",' % (sFuncNm,));
2905
2906 iThreadedFunction = 1 + len(self.katBltIns);
2907 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2908 for asTable in aasTables:
2909 asTable.extend((
2910 '',
2911 ' /*',
2912 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
2913 ' */',
2914 ));
2915 for oThreadedFunction in self.aoThreadedFuncs:
2916 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2917 if oVariation:
2918 iThreadedFunction += 1;
2919 assert oVariation.iEnumValue == iThreadedFunction;
2920 sName = oVariation.getThreadedFunctionName();
2921 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
2922 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
2923 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
2924 asStatTable.append(' "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));
2925
2926 for asTable in aasTables:
2927 asTable.append('};');
2928
2929 #
2930 # Output the tables.
2931 #
2932 oOut.write( '\n'
2933 + '\n');
2934 oOut.write('\n'.join(asFuncTable));
2935 oOut.write( '\n'
2936 + '\n'
2937 + '\n');
2938 oOut.write('\n'.join(asArgCntTab));
2939 oOut.write( '\n'
2940 + '\n'
2941 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
2942 oOut.write('\n'.join(asNameTable));
2943 oOut.write( '\n'
2944 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
2945 + '\n'
2946 + '\n'
2947 + '#if defined(IN_RING3)\n');
2948 oOut.write('\n'.join(asStatTable));
2949 oOut.write( '\n'
2950 + '#endif /* IN_RING3 */\n');
2951
2952 return True;
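    # Each emitted threaded function follows this skeleton (contents sketched):
    #
    #   static IEM_DECL_IEMTHREADEDFUNC_DEF(<threaded function name>)
    #   {
    #       /* parameter unpacking, see generateFunctionParameterUnpacking() */
    #       /* RT_NOREF() of any unused uParamN */
    #       /* the translated IEM_MC_* statements */
    #   }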
2953
2954 def generateNativeFunctionsHeader(self, oOut):
2955 """
2956 Generates the native recompiler functions header file.
2957 Returns success indicator.
2958 """
2959 if not self.oOptions.fNativeRecompilerEnabled:
2960 return True;
2961
2962 asLines = self.generateLicenseHeader();
2963
2964 # Prototype the function table.
2965 asLines += [
2966 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2967 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
2968 '',
2969 ];
2970
2971 # Emit indicators as to which of the builtin functions have a native
2972 # recompiler function and which not. (We only really need this for
2973 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
2974 for atBltIn in self.katBltIns:
2975 if atBltIn[1]:
2976 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
2977 else:
2978 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
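        # Note that the WITH/WITHOUT split above keys off atBltIn[1] (the user
        # argument count) rather than the native-recompiler flag in atBltIn[2];
        # for the CheckMode case called out in the comment above this still
        # yields the expected '#define IEMNATIVE_WITH_BLTIN_CHECKMODE' since
        # CheckMode takes one argument.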
2979
2980 # Emit prototypes for the builtin functions we use in tables.
2981 asLines += [
2982 '',
2983 '/* Prototypes for built-in functions used in the above tables. */',
2984 ];
2985 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2986 if fHaveRecompFunc:
2987 asLines += [
2988 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
2989 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
2990 ];
2991
2992 oOut.write('\n'.join(asLines));
2993 return True;
2994
2995 def generateNativeFunctionsSource(self, oOut):
2996 """
2997 Generates the native recompiler functions source file.
2998 Returns success indicator.
2999 """
3000 if not self.oOptions.fNativeRecompilerEnabled:
3001 return True;
3002
3003 #
3004 # The file header.
3005 #
3006 oOut.write('\n'.join(self.generateLicenseHeader()));
3007
3008 #
3009 # Emit the functions.
3010 #
3011 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3012 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3013 oOut.write( '\n'
3014 + '\n'
3015 + '\n'
3016 + '\n'
3017 + '/*' + '*' * 128 + '\n'
3018 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
3019 + '*' * 128 + '*/\n');
3020
3021 for oThreadedFunction in self.aoThreadedFuncs:
3022 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3023 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3024 oMcBlock = oThreadedFunction.oMcBlock;
3025
3026 # Function header
3027 oOut.write( '\n'
3028 + '\n'
3029 + '/**\n'
3030 + ' * #%u: %s at line %s offset %s in %s%s\n'
3031 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
3032 os.path.split(oMcBlock.sSrcFile)[1],
3033 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
3034 + ' */\n'
3035 + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
3036 + '{\n');
3037
3038 # Unpack parameters.
3039 self.generateFunctionParameterUnpacking(oVariation, oOut,
3040 ('pCallEntry->auParams[0]',
3041 'pCallEntry->auParams[1]',
3042 'pCallEntry->auParams[2]',));
3043
3044 # Now for the actual statements.
3045 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
3046
3047 oOut.write('}\n');
3048
3049 #
3050 # Output the function table.
3051 #
3052 oOut.write( '\n'
3053 + '\n'
3054 + '/*\n'
3055 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
3056 + ' */\n'
3057 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
3058 + '{\n'
3059 + ' /*Invalid*/ NULL,'
3060 + '\n'
3061 + ' /*\n'
3062 + ' * Predefined.\n'
3063 + ' */\n'
3064 );
3065 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3066 if fHaveRecompFunc:
3067 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
3068 else:
3069 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
3070
3071 iThreadedFunction = 1 + len(self.katBltIns);
3072 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3073 oOut.write( ' /*\n'
3074 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
3075 + ' */\n');
3076 for oThreadedFunction in self.aoThreadedFuncs:
3077 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
3078 if oVariation:
3079 iThreadedFunction += 1;
3080 assert oVariation.iEnumValue == iThreadedFunction;
3081 sName = oVariation.getNativeFunctionName();
3082 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3083 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
3084 else:
3085 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
3086
3087 oOut.write( '};\n'
3088 + '\n');
3089 return True;
3090
3091 def generateNativeLivenessSource(self, oOut):
3092 """
3093 Generates the native recompiler liveness analysis functions source file.
3094 Returns success indicator.
3095 """
3096 if not self.oOptions.fNativeRecompilerEnabled:
3097 return True;
3098
3099 #
3100 # The file header.
3101 #
3102 oOut.write('\n'.join(self.generateLicenseHeader()));
3103
3104 #
3105 # Emit the functions.
3106 #
3107 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3108 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3109 oOut.write( '\n'
3110 + '\n'
3111 + '\n'
3112 + '\n'
3113 + '/*' + '*' * 128 + '\n'
3114 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
3115 + '*' * 128 + '*/\n');
3116
3117 for oThreadedFunction in self.aoThreadedFuncs:
3118 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3119 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3120 oMcBlock = oThreadedFunction.oMcBlock;
3121
3122 # Function header
3123 oOut.write( '\n'
3124 + '\n'
3125 + '/**\n'
3126 + ' * #%u: %s at line %s offset %s in %s%s\n'
3127 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
3128 os.path.split(oMcBlock.sSrcFile)[1],
3129 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
3130 + ' */\n'
3131 + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
3132 + '{\n');
3133
3134 # Unpack parameters.
3135 self.generateFunctionParameterUnpacking(oVariation, oOut,
3136 ('pCallEntry->auParams[0]',
3137 'pCallEntry->auParams[1]',
3138 'pCallEntry->auParams[2]',));
3139 asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
3140 for aoRefs in oVariation.dParamRefs.values():
3141 asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
3142 oOut.write(' %s\n' % (' '.join(asNoRefs),));
3143
3144 # Now for the actual statements.
3145 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
3146
3147 oOut.write('}\n');
3148
3149 #
3150 # Output the function table.
3151 #
3152 oOut.write( '\n'
3153 + '\n'
3154 + '/*\n'
3155 + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
3156 + ' */\n'
3157 + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
3158 + '{\n'
3159 + ' /*Invalid*/ NULL,'
3160 + '\n'
3161 + ' /*\n'
3162 + ' * Predefined.\n'
3163 + ' */\n'
3164 );
3165 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3166 if fHaveRecompFunc:
3167 oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
3168 else:
3169 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
3170
3171 iThreadedFunction = 1 + len(self.katBltIns);
3172 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3173 oOut.write( ' /*\n'
3174 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
3175 + ' */\n');
3176 for oThreadedFunction in self.aoThreadedFuncs:
3177 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
3178 if oVariation:
3179 iThreadedFunction += 1;
3180 assert oVariation.iEnumValue == iThreadedFunction;
3181 sName = oVariation.getLivenessFunctionName();
3182 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3183 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
3184 else:
3185 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
3186
3187 oOut.write( '};\n'
3188 + '\n');
3189 return True;
3190
3191
3192 def getThreadedFunctionByIndex(self, idx):
3193 """
3194 Returns a ThreadedFunction object for the given index. If the index is
3195 out of bounds, a dummy is returned.
3196 """
3197 if idx < len(self.aoThreadedFuncs):
3198 return self.aoThreadedFuncs[idx];
3199 return ThreadedFunction.dummyInstance();
3200
3201 def generateModifiedInput(self, oOut, idxFile):
3202 """
3203 Generates the combined modified input source/header file.
3204 Returns success indicator.
3205 """
3206 #
3207 # File header and assert assumptions.
3208 #
3209 oOut.write('\n'.join(self.generateLicenseHeader()));
3210 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
3211
3212 #
3213 # Iterate all parsers (input files) and output the ones related to the
3214 # file set given by idxFile.
3215 #
3216 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
3217 # Is this included in the file set?
3218 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
3219 fInclude = -1;
3220 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
3221 if sSrcBaseFile == aoInfo[0].lower():
3222 fInclude = aoInfo[2] in (-1, idxFile);
3223 break;
3224 if fInclude is not True:
3225 assert fInclude is False;
3226 continue;
3227
3228 # Output it.
3229 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
3230
3231 iThreadedFunction = self.aidxFirstFunctions[idxParser];
3232 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
3233 iLine = 0;
3234 while iLine < len(oParser.asLines):
3235 sLine = oParser.asLines[iLine];
3236 iLine += 1; # iBeginLine and iEndLine are 1-based.
3237
3238 # Can we pass it thru?
3239 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
3240 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
3241 oOut.write(sLine);
3242 #
3243 # Single MC block. Just extract it and insert the replacement.
3244 #
3245 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
3246 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
3247 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
3248 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
3249 sModified = oThreadedFunction.generateInputCode().strip();
3250 oOut.write(sModified);
3251
3252 iLine = oThreadedFunction.oMcBlock.iEndLine;
3253 sLine = oParser.asLines[iLine - 1];
3254 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
3255 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
3256 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
3257 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
3258
3259 # Advance
3260 iThreadedFunction += 1;
3261 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
3262 #
3263 # Macro expansion line that has sublines and may contain multiple MC blocks.
3264 #
3265 else:
3266 offLine = 0;
3267 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
3268 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
3269
3270 sModified = oThreadedFunction.generateInputCode().strip();
3271 assert ( sModified.startswith('IEM_MC_BEGIN')
3272 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
3273 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
3274 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
3275 ), 'sModified="%s"' % (sModified,);
3276 oOut.write(sModified);
3277
3278 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
3279
3280 # Advance
3281 iThreadedFunction += 1;
3282 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
3283
3284 # Last line segment.
3285 if offLine < len(sLine):
3286 oOut.write(sLine[offLine : ]);
3287
3288 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
3289
3290 return True;
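    # The idxFile parameter selects which of the four modified-input parts an
    # input file lands in: aoInfo[2] in g_aaoAllInstrFilesAndDefaultMapAndSet
    # holds the part index, with -1 meaning the file is included in every part
    # (see the fInclude check above).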
3291
3292 def generateModifiedInput1(self, oOut):
3293 """
3294 Generates the combined modified input source/header file, part 1.
3295 Returns success indicator.
3296 """
3297 return self.generateModifiedInput(oOut, 1);
3298
3299 def generateModifiedInput2(self, oOut):
3300 """
3301 Generates the combined modified input source/header file, part 2.
3302 Returns success indicator.
3303 """
3304 return self.generateModifiedInput(oOut, 2);
3305
3306 def generateModifiedInput3(self, oOut):
3307 """
3308 Generates the combined modified input source/header file, part 3.
3309 Returns success indicator.
3310 """
3311 return self.generateModifiedInput(oOut, 3);
3312
3313 def generateModifiedInput4(self, oOut):
3314 """
3315 Generates the combined modified input source/header file, part 4.
3316 Returns success indicator.
3317 """
3318 return self.generateModifiedInput(oOut, 4);
3319
3320
3321 #
3322 # Main
3323 #
3324
3325 def main(self, asArgs):
3326 """
3327 C-like main function.
3328 Returns exit code.
3329 """
3330
3331 #
3332 # Parse arguments
3333 #
3334 sScriptDir = os.path.dirname(__file__);
3335 oParser = argparse.ArgumentParser(add_help = False);
3336 oParser.add_argument('asInFiles',
3337 metavar = 'input.cpp.h',
3338 nargs = '*',
3339 default = [os.path.join(sScriptDir, aoInfo[0])
3340 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3341 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3342 oParser.add_argument('--host-arch',
3343 metavar = 'arch',
3344 dest = 'sHostArch',
3345 action = 'store',
3346 default = None,
3347 help = 'The host architecture.');
3348
3349 oParser.add_argument('--out-thrd-funcs-hdr',
3350 metavar = 'file-thrd-funcs.h',
3351 dest = 'sOutFileThrdFuncsHdr',
3352 action = 'store',
3353 default = '-',
3354 help = 'The output header file for the threaded functions.');
3355 oParser.add_argument('--out-thrd-funcs-cpp',
3356 metavar = 'file-thrd-funcs.cpp',
3357 dest = 'sOutFileThrdFuncsCpp',
3358 action = 'store',
3359 default = '-',
3360 help = 'The output C++ file for the threaded functions.');
3361 oParser.add_argument('--out-n8ve-funcs-hdr',
3362 metavar = 'file-n8tv-funcs.h',
3363 dest = 'sOutFileN8veFuncsHdr',
3364 action = 'store',
3365 default = '-',
3366 help = 'The output header file for the native recompiler functions.');
3367 oParser.add_argument('--out-n8ve-funcs-cpp',
3368 metavar = 'file-n8tv-funcs.cpp',
3369 dest = 'sOutFileN8veFuncsCpp',
3370 action = 'store',
3371 default = '-',
3372 help = 'The output C++ file for the native recompiler functions.');
3373 oParser.add_argument('--out-n8ve-liveness-cpp',
3374 metavar = 'file-n8tv-liveness.cpp',
3375 dest = 'sOutFileN8veLivenessCpp',
3376 action = 'store',
3377 default = '-',
3378 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3379 oParser.add_argument('--native',
3380 dest = 'fNativeRecompilerEnabled',
3381 action = 'store_true',
3382 default = False,
3383 help = 'Enables generating the files related to native recompilation.');
3384 oParser.add_argument('--out-mod-input1',
3385 metavar = 'file-instr.cpp.h',
3386 dest = 'sOutFileModInput1',
3387 action = 'store',
3388 default = '-',
3389 help = 'The output C++/header file for modified input instruction files part 1.');
3390 oParser.add_argument('--out-mod-input2',
3391 metavar = 'file-instr.cpp.h',
3392 dest = 'sOutFileModInput2',
3393 action = 'store',
3394 default = '-',
3395 help = 'The output C++/header file for modified input instruction files part 2.');
3396 oParser.add_argument('--out-mod-input3',
3397 metavar = 'file-instr.cpp.h',
3398 dest = 'sOutFileModInput3',
3399 action = 'store',
3400 default = '-',
3401 help = 'The output C++/header file for modified input instruction files part 3.');
3402 oParser.add_argument('--out-mod-input4',
3403 metavar = 'file-instr.cpp.h',
3404 dest = 'sOutFileModInput4',
3405 action = 'store',
3406 default = '-',
3407 help = 'The output C++/header file for modified input instruction files part 4.');
3408 oParser.add_argument('--help', '-h', '-?',
3409 action = 'help',
3410 help = 'Display help and exit.');
3411 oParser.add_argument('--version', '-V',
3412 action = 'version',
3413 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3414 % (__version__.split()[1], iai.__version__.split()[1],),
3415 help = 'Displays the version/revision of the script and exit.');
3416 self.oOptions = oParser.parse_args(asArgs[1:]);
3417 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3418
3419 #
3420 # Process the instructions specified in the IEM sources.
3421 #
3422 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3423 #
3424 # Generate the output files.
3425 #
3426 aaoOutputFiles = (
3427 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
3428 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
3429 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
3430 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
3431 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource ),
3432 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
3433 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
3434 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
3435 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
3436 );
3437 fRc = True;
3438 for sOutFile, fnGenMethod in aaoOutputFiles:
3439 if sOutFile == '-':
3440 fRc = fnGenMethod(sys.stdout) and fRc;
3441 else:
3442 try:
3443 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3444 except Exception as oXcpt:
3445 print('error! Failed to open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3446 return 1;
3447 fRc = fnGenMethod(oOut) and fRc;
3448 oOut.close();
3449 if fRc:
3450 return 0;
3451
3452 return 1;
3453
3454
3455if __name__ == '__main__':
3456 sys.exit(IEMThreadedGenerator().main(sys.argv));
3457