VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 103922

Last change on this file since 103922 was 103922, checked in by vboxsync, 9 months ago

VMM/IEM: Fixed the remaining decoding issues with vblendvps/d. bugref:9898

1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 103922 2024-03-19 16:10:02Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 103922 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the thread functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; # Would merge with "| g_kdTypeInfo" here, but the dict union operator requires Python 3.9.
90g_kdTypeInfo2.update(g_kdTypeInfo);
91
92def getTypeBitCount(sType):
93 """
94 Translate a type to size in bits
95 """
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
103
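# Illustrative usage only: a doctest-style sketch of how getTypeBitCount() resolves
# sizes from the tables above ('PCPUMCTX' is simply an assumed pointer-style name):
#   >>> getTypeBitCount('uint32_t')     # found in g_kdTypeInfo
#   32
#   >>> getTypeBitCount('RTUINT256U')   # found in g_kdTypeInfo2
#   256
#   >>> getTypeBitCount('PCPUMCTX')     # 'P' prefix / pointer => 64-bit assumption
#   64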
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
131
132## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
133## @{
134g_ksFinishAnnotation_Advance = 'Advance';
135g_ksFinishAnnotation_RelJmp = 'RelJmp';
136g_ksFinishAnnotation_SetJmp = 'SetJmp';
137g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
138## @}
139
140
141class ThreadedParamRef(object):
142 """
143 A parameter reference for a threaded function.
144 """
145
146 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
147 ## The name / reference in the original code.
148 self.sOrgRef = sOrgRef;
149 ## Normalized name to deal with spaces in macro invocations and such.
150 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
151 ## Indicates that sOrgRef may not match the parameter.
152 self.fCustomRef = sStdRef is not None;
153 ## The type (typically derived).
154 self.sType = sType;
155 ## The statement making the reference.
156 self.oStmt = oStmt;
157 ## The parameter containing the references. None if implicit.
158 self.iParam = iParam;
159 ## The offset in the parameter of the reference.
160 self.offParam = offParam;
161
162 ## The variable name in the threaded function.
163 self.sNewName = 'x';
164 ## The new parameter this is packed into.
165 self.iNewParam = 99;
166 ## The bit offset in iNewParam.
167 self.offNewParam = 1024;
168
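# Illustrative example: a reference to the hypothetical decoder variable 'bRm' in
# some statement oStmt, as analyzeFindThreadedParamRefs() below creates them; the
# packing pass later fills in sNewName/iNewParam/offNewParam.
#   >>> oRef = ThreadedParamRef('bRm', 'uint8_t', oStmt);
#   >>> (oRef.sStdRef, oRef.fCustomRef, oRef.iParam)
#   ('bRm', False, None)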
169
170class ThreadedFunctionVariation(object):
171 """ Threaded function variation. """
172
173 ## @name Variations.
174 ## These variations will match translation block selection/distinctions as well.
175 ## @{
176 # pylint: disable=line-too-long
177 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
178 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
179 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
180 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
181 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
182 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
183 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
184 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
185 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
186 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
187 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
188 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
189 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
190 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
191 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
192 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
193 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
194 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
195 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
196 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
197 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
198 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
199 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
200 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
201 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
202 ksVariation_64 = '_64'; ##< 64-bit mode code.
203 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
204 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
205 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
206 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
207 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
208 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
209 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
210 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
211 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
212 # pylint: enable=line-too-long
213 kasVariations = (
214 ksVariation_Default,
215 ksVariation_16,
216 ksVariation_16f,
217 ksVariation_16_Jmp,
218 ksVariation_16f_Jmp,
219 ksVariation_16_NoJmp,
220 ksVariation_16f_NoJmp,
221 ksVariation_16_Addr32,
222 ksVariation_16f_Addr32,
223 ksVariation_16_Pre386,
224 ksVariation_16f_Pre386,
225 ksVariation_16_Pre386_Jmp,
226 ksVariation_16f_Pre386_Jmp,
227 ksVariation_16_Pre386_NoJmp,
228 ksVariation_16f_Pre386_NoJmp,
229 ksVariation_32,
230 ksVariation_32f,
231 ksVariation_32_Jmp,
232 ksVariation_32f_Jmp,
233 ksVariation_32_NoJmp,
234 ksVariation_32f_NoJmp,
235 ksVariation_32_Flat,
236 ksVariation_32f_Flat,
237 ksVariation_32_Addr16,
238 ksVariation_32f_Addr16,
239 ksVariation_64,
240 ksVariation_64f,
241 ksVariation_64_Jmp,
242 ksVariation_64f_Jmp,
243 ksVariation_64_NoJmp,
244 ksVariation_64f_NoJmp,
245 ksVariation_64_FsGs,
246 ksVariation_64f_FsGs,
247 ksVariation_64_Addr32,
248 ksVariation_64f_Addr32,
249 );
250 kasVariationsWithoutAddress = (
251 ksVariation_16,
252 ksVariation_16f,
253 ksVariation_16_Pre386,
254 ksVariation_16f_Pre386,
255 ksVariation_32,
256 ksVariation_32f,
257 ksVariation_64,
258 ksVariation_64f,
259 );
260 kasVariationsWithoutAddressNot286 = (
261 ksVariation_16,
262 ksVariation_16f,
263 ksVariation_32,
264 ksVariation_32f,
265 ksVariation_64,
266 ksVariation_64f,
267 );
268 kasVariationsWithoutAddressNot286Not64 = (
269 ksVariation_16,
270 ksVariation_16f,
271 ksVariation_32,
272 ksVariation_32f,
273 );
274 kasVariationsWithoutAddressNot64 = (
275 ksVariation_16,
276 ksVariation_16f,
277 ksVariation_16_Pre386,
278 ksVariation_16f_Pre386,
279 ksVariation_32,
280 ksVariation_32f,
281 );
282 kasVariationsWithoutAddressOnly64 = (
283 ksVariation_64,
284 ksVariation_64f,
285 );
286 kasVariationsWithAddress = (
287 ksVariation_16,
288 ksVariation_16f,
289 ksVariation_16_Addr32,
290 ksVariation_16f_Addr32,
291 ksVariation_16_Pre386,
292 ksVariation_16f_Pre386,
293 ksVariation_32,
294 ksVariation_32f,
295 ksVariation_32_Flat,
296 ksVariation_32f_Flat,
297 ksVariation_32_Addr16,
298 ksVariation_32f_Addr16,
299 ksVariation_64,
300 ksVariation_64f,
301 ksVariation_64_FsGs,
302 ksVariation_64f_FsGs,
303 ksVariation_64_Addr32,
304 ksVariation_64f_Addr32,
305 );
306 kasVariationsWithAddressNot286 = (
307 ksVariation_16,
308 ksVariation_16f,
309 ksVariation_16_Addr32,
310 ksVariation_16f_Addr32,
311 ksVariation_32,
312 ksVariation_32f,
313 ksVariation_32_Flat,
314 ksVariation_32f_Flat,
315 ksVariation_32_Addr16,
316 ksVariation_32f_Addr16,
317 ksVariation_64,
318 ksVariation_64f,
319 ksVariation_64_FsGs,
320 ksVariation_64f_FsGs,
321 ksVariation_64_Addr32,
322 ksVariation_64f_Addr32,
323 );
324 kasVariationsWithAddressNot286Not64 = (
325 ksVariation_16,
326 ksVariation_16f,
327 ksVariation_16_Addr32,
328 ksVariation_16f_Addr32,
329 ksVariation_32,
330 ksVariation_32f,
331 ksVariation_32_Flat,
332 ksVariation_32f_Flat,
333 ksVariation_32_Addr16,
334 ksVariation_32f_Addr16,
335 );
336 kasVariationsWithAddressNot64 = (
337 ksVariation_16,
338 ksVariation_16f,
339 ksVariation_16_Addr32,
340 ksVariation_16f_Addr32,
341 ksVariation_16_Pre386,
342 ksVariation_16f_Pre386,
343 ksVariation_32,
344 ksVariation_32f,
345 ksVariation_32_Flat,
346 ksVariation_32f_Flat,
347 ksVariation_32_Addr16,
348 ksVariation_32f_Addr16,
349 );
350 kasVariationsWithAddressOnly64 = (
351 ksVariation_64,
352 ksVariation_64f,
353 ksVariation_64_FsGs,
354 ksVariation_64f_FsGs,
355 ksVariation_64_Addr32,
356 ksVariation_64f_Addr32,
357 );
358 kasVariationsOnlyPre386 = (
359 ksVariation_16_Pre386,
360 ksVariation_16f_Pre386,
361 );
362 kasVariationsEmitOrder = (
363 ksVariation_Default,
364 ksVariation_64,
365 ksVariation_64f,
366 ksVariation_64_Jmp,
367 ksVariation_64f_Jmp,
368 ksVariation_64_NoJmp,
369 ksVariation_64f_NoJmp,
370 ksVariation_64_FsGs,
371 ksVariation_64f_FsGs,
372 ksVariation_32_Flat,
373 ksVariation_32f_Flat,
374 ksVariation_32,
375 ksVariation_32f,
376 ksVariation_32_Jmp,
377 ksVariation_32f_Jmp,
378 ksVariation_32_NoJmp,
379 ksVariation_32f_NoJmp,
380 ksVariation_16,
381 ksVariation_16f,
382 ksVariation_16_Jmp,
383 ksVariation_16f_Jmp,
384 ksVariation_16_NoJmp,
385 ksVariation_16f_NoJmp,
386 ksVariation_16_Addr32,
387 ksVariation_16f_Addr32,
388 ksVariation_16_Pre386,
389 ksVariation_16f_Pre386,
390 ksVariation_16_Pre386_Jmp,
391 ksVariation_16f_Pre386_Jmp,
392 ksVariation_16_Pre386_NoJmp,
393 ksVariation_16f_Pre386_NoJmp,
394 ksVariation_32_Addr16,
395 ksVariation_32f_Addr16,
396 ksVariation_64_Addr32,
397 ksVariation_64f_Addr32,
398 );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump not taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump not taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
436 kdVariationsWithEflagsCheckingAndClearing = {
437 ksVariation_16f: True,
438 ksVariation_16f_Jmp: True,
439 ksVariation_16f_NoJmp: True,
440 ksVariation_16f_Addr32: True,
441 ksVariation_16f_Pre386: True,
442 ksVariation_16f_Pre386_Jmp: True,
443 ksVariation_16f_Pre386_NoJmp: True,
444 ksVariation_32f: True,
445 ksVariation_32f_Jmp: True,
446 ksVariation_32f_NoJmp: True,
447 ksVariation_32f_Flat: True,
448 ksVariation_32f_Addr16: True,
449 ksVariation_64f: True,
450 ksVariation_64f_Jmp: True,
451 ksVariation_64f_NoJmp: True,
452 ksVariation_64f_FsGs: True,
453 ksVariation_64f_Addr32: True,
454 };
455 kdVariationsOnly64NoFlags = {
456 ksVariation_64: True,
457 ksVariation_64_Jmp: True,
458 ksVariation_64_NoJmp: True,
459 ksVariation_64_FsGs: True,
460 ksVariation_64_Addr32: True,
461 };
462 kdVariationsOnly64WithFlags = {
463 ksVariation_64f: True,
464 ksVariation_64f_Jmp: True,
465 ksVariation_64f_NoJmp: True,
466 ksVariation_64f_FsGs: True,
467 ksVariation_64f_Addr32: True,
468 };
469 kdVariationsOnlyPre386NoFlags = {
470 ksVariation_16_Pre386: True,
471 ksVariation_16_Pre386_Jmp: True,
472 ksVariation_16_Pre386_NoJmp: True,
473 };
474 kdVariationsOnlyPre386WithFlags = {
475 ksVariation_16f_Pre386: True,
476 ksVariation_16f_Pre386_Jmp: True,
477 ksVariation_16f_Pre386_NoJmp: True,
478 };
479 kdVariationsWithFlatAddress = {
480 ksVariation_32_Flat: True,
481 ksVariation_32f_Flat: True,
482 ksVariation_64: True,
483 ksVariation_64f: True,
484 ksVariation_64_Addr32: True,
485 ksVariation_64f_Addr32: True,
486 };
487 kdVariationsWithFlatStackAddress = {
488 ksVariation_32_Flat: True,
489 ksVariation_32f_Flat: True,
490 ksVariation_64: True,
491 ksVariation_64f: True,
492 ksVariation_64_FsGs: True,
493 ksVariation_64f_FsGs: True,
494 ksVariation_64_Addr32: True,
495 ksVariation_64f_Addr32: True,
496 };
497 kdVariationsWithFlat64StackAddress = {
498 ksVariation_64: True,
499 ksVariation_64f: True,
500 ksVariation_64_FsGs: True,
501 ksVariation_64f_FsGs: True,
502 ksVariation_64_Addr32: True,
503 ksVariation_64f_Addr32: True,
504 };
505 kdVariationsWithFlatAddr16 = {
506 ksVariation_16: True,
507 ksVariation_16f: True,
508 ksVariation_16_Pre386: True,
509 ksVariation_16f_Pre386: True,
510 ksVariation_32_Addr16: True,
511 ksVariation_32f_Addr16: True,
512 };
513 kdVariationsWithFlatAddr32No64 = {
514 ksVariation_16_Addr32: True,
515 ksVariation_16f_Addr32: True,
516 ksVariation_32: True,
517 ksVariation_32f: True,
518 ksVariation_32_Flat: True,
519 ksVariation_32f_Flat: True,
520 };
521 kdVariationsWithAddressOnly64 = {
522 ksVariation_64: True,
523 ksVariation_64f: True,
524 ksVariation_64_FsGs: True,
525 ksVariation_64f_FsGs: True,
526 ksVariation_64_Addr32: True,
527 ksVariation_64f_Addr32: True,
528 };
529 kdVariationsWithConditional = {
530 ksVariation_16_Jmp: True,
531 ksVariation_16_NoJmp: True,
532 ksVariation_16_Pre386_Jmp: True,
533 ksVariation_16_Pre386_NoJmp: True,
534 ksVariation_32_Jmp: True,
535 ksVariation_32_NoJmp: True,
536 ksVariation_64_Jmp: True,
537 ksVariation_64_NoJmp: True,
538 ksVariation_16f_Jmp: True,
539 ksVariation_16f_NoJmp: True,
540 ksVariation_16f_Pre386_Jmp: True,
541 ksVariation_16f_Pre386_NoJmp: True,
542 ksVariation_32f_Jmp: True,
543 ksVariation_32f_NoJmp: True,
544 ksVariation_64f_Jmp: True,
545 ksVariation_64f_NoJmp: True,
546 };
547 kdVariationsWithConditionalNoJmp = {
548 ksVariation_16_NoJmp: True,
549 ksVariation_16_Pre386_NoJmp: True,
550 ksVariation_32_NoJmp: True,
551 ksVariation_64_NoJmp: True,
552 ksVariation_16f_NoJmp: True,
553 ksVariation_16f_Pre386_NoJmp: True,
554 ksVariation_32f_NoJmp: True,
555 ksVariation_64f_NoJmp: True,
556 };
557 kdVariationsOnlyPre386 = {
558 ksVariation_16_Pre386: True,
559 ksVariation_16f_Pre386: True,
560 ksVariation_16_Pre386_Jmp: True,
561 ksVariation_16f_Pre386_Jmp: True,
562 ksVariation_16_Pre386_NoJmp: True,
563 ksVariation_16f_Pre386_NoJmp: True,
564 };
565 ## @}
566
567 ## IEM_CIMPL_F_XXX flags that we know.
568 ## The value indicates whether it terminates the TB or not. The goal is to
569 ## improve the recompiler so all but END_TB will be False.
570 ##
571 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
572 kdCImplFlags = {
573 'IEM_CIMPL_F_MODE': False,
574 'IEM_CIMPL_F_BRANCH_DIRECT': False,
575 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
576 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
577 'IEM_CIMPL_F_BRANCH_FAR': True,
578 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
579 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
580 'IEM_CIMPL_F_BRANCH_STACK': False,
581 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
582 'IEM_CIMPL_F_RFLAGS': False,
583 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
584 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
585 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
586 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
587 'IEM_CIMPL_F_STATUS_FLAGS': False,
588 'IEM_CIMPL_F_VMEXIT': False,
589 'IEM_CIMPL_F_FPU': False,
590 'IEM_CIMPL_F_REP': False,
591 'IEM_CIMPL_F_IO': False,
592 'IEM_CIMPL_F_END_TB': True,
593 'IEM_CIMPL_F_XCPT': True,
594 'IEM_CIMPL_F_CALLS_CIMPL': False,
595 'IEM_CIMPL_F_CALLS_AIMPL': False,
596 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
597 };
598
599 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
600 self.oParent = oThreadedFunction # type: ThreadedFunction
601 ##< ksVariation_Xxxx.
602 self.sVariation = sVariation
603
604 ## Threaded function parameter references.
605 self.aoParamRefs = [] # type: List[ThreadedParamRef]
606 ## Unique parameter references.
607 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
608 ## Minimum number of parameters to the threaded function.
609 self.cMinParams = 0;
610
611 ## List/tree of statements for the threaded function.
612 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
613
614 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
615 self.iEnumValue = -1;
616
617 ## Native recompilation details for this variation.
618 self.oNativeRecomp = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
625
626 def getThreadedFunctionName(self):
627 sName = self.oParent.oMcBlock.sFunction;
628 if sName.startswith('iemOp_'):
629 sName = sName[len('iemOp_'):];
630 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
631
632 def getNativeFunctionName(self):
633 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
634
635 def getLivenessFunctionName(self):
636 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
637
638 def getShortName(self):
639 sName = self.oParent.oMcBlock.sFunction;
640 if sName.startswith('iemOp_'):
641 sName = sName[len('iemOp_'):];
642 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
643
644 def getThreadedFunctionStatisticsName(self):
645 sName = self.oParent.oMcBlock.sFunction;
646 if sName.startswith('iemOp_'):
647 sName = sName[len('iemOp_'):];
648
649 sVarNm = self.sVariation;
650 if sVarNm:
651 if sVarNm.startswith('_'):
652 sVarNm = sVarNm[1:];
653 if sVarNm.endswith('_Jmp'):
654 sVarNm = sVarNm[:-4];
655 sName += '_Jmp';
656 elif sVarNm.endswith('_NoJmp'):
657 sVarNm = sVarNm[:-6];
658 sName += '_NoJmp';
659 else:
660 sVarNm = 'DeferToCImpl';
661
662 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
663
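# Illustrative example: assuming a decoder function named iemOp_add_Eb_Gb with an
# empty sSubName, the naming helpers above would yield roughly:
#   getIndexName()                      -> 'kIemThreadedFunc_add_Eb_Gb_16'  (for ksVariation_16)
#   getThreadedFunctionName()           -> 'iemThreadedFunc_add_Eb_Gb_16'
#   getNativeFunctionName()             -> 'iemNativeRecompFunc_add_Eb_Gb_16'
#   getThreadedFunctionStatisticsName() -> '16f/add_Eb_Gb_Jmp'              (for ksVariation_16f_Jmp)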
664 def isWithFlagsCheckingAndClearingVariation(self):
665 """
666 Checks if this is a variation that checks and clears EFLAGS.
667 """
668 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
669
670 #
671 # Analysis and code morphing.
672 #
673
674 def raiseProblem(self, sMessage):
675 """ Raises a problem. """
676 self.oParent.raiseProblem(sMessage);
677
678 def warning(self, sMessage):
679 """ Emits a warning. """
680 self.oParent.warning(sMessage);
681
682 def analyzeReferenceToType(self, sRef):
683 """
684 Translates a variable or structure reference to a type.
685 Returns type name.
686 Raises exception if unable to figure it out.
687 """
688 ch0 = sRef[0];
689 if ch0 == 'u':
690 if sRef.startswith('u32'):
691 return 'uint32_t';
692 if sRef.startswith('u8') or sRef == 'uReg':
693 return 'uint8_t';
694 if sRef.startswith('u64'):
695 return 'uint64_t';
696 if sRef.startswith('u16'):
697 return 'uint16_t';
698 elif ch0 == 'b':
699 return 'uint8_t';
700 elif ch0 == 'f':
701 return 'bool';
702 elif ch0 == 'i':
703 if sRef.startswith('i8'):
704 return 'int8_t';
705 if sRef.startswith('i16'):
706 return 'int16_t';
707 if sRef.startswith('i32'):
708 return 'int32_t';
709 if sRef.startswith('i64'):
710 return 'int64_t';
711 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
712 return 'uint8_t';
713 elif ch0 == 'p':
714 if sRef.find('-') < 0:
715 return 'uintptr_t';
716 if sRef.startswith('pVCpu->iem.s.'):
717 sField = sRef[len('pVCpu->iem.s.') : ];
718 if sField in g_kdIemFieldToType:
719 if g_kdIemFieldToType[sField][0]:
720 return g_kdIemFieldToType[sField][0];
721 elif ch0 == 'G' and sRef.startswith('GCPtr'):
722 return 'uint64_t';
723 elif ch0 == 'e':
724 if sRef == 'enmEffOpSize':
725 return 'IEMMODE';
726 elif ch0 == 'o':
727 if sRef.startswith('off32'):
728 return 'uint32_t';
729 elif sRef == 'cbFrame': # enter
730 return 'uint16_t';
731 elif sRef == 'cShift': ## @todo risky
732 return 'uint8_t';
733
734 self.raiseProblem('Unknown reference: %s' % (sRef,));
735 return None; # Shut up pylint 2.16.2.
736
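# Illustrative example: a few deductions the Hungarian-prefix logic above would make
# (the reference names are hypothetical):
#   'u16Port'  -> 'uint16_t'    'fFlags'   -> 'bool'       'iSegReg' -> 'uint8_t'
#   'i32Disp'  -> 'int32_t'     'GCPtrMem' -> 'uint64_t'
#   'pVCpu->iem.s.iEffSeg' -> 'uint8_t' (looked up via g_kdIemFieldToType)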
737 def analyzeCallToType(self, sFnRef):
738 """
739 Determines the type of an indirect function call.
740 """
741 assert sFnRef[0] == 'p';
742
743 #
744 # Simple?
745 #
746 if sFnRef.find('-') < 0:
747 oDecoderFunction = self.oParent.oMcBlock.oFunction;
748
749 # Try the argument list of the function definition macro invocation first.
750 iArg = 2;
751 while iArg < len(oDecoderFunction.asDefArgs):
752 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
753 return oDecoderFunction.asDefArgs[iArg - 1];
754 iArg += 1;
755
756 # Then check for a line that includes the word and looks like a variable declaration.
757 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
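# (Illustrative note: for a hypothetical sFnRef of 'pfnShift', the pattern above matches
#  declarations like "    PCIEMOPSHIFTSIZES pfnShift = ...;" yielding 'PCIEMOPSHIFTSIZES',
#  or "    const IEMOPSHIFTSIZES * pfnShift;" which is rewritten to 'PCIEMOPSHIFTSIZES' below.)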
758 for sLine in oDecoderFunction.asLines:
759 oMatch = oRe.match(sLine);
760 if oMatch:
761 if not oMatch.group(1).startswith('const'):
762 return oMatch.group(1);
763 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
764
765 #
766 # Deal with the pImpl->pfnXxx:
767 #
768 elif sFnRef.startswith('pImpl->pfn'):
769 sMember = sFnRef[len('pImpl->') : ];
770 sBaseType = self.analyzeCallToType('pImpl');
771 offBits = sMember.rfind('U') + 1;
772 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
773 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
774 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
775 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
776 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
777 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
778 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
779 if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
780 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
781 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
782 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
783
784 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
785
786 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
787 return None; # Shut up pylint 2.16.2.
788
789 def analyze8BitGRegStmt(self, oStmt):
790 """
791 Gets the 8-bit general purpose register access details of the given statement.
792 ASSUMES the statement is one accessing an 8-bit GREG.
793 """
794 idxReg = 0;
795 if ( oStmt.sName.find('_FETCH_') > 0
796 or oStmt.sName.find('_REF_') > 0
797 or oStmt.sName.find('_TO_LOCAL') > 0):
798 idxReg = 1;
799
800 sRegRef = oStmt.asParams[idxReg];
801 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
802 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
803 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
804 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
805 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
806 else:
807 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
808
809 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
810 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
811 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
812 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
813 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
814 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
815 else:
816 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
817 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
818 sStdRef = 'bOther8Ex';
819
820 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
821 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
822 return (idxReg, sOrgExpr, sStdRef);
823
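# Illustrative example: for a statement such as
#   IEM_MC_FETCH_GREG_U8(u8Value, IEM_GET_MODRM_RM(pVCpu, bRm))
# analyze8BitGRegStmt() above would return approximately:
#   (1, 'IEM_GET_MODRM_RM_EX8(pVCpu, bRm)', 'bRmRm8Ex')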
824
825 ## Maps memory related MCs to info for FLAT conversion.
826 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
827 ## segmentation checking for every memory access. Only applied to accesses
828 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment;
829 ## the latter (CS) is just to keep things simple (we could safely fetch via
830 ## it, but only in 64-bit mode could we safely write via it, IIRC).
831 kdMemMcToFlatInfo = {
832 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
833 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
834 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
835 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
836 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
837 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
838 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
839 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
840 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
841 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
842 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
843 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
844 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
845 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
846 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
847 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
848 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
849 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
850 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
851 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
852 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
853 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
854 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
855 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
856 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
857 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
858 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
859 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
860 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
861 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
862 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
863 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
864 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
865 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
866 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
867 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
868 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
869 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
870 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
871 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
872 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
873 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
874 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
875 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
876 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
877 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
878 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
879 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
880 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
881 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
882 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
883 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
884 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
885 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
886 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
887 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
888 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
889 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
890 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
891 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
892 'IEM_MC_STORE_MEM_U128_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
893 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
894 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
895 'IEM_MC_STORE_MEM_U256_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
896 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
897 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
898 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
899 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
900 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
901 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
902 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
903 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
904 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
905 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
906 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
907 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
908 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
909 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
910 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
911 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
912 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
913 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
914 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
915 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
916 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
917 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
918 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
919 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
920 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
921 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
922 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
923 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
924 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
925 };
926
927 kdMemMcToFlatInfoStack = {
928 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
929 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
930 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
931 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
932 'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
933 'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
934 'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
935 };
936
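# Illustrative example of how the two tables above are consumed by
# analyzeMorphStmtForThreaded() below, assuming the ksVariation_64 variation:
#   kdMemMcToFlatInfo['IEM_MC_FETCH_MEM_U16'] -> (1, 'IEM_MC_FETCH_MEM_FLAT_U16'):
#       parameter #1 (iEffSeg) is dropped and the statement renamed to the FLAT form.
#   kdMemMcToFlatInfoStack['IEM_MC_PUSH_U16'][1] -> 'IEM_MC_FLAT64_PUSH_U16'
#       (index 1 because ksVariation_64 is in kdVariationsWithFlat64StackAddress).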
937 kdThreadedCalcRmEffAddrMcByVariation = {
938 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
939 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
940 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
941 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
942 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
943 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
944 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
945 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
946 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
947 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
948 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
949 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
950 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
951 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
952 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
953 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
954 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
955 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
956 };
957
958 def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
959 """
960 Transforms (copies) the statements into those for the threaded function.
961
962 Returns list/tree of statements (aoStmts is not modified) and the new
963 iParamRef value.
964 """
965 #
966 # We'll be traversing aoParamRefs in parallel to the statements, so we
967 # must match the traversal in analyzeFindThreadedParamRefs exactly.
968 #
969 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
970 aoThreadedStmts = [];
971 for oStmt in aoStmts:
972 # Skip C++ statements that are purely related to decoding.
973 if not oStmt.isCppStmt() or not oStmt.fDecode:
974 # Copy the statement. Make a deep copy to make sure we've got our own
975 # copies of all instance variables, even if a bit overkill at the moment.
976 oNewStmt = copy.deepcopy(oStmt);
977 aoThreadedStmts.append(oNewStmt);
978 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
979
980 # If the statement has parameter references, process the relevant parameters.
981 # We grab the references relevant to this statement and apply them in reverse order.
982 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
983 iParamRefFirst = iParamRef;
984 while True:
985 iParamRef += 1;
986 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
987 break;
988
989 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
990 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
991 oCurRef = self.aoParamRefs[iCurRef];
992 if oCurRef.iParam is not None:
993 assert oCurRef.oStmt == oStmt;
994 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
995 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
996 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
997 or oCurRef.fCustomRef), \
998 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
999 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
1000 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
1001 + oCurRef.sNewName \
1002 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
1003
1004 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
1005 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1006 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
1007 assert len(oNewStmt.asParams) == 3;
1008
1009 if self.sVariation in self.kdVariationsWithFlatAddr16:
1010 oNewStmt.asParams = [
1011 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
1012 ];
1013 else:
1014 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
1015 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
1016 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
1017
1018 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
1019 oNewStmt.asParams = [
1020 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
1021 ];
1022 else:
1023 oNewStmt.asParams = [
1024 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
1025 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
1026 ];
1027 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
1028 elif ( oNewStmt.sName
1029 in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
1030 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
1031 'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
1032 if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1033 'IEM_MC_SET_RIP_U64_AND_FINISH', ):
1034 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
1035 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
1036 and self.sVariation not in self.kdVariationsOnlyPre386):
1037 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
1038 oNewStmt.sName += '_THREADED';
1039 if self.sVariation in self.kdVariationsOnly64NoFlags:
1040 oNewStmt.sName += '_PC64';
1041 elif self.sVariation in self.kdVariationsOnly64WithFlags:
1042 oNewStmt.sName += '_PC64_WITH_FLAGS';
1043 elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
1044 oNewStmt.sName += '_PC16';
1045 elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
1046 oNewStmt.sName += '_PC16_WITH_FLAGS';
1047 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
1048 assert self.sVariation != self.ksVariation_Default;
1049 oNewStmt.sName += '_PC32';
1050 else:
1051 oNewStmt.sName += '_PC32_WITH_FLAGS';
1052
1053 # This is making the wrong branch of conditionals break out of the TB.
1054 if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
1055 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
1056 sExitTbStatus = 'VINF_SUCCESS';
1057 if self.sVariation in self.kdVariationsWithConditional:
1058 if self.sVariation in self.kdVariationsWithConditionalNoJmp:
1059 if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1060 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1061 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1062 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1063 oNewStmt.asParams.append(sExitTbStatus);
1064
1065 # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
1066 if 'IEM_MC_ASSERT_EFLAGS' in dState:
1067 aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
1068 iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
1069 del dState['IEM_MC_ASSERT_EFLAGS'];
1070
1071 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
1072 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
1073 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
1074 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
1075 oNewStmt.sName += '_THREADED';
1076
1077 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
1078 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1079 oNewStmt.sName += '_THREADED';
1080 oNewStmt.idxFn += 1;
1081 oNewStmt.idxParams += 1;
1082 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
1083
1084 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
1085 elif ( self.sVariation in self.kdVariationsWithFlatAddress
1086 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
1087 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
1088 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
1089 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
1090 if idxEffSeg != -1:
1091 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
1092 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
1093 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
1094 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
1095 oNewStmt.asParams.pop(idxEffSeg);
1096 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
1097
1098 # ... PUSH and POP also need flat variants, but these differ a little.
1099 elif ( self.sVariation in self.kdVariationsWithFlatStackAddress
1100 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
1101 or oNewStmt.sName.startswith('IEM_MC_POP'))):
1102 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
1103 self.kdVariationsWithFlat64StackAddress)];
1104
1105 # Add EFLAGS usage annotations to relevant MCs.
1106 elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS',
1107 'IEM_MC_FETCH_EFLAGS'):
1108 oInstruction = self.oParent.oMcBlock.oInstruction;
1109 oNewStmt.sName += '_EX';
1110 oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle()); # Shall crash and burn if oInstruction is
1111 oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.
1112
1113 # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
1114 if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
1115 dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;
1116
1117 # Process branches of conditionals recursively.
1118 if isinstance(oStmt, iai.McStmtCond):
1119 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
1120 iParamRef, iLevel + 1);
1121 if oStmt.aoElseBranch:
1122 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
1123 dState, iParamRef, iLevel + 1);
1124
1125 # Insert an MC so we can assert the correctness of modified flags annotations
1126 # on IEM_MC_REF_EFLAGS if it goes out of scope.
1127 if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
1128 aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
1129 del dState['IEM_MC_ASSERT_EFLAGS'];
1130
1131 return (aoThreadedStmts, iParamRef);
1132
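# Illustrative example of one of the simpler morphs above, assuming ksVariation_64 and
# a block ending with IEM_MC_ADVANCE_RIP_AND_FINISH():
#   IEM_MC_ADVANCE_RIP_AND_FINISH()
#     -> IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(cbInstrP, VINF_SUCCESS)
# i.e. the implicit instruction length becomes an explicit threaded parameter (named
# 'cbInstrP' by analyzeConsolidateThreadedParamRefs below) and a TB-exit status is appended.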
1133
1134 def analyzeConsolidateThreadedParamRefs(self):
1135 """
1136 Consolidate threaded function parameter references into a dictionary
1137 with lists of the references to each variable/field.
1138 """
1139 # Gather unique parameters.
1140 self.dParamRefs = {};
1141 for oRef in self.aoParamRefs:
1142 if oRef.sStdRef not in self.dParamRefs:
1143 self.dParamRefs[oRef.sStdRef] = [oRef,];
1144 else:
1145 self.dParamRefs[oRef.sStdRef].append(oRef);
1146
1147 # Generate names for them for use in the threaded function.
1148 dParamNames = {};
1149 for sName, aoRefs in self.dParamRefs.items():
1150 # Morph the reference expression into a name.
1151 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1152 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1153 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1154 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1155 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1156 elif sName.startswith('IEM_GET_IMM8_REG'): sName = 'bImm8Reg';
1157 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1158 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1159 else:
1160 sName += 'P';
1161
1162 # Ensure it's unique.
1163 if sName in dParamNames:
1164 for i in range(10):
1165 if sName + str(i) not in dParamNames:
1166 sName += str(i);
1167 break;
1168 dParamNames[sName] = True;
1169
1170 # Update all the references.
1171 for oRef in aoRefs:
1172 oRef.sNewName = sName;
1173
1174 # Organize them by size too for the purpose of optimizing them.
1175 dBySize = {} # type: Dict[str, str]
1176 for sStdRef, aoRefs in self.dParamRefs.items():
1177 if aoRefs[0].sType[0] != 'P':
1178 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1179 assert(cBits <= 64);
1180 else:
1181 cBits = 64;
1182
1183 if cBits not in dBySize:
1184 dBySize[cBits] = [sStdRef,]
1185 else:
1186 dBySize[cBits].append(sStdRef);
1187
1188 # Pack the parameters as best as we can, starting with the largest ones
1189 # and ASSUMING a 64-bit parameter size.
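# (Worked example, illustrative only: given references of 32, 16, 8 and 4 bits, the
#  greedy loop below packs all four into a single 64-bit parameter at bit offsets
#  0, 32, 48 and 56 respectively, leaving cMinParams at 1.)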
1190 self.cMinParams = 0;
1191 offNewParam = 0;
1192 for cBits in sorted(dBySize.keys(), reverse = True):
1193 for sStdRef in dBySize[cBits]:
1194 if offNewParam == 0 or offNewParam + cBits > 64:
1195 self.cMinParams += 1;
1196 offNewParam = cBits;
1197 else:
1198 offNewParam += cBits;
1199 assert(offNewParam <= 64);
1200
1201 for oRef in self.dParamRefs[sStdRef]:
1202 oRef.iNewParam = self.cMinParams - 1;
1203 oRef.offNewParam = offNewParam - cBits;
1204
1205 # Currently there are a few that require 4 parameters; list these so we can figure out why:
1206 if self.cMinParams >= 4:
1207 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1208 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1209
1210 return True;
1211
1212 ksHexDigits = '0123456789abcdefABCDEF';
1213
1214 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1215 """
1216 Scans the statements for things that have to be passed on to the threaded
1217 function (populates self.aoParamRefs).
1218 """
1219 for oStmt in aoStmts:
1220 # Some statements we can skip altogether.
1221 if isinstance(oStmt, iai.McCppPreProc):
1222 continue;
1223 if oStmt.isCppStmt() and oStmt.fDecode:
1224 continue;
1225 if oStmt.sName in ('IEM_MC_BEGIN',):
1226 continue;
1227
1228 if isinstance(oStmt, iai.McStmtVar):
1229 if oStmt.sValue is None:
1230 continue;
1231 aiSkipParams = { 0: True, 1: True, 3: True };
1232 else:
1233 aiSkipParams = {};
1234
1235 # Several statements have implicit parameters and some have different parameters.
1236 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1237 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
1238 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1239 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1240 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1241 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1242
1243 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
1244 and self.sVariation not in self.kdVariationsOnlyPre386):
1245 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1246
1247 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1248 # This is being pretty presumptive about bRm always being the RM byte...
1249 assert len(oStmt.asParams) == 3;
1250 assert oStmt.asParams[1] == 'bRm';
1251
1252 if self.sVariation in self.kdVariationsWithFlatAddr16:
1253 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1254 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1255 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1256 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1257 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1258 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1259 'uint8_t', oStmt, sStdRef = 'bSib'));
1260 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1261 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1262 else:
1263 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1264 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1265 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1266 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1267 'uint8_t', oStmt, sStdRef = 'bSib'));
1268 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1269 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1270 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1271 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1272 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1273
1274 # 8-bit register accesses need to have their index argument reworked to take REX into account.
1275 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1276 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1277 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
1278 aiSkipParams[idxReg] = True; # Skip the parameter below.
1279
1280 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1281 if ( self.sVariation in self.kdVariationsWithFlatAddress
1282 and oStmt.sName in self.kdMemMcToFlatInfo
1283 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1284 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1285
1286 # Inspect the target of calls to see if we need to pass down a
1287 # function pointer or function table pointer for it to work.
1288 if isinstance(oStmt, iai.McStmtCall):
1289 if oStmt.sFn[0] == 'p':
1290 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1291 elif ( oStmt.sFn[0] != 'i'
1292 and not oStmt.sFn.startswith('RT_CONCAT3')
1293 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1294 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1295 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1296 aiSkipParams[oStmt.idxFn] = True;
1297
1298 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1299 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1300 assert oStmt.idxFn == 2;
1301 aiSkipParams[0] = True;
1302
1303 # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
1304 if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
1305 aiSkipParams[0] = True;
1306
1307
1308 # Check all the parameters for bogus references.
1309 for iParam, sParam in enumerate(oStmt.asParams):
1310 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1311 # The parameter may contain a C expression, so we have to try to
1312 # extract the relevant bits, i.e. variables and fields, while
1313 # ignoring operators and parentheses.
1314 offParam = 0;
1315 while offParam < len(sParam):
1316 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1317 ch = sParam[offParam];
1318 if ch.isalpha() or ch == '_':
1319 offStart = offParam;
1320 offParam += 1;
1321 while offParam < len(sParam):
1322 ch = sParam[offParam];
1323 if not ch.isalnum() and ch != '_' and ch != '.':
1324 if ch != '-' or sParam[offParam + 1] != '>':
1325 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1326 if ( ch == '('
1327 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1328 offParam += len('(pVM)->') - 1;
1329 else:
1330 break;
1331 offParam += 1;
1332 offParam += 1;
1333 sRef = sParam[offStart : offParam];
1334
1335 # For register references we pass the full register index instead, as macros
1336 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg, and the
1337 # threaded function will be more efficient if we just pass the register index
1338 # as a 4-bit param.
1339 if ( sRef.startswith('IEM_GET_MODRM')
1340 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV')
1341 or sRef.startswith('IEM_GET_IMM8_REG') ):
1342 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1343 if sParam[offParam] != '(':
1344 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1345 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1346 if asMacroParams is None:
1347 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1348 offParam = offCloseParam + 1;
1349 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1350 oStmt, iParam, offStart));
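# Example (sketch): a reference like 'IEM_GET_MODRM_REG(pVCpu, bRm)' is thus
# captured whole as a single uint8_t threaded parameter, so the REX state it
# consults at decode time never needs to be passed to the threaded function.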
1351
1352 # We can skip known variables.
1353 elif sRef in self.oParent.dVariables:
1354 pass;
1355
1356 # Skip certain macro invocations.
1357 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1358 'IEM_GET_GUEST_CPU_FEATURES',
1359 'IEM_IS_GUEST_CPU_AMD',
1360 'IEM_IS_16BIT_CODE',
1361 'IEM_IS_32BIT_CODE',
1362 'IEM_IS_64BIT_CODE',
1363 ):
1364 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1365 if sParam[offParam] != '(':
1366 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1367 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1368 if asMacroParams is None:
1369 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1370 offParam = offCloseParam + 1;
1371
1372 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1373 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1374 'IEM_IS_16BIT_CODE',
1375 'IEM_IS_32BIT_CODE',
1376 'IEM_IS_64BIT_CODE',
1377 ):
1378 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1379 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1380 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1381 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1382 offParam += 1;
1383
1384 # Skip constants, globals, types (casts), sizeof and macros.
1385 elif ( sRef.startswith('IEM_OP_PRF_')
1386 or sRef.startswith('IEM_ACCESS_')
1387 or sRef.startswith('IEMINT_')
1388 or sRef.startswith('X86_GREG_')
1389 or sRef.startswith('X86_SREG_')
1390 or sRef.startswith('X86_EFL_')
1391 or sRef.startswith('X86_FSW_')
1392 or sRef.startswith('X86_FCW_')
1393 or sRef.startswith('X86_XCPT_')
1394 or sRef.startswith('IEMMODE_')
1395 or sRef.startswith('IEM_F_')
1396 or sRef.startswith('IEM_CIMPL_F_')
1397 or sRef.startswith('g_')
1398 or sRef.startswith('iemAImpl_')
1399 or sRef.startswith('kIemNativeGstReg_')
1400 or sRef.startswith('RT_ARCH_VAL_')
1401 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1402 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1403 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
1404 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1405 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1406 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1407 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1408 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1409 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1410 'NIL_RTGCPTR',) ):
1411 pass;
1412
1413 # Anything not skipped above, i.e. any variable (non-field) and any
1414 # decoder fields in IEMCPU, will need to be parameterized.
1415 elif ( ( '.' not in sRef
1416 and '-' not in sRef
1417 and sRef not in ('pVCpu', ) )
1418 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1419 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1420 oStmt, iParam, offStart));
1421 # Number.
1422 elif ch.isdigit():
1423 if ( ch == '0'
1424 and offParam + 2 <= len(sParam)
1425 and sParam[offParam + 1] in 'xX'
1426 and sParam[offParam + 2] in self.ksHexDigits ):
1427 offParam += 2;
1428 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1429 offParam += 1;
1430 else:
1431 while offParam < len(sParam) and sParam[offParam].isdigit():
1432 offParam += 1;
1433 # Comment?
1434 elif ( ch == '/'
1435 and offParam + 4 <= len(sParam)
1436 and sParam[offParam + 1] == '*'):
1437 offParam += 2;
1438 offNext = sParam.find('*/', offParam);
1439 if offNext < offParam:
1440 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1441 offParam = offNext + 2;
1442 # Whatever else.
1443 else:
1444 offParam += 1;
1445
1446 # Traverse the branches of conditionals.
1447 if isinstance(oStmt, iai.McStmtCond):
1448 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1449 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1450 return True;
1451
1452 def analyzeVariation(self, aoStmts):
1453 """
1454 2nd part of the analysis, done on each variation.
1455
1456 The variations may differ in parameter requirements and will end up with
1457 slightly different MC sequences. Thus this is done on each individually.
1458
1459 Returns dummy True - raises exception on trouble.
1460 """
1461 # Now scan the code for variables and field references that need to
1462 # be passed to the threaded function because they are related to the
1463 # instruction decoding.
1464 self.analyzeFindThreadedParamRefs(aoStmts);
1465 self.analyzeConsolidateThreadedParamRefs();
1466
1467 # Morph the statement stream for the block into what we'll be using in the threaded function.
1468 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1469 if iParamRef != len(self.aoParamRefs):
1470 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1471
1472 return True;
1473
1474 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1475 """
1476 Produces generic C++ statements that emit a call to the threaded function
1477 variation and any subsequent checks that may be necessary after that.
1478
1479 The sCallVarNm is the name of the variable with the threaded function
1480 to call. This is for the case where all the variations have the same
1481 parameters and only the threaded function number differs.
1482 """
1483 aoStmts = [
1484 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1485 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1486 cchIndent = cchIndent), # Scope and a hook for various stuff.
1487 ];
1488
1489 # The call to the threaded function.
1490 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1491 for iParam in range(self.cMinParams):
1492 asFrags = [];
1493 for aoRefs in self.dParamRefs.values():
1494 oRef = aoRefs[0];
1495 if oRef.iNewParam == iParam:
1496 sCast = '(uint64_t)';
1497 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1498 sCast = '(uint64_t)(u' + oRef.sType + ')';
1499 if oRef.offNewParam == 0:
1500 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1501 else:
1502 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1503 assert asFrags;
1504 asCallArgs.append(' | '.join(asFrags));
1505
1506 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
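# Example of the argument packing above (a sketch; offNewParam is assumed to be
# a bit offset): two 8-bit references consolidated into one 64-bit parameter
# come out roughly as '(uint64_t)(bRm) | ((uint64_t)(bSib) << 8)'.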
1507
1508 # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
1509 # emit this mode check from the compilation loop. On the
1510 # plus side, this means we eliminate an unnecessary call at
1511 # the end of the TB. :-)
1512 ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1513 ## mask and maybe emit additional checks.
1514 #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1515 # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1516 # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1517 # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1518 # cchIndent = cchIndent));
1519
1520 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1521 if not sCImplFlags:
1522 sCImplFlags = '0';
1523 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1524
1525 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1526 # indicate we should do so.
1527 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1528 asEndTbFlags = [];
1529 asTbBranchedFlags = [];
1530 for sFlag in self.oParent.dsCImplFlags:
1531 if self.kdCImplFlags[sFlag] is True:
1532 asEndTbFlags.append(sFlag);
1533 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1534 asTbBranchedFlags.append(sFlag);
1535 if ( asTbBranchedFlags
1536 and ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
1537 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
1538 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1539 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1540 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1541 if asEndTbFlags:
1542 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1543 cchIndent = cchIndent));
1544
1545 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1546 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1547
1548 return aoStmts;
1549
1550
1551class ThreadedFunction(object):
1552 """
1553 A threaded function.
1554 """
1555
1556 def __init__(self, oMcBlock: iai.McBlock) -> None:
1557 self.oMcBlock = oMcBlock # type: iai.McBlock
1558 # The remaining fields are only useful after analyze() has been called:
1559 ## Variations for this block. There is at least one.
1560 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1561 ## Variation dictionary containing the same as aoVariations.
1562 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1563 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1564 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1565 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1566 ## and those determined by analyzeCodeOperation().
1567 self.dsCImplFlags = {} # type: Dict[str, bool]
1568 ## The unique sub-name for this threaded function.
1569 self.sSubName = '';
1570 #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
1571 # self.sSubName = '_%s' % (oMcBlock.iInFunction);
1572
1573 @staticmethod
1574 def dummyInstance():
1575 """ Gets a dummy instance. """
1576 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1577 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1578
1579 def hasWithFlagsCheckingAndClearingVariation(self):
1580 """
1581 Check if there is one or more with flags checking and clearing
1582 variations for this threaded function.
1583 """
1584 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1585 if sVarWithFlags in self.dVariations:
1586 return True;
1587 return False;
1588
1589 #
1590 # Analysis and code morphing.
1591 #
1592
1593 def raiseProblem(self, sMessage):
1594 """ Raises a problem. """
1595 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1596
1597 def error(self, sMessage, oGenerator):
1598 """ Emits an error via the generator object, causing it to fail. """
1599 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1600
1601 def warning(self, sMessage):
1602 """ Emits a warning. """
1603 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1604
1605 ## Used by analyzeAndAnnotateName for memory MC blocks.
1606 kdAnnotateNameMemStmts = {
1607 'IEM_MC_FETCH_MEM16_U8': '__mem8',
1608 'IEM_MC_FETCH_MEM32_U8': '__mem8',
1609 'IEM_MC_FETCH_MEM_D80': '__mem80',
1610 'IEM_MC_FETCH_MEM_I16': '__mem16',
1611 'IEM_MC_FETCH_MEM_I32': '__mem32',
1612 'IEM_MC_FETCH_MEM_I64': '__mem64',
1613 'IEM_MC_FETCH_MEM_R32': '__mem32',
1614 'IEM_MC_FETCH_MEM_R64': '__mem64',
1615 'IEM_MC_FETCH_MEM_R80': '__mem80',
1616 'IEM_MC_FETCH_MEM_U128': '__mem128',
1617 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': '__mem128',
1618 'IEM_MC_FETCH_MEM_U128_NO_AC': '__mem128',
1619 'IEM_MC_FETCH_MEM_U16': '__mem16',
1620 'IEM_MC_FETCH_MEM_U16_DISP': '__mem16',
1621 'IEM_MC_FETCH_MEM_U16_SX_U32': '__mem16sx32',
1622 'IEM_MC_FETCH_MEM_U16_SX_U64': '__mem16sx64',
1623 'IEM_MC_FETCH_MEM_U16_ZX_U32': '__mem16zx32',
1624 'IEM_MC_FETCH_MEM_U16_ZX_U64': '__mem16zx64',
1625 'IEM_MC_FETCH_MEM_U256': '__mem256',
1626 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': '__mem256',
1627 'IEM_MC_FETCH_MEM_U256_NO_AC': '__mem256',
1628 'IEM_MC_FETCH_MEM_U32': '__mem32',
1629 'IEM_MC_FETCH_MEM_U32_DISP': '__mem32',
1630 'IEM_MC_FETCH_MEM_U32_SX_U64': '__mem32sx64',
1631 'IEM_MC_FETCH_MEM_U32_ZX_U64': '__mem32zx64',
1632 'IEM_MC_FETCH_MEM_U64': '__mem64',
1633 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': '__mem64',
1634 'IEM_MC_FETCH_MEM_U64_DISP': '__mem64',
1635 'IEM_MC_FETCH_MEM_U8': '__mem8',
1636 'IEM_MC_FETCH_MEM_U8_DISP': '__mem8',
1637 'IEM_MC_FETCH_MEM_U8_SX_U16': '__mem8sx16',
1638 'IEM_MC_FETCH_MEM_U8_SX_U32': '__mem8sx32',
1639 'IEM_MC_FETCH_MEM_U8_SX_U64': '__mem8sx64',
1640 'IEM_MC_FETCH_MEM_U8_ZX_U16': '__mem8zx16',
1641 'IEM_MC_FETCH_MEM_U8_ZX_U32': '__mem8zx32',
1642 'IEM_MC_FETCH_MEM_U8_ZX_U64': '__mem8zx64',
1643 'IEM_MC_FETCH_MEM_XMM': '__mem128',
1644 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': '__mem128',
1645 'IEM_MC_FETCH_MEM_XMM_NO_AC': '__mem128',
1646 'IEM_MC_FETCH_MEM_XMM_U32': '__mem32',
1647 'IEM_MC_FETCH_MEM_XMM_U64': '__mem64',
1648 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': '__mem128',
1649 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
1650 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': '__mem32',
1651 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': '__mem64',
1652 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
1653 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',
1654
1655 'IEM_MC_STORE_MEM_I16_CONST_BY_REF': '__mem16',
1656 'IEM_MC_STORE_MEM_I32_CONST_BY_REF': '__mem32',
1657 'IEM_MC_STORE_MEM_I64_CONST_BY_REF': '__mem64',
1658 'IEM_MC_STORE_MEM_I8_CONST_BY_REF': '__mem8',
1659 'IEM_MC_STORE_MEM_INDEF_D80_BY_REF': '__mem80',
1660 'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF': '__mem32',
1661 'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF': '__mem64',
1662 'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF': '__mem80',
1663 'IEM_MC_STORE_MEM_U128': '__mem128',
1664 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': '__mem128',
1665 'IEM_MC_STORE_MEM_U128_NO_AC': '__mem128',
1666 'IEM_MC_STORE_MEM_U16': '__mem16',
1667 'IEM_MC_STORE_MEM_U16_CONST': '__mem16c',
1668 'IEM_MC_STORE_MEM_U256': '__mem256',
1669 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': '__mem256',
1670 'IEM_MC_STORE_MEM_U256_NO_AC': '__mem256',
1671 'IEM_MC_STORE_MEM_U32': '__mem32',
1672 'IEM_MC_STORE_MEM_U32_CONST': '__mem32c',
1673 'IEM_MC_STORE_MEM_U64': '__mem64',
1674 'IEM_MC_STORE_MEM_U64_CONST': '__mem64c',
1675 'IEM_MC_STORE_MEM_U8': '__mem8',
1676 'IEM_MC_STORE_MEM_U8_CONST': '__mem8c',
1677
1678 'IEM_MC_MEM_MAP_D80_WO': '__mem80',
1679 'IEM_MC_MEM_MAP_I16_WO': '__mem16',
1680 'IEM_MC_MEM_MAP_I32_WO': '__mem32',
1681 'IEM_MC_MEM_MAP_I64_WO': '__mem64',
1682 'IEM_MC_MEM_MAP_R32_WO': '__mem32',
1683 'IEM_MC_MEM_MAP_R64_WO': '__mem64',
1684 'IEM_MC_MEM_MAP_R80_WO': '__mem80',
1685 'IEM_MC_MEM_MAP_U128_ATOMIC': '__mem128a',
1686 'IEM_MC_MEM_MAP_U128_RO': '__mem128',
1687 'IEM_MC_MEM_MAP_U128_RW': '__mem128',
1688 'IEM_MC_MEM_MAP_U128_WO': '__mem128',
1689 'IEM_MC_MEM_MAP_U16_ATOMIC': '__mem16a',
1690 'IEM_MC_MEM_MAP_U16_RO': '__mem16',
1691 'IEM_MC_MEM_MAP_U16_RW': '__mem16',
1692 'IEM_MC_MEM_MAP_U16_WO': '__mem16',
1693 'IEM_MC_MEM_MAP_U32_ATOMIC': '__mem32a',
1694 'IEM_MC_MEM_MAP_U32_RO': '__mem32',
1695 'IEM_MC_MEM_MAP_U32_RW': '__mem32',
1696 'IEM_MC_MEM_MAP_U32_WO': '__mem32',
1697 'IEM_MC_MEM_MAP_U64_ATOMIC': '__mem64a',
1698 'IEM_MC_MEM_MAP_U64_RO': '__mem64',
1699 'IEM_MC_MEM_MAP_U64_RW': '__mem64',
1700 'IEM_MC_MEM_MAP_U64_WO': '__mem64',
1701 'IEM_MC_MEM_MAP_U8_ATOMIC': '__mem8a',
1702 'IEM_MC_MEM_MAP_U8_RO': '__mem8',
1703 'IEM_MC_MEM_MAP_U8_RW': '__mem8',
1704 'IEM_MC_MEM_MAP_U8_WO': '__mem8',
1705 };
1706 ## Used by analyzeAndAnnotateName for non-memory MC blocks.
1707 kdAnnotateNameRegStmts = {
1708 'IEM_MC_FETCH_GREG_U8': '__greg8',
1709 'IEM_MC_FETCH_GREG_U8_ZX_U16': '__greg8zx16',
1710 'IEM_MC_FETCH_GREG_U8_ZX_U32': '__greg8zx32',
1711 'IEM_MC_FETCH_GREG_U8_ZX_U64': '__greg8zx64',
1712 'IEM_MC_FETCH_GREG_U8_SX_U16': '__greg8sx16',
1713 'IEM_MC_FETCH_GREG_U8_SX_U32': '__greg8sx32',
1714 'IEM_MC_FETCH_GREG_U8_SX_U64': '__greg8sx64',
1715 'IEM_MC_FETCH_GREG_U16': '__greg16',
1716 'IEM_MC_FETCH_GREG_U16_ZX_U32': '__greg16zx32',
1717 'IEM_MC_FETCH_GREG_U16_ZX_U64': '__greg16zx64',
1718 'IEM_MC_FETCH_GREG_U16_SX_U32': '__greg16sx32',
1719 'IEM_MC_FETCH_GREG_U16_SX_U64': '__greg16sx64',
1720 'IEM_MC_FETCH_GREG_U32': '__greg32',
1721 'IEM_MC_FETCH_GREG_U32_ZX_U64': '__greg32zx64',
1722 'IEM_MC_FETCH_GREG_U32_SX_U64': '__greg32sx64',
1723 'IEM_MC_FETCH_GREG_U64': '__greg64',
1724 'IEM_MC_FETCH_GREG_U64_ZX_U64': '__greg64zx64',
1725 'IEM_MC_FETCH_GREG_PAIR_U32': '__greg32',
1726 'IEM_MC_FETCH_GREG_PAIR_U64': '__greg64',
1727
1728 'IEM_MC_STORE_GREG_U8': '__greg8',
1729 'IEM_MC_STORE_GREG_U16': '__greg16',
1730 'IEM_MC_STORE_GREG_U32': '__greg32',
1731 'IEM_MC_STORE_GREG_U64': '__greg64',
1732 'IEM_MC_STORE_GREG_I64': '__greg64',
1733 'IEM_MC_STORE_GREG_U8_CONST': '__greg8c',
1734 'IEM_MC_STORE_GREG_U16_CONST': '__greg16c',
1735 'IEM_MC_STORE_GREG_U32_CONST': '__greg32c',
1736 'IEM_MC_STORE_GREG_U64_CONST': '__greg64c',
1737 'IEM_MC_STORE_GREG_PAIR_U32': '__greg32',
1738 'IEM_MC_STORE_GREG_PAIR_U64': '__greg64',
1739
1740 'IEM_MC_FETCH_SREG_U16': '__sreg16',
1741 'IEM_MC_FETCH_SREG_ZX_U32': '__sreg32',
1742 'IEM_MC_FETCH_SREG_ZX_U64': '__sreg64',
1743 'IEM_MC_FETCH_SREG_BASE_U64': '__sbase64',
1744 'IEM_MC_FETCH_SREG_BASE_U32': '__sbase32',
1745 'IEM_MC_STORE_SREG_BASE_U64': '__sbase64',
1746 'IEM_MC_STORE_SREG_BASE_U32': '__sbase32',
1747
1748 'IEM_MC_REF_GREG_U8': '__greg8',
1749 'IEM_MC_REF_GREG_U16': '__greg16',
1750 'IEM_MC_REF_GREG_U32': '__greg32',
1751 'IEM_MC_REF_GREG_U64': '__greg64',
1752 'IEM_MC_REF_GREG_U8_CONST': '__greg8',
1753 'IEM_MC_REF_GREG_U16_CONST': '__greg16',
1754 'IEM_MC_REF_GREG_U32_CONST': '__greg32',
1755 'IEM_MC_REF_GREG_U64_CONST': '__greg64',
1756 'IEM_MC_REF_GREG_I32': '__greg32',
1757 'IEM_MC_REF_GREG_I64': '__greg64',
1758 'IEM_MC_REF_GREG_I32_CONST': '__greg32',
1759 'IEM_MC_REF_GREG_I64_CONST': '__greg64',
1760
1761 'IEM_MC_STORE_FPUREG_R80_SRC_REF': '__fpu',
1762 'IEM_MC_REF_FPUREG': '__fpu',
1763
1764 'IEM_MC_FETCH_MREG_U64': '__mreg64',
1765 'IEM_MC_FETCH_MREG_U32': '__mreg32',
1766 'IEM_MC_FETCH_MREG_U16': '__mreg16',
1767 'IEM_MC_STORE_MREG_U64': '__mreg64',
1768 'IEM_MC_STORE_MREG_U32_ZX_U64': '__mreg32zx64',
1769 'IEM_MC_REF_MREG_U64': '__mreg64',
1770 'IEM_MC_REF_MREG_U64_CONST': '__mreg64',
1771 'IEM_MC_REF_MREG_U32_CONST': '__mreg32',
1772
1773 'IEM_MC_CLEAR_XREG_U32_MASK': '__xreg32x4',
1774 'IEM_MC_FETCH_XREG_U128': '__xreg128',
1775 'IEM_MC_FETCH_XREG_XMM': '__xreg128',
1776 'IEM_MC_FETCH_XREG_U64': '__xreg64',
1777 'IEM_MC_FETCH_XREG_U32': '__xreg32',
1778 'IEM_MC_FETCH_XREG_U16': '__xreg16',
1779 'IEM_MC_FETCH_XREG_U8': '__xreg8',
1780 'IEM_MC_FETCH_XREG_PAIR_U128': '__xreg128p',
1781 'IEM_MC_FETCH_XREG_PAIR_XMM': '__xreg128p',
1782 'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
1783 'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',
1784
1785 'IEM_MC_STORE_XREG_U32_U128': '__xreg32',
1786 'IEM_MC_STORE_XREG_U128': '__xreg128',
1787 'IEM_MC_STORE_XREG_XMM': '__xreg128',
1788 'IEM_MC_STORE_XREG_XMM_U32': '__xreg32',
1789 'IEM_MC_STORE_XREG_XMM_U64': '__xreg64',
1790 'IEM_MC_STORE_XREG_U64': '__xreg64',
1791 'IEM_MC_STORE_XREG_U64_ZX_U128': '__xreg64zx128',
1792 'IEM_MC_STORE_XREG_U32': '__xreg32',
1793 'IEM_MC_STORE_XREG_U16': '__xreg16',
1794 'IEM_MC_STORE_XREG_U8': '__xreg8',
1795 'IEM_MC_STORE_XREG_U32_ZX_U128': '__xreg32zx128',
1796 'IEM_MC_STORE_XREG_R32': '__xreg32',
1797 'IEM_MC_STORE_XREG_R64': '__xreg64',
1798 'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX': '__xreg8zx',
1799 'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX': '__xreg16zx',
1800 'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX': '__xreg32zx',
1801 'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX': '__xreg64zx',
1802 'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX': '__xreg128zx',
1803 'IEM_MC_REF_XREG_U128': '__xreg128',
1804 'IEM_MC_REF_XREG_U128_CONST': '__xreg128',
1805 'IEM_MC_REF_XREG_U32_CONST': '__xreg32',
1806 'IEM_MC_REF_XREG_U64_CONST': '__xreg64',
1807 'IEM_MC_REF_XREG_R32_CONST': '__xreg32',
1808 'IEM_MC_REF_XREG_R64_CONST': '__xreg64',
1809 'IEM_MC_REF_XREG_XMM_CONST': '__xreg128',
1810 'IEM_MC_COPY_XREG_U128': '__xreg128',
1811
1812 'IEM_MC_FETCH_YREG_U256': '__yreg256',
1813 'IEM_MC_FETCH_YREG_U128': '__yreg128',
1814 'IEM_MC_FETCH_YREG_U64': '__yreg64',
1815 'IEM_MC_FETCH_YREG_U32': '__yreg32',
1816 'IEM_MC_STORE_YREG_U128': '__yreg128',
1817 'IEM_MC_STORE_YREG_U32_ZX_VLMAX': '__yreg32zx',
1818 'IEM_MC_STORE_YREG_U64_ZX_VLMAX': '__yreg64zx',
1819 'IEM_MC_STORE_YREG_U128_ZX_VLMAX': '__yreg128zx',
1820 'IEM_MC_STORE_YREG_U256_ZX_VLMAX': '__yreg256zx',
1821 'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX': '__yreg8',
1822 'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX': '__yreg16',
1823 'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX': '__yreg32',
1824 'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX': '__yreg64',
1825 'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX': '__yreg128',
1826 'IEM_MC_REF_YREG_U128': '__yreg128',
1827 'IEM_MC_REF_YREG_U128_CONST': '__yreg128',
1828 'IEM_MC_REF_YREG_U64_CONST': '__yreg64',
1829 'IEM_MC_COPY_YREG_U256_ZX_VLMAX': '__yreg256zx',
1830 'IEM_MC_COPY_YREG_U128_ZX_VLMAX': '__yreg128zx',
1831 'IEM_MC_COPY_YREG_U64_ZX_VLMAX': '__yreg64zx',
1832 'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX': '__yreg3296',
1833 'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX': '__yreg6464',
1834 'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX': '__yreg64hi64hi',
1835 'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX': '__yreg64lo64lo',
1836 'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
1837 'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
1838 };
1839 kdAnnotateNameCallStmts = {
1840 'IEM_MC_CALL_CIMPL_0': '__cimpl',
1841 'IEM_MC_CALL_CIMPL_1': '__cimpl',
1842 'IEM_MC_CALL_CIMPL_2': '__cimpl',
1843 'IEM_MC_CALL_CIMPL_3': '__cimpl',
1844 'IEM_MC_CALL_CIMPL_4': '__cimpl',
1845 'IEM_MC_CALL_CIMPL_5': '__cimpl',
1846 'IEM_MC_CALL_CIMPL_6': '__cimpl',
1847 'IEM_MC_CALL_CIMPL_7': '__cimpl',
1848 'IEM_MC_DEFER_TO_CIMPL_0_RET': '__cimpl_defer',
1849 'IEM_MC_DEFER_TO_CIMPL_1_RET': '__cimpl_defer',
1850 'IEM_MC_DEFER_TO_CIMPL_2_RET': '__cimpl_defer',
1851 'IEM_MC_DEFER_TO_CIMPL_3_RET': '__cimpl_defer',
1852 'IEM_MC_DEFER_TO_CIMPL_4_RET': '__cimpl_defer',
1853 'IEM_MC_DEFER_TO_CIMPL_5_RET': '__cimpl_defer',
1854 'IEM_MC_DEFER_TO_CIMPL_6_RET': '__cimpl_defer',
1855 'IEM_MC_DEFER_TO_CIMPL_7_RET': '__cimpl_defer',
1856 'IEM_MC_CALL_VOID_AIMPL_0': '__aimpl',
1857 'IEM_MC_CALL_VOID_AIMPL_1': '__aimpl',
1858 'IEM_MC_CALL_VOID_AIMPL_2': '__aimpl',
1859 'IEM_MC_CALL_VOID_AIMPL_3': '__aimpl',
1860 'IEM_MC_CALL_VOID_AIMPL_4': '__aimpl',
1861 'IEM_MC_CALL_VOID_AIMPL_5': '__aimpl',
1862 'IEM_MC_CALL_AIMPL_0': '__aimpl_ret',
1863 'IEM_MC_CALL_AIMPL_1': '__aimpl_ret',
1864 'IEM_MC_CALL_AIMPL_2': '__aimpl_ret',
1865 'IEM_MC_CALL_AIMPL_3': '__aimpl_ret',
1866 'IEM_MC_CALL_AIMPL_4': '__aimpl_ret',
1867 'IEM_MC_CALL_AIMPL_5': '__aimpl_ret',
1868 'IEM_MC_CALL_AIMPL_6': '__aimpl_ret',
1869 'IEM_MC_CALL_VOID_AIMPL_6': '__aimpl_fpu',
1870 'IEM_MC_CALL_FPU_AIMPL_0': '__aimpl_fpu',
1871 'IEM_MC_CALL_FPU_AIMPL_1': '__aimpl_fpu',
1872 'IEM_MC_CALL_FPU_AIMPL_2': '__aimpl_fpu',
1873 'IEM_MC_CALL_FPU_AIMPL_3': '__aimpl_fpu',
1874 'IEM_MC_CALL_FPU_AIMPL_4': '__aimpl_fpu',
1875 'IEM_MC_CALL_FPU_AIMPL_5': '__aimpl_fpu',
1876 'IEM_MC_CALL_MMX_AIMPL_0': '__aimpl_mmx',
1877 'IEM_MC_CALL_MMX_AIMPL_1': '__aimpl_mmx',
1878 'IEM_MC_CALL_MMX_AIMPL_2': '__aimpl_mmx',
1879 'IEM_MC_CALL_MMX_AIMPL_3': '__aimpl_mmx',
1880 'IEM_MC_CALL_MMX_AIMPL_4': '__aimpl_mmx',
1881 'IEM_MC_CALL_MMX_AIMPL_5': '__aimpl_mmx',
1882 'IEM_MC_CALL_SSE_AIMPL_0': '__aimpl_sse',
1883 'IEM_MC_CALL_SSE_AIMPL_1': '__aimpl_sse',
1884 'IEM_MC_CALL_SSE_AIMPL_2': '__aimpl_sse',
1885 'IEM_MC_CALL_SSE_AIMPL_3': '__aimpl_sse',
1886 'IEM_MC_CALL_SSE_AIMPL_4': '__aimpl_sse',
1887 'IEM_MC_CALL_SSE_AIMPL_5': '__aimpl_sse',
1888 'IEM_MC_CALL_AVX_AIMPL_0': '__aimpl_avx',
1889 'IEM_MC_CALL_AVX_AIMPL_1': '__aimpl_avx',
1890 'IEM_MC_CALL_AVX_AIMPL_2': '__aimpl_avx',
1891 'IEM_MC_CALL_AVX_AIMPL_3': '__aimpl_avx',
1892 'IEM_MC_CALL_AVX_AIMPL_4': '__aimpl_avx',
1893 'IEM_MC_CALL_AVX_AIMPL_5': '__aimpl_avx',
1894 };
1895 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1896 """
1897 Scans the statements and variation lists for clues about the threaded function,
1898 and sets self.sSubName if successful.
1899 """
1900 # Operand base naming:
1901 dHits = {};
1902 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1903 if cHits > 0:
1904 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1905 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1906 else:
1907 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1908 if cHits > 0:
1909 sStmtNm = sorted(dHits.keys())[-1]; # priority (by sort order): STORE, REF, FETCH.
1910 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1911 else:
1912 # No op details, try to name it by call type...
1913 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1914 if cHits > 0:
1915 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1916 self.sSubName = self.kdAnnotateNameCallStmts[sStmtNm];
1917 return;
1918
1919 # Add call info if any:
1920 dHits = {};
1921 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1922 if cHits > 0:
1923 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1924 sName += self.kdAnnotateNameCallStmts[sStmtNm][1:];
1925
1926 self.sSubName = sName;
1927 return;
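# Example (sketch): a block that maps a 32-bit memory operand and then calls a
# C implementation would typically get sSubName == '__mem32' + '_cimpl', i.e.
# '__mem32_cimpl'.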
1928
1929 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1930 """ Scans the statements for MC variables and call arguments. """
1931 for oStmt in aoStmts:
1932 if isinstance(oStmt, iai.McStmtVar):
1933 if oStmt.sVarName in self.dVariables:
1934 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1935 self.dVariables[oStmt.sVarName] = oStmt;
1936
1937 # There shouldn't be any variables or arguments declared inside if/
1938 # else blocks, but scan them too to be on the safe side.
1939 if isinstance(oStmt, iai.McStmtCond):
1940 #cBefore = len(self.dVariables);
1941 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1942 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1943 #if len(self.dVariables) != cBefore:
1944 # raise Exception('Variables/arguments defined in conditional branches!');
1945 return True;
1946
1947 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
1948 """
1949 Analyzes the code looking for clues as to additional side-effects.
1950
1951 Currently this is simply looking for branching and adding the relevant
1952 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1953 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1954
1955 This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.
1956
1957 Returns annotation on return style.
1958 """
1959 sAnnotation = None;
1960 for oStmt in aoStmts:
1961 # Set IEM_CIMPL_F_BRANCH_XXX if we see any branching MCs.
1962 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1963 assert not fSeenConditional;
1964 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1965 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1966 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1967 if fSeenConditional:
1968 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1969
1970 # Check for CIMPL and AIMPL calls.
1971 if oStmt.sName.startswith('IEM_MC_CALL_'):
1972 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1973 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1974 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1975 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1976 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1977 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1978 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1979 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1980 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1981 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1982 else:
1983 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1984
1985 # Check for return statements.
1986 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
1987 assert sAnnotation is None;
1988 sAnnotation = g_ksFinishAnnotation_Advance;
1989 elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1990 'IEM_MC_REL_JMP_S32_AND_FINISH',):
1991 assert sAnnotation is None;
1992 sAnnotation = g_ksFinishAnnotation_RelJmp;
1993 elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1994 'IEM_MC_SET_RIP_U64_AND_FINISH',):
1995 assert sAnnotation is None;
1996 sAnnotation = g_ksFinishAnnotation_SetJmp;
1997 elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1998 assert sAnnotation is None;
1999 sAnnotation = g_ksFinishAnnotation_DeferToCImpl;
2000
2001 # Collect MCs working on EFLAGS. Caller will check this.
2002 if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS',
2003 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS', 'IEM_MC_ARG_LOCAL_EFLAGS', ):
2004 dEflStmts[oStmt.sName] = oStmt;
2005 elif isinstance(oStmt, iai.McStmtCall):
2006 if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
2007 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
2008 if ( oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
2009 or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
2010 dEflStmts[oStmt.sName] = oStmt;
2011
2012 # Process branches of conditionals recursively.
2013 if isinstance(oStmt, iai.McStmtCond):
2014 oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
2015 if oStmt.aoElseBranch:
2016 oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);
2017
2018 return sAnnotation;
2019
2020 def analyzeThreadedFunction(self, oGenerator):
2021 """
2022 Analyzes the code, identifying the number of parameters it requires and such.
2023
2024 Returns dummy True - raises exception on trouble.
2025 """
2026
2027 #
2028 # Decode the block into a list/tree of McStmt objects.
2029 #
2030 aoStmts = self.oMcBlock.decode();
2031
2032 #
2033 # Check the block for errors before we proceed (will decode it).
2034 #
2035 asErrors = self.oMcBlock.check();
2036 if asErrors:
2037 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
2038 for sError in asErrors]));
2039
2040 #
2041 # Scan the statements for local variables and call arguments (self.dVariables).
2042 #
2043 self.analyzeFindVariablesAndCallArgs(aoStmts);
2044
2045 #
2046 # Scan the code for IEM_CIMPL_F_ and other clues.
2047 #
2048 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
2049 dEflStmts = {};
2050 self.analyzeCodeOperation(aoStmts, dEflStmts);
2051 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
2052 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
2053 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
2054 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls', oGenerator);
2055
2056 #
2057 # Analyse EFLAGS related MCs and @opflmodify and friends.
2058 #
2059 if dEflStmts:
2060 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
2061 if ( oInstruction is None
2062 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
2063 sMcNames = '+'.join(dEflStmts.keys());
2064 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
2065 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
2066 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts or 'IEM_MC_COMMIT_EFLAGS_OPT' in dEflStmts:
2067 if not oInstruction.asFlModify:
2068 if oInstruction.sMnemonic not in [ 'not', ]:
2069 self.error('Uses IEM_MC_COMMIT_EFLAGS[_OPT] but has no flags in @opflmodify!', oGenerator);
2070 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
2071 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
2072 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
2073 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
2074 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
2075 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
2076 if not oInstruction.asFlModify:
2077 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
2078 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
2079 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
2080 if not oInstruction.asFlTest:
2081 if oInstruction.sMnemonic not in [ 'not', ]:
2082 self.error('Expected @opfltest!', oGenerator);
2083 if oInstruction and oInstruction.asFlSet:
2084 for sFlag in oInstruction.asFlSet:
2085 if sFlag not in oInstruction.asFlModify:
2086 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
2087 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2088 if oInstruction and oInstruction.asFlClear:
2089 for sFlag in oInstruction.asFlClear:
2090 if sFlag not in oInstruction.asFlModify:
2091 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
2092 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2093
2094 #
2095 # Create variations as needed.
2096 #
2097 if iai.McStmt.findStmtByNames(aoStmts,
2098 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
2099 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
2100 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
2101 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
2102 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
2103
2104 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
2105 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
2106 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
2107 'IEM_MC_FETCH_MEM_U32' : True,
2108 'IEM_MC_FETCH_MEM_U64' : True,
2109 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
2110 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
2111 'IEM_MC_STORE_MEM_U32' : True,
2112 'IEM_MC_STORE_MEM_U64' : True, }):
2113 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2114 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
2115 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2116 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
2117 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2118 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
2119 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2120 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
2121 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2122 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2123 else:
2124 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
2125 else:
2126 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2127 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
2128 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2129 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
2130 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2131 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
2132 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2133 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
2134 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2135 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2136 else:
2137 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
2138
2139 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2140 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
2141 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
2142 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
2143 asVariationsBase = asVariations;
2144 asVariations = [];
2145 for sVariation in asVariationsBase:
2146 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
2147 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
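# So each base variation gains a pair of conditional siblings here, e.g. (sketch)
# ksVariation_64 is expanded into ksVariation_64_Jmp and ksVariation_64_NoJmp,
# which emitThreadedCallStmts() later selects between via its sBranch parameter.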
2148
2149 if not iai.McStmt.findStmtByNames(aoStmts,
2150 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
2151 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2152 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2153 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
2154 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
2155 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
2156 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
2157 }):
2158 asVariations = [sVariation for sVariation in asVariations
2159 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
2160
2161 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
2162
2163 # Dictionary variant of the list.
2164 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
2165
2166 #
2167 # Try annotate the threaded function name.
2168 #
2169 self.analyzeAndAnnotateName(aoStmts);
2170
2171 #
2172 # Continue the analysis on each variation.
2173 #
2174 for oVariation in self.aoVariations:
2175 oVariation.analyzeVariation(aoStmts);
2176
2177 return True;
2178
2179 ## Used by emitThreadedCallStmts.
2180 kdVariationsWithNeedForPrefixCheck = {
2181 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
2182 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
2183 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
2184 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
2185 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
2186 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
2187 ThreadedFunctionVariation.ksVariation_32_Flat: True,
2188 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
2189 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
2190 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
2191 };
2192
2193 def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
2194 """
2195 Worker for morphInputCode that returns a list of statements that emits
2196 the call to the threaded functions for the block.
2197
2198 The sBranch parameter is used with conditional branches where we'll emit
2199 different threaded calls depending on whether we're in the jump-taken or
2200 no-jump code path.
2201 """
2202 # Special case for only default variation:
2203 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
2204 assert not sBranch;
2205 return self.aoVariations[0].emitThreadedCallStmts(0);
2206
2207 #
2208 # Case statement sub-class.
2209 #
2210 dByVari = self.dVariations;
2211 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
2212 class Case:
2213 def __init__(self, sCond, sVarNm = None):
2214 self.sCond = sCond;
2215 self.sVarNm = sVarNm;
2216 self.oVar = dByVari[sVarNm] if sVarNm else None;
2217 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
2218
2219 def toCode(self):
2220 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2221 if self.aoBody:
2222 aoStmts.extend(self.aoBody);
2223 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
2224 return aoStmts;
2225
2226 def toFunctionAssignment(self):
2227 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2228 if self.aoBody:
2229 aoStmts.extend([
2230 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
2231 iai.McCppGeneric('break;', cchIndent = 8),
2232 ]);
2233 return aoStmts;
2234
2235 def isSame(self, oThat):
2236 if not self.aoBody: # fall thru always matches.
2237 return True;
2238 if len(self.aoBody) != len(oThat.aoBody):
2239 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
2240 return False;
2241 for iStmt, oStmt in enumerate(self.aoBody):
2242 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
2243 assert isinstance(oStmt, iai.McCppGeneric);
2244 assert not isinstance(oStmt, iai.McStmtCond);
2245 if isinstance(oStmt, iai.McStmtCond):
2246 return False;
2247 if oStmt.sName != oThatStmt.sName:
2248 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
2249 return False;
2250 if len(oStmt.asParams) != len(oThatStmt.asParams):
2251 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
2252 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
2253 return False;
2254 for iParam, sParam in enumerate(oStmt.asParams):
2255 if ( sParam != oThatStmt.asParams[iParam]
2256 and ( iParam != 1
2257 or not isinstance(oStmt, iai.McCppCall)
2258 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
2259 or sParam != self.oVar.getIndexName()
2260 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
2261 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
2262 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
2263 return False;
2264 return True;
2265
2266 #
2267 # Determine what we're switch on.
2268 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
2269 #
2270 fSimple = True;
2271 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
2272 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
2273 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
2274 # Accesses via FS, GS and CS go thru non-FLAT functions. (CS
2275 # is not writable in 32-bit mode (at least), thus the penalty mode
2276 # for any accesses via it (simpler this way).)
2277 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
2278 fSimple = False; # threaded functions.
2279 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
2280 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
2281 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
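# The switch value is thus a small bitfield (sketch of the layout implied above):
# bits 0..2 come from fExec (CPU mode and flat/pre-386), bit 3 marks an
# address-size override, bit 4 an FS/GS/CS segment override, and bit 5 selects
# the eflags-checking-and-clearing variations; the case labels below OR in 8,
# 16 and 32 accordingly.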
2282
2283 #
2284 # Generate the case statements.
2285 #
2286 # pylintx: disable=x
2287 aoCases = [];
2288 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
2289 assert not fSimple and not sBranch;
2290 aoCases.extend([
2291 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
2292 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
2293 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
2294 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
2295 ]);
2296 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
2297 aoCases.extend([
2298 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
2299 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
2300 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
2301 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
2302 ]);
2303 elif ThrdFnVar.ksVariation_64 in dByVari:
2304 assert fSimple and not sBranch;
2305 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
2306 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
2307 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
2308 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
2309 assert fSimple and sBranch;
2310 aoCases.append(Case('IEMMODE_64BIT',
2311 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
2312 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
2313 aoCases.append(Case('IEMMODE_64BIT | 32',
2314 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
2315
2316 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
2317 assert not fSimple and not sBranch;
2318 aoCases.extend([
2319 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
2320 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
2321 Case('IEMMODE_32BIT | 16', None), # fall thru
2322 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2323 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
2324 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
2325 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
2326 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
2327 ]);
2328 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
2329 aoCases.extend([
2330 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
2331 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
2332 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
2333 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2334 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
2335 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
2336 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
2337 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
2338 ]);
2339 elif ThrdFnVar.ksVariation_32 in dByVari:
2340 assert fSimple and not sBranch;
2341 aoCases.extend([
2342 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2343 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2344 ]);
2345 if ThrdFnVar.ksVariation_32f in dByVari:
2346 aoCases.extend([
2347 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2348 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2349 ]);
2350 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
2351 assert fSimple and sBranch;
2352 aoCases.extend([
2353 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2354 Case('IEMMODE_32BIT',
2355 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
2356 ]);
2357 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
2358 aoCases.extend([
2359 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2360 Case('IEMMODE_32BIT | 32',
2361 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
2362 ]);
2363
2364 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
2365 assert not fSimple and not sBranch;
2366 aoCases.extend([
2367 Case('IEMMODE_16BIT | 16', None), # fall thru
2368 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
2369 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
2370 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
2371 ]);
2372 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
2373 aoCases.extend([
2374 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
2375 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
2376 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
2377 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
2378 ]);
2379 elif ThrdFnVar.ksVariation_16 in dByVari:
2380 assert fSimple and not sBranch;
2381 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
2382 if ThrdFnVar.ksVariation_16f in dByVari:
2383 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
2384 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
2385 assert fSimple and sBranch;
2386 aoCases.append(Case('IEMMODE_16BIT',
2387 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
2388 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
2389 aoCases.append(Case('IEMMODE_16BIT | 32',
2390 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
2391
2392
2393 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
2394 if not fSimple:
2395 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
2396 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
2397 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
2398 if not fSimple:
2399 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
2400 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
2401
2402 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
2403 assert fSimple and sBranch;
2404 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
2405 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
2406 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
2407 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
2408 assert fSimple and sBranch;
2409 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
2410 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
2411 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
2412
2413 #
2414 # If the case bodies are all the same, except for the function called,
2415 # we can reduce the code size and hopefully compile time.
2416 #
2417 iFirstCaseWithBody = 0;
2418 while not aoCases[iFirstCaseWithBody].aoBody:
2419 iFirstCaseWithBody += 1;
2420 fAllSameCases = True;
2421 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
2422 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
2423 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
2424 if fAllSameCases:
2425 aoStmts = [
2426 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
2427 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2428 iai.McCppGeneric('{'),
2429 ];
2430 for oCase in aoCases:
2431 aoStmts.extend(oCase.toFunctionAssignment());
2432 aoStmts.extend([
2433 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2434 iai.McCppGeneric('}'),
2435 ]);
2436 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
2437
2438 else:
2439 #
2440 # Generate the generic switch statement.
2441 #
2442 aoStmts = [
2443 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2444 iai.McCppGeneric('{'),
2445 ];
2446 for oCase in aoCases:
2447 aoStmts.extend(oCase.toCode());
2448 aoStmts.extend([
2449 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2450 iai.McCppGeneric('}'),
2451 ]);
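# The generated decoder code is thus a sketch along these lines (generic case):
#   switch (<sSwitchValue>)
#   {
#       case IEMMODE_64BIT:
#           IEM_MC2_BEGIN_EMIT_CALLS(...); IEM_MC2_EMIT_CALL_<N>(...); IEM_MC2_END_EMIT_CALLS(...);
#           break;
#       ...
#       IEM_NOT_REACHED_DEFAULT_CASE_RET();
#   }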
2452
2453 return aoStmts;
2454
2455 def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
2456 """
2457 Adjusts (& copies) the statements for the input/decoder so it will emit
2458 calls to the right threaded functions for each block.
2459
2460 Returns list/tree of statements (aoStmts is not modified) and updated
2461 fCallEmitted status.
2462 """
2463 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
2464 aoDecoderStmts = [];
2465
2466 for iStmt, oStmt in enumerate(aoStmts):
2467 # Copy the statement. Make a deep copy to make sure we've got our own
2468 # copies of all instance variables, even if a bit overkill at the moment.
2469 oNewStmt = copy.deepcopy(oStmt);
2470 aoDecoderStmts.append(oNewStmt);
2471 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
2472 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
2473 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
2474
2475 # If we haven't emitted the threaded function call yet, look for
2476 # statements which it would naturally follow or precede.
2477 if not fCallEmitted:
2478 if not oStmt.isCppStmt():
2479 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
2480 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
2481 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
2482 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
2483 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
2484 aoDecoderStmts.pop();
2485 if not fIsConditional:
2486 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2487 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
2488 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2489 else:
2490 assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2491 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2492 'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
2493 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2494 aoDecoderStmts.append(oNewStmt);
2495 fCallEmitted = True;
2496
2497 elif iai.g_dMcStmtParsers[oStmt.sName][2]:
2498 # This is for Jmp/NoJmp with loopne and friends, which modify state other than RIP.
2499 if not sBranchAnnotation:
2500 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2501 assert fIsConditional;
2502 aoDecoderStmts.pop();
2503 if sBranchAnnotation == g_ksFinishAnnotation_Advance:
2504 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,});
2505 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2506 elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
2507 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
2508 { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
2509 'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
2510 'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
2511 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2512 else:
2513 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2514 aoDecoderStmts.append(oNewStmt);
2515 fCallEmitted = True;
2516
2517 elif ( not fIsConditional
2518 and oStmt.fDecode
2519 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
2520 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
2521 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2522 fCallEmitted = True;
2523
2524 # Process branches of conditionals recursively.
2525 if isinstance(oStmt, iai.McStmtCond):
2526 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
2527 fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
2528 if oStmt.aoElseBranch:
2529 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
2530 fCallEmitted, cDepth + 1,
2531 oStmt.oElseBranchAnnotation);
2532 else:
2533 fCallEmitted2 = False;
2534 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
2535
2536 if not fCallEmitted and cDepth == 0:
2537 self.raiseProblem('Unable to insert call to threaded function.');
2538
2539 return (aoDecoderStmts, fCallEmitted);
2540
2541
2542 def generateInputCode(self):
2543 """
2544 Modifies the input code.
2545 """
2546 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2547
2548 if len(self.oMcBlock.aoStmts) == 1:
2549 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2550 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2551 if self.dsCImplFlags:
2552 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2553 else:
2554 sCode += '0;\n';
2555 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2556 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2557 sIndent = ' ' * (min(cchIndent, 2) - 2);
2558 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2559 return sCode;
2560
2561 # IEM_MC_BEGIN/END block
2562 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2563 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2564 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2565 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2566 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2567
2568# Short alias for ThreadedFunctionVariation.
2569ThrdFnVar = ThreadedFunctionVariation;
2570
2571
2572class IEMThreadedGenerator(object):
2573 """
2574 The threaded code generator & annotator.
2575 """
2576
2577 def __init__(self):
2578 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2579 self.oOptions = None # type: argparse.Namespace
2580 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2581 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers, giving the index of the first function.
2582 self.cErrors = 0;
2583
2584 #
2585 # Error reporting.
2586 #
2587
2588 def rawError(self, sCompleteMessage):
2589 """ Output a raw error and increment the error counter. """
2590 print(sCompleteMessage, file = sys.stderr);
2591 self.cErrors += 1;
2592 return False;
2593
2594 #
2595 # Processing.
2596 #
2597
2598 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
2599 """
2600 Process the input files.
2601 """
2602
2603 # Parse the files.
2604 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
2605
2606 # Create threaded functions for the MC blocks.
2607 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
2608
2609 # Analyze the threaded functions.
2610 dRawParamCounts = {};
2611 dMinParamCounts = {};
2612 for oThreadedFunction in self.aoThreadedFuncs:
2613 oThreadedFunction.analyzeThreadedFunction(self);
2614 for oVariation in oThreadedFunction.aoVariations:
2615 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
2616 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
2617 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
2618 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
2619 print('debug: %s params: %4s raw, %4s min'
2620 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
2621 file = sys.stderr);
2622
2623 # Do another pass over the threaded functions to settle the name suffix.
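# (Illustrative example: if one decoder function produced three MC blocks that
#  would otherwise share a sub-name, they get '_0', '_1' and '_2' appended so
#  the generated symbol names stay unique.)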
2624 iThreadedFn = 0;
2625 while iThreadedFn < len(self.aoThreadedFuncs):
2626 oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
2627 assert oFunction;
2628 iThreadedFnNext = iThreadedFn + 1;
2629 dSubNames = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
2630 while ( iThreadedFnNext < len(self.aoThreadedFuncs)
2631 and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
2632 dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
2633 iThreadedFnNext += 1;
2634 if iThreadedFnNext - iThreadedFn > len(dSubNames):
2635 iSubName = 0;
2636 while iThreadedFn + iSubName < iThreadedFnNext:
2637 self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
2638 iSubName += 1;
2639 iThreadedFn = iThreadedFnNext;
2640
2641 # Populate aidxFirstFunctions. This is ASSUMING that
2642 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
2643 iThreadedFunction = 0;
2644 oThreadedFunction = self.getThreadedFunctionByIndex(0);
2645 self.aidxFirstFunctions = [];
2646 for oParser in self.aoParsers:
2647 self.aidxFirstFunctions.append(iThreadedFunction);
2648
2649 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
2650 iThreadedFunction += 1;
2651 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2652
2653 # Analyze the threaded functions and their variations for native recompilation.
2654 if fNativeRecompilerEnabled:
2655 ian.analyzeThreadedFunctionsForNativeRecomp(self.aoThreadedFuncs, sHostArch);
2656
2657 # Gather arguments + variable statistics for the MC blocks.
2658 cMaxArgs = 0;
2659 cMaxVars = 0;
2660 cMaxVarsAndArgs = 0;
2661 cbMaxArgs = 0;
2662 cbMaxVars = 0;
2663 cbMaxVarsAndArgs = 0;
2664 for oThreadedFunction in self.aoThreadedFuncs:
2665 if oThreadedFunction.oMcBlock.cLocals >= 0:
2666 # Counts.
2667 assert oThreadedFunction.oMcBlock.cArgs >= 0;
2668 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
2669 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
2670 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
2671 if cMaxVarsAndArgs > 9:
2672 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
2673 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
2674 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
2675 # Calc stack allocation size:
2676 cbArgs = 0;
2677 for oArg in oThreadedFunction.oMcBlock.aoArgs:
2678 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
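# (Each argument is rounded up to whole 8-byte stack slots, e.g. a 32-bit
#  argument still costs 8 bytes since (32 + 63) // 64 * 8 == 8, while an
#  80-bit one costs 16 since (80 + 63) // 64 * 8 == 16.)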
2679 cbVars = 0;
2680 for oVar in oThreadedFunction.oMcBlock.aoLocals:
2681 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
2682 cbMaxVars = max(cbMaxVars, cbVars);
2683 cbMaxArgs = max(cbMaxArgs, cbArgs);
2684 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
2685 if cbMaxVarsAndArgs >= 0xc0:
2686 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
2687 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
2688
2689 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
2690 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
2691
2692 if self.cErrors > 0:
2693 print('fatal error: %u error%s during processing. Details above.'
2694 % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
2695 return False;
2696 return True;
2697
2698 #
2699 # Output
2700 #
2701
2702 def generateLicenseHeader(self):
2703 """
2704 Returns the lines for a license header.
2705 """
2706 return [
2707 '/*',
2708 ' * Autogenerated by $Id: IEMAllThrdPython.py 103922 2024-03-19 16:10:02Z vboxsync $ ',
2709 ' * Do not edit!',
2710 ' */',
2711 '',
2712 '/*',
2713 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2714 ' *',
2715 ' * This file is part of VirtualBox base platform packages, as',
2716 ' * available from https://www.virtualbox.org.',
2717 ' *',
2718 ' * This program is free software; you can redistribute it and/or',
2719 ' * modify it under the terms of the GNU General Public License',
2720 ' * as published by the Free Software Foundation, in version 3 of the',
2721 ' * License.',
2722 ' *',
2723 ' * This program is distributed in the hope that it will be useful, but',
2724 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2725 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2726 ' * General Public License for more details.',
2727 ' *',
2728 ' * You should have received a copy of the GNU General Public License',
2729 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2730 ' *',
2731 ' * The contents of this file may alternatively be used under the terms',
2732 ' * of the Common Development and Distribution License Version 1.0',
2733 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2734 ' * in the VirtualBox distribution, in which case the provisions of the',
2735 ' * CDDL are applicable instead of those of the GPL.',
2736 ' *',
2737 ' * You may elect to license modified versions of this file under the',
2738 ' * terms and conditions of either the GPL or the CDDL or both.',
2739 ' *',
2740 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2741 ' */',
2742 '',
2743 '',
2744 '',
2745 ];
2746
2747 ## List of built-in threaded functions with user argument counts and
2748 ## whether each has a native recompiler implementation.
2749 katBltIns = (
2750 ( 'Nop', 0, True ),
2751 ( 'LogCpuState', 0, True ),
2752
2753 ( 'DeferToCImpl0', 2, True ),
2754 ( 'CheckIrq', 0, True ),
2755 ( 'CheckMode', 1, True ),
2756 ( 'CheckHwInstrBps', 0, False ),
2757 ( 'CheckCsLim', 1, True ),
2758
2759 ( 'CheckCsLimAndOpcodes', 3, True ),
2760 ( 'CheckOpcodes', 3, True ),
2761 ( 'CheckOpcodesConsiderCsLim', 3, True ),
2762
2763 ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
2764 ( 'CheckPcAndOpcodes', 3, True ),
2765 ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),
2766
2767 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
2768 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
2769 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),
2770
2771 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
2772 ( 'CheckOpcodesLoadingTlb', 3, True ),
2773 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),
2774
2775 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
2776 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
2777 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),
2778
2779 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
2780 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
2781 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
2782 );
2783
2784 def generateThreadedFunctionsHeader(self, oOut, _):
2785 """
2786 Generates the threaded functions header file.
2787 Returns success indicator.
2788 """
2789
2790 asLines = self.generateLicenseHeader();
2791
2792 # Generate the threaded function table indexes.
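# The generated enum ends up looking roughly like this (sketch):
#   typedef enum IEMTHREADEDFUNCS
#   {
#       kIemThreadedFunc_Invalid = 0,
#       kIemThreadedFunc_BltIn_Nop,
#       ...
#       kIemThreadedFunc_End
#   } IEMTHREADEDFUNCS;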
2793 asLines += [
2794 'typedef enum IEMTHREADEDFUNCS',
2795 '{',
2796 ' kIemThreadedFunc_Invalid = 0,',
2797 '',
2798 ' /*',
2799 ' * Predefined',
2800 ' */',
2801 ];
2802 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2803
2804 iThreadedFunction = 1 + len(self.katBltIns);
2805 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2806 asLines += [
2807 '',
2808 ' /*',
2809 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2810 ' */',
2811 ];
2812 for oThreadedFunction in self.aoThreadedFuncs:
2813 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2814 if oVariation:
2815 iThreadedFunction += 1;
2816 oVariation.iEnumValue = iThreadedFunction;
2817 asLines.append(' ' + oVariation.getIndexName() + ',');
2818 asLines += [
2819 ' kIemThreadedFunc_End',
2820 '} IEMTHREADEDFUNCS;',
2821 '',
2822 ];
2823
2824 # Prototype the function table.
2825 asLines += [
2826 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2827 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2828 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2829 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2830 '#endif',
2831 '#if defined(IN_RING3)',
2832 'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
2833 '#endif',
2834 ];
2835
2836 oOut.write('\n'.join(asLines));
2837 return True;
2838
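## Masks used when unpacking a parameter of the given bit width out of a
## packed 64-bit threaded function parameter (see generateFunctionParameterUnpacking).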
2839 ksBitsToIntMask = {
2840 1: "UINT64_C(0x1)",
2841 2: "UINT64_C(0x3)",
2842 4: "UINT64_C(0xf)",
2843 8: "UINT64_C(0xff)",
2844 16: "UINT64_C(0xffff)",
2845 32: "UINT64_C(0xffffffff)",
2846 };
2847
2848 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2849 """
2850 Outputs code for unpacking parameters.
2851 This is shared by the threaded and native code generators.
2852 """
2853 aasVars = [];
2854 for aoRefs in oVariation.dParamRefs.values():
2855 oRef = aoRefs[0];
2856 if oRef.sType[0] != 'P':
2857 cBits = g_kdTypeInfo[oRef.sType][0];
2858 sType = g_kdTypeInfo[oRef.sType][2];
2859 else:
2860 cBits = 64;
2861 sType = oRef.sType;
2862
2863 sTypeDecl = sType + ' const';
2864
2865 if cBits == 64:
2866 assert oRef.offNewParam == 0;
2867 if sType == 'uint64_t':
2868 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2869 else:
2870 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2871 elif oRef.offNewParam == 0:
2872 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2873 else:
2874 sUnpack = '(%s)((%s >> %s) & %s);' \
2875 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2876
2877 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2878
2879 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2880 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2881 acchVars = [0, 0, 0, 0, 0];
2882 for asVar in aasVars:
2883 for iCol, sStr in enumerate(asVar):
2884 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2885 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
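# A rendered row looks roughly like this (sketch only, the names are made up):
#     uint8_t const bRm = (uint8_t)((uParam0 >> 8) & UINT64_C(0xff)); /* bRm - 1 ref */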
2886 for asVar in sorted(aasVars):
2887 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2888 return True;
2889
2890 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2891 def generateThreadedFunctionsSource(self, oOut, _):
2892 """
2893 Generates the threaded functions source file.
2894 Returns success indicator.
2895 """
2896
2897 asLines = self.generateLicenseHeader();
2898 oOut.write('\n'.join(asLines));
2899
2900 #
2901 # Emit the function definitions.
2902 #
2903 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2904 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2905 oOut.write( '\n'
2906 + '\n'
2907 + '\n'
2908 + '\n'
2909 + '/*' + '*' * 128 + '\n'
2910 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2911 + '*' * 128 + '*/\n');
2912
2913 for oThreadedFunction in self.aoThreadedFuncs:
2914 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2915 if oVariation:
2916 oMcBlock = oThreadedFunction.oMcBlock;
2917
2918 # Function header
2919 oOut.write( '\n'
2920 + '\n'
2921 + '/**\n'
2922 + ' * #%u: %s at line %s offset %s in %s%s\n'
2923 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2924 os.path.split(oMcBlock.sSrcFile)[1],
2925 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2926 + ' */\n'
2927 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2928 + '{\n');
2929
2930 # Unpack parameters.
2931 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2932
2933 # RT_NOREF for unused parameters.
2934 if oVariation.cMinParams < g_kcThreadedParams:
2935 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
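# (E.g. a variation using only uParam0 gets 'RT_NOREF(uParam1, uParam2);' emitted here.)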
2936
2937 # Now for the actual statements.
2938 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2939
2940 oOut.write('}\n');
2941
2942
2943 #
2944 # Generate the output tables in parallel.
2945 #
2946 asFuncTable = [
2947 '/**',
2948 ' * Function pointer table.',
2949 ' */',
2950 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2951 '{',
2952 ' /*Invalid*/ NULL,',
2953 ];
2954 asArgCntTab = [
2955 '/**',
2956 ' * Argument count table.',
2957 ' */',
2958 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2959 '{',
2960 ' 0, /*Invalid*/',
2961 ];
2962 asNameTable = [
2963 '/**',
2964 ' * Function name table.',
2965 ' */',
2966 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2967 '{',
2968 ' "Invalid",',
2969 ];
2970 asStatTable = [
2971 '/**',
2972 ' * Function statistics name table.',
2973 ' */',
2974 'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
2975 '{',
2976 ' NULL,',
2977 ];
2978 aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);
2979
2980 for asTable in aasTables:
2981 asTable.extend((
2982 '',
2983 ' /*',
2984 ' * Predefined.',
2985 ' */',
2986 ));
2987 for sFuncNm, cArgs, _ in self.katBltIns:
2988 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2989 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2990 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2991 asStatTable.append(' "BltIn/%s",' % (sFuncNm,));
2992
2993 iThreadedFunction = 1 + len(self.katBltIns);
2994 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2995 for asTable in aasTables:
2996 asTable.extend((
2997 '',
2998 ' /*',
2999 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
3000 ' */',
3001 ));
3002 for oThreadedFunction in self.aoThreadedFuncs:
3003 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
3004 if oVariation:
3005 iThreadedFunction += 1;
3006 assert oVariation.iEnumValue == iThreadedFunction;
3007 sName = oVariation.getThreadedFunctionName();
3008 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
3009 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
3010 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
3011 asStatTable.append(' "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));
3012
3013 for asTable in aasTables:
3014 asTable.append('};');
3015
3016 #
3017 # Output the tables.
3018 #
3019 oOut.write( '\n'
3020 + '\n');
3021 oOut.write('\n'.join(asFuncTable));
3022 oOut.write( '\n'
3023 + '\n'
3024 + '\n');
3025 oOut.write('\n'.join(asArgCntTab));
3026 oOut.write( '\n'
3027 + '\n'
3028 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
3029 oOut.write('\n'.join(asNameTable));
3030 oOut.write( '\n'
3031 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
3032 + '\n'
3033 + '\n'
3034 + '#if defined(IN_RING3)\n');
3035 oOut.write('\n'.join(asStatTable));
3036 oOut.write( '\n'
3037 + '#endif /* IN_RING3 */\n');
3038
3039 return True;
3040
3041 def generateNativeFunctionsHeader(self, oOut, _):
3042 """
3043 Generates the native recompiler functions header file.
3044 Returns success indicator.
3045 """
3046 if not self.oOptions.fNativeRecompilerEnabled:
3047 return True;
3048
3049 asLines = self.generateLicenseHeader();
3050
3051 # Prototype the function table.
3052 asLines += [
3053 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
3054 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
3055 '',
3056 ];
3057
3058 # Emit indicators as to which of the builtin functions have a native
3059 # recompiler function and which do not. (We only really need this for
3060 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
3061 for atBltIn in self.katBltIns:
3062 if atBltIn[2]:
3063 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
3064 else:
3065 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
3066
3067 # Emit prototypes for the builtin functions we use in tables.
3068 asLines += [
3069 '',
3070 '/* Prototypes for built-in functions used in the above tables. */',
3071 ];
3072 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3073 if fHaveRecompFunc:
3074 asLines += [
3075 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3076 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3077 ];
3078
3079 # Emit prototypes for table function.
3080 asLines += [
3081 '',
3082 '#ifdef IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES'
3083 ]
3084 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3085 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3086 asLines += [
3087 '',
3088 '/* Variation: ' + sVarName + ' */',
3089 ];
3090 for oThreadedFunction in self.aoThreadedFuncs:
3091 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3092 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3093 asLines.append('IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(' + oVariation.getNativeFunctionName() + ');');
3094 asLines += [
3095 '',
3096 '#endif /* IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES */',
3097 ]
3098
3099 oOut.write('\n'.join(asLines));
3100 return True;
3101
3102 def generateNativeFunctionsSource(self, oOut, idxPart):
3103 """
3104 Generates the native recompiler functions source file.
3105 Returns success indicator.
3106 """
3107 cParts = 4;
3108 assert(idxPart in range(cParts));
3109 if not self.oOptions.fNativeRecompilerEnabled:
3110 return True;
3111
3112 #
3113 # The file header.
3114 #
3115 oOut.write('\n'.join(self.generateLicenseHeader()));
3116
3117 #
3118 # Emit the functions.
3119 #
3120 # The files are split up by threaded variation as that's the simplest way to
3121 # do it, even if the distribution isn't entirely even (ksVariation_Default
3122 # only has the defer to cimpl bits and the pre-386 variants will naturally
3123 # have fewer instructions).
3124 #
3125 cVariationsPerFile = len(ThreadedFunctionVariation.kasVariationsEmitOrder) // cParts;
3126 idxFirstVar = idxPart * cVariationsPerFile;
3127 idxEndVar = idxFirstVar + cVariationsPerFile;
3128 if idxPart + 1 >= cParts:
3129 idxEndVar = len(ThreadedFunctionVariation.kasVariationsEmitOrder);
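# (E.g. with 4 parts and 13 variations, the first three files get 3 variations
#  each and the last file gets the remaining 4.)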
3130 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder[idxFirstVar:idxEndVar]:
3131 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3132 oOut.write( '\n'
3133 + '\n'
3134 + '\n'
3135 + '\n'
3136 + '/*' + '*' * 128 + '\n'
3137 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
3138 + '*' * 128 + '*/\n');
3139
3140 for oThreadedFunction in self.aoThreadedFuncs:
3141 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3142 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3143 oMcBlock = oThreadedFunction.oMcBlock;
3144
3145 # Function header
3146 oOut.write( '\n'
3147 + '\n'
3148 + '/**\n'
3149 + ' * #%u: %s at line %s offset %s in %s%s\n'
3150 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
3151 os.path.split(oMcBlock.sSrcFile)[1],
3152 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
3153 + ' */\n'
3154 + 'IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
3155 + '{\n');
3156
3157 # Unpack parameters.
3158 self.generateFunctionParameterUnpacking(oVariation, oOut,
3159 ('pCallEntry->auParams[0]',
3160 'pCallEntry->auParams[1]',
3161 'pCallEntry->auParams[2]',));
3162
3163 # Now for the actual statements.
3164 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
3165
3166 oOut.write('}\n');
3167
3168 #
3169 # Output the function table if this is the first file.
3170 #
3171 if idxPart == 0:
3172 oOut.write( '\n'
3173 + '\n'
3174 + '/*\n'
3175 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
3176 + ' */\n'
3177 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
3178 + '{\n'
3179 + ' /*Invalid*/ NULL,'
3180 + '\n'
3181 + ' /*\n'
3182 + ' * Predefined.\n'
3183 + ' */\n'
3184 );
3185 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3186 if fHaveRecompFunc:
3187 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
3188 else:
3189 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
3190
3191 iThreadedFunction = 1 + len(self.katBltIns);
3192 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3193 oOut.write( ' /*\n'
3194 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
3195 + ' */\n');
3196 for oThreadedFunction in self.aoThreadedFuncs:
3197 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
3198 if oVariation:
3199 iThreadedFunction += 1;
3200 assert oVariation.iEnumValue == iThreadedFunction;
3201 sName = oVariation.getNativeFunctionName();
3202 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3203 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
3204 else:
3205 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
3206
3207 oOut.write( '};\n');
3208
3209 oOut.write('\n');
3210 return True;
3211
3212 def generateNativeLivenessSource(self, oOut, _):
3213 """
3214 Generates the native recompiler liveness analysis functions source file.
3215 Returns success indicator.
3216 """
3217 if not self.oOptions.fNativeRecompilerEnabled:
3218 return True;
3219
3220 #
3221 # The file header.
3222 #
3223 oOut.write('\n'.join(self.generateLicenseHeader()));
3224
3225 #
3226 # Emit the functions.
3227 #
3228 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3229 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3230 oOut.write( '\n'
3231 + '\n'
3232 + '\n'
3233 + '\n'
3234 + '/*' + '*' * 128 + '\n'
3235 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
3236 + '*' * 128 + '*/\n');
3237
3238 for oThreadedFunction in self.aoThreadedFuncs:
3239 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3240 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3241 oMcBlock = oThreadedFunction.oMcBlock;
3242
3243 # Function header
3244 oOut.write( '\n'
3245 + '\n'
3246 + '/**\n'
3247 + ' * #%u: %s at line %s offset %s in %s%s\n'
3248 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
3249 os.path.split(oMcBlock.sSrcFile)[1],
3250 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
3251 + ' */\n'
3252 + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
3253 + '{\n');
3254
3255 # Unpack parameters.
3256 self.generateFunctionParameterUnpacking(oVariation, oOut,
3257 ('pCallEntry->auParams[0]',
3258 'pCallEntry->auParams[1]',
3259 'pCallEntry->auParams[2]',));
3260 asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
3261 for aoRefs in oVariation.dParamRefs.values():
3262 asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
3263 oOut.write(' %s\n' % (' '.join(asNoRefs),));
3264
3265 # Now for the actual statements.
3266 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
3267
3268 oOut.write('}\n');
3269
3270 #
3271 # Output the function table.
3272 #
3273 oOut.write( '\n'
3274 + '\n'
3275 + '/*\n'
3276 + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
3277 + ' */\n'
3278 + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
3279 + '{\n'
3280 + ' /*Invalid*/ NULL,'
3281 + '\n'
3282 + ' /*\n'
3283 + ' * Predefined.\n'
3284 + ' */\n'
3285 );
3286 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3287 if fHaveRecompFunc:
3288 oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
3289 else:
3290 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
3291
3292 iThreadedFunction = 1 + len(self.katBltIns);
3293 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3294 oOut.write( ' /*\n'
3295 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
3296 + ' */\n');
3297 for oThreadedFunction in self.aoThreadedFuncs:
3298 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
3299 if oVariation:
3300 iThreadedFunction += 1;
3301 assert oVariation.iEnumValue == iThreadedFunction;
3302 sName = oVariation.getLivenessFunctionName();
3303 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3304 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
3305 else:
3306 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
3307
3308 oOut.write( '};\n'
3309 + '\n');
3310 return True;
3311
3312
3313 def getThreadedFunctionByIndex(self, idx):
3314 """
3315 Returns a ThreadedFunction object for the given index. If the index is
3316 out of bounds, a dummy is returned.
3317 """
3318 if idx < len(self.aoThreadedFuncs):
3319 return self.aoThreadedFuncs[idx];
3320 return ThreadedFunction.dummyInstance();
3321
3322 def generateModifiedInput(self, oOut, idxFile):
3323 """
3324 Generates the combined modified input source/header file.
3325 Returns success indicator.
3326 """
3327 #
3328 # File header and assert assumptions.
3329 #
3330 oOut.write('\n'.join(self.generateLicenseHeader()));
3331 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
3332
3333 #
3334 # Iterate all parsers (input files) and output the ones related to the
3335 # file set given by idxFile.
3336 #
3337 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
3338 # Is this included in the file set?
3339 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
3340 fInclude = -1;
3341 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
3342 if sSrcBaseFile == aoInfo[0].lower():
3343 fInclude = aoInfo[2] in (-1, idxFile);
3344 break;
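# (An input file missing from the table leaves fInclude at -1 and trips the
#  assertion below; aoInfo[2] == -1 marks files that go into every output part.)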
3345 if fInclude is not True:
3346 assert fInclude is False;
3347 continue;
3348
3349 # Output it.
3350 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
3351
3352 iThreadedFunction = self.aidxFirstFunctions[idxParser];
3353 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
3354 iLine = 0;
3355 while iLine < len(oParser.asLines):
3356 sLine = oParser.asLines[iLine];
3357 iLine += 1; # iBeginLine and iEndLine are 1-based.
3358
3359 # Can we pass it thru?
3360 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
3361 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
3362 oOut.write(sLine);
3363 #
3364 # Single MC block. Just extract it and insert the replacement.
3365 #
3366 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
3367 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
3368 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
3369 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
3370 sModified = oThreadedFunction.generateInputCode().strip();
3371 oOut.write(sModified);
3372
3373 iLine = oThreadedFunction.oMcBlock.iEndLine;
3374 sLine = oParser.asLines[iLine - 1];
3375 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
3376 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
3377 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
3378 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
3379
3380 # Advance
3381 iThreadedFunction += 1;
3382 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
3383 #
3384 # Macro expansion line that has sublines and may contain multiple MC blocks.
3385 #
3386 else:
3387 offLine = 0;
3388 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
3389 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
3390
3391 sModified = oThreadedFunction.generateInputCode().strip();
3392 assert ( sModified.startswith('IEM_MC_BEGIN')
3393 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
3394 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
3395 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
3396 ), 'sModified="%s"' % (sModified,);
3397 oOut.write(sModified);
3398
3399 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
3400
3401 # Advance
3402 iThreadedFunction += 1;
3403 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
3404
3405 # Last line segment.
3406 if offLine < len(sLine):
3407 oOut.write(sLine[offLine : ]);
3408
3409 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
3410
3411 return True;
3412
3413
3414 #
3415 # Main
3416 #
3417
3418 def main(self, asArgs):
3419 """
3420 C-like main function.
3421 Returns exit code.
3422 """
3423
3424 #
3425 # Parse arguments
3426 #
3427 sScriptDir = os.path.dirname(__file__);
3428 oParser = argparse.ArgumentParser(add_help = False);
3429 oParser.add_argument('asInFiles',
3430 metavar = 'input.cpp.h',
3431 nargs = '*',
3432 default = [os.path.join(sScriptDir, aoInfo[0])
3433 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3434 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3435 oParser.add_argument('--host-arch',
3436 metavar = 'arch',
3437 dest = 'sHostArch',
3438 action = 'store',
3439 default = None,
3440 help = 'The host architecture.');
3441
3442 oParser.add_argument('--out-thrd-funcs-hdr',
3443 metavar = 'file-thrd-funcs.h',
3444 dest = 'sOutFileThrdFuncsHdr',
3445 action = 'store',
3446 default = '-',
3447 help = 'The output header file for the threaded functions.');
3448 oParser.add_argument('--out-thrd-funcs-cpp',
3449 metavar = 'file-thrd-funcs.cpp',
3450 dest = 'sOutFileThrdFuncsCpp',
3451 action = 'store',
3452 default = '-',
3453 help = 'The output C++ file for the threaded functions.');
3454 oParser.add_argument('--out-n8ve-funcs-hdr',
3455 metavar = 'file-n8tv-funcs.h',
3456 dest = 'sOutFileN8veFuncsHdr',
3457 action = 'store',
3458 default = '-',
3459 help = 'The output header file for the native recompiler functions.');
3460 oParser.add_argument('--out-n8ve-funcs-cpp1',
3461 metavar = 'file-n8tv-funcs1.cpp',
3462 dest = 'sOutFileN8veFuncsCpp1',
3463 action = 'store',
3464 default = '-',
3465 help = 'The output C++ file for the native recompiler functions part 1.');
3466 oParser.add_argument('--out-n8ve-funcs-cpp2',
3467 metavar = 'file-n8ve-funcs2.cpp',
3468 dest = 'sOutFileN8veFuncsCpp2',
3469 action = 'store',
3470 default = '-',
3471 help = 'The output C++ file for the native recompiler functions part 2.');
3472 oParser.add_argument('--out-n8ve-funcs-cpp3',
3473 metavar = 'file-n8ve-funcs3.cpp',
3474 dest = 'sOutFileN8veFuncsCpp3',
3475 action = 'store',
3476 default = '-',
3477 help = 'The output C++ file for the native recompiler functions part 3.');
3478 oParser.add_argument('--out-n8ve-funcs-cpp4',
3479 metavar = 'file-n8ve-funcs4.cpp',
3480 dest = 'sOutFileN8veFuncsCpp4',
3481 action = 'store',
3482 default = '-',
3483 help = 'The output C++ file for the native recompiler functions part 4.');
3484 oParser.add_argument('--out-n8ve-liveness-cpp',
3485 metavar = 'file-n8ve-liveness.cpp',
3486 dest = 'sOutFileN8veLivenessCpp',
3487 action = 'store',
3488 default = '-',
3489 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3490 oParser.add_argument('--native',
3491 dest = 'fNativeRecompilerEnabled',
3492 action = 'store_true',
3493 default = False,
3494 help = 'Enables generating the files related to native recompilation.');
3495 oParser.add_argument('--out-mod-input1',
3496 metavar = 'file-instr.cpp.h',
3497 dest = 'sOutFileModInput1',
3498 action = 'store',
3499 default = '-',
3500 help = 'The output C++/header file for modified input instruction files part 1.');
3501 oParser.add_argument('--out-mod-input2',
3502 metavar = 'file-instr.cpp.h',
3503 dest = 'sOutFileModInput2',
3504 action = 'store',
3505 default = '-',
3506 help = 'The output C++/header file for modified input instruction files part 2.');
3507 oParser.add_argument('--out-mod-input3',
3508 metavar = 'file-instr.cpp.h',
3509 dest = 'sOutFileModInput3',
3510 action = 'store',
3511 default = '-',
3512 help = 'The output C++/header file for modified input instruction files part 3.');
3513 oParser.add_argument('--out-mod-input4',
3514 metavar = 'file-instr.cpp.h',
3515 dest = 'sOutFileModInput4',
3516 action = 'store',
3517 default = '-',
3518 help = 'The output C++/header file for modified input instruction files part 4.');
3519 oParser.add_argument('--help', '-h', '-?',
3520 action = 'help',
3521 help = 'Display help and exit.');
3522 oParser.add_argument('--version', '-V',
3523 action = 'version',
3524 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3525 % (__version__.split()[1], iai.__version__.split()[1],),
3526 help = 'Displays the version/revision of the script and exit.');
3527 self.oOptions = oParser.parse_args(asArgs[1:]);
3528 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3529
3530 if self.oOptions.sHostArch not in ('amd64', 'arm64'):
3531 print('error! Unsupported (or missing) host architecture: %s' % (self.oOptions.sHostArch,), file = sys.stderr);
3532 return 1;
3533
3534 #
3535 # Process the instructions specified in the IEM sources.
3536 #
3537 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3538 #
3539 # Generate the output files.
3540 #
3541 aaoOutputFiles = (
3542 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader, 0, ),
3543 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource, 0, ),
3544 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader, 0, ),
3545 ( self.oOptions.sOutFileN8veFuncsCpp1, self.generateNativeFunctionsSource, 0, ),
3546 ( self.oOptions.sOutFileN8veFuncsCpp2, self.generateNativeFunctionsSource, 1, ),
3547 ( self.oOptions.sOutFileN8veFuncsCpp3, self.generateNativeFunctionsSource, 2, ),
3548 ( self.oOptions.sOutFileN8veFuncsCpp4, self.generateNativeFunctionsSource, 3, ),
3549 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource, 0, ),
3550 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput, 1, ),
3551 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput, 2, ),
3552 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput, 3, ),
3553 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput, 4, ),
3554 );
3555 fRc = True;
3556 for sOutFile, fnGenMethod, iPartNo in aaoOutputFiles:
3557 if sOutFile == '-':
3558 fRc = fnGenMethod(sys.stdout, iPartNo) and fRc;
3559 else:
3560 try:
3561 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3562 except Exception as oXcpt:
3563 print('error! Failed to open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3564 return 1;
3565 fRc = fnGenMethod(oOut, iPartNo) and fRc;
3566 oOut.close();
3567 if fRc:
3568 return 0;
3569
3570 return 1;
3571
3572
3573if __name__ == '__main__':
3574 sys.exit(IEMThreadedGenerator().main(sys.argv));
3575