VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 104329

Last change on this file since 104329 was 104195, checked in by vboxsync, 10 months ago

VMM/IEM: Refactoring assembly helpers to not pass eflags by reference but instead by value and return the updated value (via eax/w0) - first chunk: ADD,ADC,SUB,SBB,CMP,TEST,AND,OR,XOR. bugref:10376

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 187.4 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 104195 2024-04-05 14:45:23Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 104195 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;

## Basic type name -> (cBits, fSigned, C-type) info for threaded function
## parameter types.
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type )
    'int8_t':       (    8,    True, 'int8_t',    ),
    'int16_t':      (   16,    True, 'int16_t',   ),
    'int32_t':      (   32,    True, 'int32_t',   ),
    'int64_t':      (   64,    True, 'int64_t',   ),
    'uint4_t':      (    4,   False, 'uint8_t',   ),
    'uint8_t':      (    8,   False, 'uint8_t',   ),
    'uint16_t':     (   16,   False, 'uint16_t',  ),
    'uint32_t':     (   32,   False, 'uint32_t',  ),
    'uint64_t':     (   64,   False, 'uint64_t',  ),
    'uintptr_t':    (   64,   False, 'uintptr_t', ), # ASSUMES 64-bit host pointer size.
    'bool':         (    1,   False, 'bool',      ),
    'IEMMODE':      (    2,   False, 'IEMMODE',   ),
};

# Extended variant of g_kdTypeInfo - only for getTypeBitCount/variables.
g_kdTypeInfo2 = {
    'RTFLOAT32U':       (       32, False, 'RTFLOAT32U',       ),
    'RTFLOAT64U':       (       64, False, 'RTFLOAT64U',       ),
    'RTUINT64U':        (       64, False, 'RTUINT64U',        ),
    'RTGCPTR':          (       64, False, 'RTGCPTR',          ),
    'RTPBCD80U':        (       80, False, 'RTPBCD80U',        ),
    'RTFLOAT80U':       (       80, False, 'RTFLOAT80U',       ),
    'IEMFPURESULT':     (    80+16, False, 'IEMFPURESULT',     ),
    'IEMFPURESULTTWO':  ( 80+16+80, False, 'IEMFPURESULTTWO',  ),
    'RTUINT128U':       (      128, False, 'RTUINT128U',       ),
    'X86XMMREG':        (      128, False, 'X86XMMREG',        ),
    'IEMMEDIAF2XMMSRC': (      256, False, 'IEMMEDIAF2XMMSRC', ),
    'RTUINT256U':       (      256, False, 'RTUINT256U',       ),
    'IEMPCMPISTRXSRC':  (      256, False, 'IEMPCMPISTRXSRC',  ),
    'IEMPCMPESTRXSRC':  (      384, False, 'IEMPCMPESTRXSRC',  ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);

def getTypeBitCount(sType):
    """
    Translates a type name to its size in bits.

    Pointer-looking types ('*' in the name or a 'P' prefix) are assumed to be
    64 bits wide (64-bit host).  Unknown types are reported as an error and
    also assumed to be 64 bits wide.
    """
    if sType in g_kdTypeInfo2:
        return g_kdTypeInfo2[sType][0];
    if '*' in sType or sType[0] == 'P':
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    # Diagnostics go to stderr so a redirected stdout (generated code) isn't polluted.
    print('error: Unknown type: %s' % (sType,), file = sys.stderr);
    return 64;
102
## Maps 'pVCpu->iem.s.' field names to their parameter type.
## Entries mapped to (None,) are marked illegal and have no usable type.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte': ( None, ),
    'cbInstrBuf': ( None, ),
    'pbInstrBuf': ( None, ),
    'uInstrBufPc': ( None, ),
    'cbInstrBufTotal': ( None, ),
    'offCurInstrStart': ( None, ),
    'cbOpcode': ( None, ),
    'offOpcode': ( None, ),
    'offModRm': ( None, ),
    # Okay ones.
    'fPrefixes': ( 'uint32_t', ),
    'uRexReg': ( 'uint8_t', ),
    'uRexB': ( 'uint8_t', ),
    'uRexIndex': ( 'uint8_t', ),
    'iEffSeg': ( 'uint8_t', ),
    'enmEffOpSize': ( 'IEMMODE', ),
    'enmDefAddrMode': ( 'IEMMODE', ),
    'enmEffAddrMode': ( 'IEMMODE', ),
    'enmDefOpSize': ( 'IEMMODE', ),
    'idxPrefix': ( 'uint8_t', ),
    'uVex3rdReg': ( 'uint8_t', ),
    'uVexLength': ( 'uint8_t', ),
    'fEvexStuff': ( 'uint8_t', ),
    'uFpuOpcode': ( 'uint16_t', ),
};

## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
## @{
g_ksFinishAnnotation_Advance      = 'Advance';
g_ksFinishAnnotation_RelJmp       = 'RelJmp';
g_ksFinishAnnotation_SetJmp       = 'SetJmp';
g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
## @}
138
139
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.

    Records where a value is referenced in the original decoder code and, once
    parameter packing has been done, where it lives in the threaded function
    (sNewName / iNewParam / offNewParam).
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef     = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        self.sStdRef     = sStdRef if sStdRef else ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef  = sStdRef is not None;
        ## The type (typically derived).
        self.sType       = sType;
        ## The statement making the reference.
        self.oStmt       = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam      = iParam;
        ## The offset in the parameter of the reference.
        self.offParam    = offParam;

        ## The variable name in the threaded function.
        self.sNewName    = 'x';
        ## The threaded function parameter this reference is packed into.
        self.iNewParam   = 99;
        ## The bit offset in iNewParam.
        self.offNewParam = 1024;    # Consistency fix: added the file-style ';' terminator.
167
168
169class ThreadedFunctionVariation(object):
170 """ Threaded function variation. """
171
172 ## @name Variations.
173 ## These variations will match translation block selection/distinctions as well.
174 ## @{
175 # pylint: disable=line-too-long
176 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
177 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
178 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
179 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
180 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
181 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
182 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
183 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
184 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
185 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
186 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
187 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
188 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
189 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
190 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
191 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
192 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
193 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
194 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
195 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
196 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_32_Flat     = '_32_Flat';       ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
    ksVariation_32f_Flat    = '_32f_Flat';      ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
199 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
200 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
201 ksVariation_64 = '_64'; ##< 64-bit mode code.
202 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
203 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
204 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
205 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
206 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
207 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
208 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
209 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
210 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
211 # pylint: enable=line-too-long
212 kasVariations = (
213 ksVariation_Default,
214 ksVariation_16,
215 ksVariation_16f,
216 ksVariation_16_Jmp,
217 ksVariation_16f_Jmp,
218 ksVariation_16_NoJmp,
219 ksVariation_16f_NoJmp,
220 ksVariation_16_Addr32,
221 ksVariation_16f_Addr32,
222 ksVariation_16_Pre386,
223 ksVariation_16f_Pre386,
224 ksVariation_16_Pre386_Jmp,
225 ksVariation_16f_Pre386_Jmp,
226 ksVariation_16_Pre386_NoJmp,
227 ksVariation_16f_Pre386_NoJmp,
228 ksVariation_32,
229 ksVariation_32f,
230 ksVariation_32_Jmp,
231 ksVariation_32f_Jmp,
232 ksVariation_32_NoJmp,
233 ksVariation_32f_NoJmp,
234 ksVariation_32_Flat,
235 ksVariation_32f_Flat,
236 ksVariation_32_Addr16,
237 ksVariation_32f_Addr16,
238 ksVariation_64,
239 ksVariation_64f,
240 ksVariation_64_Jmp,
241 ksVariation_64f_Jmp,
242 ksVariation_64_NoJmp,
243 ksVariation_64f_NoJmp,
244 ksVariation_64_FsGs,
245 ksVariation_64f_FsGs,
246 ksVariation_64_Addr32,
247 ksVariation_64f_Addr32,
248 );
249 kasVariationsWithoutAddress = (
250 ksVariation_16,
251 ksVariation_16f,
252 ksVariation_16_Pre386,
253 ksVariation_16f_Pre386,
254 ksVariation_32,
255 ksVariation_32f,
256 ksVariation_64,
257 ksVariation_64f,
258 );
259 kasVariationsWithoutAddressNot286 = (
260 ksVariation_16,
261 ksVariation_16f,
262 ksVariation_32,
263 ksVariation_32f,
264 ksVariation_64,
265 ksVariation_64f,
266 );
267 kasVariationsWithoutAddressNot286Not64 = (
268 ksVariation_16,
269 ksVariation_16f,
270 ksVariation_32,
271 ksVariation_32f,
272 );
273 kasVariationsWithoutAddressNot64 = (
274 ksVariation_16,
275 ksVariation_16f,
276 ksVariation_16_Pre386,
277 ksVariation_16f_Pre386,
278 ksVariation_32,
279 ksVariation_32f,
280 );
281 kasVariationsWithoutAddressOnly64 = (
282 ksVariation_64,
283 ksVariation_64f,
284 );
285 kasVariationsWithAddress = (
286 ksVariation_16,
287 ksVariation_16f,
288 ksVariation_16_Addr32,
289 ksVariation_16f_Addr32,
290 ksVariation_16_Pre386,
291 ksVariation_16f_Pre386,
292 ksVariation_32,
293 ksVariation_32f,
294 ksVariation_32_Flat,
295 ksVariation_32f_Flat,
296 ksVariation_32_Addr16,
297 ksVariation_32f_Addr16,
298 ksVariation_64,
299 ksVariation_64f,
300 ksVariation_64_FsGs,
301 ksVariation_64f_FsGs,
302 ksVariation_64_Addr32,
303 ksVariation_64f_Addr32,
304 );
305 kasVariationsWithAddressNot286 = (
306 ksVariation_16,
307 ksVariation_16f,
308 ksVariation_16_Addr32,
309 ksVariation_16f_Addr32,
310 ksVariation_32,
311 ksVariation_32f,
312 ksVariation_32_Flat,
313 ksVariation_32f_Flat,
314 ksVariation_32_Addr16,
315 ksVariation_32f_Addr16,
316 ksVariation_64,
317 ksVariation_64f,
318 ksVariation_64_FsGs,
319 ksVariation_64f_FsGs,
320 ksVariation_64_Addr32,
321 ksVariation_64f_Addr32,
322 );
323 kasVariationsWithAddressNot286Not64 = (
324 ksVariation_16,
325 ksVariation_16f,
326 ksVariation_16_Addr32,
327 ksVariation_16f_Addr32,
328 ksVariation_32,
329 ksVariation_32f,
330 ksVariation_32_Flat,
331 ksVariation_32f_Flat,
332 ksVariation_32_Addr16,
333 ksVariation_32f_Addr16,
334 );
335 kasVariationsWithAddressNot64 = (
336 ksVariation_16,
337 ksVariation_16f,
338 ksVariation_16_Addr32,
339 ksVariation_16f_Addr32,
340 ksVariation_16_Pre386,
341 ksVariation_16f_Pre386,
342 ksVariation_32,
343 ksVariation_32f,
344 ksVariation_32_Flat,
345 ksVariation_32f_Flat,
346 ksVariation_32_Addr16,
347 ksVariation_32f_Addr16,
348 );
349 kasVariationsWithAddressOnly64 = (
350 ksVariation_64,
351 ksVariation_64f,
352 ksVariation_64_FsGs,
353 ksVariation_64f_FsGs,
354 ksVariation_64_Addr32,
355 ksVariation_64f_Addr32,
356 );
357 kasVariationsOnlyPre386 = (
358 ksVariation_16_Pre386,
359 ksVariation_16f_Pre386,
360 );
361 kasVariationsEmitOrder = (
362 ksVariation_Default,
363 ksVariation_64,
364 ksVariation_64f,
365 ksVariation_64_Jmp,
366 ksVariation_64f_Jmp,
367 ksVariation_64_NoJmp,
368 ksVariation_64f_NoJmp,
369 ksVariation_64_FsGs,
370 ksVariation_64f_FsGs,
371 ksVariation_32_Flat,
372 ksVariation_32f_Flat,
373 ksVariation_32,
374 ksVariation_32f,
375 ksVariation_32_Jmp,
376 ksVariation_32f_Jmp,
377 ksVariation_32_NoJmp,
378 ksVariation_32f_NoJmp,
379 ksVariation_16,
380 ksVariation_16f,
381 ksVariation_16_Jmp,
382 ksVariation_16f_Jmp,
383 ksVariation_16_NoJmp,
384 ksVariation_16f_NoJmp,
385 ksVariation_16_Addr32,
386 ksVariation_16f_Addr32,
387 ksVariation_16_Pre386,
388 ksVariation_16f_Pre386,
389 ksVariation_16_Pre386_Jmp,
390 ksVariation_16f_Pre386_Jmp,
391 ksVariation_16_Pre386_NoJmp,
392 ksVariation_16f_Pre386_NoJmp,
393 ksVariation_32_Addr16,
394 ksVariation_32f_Addr16,
395 ksVariation_64_Addr32,
396 ksVariation_64f_Addr32,
397 );
398 kdVariationNames = {
399 ksVariation_Default: 'defer-to-cimpl',
400 ksVariation_16: '16-bit',
401 ksVariation_16f: '16-bit w/ eflag checking and clearing',
402 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
403 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
404 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
405 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
406 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
407 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
408 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
409 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
410 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
411 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
412 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
413 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
414 ksVariation_32: '32-bit',
415 ksVariation_32f: '32-bit w/ eflag checking and clearing',
416 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
417 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
418 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
419 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
420 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
421 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
422 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
423 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
424 ksVariation_64: '64-bit',
425 ksVariation_64f: '64-bit w/ eflag checking and clearing',
426 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
427 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
428 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
429 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
430 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
431 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
432 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
433 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
434 };
435 kdVariationsWithEflagsCheckingAndClearing = {
436 ksVariation_16f: True,
437 ksVariation_16f_Jmp: True,
438 ksVariation_16f_NoJmp: True,
439 ksVariation_16f_Addr32: True,
440 ksVariation_16f_Pre386: True,
441 ksVariation_16f_Pre386_Jmp: True,
442 ksVariation_16f_Pre386_NoJmp: True,
443 ksVariation_32f: True,
444 ksVariation_32f_Jmp: True,
445 ksVariation_32f_NoJmp: True,
446 ksVariation_32f_Flat: True,
447 ksVariation_32f_Addr16: True,
448 ksVariation_64f: True,
449 ksVariation_64f_Jmp: True,
450 ksVariation_64f_NoJmp: True,
451 ksVariation_64f_FsGs: True,
452 ksVariation_64f_Addr32: True,
453 };
454 kdVariationsOnly64NoFlags = {
455 ksVariation_64: True,
456 ksVariation_64_Jmp: True,
457 ksVariation_64_NoJmp: True,
458 ksVariation_64_FsGs: True,
459 ksVariation_64_Addr32: True,
460 };
461 kdVariationsOnly64WithFlags = {
462 ksVariation_64f: True,
463 ksVariation_64f_Jmp: True,
464 ksVariation_64f_NoJmp: True,
465 ksVariation_64f_FsGs: True,
466 ksVariation_64f_Addr32: True,
467 };
468 kdVariationsOnlyPre386NoFlags = {
469 ksVariation_16_Pre386: True,
470 ksVariation_16_Pre386_Jmp: True,
471 ksVariation_16_Pre386_NoJmp: True,
472 };
473 kdVariationsOnlyPre386WithFlags = {
474 ksVariation_16f_Pre386: True,
475 ksVariation_16f_Pre386_Jmp: True,
476 ksVariation_16f_Pre386_NoJmp: True,
477 };
478 kdVariationsWithFlatAddress = {
479 ksVariation_32_Flat: True,
480 ksVariation_32f_Flat: True,
481 ksVariation_64: True,
482 ksVariation_64f: True,
483 ksVariation_64_Addr32: True,
484 ksVariation_64f_Addr32: True,
485 };
486 kdVariationsWithFlatStackAddress = {
487 ksVariation_32_Flat: True,
488 ksVariation_32f_Flat: True,
489 ksVariation_64: True,
490 ksVariation_64f: True,
491 ksVariation_64_FsGs: True,
492 ksVariation_64f_FsGs: True,
493 ksVariation_64_Addr32: True,
494 ksVariation_64f_Addr32: True,
495 };
496 kdVariationsWithFlat64StackAddress = {
497 ksVariation_64: True,
498 ksVariation_64f: True,
499 ksVariation_64_FsGs: True,
500 ksVariation_64f_FsGs: True,
501 ksVariation_64_Addr32: True,
502 ksVariation_64f_Addr32: True,
503 };
504 kdVariationsWithFlatAddr16 = {
505 ksVariation_16: True,
506 ksVariation_16f: True,
507 ksVariation_16_Pre386: True,
508 ksVariation_16f_Pre386: True,
509 ksVariation_32_Addr16: True,
510 ksVariation_32f_Addr16: True,
511 };
512 kdVariationsWithFlatAddr32No64 = {
513 ksVariation_16_Addr32: True,
514 ksVariation_16f_Addr32: True,
515 ksVariation_32: True,
516 ksVariation_32f: True,
517 ksVariation_32_Flat: True,
518 ksVariation_32f_Flat: True,
519 };
520 kdVariationsWithAddressOnly64 = {
521 ksVariation_64: True,
522 ksVariation_64f: True,
523 ksVariation_64_FsGs: True,
524 ksVariation_64f_FsGs: True,
525 ksVariation_64_Addr32: True,
526 ksVariation_64f_Addr32: True,
527 };
528 kdVariationsWithConditional = {
529 ksVariation_16_Jmp: True,
530 ksVariation_16_NoJmp: True,
531 ksVariation_16_Pre386_Jmp: True,
532 ksVariation_16_Pre386_NoJmp: True,
533 ksVariation_32_Jmp: True,
534 ksVariation_32_NoJmp: True,
535 ksVariation_64_Jmp: True,
536 ksVariation_64_NoJmp: True,
537 ksVariation_16f_Jmp: True,
538 ksVariation_16f_NoJmp: True,
539 ksVariation_16f_Pre386_Jmp: True,
540 ksVariation_16f_Pre386_NoJmp: True,
541 ksVariation_32f_Jmp: True,
542 ksVariation_32f_NoJmp: True,
543 ksVariation_64f_Jmp: True,
544 ksVariation_64f_NoJmp: True,
545 };
546 kdVariationsWithConditionalNoJmp = {
547 ksVariation_16_NoJmp: True,
548 ksVariation_16_Pre386_NoJmp: True,
549 ksVariation_32_NoJmp: True,
550 ksVariation_64_NoJmp: True,
551 ksVariation_16f_NoJmp: True,
552 ksVariation_16f_Pre386_NoJmp: True,
553 ksVariation_32f_NoJmp: True,
554 ksVariation_64f_NoJmp: True,
555 };
556 kdVariationsOnlyPre386 = {
557 ksVariation_16_Pre386: True,
558 ksVariation_16f_Pre386: True,
559 ksVariation_16_Pre386_Jmp: True,
560 ksVariation_16f_Pre386_Jmp: True,
561 ksVariation_16_Pre386_NoJmp: True,
562 ksVariation_16f_Pre386_NoJmp: True,
563 };
564 ## @}
565
566 ## IEM_CIMPL_F_XXX flags that we know.
567 ## The value indicates whether it terminates the TB or not. The goal is to
568 ## improve the recompiler so all but END_TB will be False.
569 ##
570 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
571 kdCImplFlags = {
572 'IEM_CIMPL_F_MODE': False,
573 'IEM_CIMPL_F_BRANCH_DIRECT': False,
574 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
575 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
576 'IEM_CIMPL_F_BRANCH_FAR': True,
577 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
578 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
579 'IEM_CIMPL_F_BRANCH_STACK': False,
580 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
581 'IEM_CIMPL_F_RFLAGS': False,
582 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
583 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
584 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
585 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
586 'IEM_CIMPL_F_STATUS_FLAGS': False,
587 'IEM_CIMPL_F_VMEXIT': False,
588 'IEM_CIMPL_F_FPU': False,
589 'IEM_CIMPL_F_REP': False,
590 'IEM_CIMPL_F_IO': False,
591 'IEM_CIMPL_F_END_TB': True,
592 'IEM_CIMPL_F_XCPT': True,
593 'IEM_CIMPL_F_CALLS_CIMPL': False,
594 'IEM_CIMPL_F_CALLS_AIMPL': False,
595 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
596 'IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE': False,
597 };
598
    def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
        ## The ThreadedFunction instance this variation belongs to.
        self.oParent = oThreadedFunction # type: ThreadedFunction
        ##< ksVariation_Xxxx.
        self.sVariation = sVariation

        ## Threaded function parameter references.
        self.aoParamRefs = []           # type: List[ThreadedParamRef]
        ## Unique parameter references.
        self.dParamRefs  = {}           # type: Dict[str, List[ThreadedParamRef]]
        ## Minimum number of parameters to the threaded function.
        self.cMinParams  = 0;

        ## List/tree of statements for the threaded function.
        self.aoStmtsForThreadedFunction = [] # type: List[McStmt]

        ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
        self.iEnumValue = -1;

        ## Native recompilation details for this variation.
        self.oNativeRecomp = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
625
626 def getThreadedFunctionName(self):
627 sName = self.oParent.oMcBlock.sFunction;
628 if sName.startswith('iemOp_'):
629 sName = sName[len('iemOp_'):];
630 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
631
632 def getNativeFunctionName(self):
633 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
634
635 def getLivenessFunctionName(self):
636 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
637
638 def getShortName(self):
639 sName = self.oParent.oMcBlock.sFunction;
640 if sName.startswith('iemOp_'):
641 sName = sName[len('iemOp_'):];
642 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
643
644 def getThreadedFunctionStatisticsName(self):
645 sName = self.oParent.oMcBlock.sFunction;
646 if sName.startswith('iemOp_'):
647 sName = sName[len('iemOp_'):];
648
649 sVarNm = self.sVariation;
650 if sVarNm:
651 if sVarNm.startswith('_'):
652 sVarNm = sVarNm[1:];
653 if sVarNm.endswith('_Jmp'):
654 sVarNm = sVarNm[:-4];
655 sName += '_Jmp';
656 elif sVarNm.endswith('_NoJmp'):
657 sVarNm = sVarNm[:-6];
658 sName += '_NoJmp';
659 else:
660 sVarNm = 'DeferToCImpl';
661
662 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
663
    def isWithFlagsCheckingAndClearingVariation(self):
        """
        Checks if this is a variation that checks and clears EFLAGS.
        Returns True when sVariation is one of the '*f*' variations.
        """
        return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
669
    #
    # Analysis and code morphing.
    #

    def raiseProblem(self, sMessage):
        """ Raises a problem.  Delegated to the parent ThreadedFunction. """
        self.oParent.raiseProblem(sMessage);

    def warning(self, sMessage):
        """ Emits a warning.  Delegated to the parent ThreadedFunction. """
        self.oParent.warning(sMessage);
681
    def analyzeReferenceToType(self, sRef):
        """
        Translates a variable or structure reference to a type.
        Returns type name.
        Raises exception if unable to figure it out.

        The mapping is driven by the Hungarian-style prefix of the reference
        name; branches that match no sub-case fall through to raiseProblem.
        """
        ch0 = sRef[0];
        if ch0 == 'u':
            if sRef.startswith('u32'):
                return 'uint32_t';
            if sRef.startswith('u8') or sRef == 'uReg':
                return 'uint8_t';
            if sRef.startswith('u64'):
                return 'uint64_t';
            if sRef.startswith('u16'):
                return 'uint16_t';
        elif ch0 == 'b':
            return 'uint8_t';
        elif ch0 == 'f':
            return 'bool';
        elif ch0 == 'i':
            if sRef.startswith('i8'):
                return 'int8_t';
            if sRef.startswith('i16'):
                return 'int16_t';
            if sRef.startswith('i32'):
                return 'int32_t';
            if sRef.startswith('i64'):
                return 'int64_t';
            if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
                return 'uint8_t';
        elif ch0 == 'p':
            # A plain pointer name (no '->'/'-' in it) is treated as a host pointer.
            if sRef.find('-') < 0:
                return 'uintptr_t';
            # pVCpu->iem.s.xxx field references are resolved via the field-to-type table.
            if sRef.startswith('pVCpu->iem.s.'):
                sField = sRef[len('pVCpu->iem.s.') : ];
                if sField in g_kdIemFieldToType:
                    if g_kdIemFieldToType[sField][0]:
                        return g_kdIemFieldToType[sField][0];
        elif ch0 == 'G' and sRef.startswith('GCPtr'):
            return 'uint64_t';
        elif ch0 == 'e':
            if sRef == 'enmEffOpSize':
                return 'IEMMODE';
        elif ch0 == 'o':
            if sRef.startswith('off32'):
                return 'uint32_t';
        elif sRef == 'cbFrame':  # enter
            return 'uint16_t';
        elif sRef == 'cShift': ## @todo risky
            return 'uint8_t';

        self.raiseProblem('Unknown reference: %s' % (sRef,));
        return None; # Shut up pylint 2.16.2.
736
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        Plain 'pXxxx' references are looked up in the decoder function's
        definition-macro argument list and then in its source lines;
        'pImpl->pfnXxx' references are mapped via the base pointer type.
        Raises (via raiseProblem) if the type cannot be determined.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first.
            # The type precedes the name in the argument list, hence [iArg - 1].
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable declaration.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # 'const FOO *' declarations are rewritten as pointer-to-const 'PCFOO'.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx:
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            # Keep the 'U<bits>' suffix of the member name when forming the function pointer type.
            offBits   = sMember.rfind('U') + 1;
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPBINTODOSIZES':      return 'PFNIEMAIMPLBINTODOU'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8':    return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
789
    def analyze8BitGRegStmt(self, oStmt):
        """
        Gets the 8-bit general purpose register access details of the given statement.
        ASSUMES the statement is one accessing an 8-bit GREG.

        Returns (idxReg, sOrgExpr, sStdRef) where idxReg is the parameter index
        holding the register reference, sOrgExpr the rewritten register
        expression, and sStdRef a standardized name for the reference.
        """
        # For fetch/ref/to-local statements the register is the second parameter.
        idxReg = 0;
        if (   oStmt.sName.find('_FETCH_') > 0
            or oStmt.sName.find('_REF_') > 0
            or oStmt.sName.find('_TO_LOCAL') > 0):
            idxReg = 1;

        sRegRef = oStmt.asParams[idxReg];
        if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
            # Crack the macro invocation into (macro, 'pVCpu', bRm) pieces and re-emit the _EX8 variant.
            asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
            if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
                self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
            sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
        else:
            # Plain register index: remap AH/CH/DH/BH (idx >= 4, no REX/VEX) to the +12 high-byte slots.
            sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)) ? (%s) : (%s) + 12)' \
                     % (sRegRef, sRegRef, sRegRef,);

        if sRegRef.find('IEM_GET_MODRM_RM') >= 0:    sStdRef = 'bRmRm8Ex';
        elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
        elif sRegRef == 'X86_GREG_xAX':              sStdRef = 'bGregXAx8Ex';
        elif sRegRef == 'X86_GREG_xCX':              sStdRef = 'bGregXCx8Ex';
        elif sRegRef == 'X86_GREG_xSP':              sStdRef = 'bGregXSp8Ex';
        elif sRegRef == 'iFixedReg':                 sStdRef = 'bFixedReg8Ex';
        else:
            self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
                         % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
            sStdRef = 'bOther8Ex';

        #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
        #      % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
        return (idxReg, sOrgExpr, sStdRef);
825
826
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ##
    ## Each value is an (iEffSeg, sFlatName) pair: iEffSeg is the index of the
    ## effective segment parameter that is dropped when morphing to the FLAT
    ## MC (the morphing code treats -1 as "no segment parameter"), and
    ## sFlatName is the name of the corresponding FLAT MC.
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':              ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':              ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':             ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM':     ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':           ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':           ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
                                              ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
                                              ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':          ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_NO_AC':        ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_NO_AC':        ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP_D80_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
        'IEM_MC_MEM_MAP_I16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
        'IEM_MC_MEM_MAP_I32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
        'IEM_MC_MEM_MAP_I64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
        'IEM_MC_MEM_MAP_R32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
        'IEM_MC_MEM_MAP_R64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
        'IEM_MC_MEM_MAP_R80_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
        'IEM_MC_MEM_MAP_U8_ATOMIC':           ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
        'IEM_MC_MEM_MAP_U8_RW':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_ATOMIC':          ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
        'IEM_MC_MEM_MAP_U16_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_ATOMIC':          ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
        'IEM_MC_MEM_MAP_U32_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_ATOMIC':          ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
        'IEM_MC_MEM_MAP_U64_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_U128_ATOMIC':         ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
        'IEM_MC_MEM_MAP_U128_RW':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
        'IEM_MC_MEM_MAP_U128_RO':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
        'IEM_MC_MEM_MAP_U128_WO':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
        'IEM_MC_MEM_MAP_EX':                  ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
928
    ## Maps stack PUSH/POP MCs to their flat variants.
    ## Each value is a pair indexed by int(fFlat64StackAddress): element 0 is
    ## the MC used for flat 32-bit stack variations, element 1 the one used
    ## for flat 64-bit stack variations (some map back to the original MC).
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':                  ( 'IEM_MC_FLAT32_PUSH_U16',         'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':                  ( 'IEM_MC_FLAT32_PUSH_U32',         'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':                  ( 'IEM_MC_PUSH_U64',                'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':             ( 'IEM_MC_FLAT32_PUSH_U32_SREG',    'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16':              ( 'IEM_MC_FLAT32_POP_GREG_U16',     'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32':              ( 'IEM_MC_FLAT32_POP_GREG_U32',     'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64':              ( 'IEM_MC_POP_GREG_U64',            'IEM_MC_FLAT64_POP_GREG_U64', ),
    };
938
    ## Maps each variation to the threaded MC used in place of
    ## IEM_MC_CALC_RM_EFF_ADDR for that variation's effective address size.
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
959
    def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        aoStmts   - Source statement list/tree (not modified).
        dState    - Mutable dict shared across the recursion; currently only
                    carries the 'IEM_MC_ASSERT_EFLAGS' marker (value = nesting
                    level at which an IEM_MC_REF_EFLAGS was seen).
        iParamRef - Index of the next self.aoParamRefs entry to consume.
        iLevel    - Conditional nesting depth (0 = top level).

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that are purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so earlier substitutions don't invalidate the later offsets.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                    'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                    % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            # Splice the new (threaded parameter) name in over the original reference text.
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (    oNewStmt.sName
                       in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                           'IEM_MC_REL_JMP_S8_AND_FINISH',  'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                           'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
                        and self.sVariation not in self.kdVariationsOnlyPre386):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    # Pick the PC-width/flags suffix matching the variation.
                    if self.sVariation in self.kdVariationsOnly64NoFlags:
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                    # This is making the wrong branch of conditionals break out of the TB.
                    if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
                        sExitTbStatus = 'VINF_SUCCESS';
                        if self.sVariation in self.kdVariationsWithConditional:
                            if self.sVariation in self.kdVariationsWithConditionalNoJmp:
                                if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                    sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                            elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                        oNewStmt.asParams.append(sExitTbStatus);

                    # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
                    # Inserted just before the finishing statement we appended above.
                    if 'IEM_MC_ASSERT_EFLAGS' in dState:
                        aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
                                               iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
                        del dState['IEM_MC_ASSERT_EFLAGS'];

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.idxFn += 1;        # cbInstr is prepended, shifting the function and param indexes by one.
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatStackAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
                                                                                     self.kdVariationsWithFlat64StackAddress)];

                # Add EFLAGS usage annotations to relevant MCs.
                elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS',
                                        'IEM_MC_FETCH_EFLAGS'):
                    oInstruction = self.oParent.oMcBlock.oInstruction;
                    oNewStmt.sName += '_EX';
                    oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle());   # Shall crash and burn if oInstruction is
                    oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None.  Fix the IEM decoder code.

                    # For IEM_MC_REF_EFLAGS we need to emit an assertion MC before the ..._FINISH
                    # statement; remember at which nesting level the reference was seen.
                    if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
                        dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;

                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef)    = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
                                                                                           iParamRef, iLevel + 1);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
                                                                                              dState, iParamRef, iLevel + 1);

        # Insert an MC so we can assert the correctness of modified flags annotations
        # on IEM_MC_REF_EFLAGS if it goes out of scope.
        if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
            aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
            del dState['IEM_MC_ASSERT_EFLAGS'];

        return (aoThreadedStmts, iParamRef);
1134
1135
    def analyzeConsolidateThreadedParamRefs(self):
        """
        Consolidate threaded function parameter references into a dictionary
        with lists of the references to each variable/field.

        Populates self.dParamRefs (sStdRef -> list of ThreadedParamRef), gives
        each group a unique sNewName, and packs the groups into as few 64-bit
        threaded-call parameters as possible (sets self.cMinParams and each
        reference's iNewParam/offNewParam).

        Returns dummy True.
        """
        # Gather unique parameters.
        self.dParamRefs = {};
        for oRef in self.aoParamRefs:
            if oRef.sStdRef not in self.dParamRefs:
                self.dParamRefs[oRef.sStdRef] = [oRef,];
            else:
                self.dParamRefs[oRef.sStdRef].append(oRef);

        # Generate names for them for use in the threaded function.
        dParamNames = {};
        for sName, aoRefs in self.dParamRefs.items():
            # Morph the reference expression into a name.
            # NOTE(review): the IEM_GET_MODRM_REG_8/RM_8 branches look unreachable
            # since the shorter IEM_GET_MODRM_REG/RM prefixes above match first -
            # confirm whether that is intentional.
            if sName.startswith('IEM_GET_MODRM_REG'):           sName = 'bModRmRegP';
            elif sName.startswith('IEM_GET_MODRM_RM'):          sName = 'bModRmRmP';
            elif sName.startswith('IEM_GET_MODRM_REG_8'):       sName = 'bModRmReg8P';
            elif sName.startswith('IEM_GET_MODRM_RM_8'):        sName = 'bModRmRm8P';
            elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'):    sName = 'bEffVvvvP';
            elif sName.startswith('IEM_GET_IMM8_REG'):          sName = 'bImm8Reg';
            elif sName.find('.') >= 0 or sName.find('->') >= 0:
                # Field access: use the last component with a 'P' suffix.
                sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
            else:
                sName += 'P';

            # Ensure it's unique by appending a decimal digit if necessary.
            if sName in dParamNames:
                for i in range(10):
                    if sName + str(i) not in dParamNames:
                        sName += str(i);
                        break;
            dParamNames[sName] = True;

            # Update all the references.
            for oRef in aoRefs:
                oRef.sNewName = sName;

        # Organize them by size too for the purpose of optimizing them.
        dBySize = {}        # type: Dict[int, List[str]]
        for sStdRef, aoRefs in self.dParamRefs.items():
            if aoRefs[0].sType[0] != 'P':
                cBits = g_kdTypeInfo[aoRefs[0].sType][0];
                assert(cBits <= 64);
            else:
                # Pointer types are passed as full 64-bit values.
                cBits = 64;

            if cBits not in dBySize:
                dBySize[cBits] = [sStdRef,]
            else:
                dBySize[cBits].append(sStdRef);

        # Pack the parameters as best as we can, starting with the largest ones
        # and ASSUMING a 64-bit parameter size.
        self.cMinParams = 0;
        offNewParam     = 0;    # Bit offset within the current 64-bit parameter.
        for cBits in sorted(dBySize.keys(), reverse = True):
            for sStdRef in dBySize[cBits]:
                if offNewParam == 0 or offNewParam + cBits > 64:
                    self.cMinParams += 1;
                    offNewParam      = cBits;
                else:
                    offNewParam     += cBits;
                assert(offNewParam <= 64);

                for oRef in self.dParamRefs[sStdRef]:
                    oRef.iNewParam   = self.cMinParams - 1;
                    oRef.offNewParam = offNewParam - cBits;

        # Currently there are a few that requires 4 parameters, list these so we can figure out why:
        if self.cMinParams >= 4:
            print('debug: cMinParams=%s cRawParams=%s - %s:%d'
                  % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));

        return True;
1213
1214 ksHexDigits = '0123456789abcdefABCDEF';
1215
1216 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1217 """
1218 Scans the statements for things that have to passed on to the threaded
1219 function (populates self.aoParamRefs).
1220 """
1221 for oStmt in aoStmts:
1222 # Some statements we can skip alltogether.
1223 if isinstance(oStmt, iai.McCppPreProc):
1224 continue;
1225 if oStmt.isCppStmt() and oStmt.fDecode:
1226 continue;
1227 if oStmt.sName in ('IEM_MC_BEGIN',):
1228 continue;
1229
1230 if isinstance(oStmt, iai.McStmtVar):
1231 if oStmt.sValue is None:
1232 continue;
1233 aiSkipParams = { 0: True, 1: True, 3: True };
1234 else:
1235 aiSkipParams = {};
1236
1237 # Several statements have implicit parameters and some have different parameters.
1238 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1239 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
1240 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1241 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1242 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1243 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1244
1245 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
1246 and self.sVariation not in self.kdVariationsOnlyPre386):
1247 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1248
1249 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1250 # This is being pretty presumptive about bRm always being the RM byte...
1251 assert len(oStmt.asParams) == 3;
1252 assert oStmt.asParams[1] == 'bRm';
1253
1254 if self.sVariation in self.kdVariationsWithFlatAddr16:
1255 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1256 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1257 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1258 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1259 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1260 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1261 'uint8_t', oStmt, sStdRef = 'bSib'));
1262 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1263 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1264 else:
1265 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1266 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1267 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1268 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1269 'uint8_t', oStmt, sStdRef = 'bSib'));
1270 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1271 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1272 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1273 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1274 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1275
1276 # 8-bit register accesses needs to have their index argument reworked to take REX into account.
1277 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1278 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1279 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
1280 aiSkipParams[idxReg] = True; # Skip the parameter below.
1281
1282 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1283 if ( self.sVariation in self.kdVariationsWithFlatAddress
1284 and oStmt.sName in self.kdMemMcToFlatInfo
1285 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1286 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1287
1288 # Inspect the target of calls to see if we need to pass down a
1289 # function pointer or function table pointer for it to work.
1290 if isinstance(oStmt, iai.McStmtCall):
1291 if oStmt.sFn[0] == 'p':
1292 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1293 elif ( oStmt.sFn[0] != 'i'
1294 and not oStmt.sFn.startswith('RT_CONCAT3')
1295 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1296 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1297 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1298 aiSkipParams[oStmt.idxFn] = True;
1299
1300 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1301 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1302 assert oStmt.idxFn == 2;
1303 aiSkipParams[0] = True;
1304
1305 # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
1306 if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
1307 aiSkipParams[0] = True;
1308
1309
1310 # Check all the parameters for bogus references.
1311 for iParam, sParam in enumerate(oStmt.asParams):
1312 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1313 # The parameter may contain a C expression, so we have to try
1314 # extract the relevant bits, i.e. variables and fields while
1315 # ignoring operators and parentheses.
1316 offParam = 0;
1317 while offParam < len(sParam):
1318 # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
1319 ch = sParam[offParam];
1320 if ch.isalpha() or ch == '_':
1321 offStart = offParam;
1322 offParam += 1;
1323 while offParam < len(sParam):
1324 ch = sParam[offParam];
1325 if not ch.isalnum() and ch != '_' and ch != '.':
1326 if ch != '-' or sParam[offParam + 1] != '>':
1327 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1328 if ( ch == '('
1329 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1330 offParam += len('(pVM)->') - 1;
1331 else:
1332 break;
1333 offParam += 1;
1334 offParam += 1;
1335 sRef = sParam[offStart : offParam];
1336
1337 # For register references, we pass the full register indexes instead as macros
1338 # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
1339 # threaded function will be more efficient if we just pass the register index
1340 # as a 4-bit param.
1341 if ( sRef.startswith('IEM_GET_MODRM')
1342 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV')
1343 or sRef.startswith('IEM_GET_IMM8_REG') ):
1344 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1345 if sParam[offParam] != '(':
1346 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1347 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1348 if asMacroParams is None:
1349 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1350 offParam = offCloseParam + 1;
1351 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1352 oStmt, iParam, offStart));
1353
1354 # We can skip known variables.
1355 elif sRef in self.oParent.dVariables:
1356 pass;
1357
1358 # Skip certain macro invocations.
1359 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1360 'IEM_GET_GUEST_CPU_FEATURES',
1361 'IEM_IS_GUEST_CPU_AMD',
1362 'IEM_IS_16BIT_CODE',
1363 'IEM_IS_32BIT_CODE',
1364 'IEM_IS_64BIT_CODE',
1365 ):
1366 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1367 if sParam[offParam] != '(':
1368 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1369 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1370 if asMacroParams is None:
1371 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1372 offParam = offCloseParam + 1;
1373
1374 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1375 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1376 'IEM_IS_16BIT_CODE',
1377 'IEM_IS_32BIT_CODE',
1378 'IEM_IS_64BIT_CODE',
1379 ):
1380 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1381 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1382 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1383 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1384 offParam += 1;
1385
1386 # Skip constants, globals, types (casts), sizeof and macros.
1387 elif ( sRef.startswith('IEM_OP_PRF_')
1388 or sRef.startswith('IEM_ACCESS_')
1389 or sRef.startswith('IEMINT_')
1390 or sRef.startswith('X86_GREG_')
1391 or sRef.startswith('X86_SREG_')
1392 or sRef.startswith('X86_EFL_')
1393 or sRef.startswith('X86_FSW_')
1394 or sRef.startswith('X86_FCW_')
1395 or sRef.startswith('X86_XCPT_')
1396 or sRef.startswith('IEMMODE_')
1397 or sRef.startswith('IEM_F_')
1398 or sRef.startswith('IEM_CIMPL_F_')
1399 or sRef.startswith('g_')
1400 or sRef.startswith('iemAImpl_')
1401 or sRef.startswith('kIemNativeGstReg_')
1402 or sRef.startswith('RT_ARCH_VAL_')
1403 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1404 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1405 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
1406 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1407 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1408 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1409 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1410 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1411 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1412 'NIL_RTGCPTR',) ):
1413 pass;
1414
1415 # Skip certain macro invocations.
1416 # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1417 elif ( ( '.' not in sRef
1418 and '-' not in sRef
1419 and sRef not in ('pVCpu', ) )
1420 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1421 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1422 oStmt, iParam, offStart));
1423 # Number.
1424 elif ch.isdigit():
1425 if ( ch == '0'
1426 and offParam + 2 <= len(sParam)
1427 and sParam[offParam + 1] in 'xX'
1428 and sParam[offParam + 2] in self.ksHexDigits ):
1429 offParam += 2;
1430 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1431 offParam += 1;
1432 else:
1433 while offParam < len(sParam) and sParam[offParam].isdigit():
1434 offParam += 1;
1435 # Comment?
1436 elif ( ch == '/'
1437 and offParam + 4 <= len(sParam)
1438 and sParam[offParam + 1] == '*'):
1439 offParam += 2;
1440 offNext = sParam.find('*/', offParam);
1441 if offNext < offParam:
1442 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1443 offParam = offNext + 2;
1444 # Whatever else.
1445 else:
1446 offParam += 1;
1447
1448 # Traverse the branches of conditionals.
1449 if isinstance(oStmt, iai.McStmtCond):
1450 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1451 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1452 return True;
1453
1454 def analyzeVariation(self, aoStmts):
1455 """
1456 2nd part of the analysis, done on each variation.
1457
1458 The variations may differ in parameter requirements and will end up with
1459 slightly different MC sequences. Thus this is done on each individually.
1460
1461 Returns dummy True - raises exception on trouble.
1462 """
1463 # Now scan the code for variables and field references that needs to
1464 # be passed to the threaded function because they are related to the
1465 # instruction decoding.
1466 self.analyzeFindThreadedParamRefs(aoStmts);
1467 self.analyzeConsolidateThreadedParamRefs();
1468
1469 # Morph the statement stream for the block into what we'll be using in the threaded function.
1470 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1471 if iParamRef != len(self.aoParamRefs):
1472 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1473
1474 return True;
1475
    def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
        """
        Produces generic C++ statements that emit a call to the threaded function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm is the name of the variable with the threaded function
        to call.  This is for the case where all the variations have the same
        parameters and only the threaded function number differs.

        Returns a list of iai.McCpp* statement objects.
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.  Each (up to) 64-bit parameter is
        # assembled by OR'ing together the cast+shifted source references that
        # were packed into it (iNewParam/offNewParam).
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0]; # The first reference carries the packing info for the whole group.
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags; # Every parameter slot must be fed by at least one reference.
            asCallArgs.append(' | '.join(asFrags));

        aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        # Close the scope, passing along the block's CIMPL flags (or '0' if none).
        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags      = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        if (    asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        # Force an IRQ check right after this instruction when requested.
        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1551
1552
1553class ThreadedFunction(object):
1554 """
1555 A threaded function.
1556 """
1557
    def __init__(self, oMcBlock: iai.McBlock) -> None:
        """ Wraps oMcBlock; the other members are filled in by analyzeThreadedFunction(). """
        ## The MC block this threaded function is generated from.
        self.oMcBlock       = oMcBlock      # type: iai.McBlock
        # The remaining fields are only useful after analyze() has been called:
        ## Variations for this block. There is at least one.
        self.aoVariations   = []            # type: List[ThreadedFunctionVariation]
        ## Variation dictionary containing the same as aoVariations.
        self.dVariations    = {}            # type: Dict[str, ThreadedFunctionVariation]
        ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
        self.dVariables     = {}            # type: Dict[str, iai.McStmtVar]
        ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
        ## and those determined by analyzeCodeOperation().
        self.dsCImplFlags   = {}            # type: Dict[str, bool]
        ## The unique sub-name for this threaded function (set by analyzeAndAnnotateName()).
        self.sSubName       = '';
        #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
        #    self.sSubName = '_%s' % (oMcBlock.iInFunction);
1574
1575 @staticmethod
1576 def dummyInstance():
1577 """ Gets a dummy instance. """
1578 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1579 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1580
1581 def hasWithFlagsCheckingAndClearingVariation(self):
1582 """
1583 Check if there is one or more with flags checking and clearing
1584 variations for this threaded function.
1585 """
1586 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1587 if sVarWithFlags in self.dVariations:
1588 return True;
1589 return False;
1590
1591 #
1592 # Analysis and code morphing.
1593 #
1594
1595 def raiseProblem(self, sMessage):
1596 """ Raises a problem. """
1597 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1598
1599 def error(self, sMessage, oGenerator):
1600 """ Emits an error via the generator object, causing it to fail. """
1601 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1602
1603 def warning(self, sMessage):
1604 """ Emits a warning. """
1605 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1606
    ## Used by analyzeAndAnnotateName for memory MC blocks.
    ## Maps IEM_MC memory fetch/store/map statement names to a sub-name suffix
    ## encoding the memory operand size; trailing 'c' = constant store,
    ## trailing 'a' = atomic mapping, 'sx'/'zx' = sign-/zero-extension.
    kdAnnotateNameMemStmts = {
        'IEM_MC_FETCH_MEM16_U8':                            '__mem8',
        'IEM_MC_FETCH_MEM32_U8':                            '__mem8',
        'IEM_MC_FETCH_MEM_D80':                             '__mem80',
        'IEM_MC_FETCH_MEM_I16':                             '__mem16',
        'IEM_MC_FETCH_MEM_I32':                             '__mem32',
        'IEM_MC_FETCH_MEM_I64':                             '__mem64',
        'IEM_MC_FETCH_MEM_R32':                             '__mem32',
        'IEM_MC_FETCH_MEM_R64':                             '__mem64',
        'IEM_MC_FETCH_MEM_R80':                             '__mem80',
        'IEM_MC_FETCH_MEM_U128':                            '__mem128',
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':                  '__mem128',
        'IEM_MC_FETCH_MEM_U128_NO_AC':                      '__mem128',
        'IEM_MC_FETCH_MEM_U16':                             '__mem16',
        'IEM_MC_FETCH_MEM_U16_DISP':                        '__mem16',
        'IEM_MC_FETCH_MEM_U16_SX_U32':                      '__mem16sx32',
        'IEM_MC_FETCH_MEM_U16_SX_U64':                      '__mem16sx64',
        'IEM_MC_FETCH_MEM_U16_ZX_U32':                      '__mem16zx32',
        'IEM_MC_FETCH_MEM_U16_ZX_U64':                      '__mem16zx64',
        'IEM_MC_FETCH_MEM_U256':                            '__mem256',
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':                  '__mem256',
        'IEM_MC_FETCH_MEM_U256_NO_AC':                      '__mem256',
        'IEM_MC_FETCH_MEM_U32':                             '__mem32',
        'IEM_MC_FETCH_MEM_U32_DISP':                        '__mem32',
        'IEM_MC_FETCH_MEM_U32_SX_U64':                      '__mem32sx64',
        'IEM_MC_FETCH_MEM_U32_ZX_U64':                      '__mem32zx64',
        'IEM_MC_FETCH_MEM_U64':                             '__mem64',
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':                  '__mem64',
        'IEM_MC_FETCH_MEM_U64_DISP':                        '__mem64',
        'IEM_MC_FETCH_MEM_U8':                              '__mem8',
        'IEM_MC_FETCH_MEM_U8_DISP':                         '__mem8',
        'IEM_MC_FETCH_MEM_U8_SX_U16':                       '__mem8sx16',
        'IEM_MC_FETCH_MEM_U8_SX_U32':                       '__mem8sx32',
        'IEM_MC_FETCH_MEM_U8_SX_U64':                       '__mem8sx64',
        'IEM_MC_FETCH_MEM_U8_ZX_U16':                       '__mem8zx16',
        'IEM_MC_FETCH_MEM_U8_ZX_U32':                       '__mem8zx32',
        'IEM_MC_FETCH_MEM_U8_ZX_U64':                       '__mem8zx64',
        'IEM_MC_FETCH_MEM_XMM':                             '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':                   '__mem128',
        'IEM_MC_FETCH_MEM_XMM_NO_AC':                       '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32':                         '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64':                         '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':              '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM':      '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':            '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':            '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':          '__mem128',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':   '__mem128',

        'IEM_MC_STORE_MEM_I16_CONST_BY_REF':                '__mem16',
        'IEM_MC_STORE_MEM_I32_CONST_BY_REF':                '__mem32',
        'IEM_MC_STORE_MEM_I64_CONST_BY_REF':                '__mem64',
        'IEM_MC_STORE_MEM_I8_CONST_BY_REF':                 '__mem8',
        'IEM_MC_STORE_MEM_INDEF_D80_BY_REF':                '__mem80',
        'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF':             '__mem32',
        'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF':             '__mem64',
        'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF':             '__mem80',
        'IEM_MC_STORE_MEM_U128':                            '__mem128',
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':                  '__mem128',
        'IEM_MC_STORE_MEM_U128_NO_AC':                      '__mem128',
        'IEM_MC_STORE_MEM_U16':                             '__mem16',
        'IEM_MC_STORE_MEM_U16_CONST':                       '__mem16c',
        'IEM_MC_STORE_MEM_U256':                            '__mem256',
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':                  '__mem256',
        'IEM_MC_STORE_MEM_U256_NO_AC':                      '__mem256',
        'IEM_MC_STORE_MEM_U32':                             '__mem32',
        'IEM_MC_STORE_MEM_U32_CONST':                       '__mem32c',
        'IEM_MC_STORE_MEM_U64':                             '__mem64',
        'IEM_MC_STORE_MEM_U64_CONST':                       '__mem64c',
        'IEM_MC_STORE_MEM_U8':                              '__mem8',
        'IEM_MC_STORE_MEM_U8_CONST':                        '__mem8c',

        'IEM_MC_MEM_MAP_D80_WO':                            '__mem80',
        'IEM_MC_MEM_MAP_I16_WO':                            '__mem16',
        'IEM_MC_MEM_MAP_I32_WO':                            '__mem32',
        'IEM_MC_MEM_MAP_I64_WO':                            '__mem64',
        'IEM_MC_MEM_MAP_R32_WO':                            '__mem32',
        'IEM_MC_MEM_MAP_R64_WO':                            '__mem64',
        'IEM_MC_MEM_MAP_R80_WO':                            '__mem80',
        'IEM_MC_MEM_MAP_U128_ATOMIC':                       '__mem128a',
        'IEM_MC_MEM_MAP_U128_RO':                           '__mem128',
        'IEM_MC_MEM_MAP_U128_RW':                           '__mem128',
        'IEM_MC_MEM_MAP_U128_WO':                           '__mem128',
        'IEM_MC_MEM_MAP_U16_ATOMIC':                        '__mem16a',
        'IEM_MC_MEM_MAP_U16_RO':                            '__mem16',
        'IEM_MC_MEM_MAP_U16_RW':                            '__mem16',
        'IEM_MC_MEM_MAP_U16_WO':                            '__mem16',
        'IEM_MC_MEM_MAP_U32_ATOMIC':                        '__mem32a',
        'IEM_MC_MEM_MAP_U32_RO':                            '__mem32',
        'IEM_MC_MEM_MAP_U32_RW':                            '__mem32',
        'IEM_MC_MEM_MAP_U32_WO':                            '__mem32',
        'IEM_MC_MEM_MAP_U64_ATOMIC':                        '__mem64a',
        'IEM_MC_MEM_MAP_U64_RO':                            '__mem64',
        'IEM_MC_MEM_MAP_U64_RW':                            '__mem64',
        'IEM_MC_MEM_MAP_U64_WO':                            '__mem64',
        'IEM_MC_MEM_MAP_U8_ATOMIC':                         '__mem8a',
        'IEM_MC_MEM_MAP_U8_RO':                             '__mem8',
        'IEM_MC_MEM_MAP_U8_RW':                             '__mem8',
        'IEM_MC_MEM_MAP_U8_WO':                             '__mem8',
    };
    ## Used by analyzeAndAnnotateName for non-memory MC blocks.
    ## Maps IEM_MC register fetch/store/ref statement names to a sub-name suffix
    ## encoding the register class (greg/sreg/fpu/mreg/xreg/yreg) and size.
    kdAnnotateNameRegStmts = {
        'IEM_MC_FETCH_GREG_U8':                             '__greg8',
        'IEM_MC_FETCH_GREG_U8_ZX_U16':                      '__greg8zx16',
        'IEM_MC_FETCH_GREG_U8_ZX_U32':                      '__greg8zx32',
        'IEM_MC_FETCH_GREG_U8_ZX_U64':                      '__greg8zx64',
        'IEM_MC_FETCH_GREG_U8_SX_U16':                      '__greg8sx16',
        'IEM_MC_FETCH_GREG_U8_SX_U32':                      '__greg8sx32',
        'IEM_MC_FETCH_GREG_U8_SX_U64':                      '__greg8sx64',
        'IEM_MC_FETCH_GREG_U16':                            '__greg16',
        'IEM_MC_FETCH_GREG_U16_ZX_U32':                     '__greg16zx32',
        'IEM_MC_FETCH_GREG_U16_ZX_U64':                     '__greg16zx64',
        'IEM_MC_FETCH_GREG_U16_SX_U32':                     '__greg16sx32',
        'IEM_MC_FETCH_GREG_U16_SX_U64':                     '__greg16sx64',
        'IEM_MC_FETCH_GREG_U32':                            '__greg32',
        'IEM_MC_FETCH_GREG_U32_ZX_U64':                     '__greg32zx64',
        'IEM_MC_FETCH_GREG_U32_SX_U64':                     '__greg32sx64',
        'IEM_MC_FETCH_GREG_U64':                            '__greg64',
        'IEM_MC_FETCH_GREG_U64_ZX_U64':                     '__greg64zx64',
        'IEM_MC_FETCH_GREG_PAIR_U32':                       '__greg32',
        'IEM_MC_FETCH_GREG_PAIR_U64':                       '__greg64',

        'IEM_MC_STORE_GREG_U8':                             '__greg8',
        'IEM_MC_STORE_GREG_U16':                            '__greg16',
        'IEM_MC_STORE_GREG_U32':                            '__greg32',
        'IEM_MC_STORE_GREG_U64':                            '__greg64',
        'IEM_MC_STORE_GREG_I64':                            '__greg64',
        'IEM_MC_STORE_GREG_U8_CONST':                       '__greg8c',
        'IEM_MC_STORE_GREG_U16_CONST':                      '__greg16c',
        'IEM_MC_STORE_GREG_U32_CONST':                      '__greg32c',
        'IEM_MC_STORE_GREG_U64_CONST':                      '__greg64c',
        'IEM_MC_STORE_GREG_PAIR_U32':                       '__greg32',
        'IEM_MC_STORE_GREG_PAIR_U64':                       '__greg64',

        'IEM_MC_FETCH_SREG_U16':                            '__sreg16',
        'IEM_MC_FETCH_SREG_ZX_U32':                         '__sreg32',
        'IEM_MC_FETCH_SREG_ZX_U64':                         '__sreg64',
        'IEM_MC_FETCH_SREG_BASE_U64':                       '__sbase64',
        'IEM_MC_FETCH_SREG_BASE_U32':                       '__sbase32',
        'IEM_MC_STORE_SREG_BASE_U64':                       '__sbase64',
        'IEM_MC_STORE_SREG_BASE_U32':                       '__sbase32',

        'IEM_MC_REF_GREG_U8':                               '__greg8',
        'IEM_MC_REF_GREG_U16':                              '__greg16',
        'IEM_MC_REF_GREG_U32':                              '__greg32',
        'IEM_MC_REF_GREG_U64':                              '__greg64',
        'IEM_MC_REF_GREG_U8_CONST':                         '__greg8',
        'IEM_MC_REF_GREG_U16_CONST':                        '__greg16',
        'IEM_MC_REF_GREG_U32_CONST':                        '__greg32',
        'IEM_MC_REF_GREG_U64_CONST':                        '__greg64',
        'IEM_MC_REF_GREG_I32':                              '__greg32',
        'IEM_MC_REF_GREG_I64':                              '__greg64',
        'IEM_MC_REF_GREG_I32_CONST':                        '__greg32',
        'IEM_MC_REF_GREG_I64_CONST':                        '__greg64',

        'IEM_MC_STORE_FPUREG_R80_SRC_REF':                  '__fpu',
        'IEM_MC_REF_FPUREG':                                '__fpu',

        'IEM_MC_FETCH_MREG_U64':                            '__mreg64',
        'IEM_MC_FETCH_MREG_U32':                            '__mreg32',
        'IEM_MC_FETCH_MREG_U16':                            '__mreg16',
        'IEM_MC_FETCH_MREG_U8':                             '__mreg8',
        'IEM_MC_STORE_MREG_U64':                            '__mreg64',
        'IEM_MC_STORE_MREG_U32':                            '__mreg32',
        'IEM_MC_STORE_MREG_U16':                            '__mreg16',
        'IEM_MC_STORE_MREG_U8':                             '__mreg8',
        'IEM_MC_STORE_MREG_U32_ZX_U64':                     '__mreg32zx64',
        'IEM_MC_REF_MREG_U64':                              '__mreg64',
        'IEM_MC_REF_MREG_U64_CONST':                        '__mreg64',
        'IEM_MC_REF_MREG_U32_CONST':                        '__mreg32',

        'IEM_MC_CLEAR_XREG_U32_MASK':                       '__xreg32x4',
        'IEM_MC_FETCH_XREG_U128':                           '__xreg128',
        'IEM_MC_FETCH_XREG_XMM':                            '__xreg128',
        'IEM_MC_FETCH_XREG_U64':                            '__xreg64',
        'IEM_MC_FETCH_XREG_U32':                            '__xreg32',
        'IEM_MC_FETCH_XREG_U16':                            '__xreg16',
        'IEM_MC_FETCH_XREG_U8':                             '__xreg8',
        'IEM_MC_FETCH_XREG_PAIR_U128':                      '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_XMM':                       '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64':          '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64':   '__xreg128p',

        'IEM_MC_STORE_XREG_U32_U128':                       '__xreg32',
        'IEM_MC_STORE_XREG_U128':                           '__xreg128',
        'IEM_MC_STORE_XREG_XMM':                            '__xreg128',
        'IEM_MC_STORE_XREG_XMM_U32':                        '__xreg32',
        'IEM_MC_STORE_XREG_XMM_U64':                        '__xreg64',
        'IEM_MC_STORE_XREG_U64':                            '__xreg64',
        'IEM_MC_STORE_XREG_U64_ZX_U128':                    '__xreg64zx128',
        'IEM_MC_STORE_XREG_U32':                            '__xreg32',
        'IEM_MC_STORE_XREG_U16':                            '__xreg16',
        'IEM_MC_STORE_XREG_U8':                             '__xreg8',
        'IEM_MC_STORE_XREG_U32_ZX_U128':                    '__xreg32zx128',
        'IEM_MC_STORE_XREG_R32':                            '__xreg32',
        'IEM_MC_STORE_XREG_R64':                            '__xreg64',
        'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX':                '__xreg8zx',
        'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX':               '__xreg16zx',
        'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX':               '__xreg32zx',
        'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX':               '__xreg64zx',
        'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX':              '__xreg128zx',
        'IEM_MC_REF_XREG_U128':                             '__xreg128',
        'IEM_MC_REF_XREG_U128_CONST':                       '__xreg128',
        'IEM_MC_REF_XREG_U32_CONST':                        '__xreg32',
        'IEM_MC_REF_XREG_U64_CONST':                        '__xreg64',
        'IEM_MC_REF_XREG_R32_CONST':                        '__xreg32',
        'IEM_MC_REF_XREG_R64_CONST':                        '__xreg64',
        'IEM_MC_REF_XREG_XMM_CONST':                        '__xreg128',
        'IEM_MC_COPY_XREG_U128':                            '__xreg128',

        'IEM_MC_FETCH_YREG_U256':                           '__yreg256',
        'IEM_MC_FETCH_YREG_U128':                           '__yreg128',
        'IEM_MC_FETCH_YREG_U64':                            '__yreg64',
        'IEM_MC_FETCH_YREG_U32':                            '__yreg32',
        'IEM_MC_STORE_YREG_U128':                           '__yreg128',
        'IEM_MC_STORE_YREG_U32_ZX_VLMAX':                   '__yreg32zx',
        'IEM_MC_STORE_YREG_U64_ZX_VLMAX':                   '__yreg64zx',
        'IEM_MC_STORE_YREG_U128_ZX_VLMAX':                  '__yreg128zx',
        'IEM_MC_STORE_YREG_U256_ZX_VLMAX':                  '__yreg256zx',
        'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX':                '__yreg8',
        'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX':               '__yreg16',
        'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX':               '__yreg32',
        'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX':               '__yreg64',
        'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX':              '__yreg128',
        'IEM_MC_REF_YREG_U128':                             '__yreg128',
        'IEM_MC_REF_YREG_U128_CONST':                       '__yreg128',
        'IEM_MC_REF_YREG_U64_CONST':                        '__yreg64',
        'IEM_MC_COPY_YREG_U256_ZX_VLMAX':                   '__yreg256zx',
        'IEM_MC_COPY_YREG_U128_ZX_VLMAX':                   '__yreg128zx',
        'IEM_MC_COPY_YREG_U64_ZX_VLMAX':                    '__yreg64zx',
        'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX':               '__yreg3296',
        'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX':               '__yreg6464',
        'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX':           '__yreg64hi64hi',
        'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX':           '__yreg64lo64lo',
        'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':        '__yreg64',
        'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':        '__yreg64',
    };
    ## Used by analyzeAndAnnotateName for the call-type part of the sub-name.
    ## Maps IEM_MC call/defer statement names to a call-kind suffix.
    kdAnnotateNameCallStmts = {
        'IEM_MC_CALL_CIMPL_0':                              '__cimpl',
        'IEM_MC_CALL_CIMPL_1':                              '__cimpl',
        'IEM_MC_CALL_CIMPL_2':                              '__cimpl',
        'IEM_MC_CALL_CIMPL_3':                              '__cimpl',
        'IEM_MC_CALL_CIMPL_4':                              '__cimpl',
        'IEM_MC_CALL_CIMPL_5':                              '__cimpl',
        'IEM_MC_CALL_CIMPL_6':                              '__cimpl',
        'IEM_MC_CALL_CIMPL_7':                              '__cimpl',
        'IEM_MC_DEFER_TO_CIMPL_0_RET':                      '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_1_RET':                      '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_2_RET':                      '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_3_RET':                      '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_4_RET':                      '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_5_RET':                      '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_6_RET':                      '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_7_RET':                      '__cimpl_defer',
        'IEM_MC_CALL_VOID_AIMPL_0':                         '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_1':                         '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_2':                         '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_3':                         '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_4':                         '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_5':                         '__aimpl',
        'IEM_MC_CALL_AIMPL_0':                              '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_1':                              '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_2':                              '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_3':                              '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_4':                              '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_5':                              '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_6':                              '__aimpl_ret',
        'IEM_MC_CALL_VOID_AIMPL_6':                         '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_0':                          '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_1':                          '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_2':                          '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_3':                          '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_4':                          '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_5':                          '__aimpl_fpu',
        'IEM_MC_CALL_MMX_AIMPL_0':                          '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_1':                          '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_2':                          '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_3':                          '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_4':                          '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_5':                          '__aimpl_mmx',
        'IEM_MC_CALL_SSE_AIMPL_0':                          '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_1':                          '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_2':                          '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_3':                          '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_4':                          '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_5':                          '__aimpl_sse',
        'IEM_MC_CALL_AVX_AIMPL_0':                          '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_1':                          '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_2':                          '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_3':                          '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_4':                          '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_5':                          '__aimpl_avx',
    };
1901 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1902 """
1903 Scans the statements and variation lists for clues about the threaded function,
1904 and sets self.sSubName if successfull.
1905 """
1906 # Operand base naming:
1907 dHits = {};
1908 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1909 if cHits > 0:
1910 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1911 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1912 else:
1913 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1914 if cHits > 0:
1915 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1916 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1917 else:
1918 # No op details, try name it by call type...
1919 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1920 if cHits > 0:
1921 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1922 self.sSubName = self.kdAnnotateNameCallStmts[sStmtNm];
1923 return;
1924
1925 # Add call info if any:
1926 dHits = {};
1927 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1928 if cHits > 0:
1929 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1930 sName += self.kdAnnotateNameCallStmts[sStmtNm][1:];
1931
1932 self.sSubName = sName;
1933 return;
1934
1935 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1936 """ Scans the statements for MC variables and call arguments. """
1937 for oStmt in aoStmts:
1938 if isinstance(oStmt, iai.McStmtVar):
1939 if oStmt.sVarName in self.dVariables:
1940 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1941 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1942 elif isinstance(oStmt, iai.McStmtCall) and oStmt.sName.startswith('IEM_MC_CALL_AIMPL_'):
1943 if oStmt.asParams[1] in self.dVariables:
1944 raise Exception('Variable %s is defined more than once!' % (oStmt.asParams[1],));
1945 self.dVariables[oStmt.asParams[1]] = iai.McStmtVar('IEM_MC_LOCAL', oStmt.asParams[0:2],
1946 oStmt.asParams[0], oStmt.asParams[1]);
1947
1948 # There shouldn't be any variables or arguments declared inside if/
1949 # else blocks, but scan them too to be on the safe side.
1950 if isinstance(oStmt, iai.McStmtCond):
1951 #cBefore = len(self.dVariables);
1952 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1953 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1954 #if len(self.dVariables) != cBefore:
1955 # raise Exception('Variables/arguments defined in conditional branches!');
1956 return True;
1957
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
        """
        Analyzes the code looking for clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        The dEflStmts dictionary is filled with the EFLAGS-related MC statements
        encountered (keyed by statement name) for the caller to check against
        the @opflmodify/@opfltest instruction annotations.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        Returns annotation on return style.
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_CIMPL_F_BRANCH_XXX flags if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional; # Indirect branches are not expected inside IEM_MC_IF_* blocks.
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                elif oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
                assert sAnnotation is None; # Only one finish/return statement per statement list.
                sAnnotation = g_ksFinishAnnotation_Advance;
            elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',  'IEM_MC_REL_JMP_S16_AND_FINISH',
                                 'IEM_MC_REL_JMP_S32_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_RelJmp;
            elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH',  'IEM_MC_SET_RIP_U32_AND_FINISH',
                                 'IEM_MC_SET_RIP_U64_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_SetJmp;
            elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_DeferToCImpl;

            # Collect MCs working on EFLAGS.  Caller will check this.
            if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS',
                               'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS', 'IEM_MC_ARG_LOCAL_EFLAGS', ):
                dEflStmts[oStmt.sName] = oStmt;
            elif isinstance(oStmt, iai.McStmtCall):
                if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
                                   'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
                    # CIMPL calls only count as EFLAGS statements when the flag mask
                    # parameter advertises RFLAGS or STATUS_FLAGS involvement.
                    if (   oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
                        or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
                        dEflStmts[oStmt.sName] = oStmt;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);

        return sAnnotation;
2031
    def analyzeThreadedFunction(self, oGenerator):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        Decodes and checks the MC block, gathers variables and IEM_CIMPL_F_XXX
        clues, cross-checks the EFLAGS annotations, selects the set of
        variations and runs analyzeVariation() on each of them.

        Returns dummy True - raises exception on trouble.
        """

        #
        # Decode the block into a list/tree of McStmt objects.
        #
        aoStmts = self.oMcBlock.decode();

        #
        # Check the block for errors before we proceed (will decode it).
        #
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        #
        # Scan the statements for local variables and call arguments (self.dVariables).
        #
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        #
        # Scan the code for IEM_CIMPL_F_ and other clues.
        #
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        dEflStmts = {};
        self.analyzeCodeOperation(aoStmts, dEflStmts);
        # A block may use at most one of the four call mechanisms.
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE' in self.dsCImplFlags) > 1):
            self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE/AIMPL_WITH_XSTATE calls', oGenerator);

        #
        # Analyse EFLAGS related MCs and @opflmodify and friends.
        #
        if dEflStmts:
            oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
            if (   oInstruction is None
                or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
                sMcNames = '+'.join(dEflStmts.keys());
                if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
                    self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
            elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts or 'IEM_MC_COMMIT_EFLAGS_OPT' in dEflStmts:
                if not oInstruction.asFlModify:
                    if oInstruction.sMnemonic not in [ 'not', ]: # 'not' does not modify flags by design.
                        self.error('Uses IEM_MC_COMMIT_EFLAGS[_OPT] but has no flags in @opflmodify!', oGenerator);
            elif (   'IEM_MC_CALL_CIMPL_0' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
                if not oInstruction.asFlModify:
                    self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
                               'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
            elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
                if not oInstruction.asFlTest:
                    if oInstruction.sMnemonic not in [ 'not', ]:
                        self.error('Expected @opfltest!', oGenerator);
            # @opflset and @opflclear entries must be a subset of @opflmodify.
            if oInstruction and oInstruction.asFlSet:
                for sFlag in oInstruction.asFlSet:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
            if oInstruction and oInstruction.asFlClear:
                for sFlag in oInstruction.asFlClear:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);

        #
        # Create variations as needed.
        #
        # Defer-to-CIMPL-only blocks get just the default variation; blocks
        # doing effective address calculation or direct memory access get the
        # address-mode variation sets; everything else the plain mode sets.
        #
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
                                                   'IEM_MC_FETCH_MEM_U8'     : True, # mov_AL_Ob ++
                                                   'IEM_MC_FETCH_MEM_U16'    : True, # mov_rAX_Ov ++
                                                   'IEM_MC_FETCH_MEM_U32'    : True,
                                                   'IEM_MC_FETCH_MEM_U64'    : True,
                                                   'IEM_MC_STORE_MEM_U8'     : True, # mov_Ob_AL ++
                                                   'IEM_MC_STORE_MEM_U16'    : True, # mov_Ov_rAX ++
                                                   'IEM_MC_STORE_MEM_U32'    : True,
                                                   'IEM_MC_STORE_MEM_U64'    : True, }):
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        else:
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Conditional relative branches get a _Jmp/_NoJmp pair per base variation.
        if (    'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
            and 'IEM_CIMPL_F_BRANCH_RELATIVE'    in self.dsCImplFlags): # (latter to avoid iemOp_into)
            assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
                '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
            asVariationsBase = asVariations;
            asVariations     = [];
            for sVariation in asVariationsBase:
                asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
            assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);

        # Blocks without any RIP-advancing/finishing MC cannot use the EFLAGS
        # checking-and-clearing variations, so filter those out.
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH':  True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U16_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U64_AND_FINISH': True,
                                          }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        #
        # Try annotate the threaded function name.
        #
        self.analyzeAndAnnotateName(aoStmts);

        #
        # Continue the analysis on each variation.
        #
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;
2191
    ## Used by emitThreadedCallStmts.
    ## Variations whose selection depends on more than the basic CPU mode,
    ## i.e. on the effective address size and/or effective segment register.
    ## When any of these are among a block's variations, emitThreadedCallStmts
    ## folds extra test bits (8 and 16) into the switch expression.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs:    True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs:   True,
        ThreadedFunctionVariation.ksVariation_32_Addr16:  True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat:    True,
        ThreadedFunctionVariation.ksVariation_32f_Flat:   True,
        ThreadedFunctionVariation.ksVariation_16_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
2205
    def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        The sBranch parameter is used with conditional branches where we'll emit
        different threaded calls depending on whether we're in the jump-taken or
        no-jump code path.  It is None for non-branch code and 'Jmp'/'NoJmp'
        otherwise (see morphInputCode).
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            assert not sBranch;
            return self.aoVariations[0].emitThreadedCallStmts(0);

        #
        # Case statement sub-class.
        #
        # Represents one 'case' label in the generated C switch.  A None
        # sVarNm produces a body-less label that falls through to the next
        # case with a body.
        #
        dByVari = self.dVariations;
        #fDbg   = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            def __init__(self, sCond, sVarNm = None):
                self.sCond  = sCond;
                self.sVarNm = sVarNm;
                self.oVar   = dByVari[sVarNm] if sVarNm else None;
                self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;

            def toCode(self):
                # Emits 'case X:' followed by the full call sequence + 'break;'.
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                # Emits 'case X:' that only assigns enmFunction (used when all
                # case bodies are identical apart from the function index).
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                # Returns True if the two case bodies are identical, ignoring
                # only the threaded function index parameter of the
                # IEM_MC2_EMIT_CALL_* statement.
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    # Duplicate of the assert above so the check survives when
                    # asserts are compiled out (python -O).
                    if isinstance(oStmt, iai.McStmtCond):
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        if (    sParam != oThatStmt.asParams[iParam]
                            and (   iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        #
        # The extra bits OR'ed in below must agree with the numeric parts of
        # the case labels generated further down:
        #   8:  effective address size differs from the CPU mode,
        #   16: effective segment is FS/GS/CS (non-flat handling),
        #   32: eflags-checking / pending-breakpoint variant needed.
        #
        fSimple      = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple       = False; # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_64BIT',          ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16',     ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_64BIT | 8',      ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32',          ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16',     ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8',      ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
        elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_64BIT',
                                ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
            if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32',
                                    ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));

        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',          ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16',     None), # fall thru
                Case('IEMMODE_32BIT | 16',                                           None), # fall thru
                Case('IEMMODE_32BIT',                                                ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8',      None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16', None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',                                       None), # fall thru
                Case('IEMMODE_32BIT | 8',                                            ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',          ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16',     None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16',                                           None), # fall thru
                    Case('IEMMODE_32BIT | 32',                                                ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8',      None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',                                       None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8',                                            ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',                                       ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',                                       ThrdFnVar.ksVariation_32f),
                ]);
        elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',
                     ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
            ]);
            if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',
                         ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16',     None), # fall thru
                Case('IEMMODE_16BIT',          ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_16BIT | 8',      ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16',     None), # fall thru
                    Case('IEMMODE_16BIT | 32',          ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8',      ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
        elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT',
                                ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
            if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32',
                                    ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));


        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
                                ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
        if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
                                ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # Select only the function index in the switch, then emit a single
            # shared call sequence using the enmFunction variable.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
2467
    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.

        Parameters:
            aoStmts           - statement list to morph (never modified).
            fIsConditional    - whether this block is a conditional branch and
                                thus needs separate Jmp/NoJmp call emission.
            fCallEmitted      - True once the threaded call has been inserted
                                on this code path.
            cDepth            - recursion depth; 0 for the top-level call.
            sBranchAnnotation - branch annotation of the enclosing conditional
                                (compared against g_ksFinishAnnotation_Advance
                                and g_ksFinishAnnotation_RelJmp below).
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                # Rewrite the flags parameter of IEM_MC_BEGIN with the analyzed set.
                oNewStmt.asParams[1] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    # MC statements that mark the point where decoding is done:
                    # insert the threaded call just before them (pop + extend +
                    # re-append).
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        else:
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH':  True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                    elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                        # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                        if not sBranchAnnotation:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        assert fIsConditional;
                        aoDecoderStmts.pop();
                        if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                              { 'IEM_MC_REL_JMP_S8_AND_FINISH':  1,
                                                                'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        else:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                # C++ decoder statements: emit the call right after the
                # IEMOP_HLP_DONE_* / IEMOP_HLP_DECODED_* macro invocations.
                elif (    not fIsConditional
                      and oStmt.fDecode
                      and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                           fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                 fCallEmitted, cDepth + 1,
                                                                                 oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                # Only consider the call emitted if both branches emitted one.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
2553
2554
    def generateInputCode(self):
        """
        Modifies the input code.

        Returns the replacement source text for the MC block: either a braced
        statement block (single-statement IEM_MC_DEFER_TO_CIMPL_X_RET case) or
        the re-rendered statement list with the threaded call(s) inserted by
        morphInputCode.  The first line is tagged with ' /+ gen +/' (C comment)
        so generated lines can be recognized.
        """
        # Round the block indent up to the nearest multiple of four columns.
        cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;

        if len(self.oMcBlock.aoStmts) == 1:
            # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
            sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
            if self.dsCImplFlags:
                sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
            else:
                sCode += '0;\n';
            sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
                                                  cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
            # NOTE(review): min(cchIndent, 2) - 2 is never positive (cchIndent
            # is a non-negative multiple of 4), so sIndent is always the empty
            # string; possibly a max() was intended.  Left untouched so the
            # generated output is unchanged.
            sIndent = ' ' * (min(cchIndent, 2) - 2);
            sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
            return sCode;

        # IEM_MC_BEGIN/END block
        assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
        fIsConditional = (    'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
                          and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
        return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
                                            cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2580
# Short alias for ThreadedFunctionVariation, keeping the long case tables in
# ThreadedFunction.emitThreadedCallStmts readable.
ThrdFnVar = ThreadedFunctionVariation;
2583
2584
2585class IEMThreadedGenerator(object):
2586 """
2587 The threaded code generator & annotator.
2588 """
2589
2590 def __init__(self):
2591 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2592 self.oOptions = None # type: argparse.Namespace
2593 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2594 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
2595 self.cErrors = 0;
2596
2597 #
2598 # Error reporting.
2599 #
2600
2601 def rawError(self, sCompleteMessage):
2602 """ Output a raw error and increment the error counter. """
2603 print(sCompleteMessage, file = sys.stderr);
2604 self.cErrors += 1;
2605 return False;
2606
2607 #
2608 # Processing.
2609 #
2610
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the input files, builds and analyzes the threaded functions,
        settles name-suffix collisions, populates aidxFirstFunctions, optionally
        runs the native-recompiler analysis, and sanity checks variable/argument
        usage.  Prints statistics to stderr.  Returns True on success, False if
        any errors were reported (raises on hard limits being exceeded).
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyzeThreadedFunction(self);
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams]      = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Do another pass over the threaded functions to settle the name suffix.
        # If several functions of the same decoder function share a sub-name,
        # append a '_<n>' ordinal to disambiguate them.
        iThreadedFn = 0;
        while iThreadedFn < len(self.aoThreadedFuncs):
            oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
            assert oFunction;
            iThreadedFnNext = iThreadedFn + 1;
            dSubNames       = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
            while (    iThreadedFnNext < len(self.aoThreadedFuncs)
                   and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
                dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
                iThreadedFnNext += 1;
            if iThreadedFnNext - iThreadedFn > len(dSubNames):
                iSubName = 0;
                while iThreadedFn + iSubName < iThreadedFnNext:
                    self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
                    iSubName += 1;
            iThreadedFn = iThreadedFnNext;

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        # (getThreadedFunctionByIndex is defined elsewhere; presumably it
        # returns a safe sentinel once the index runs past the end - the loop
        # below relies on that.)
        iThreadedFunction = 0;
        oThreadedFunction = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.analyzeThreadedFunctionsForNativeRecomp(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs         = 0;
        cMaxVars         = 0;
        cMaxVarsAndArgs  = 0;
        cbMaxArgs        = 0;
        cbMaxVars        = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.aoLocals or oThreadedFunction.oMcBlock.aoArgs:
                # Counts.
                cMaxVars        = max(cMaxVars, len(oThreadedFunction.oMcBlock.aoLocals));
                cMaxArgs        = max(cMaxArgs, len(oThreadedFunction.oMcBlock.aoArgs));
                cMaxVarsAndArgs = max(cMaxVarsAndArgs,
                                      len(oThreadedFunction.oMcBlock.aoLocals) + len(oThreadedFunction.oMcBlock.aoArgs));
                # NOTE(review): raises at 10 or more while the message says
                # 'max 10' - possible off-by-one in the condition or the text.
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       len(oThreadedFunction.oMcBlock.aoLocals), len(oThreadedFunction.oMcBlock.aoArgs),));
                # Calc stack allocation size: each var/arg rounded up to a
                # whole number of 8-byte slots.
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars        = max(cbMaxVars, cbVars);
                cbMaxArgs        = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        if self.cErrors > 0:
            print('fatal error: %u error%s during processing. Details above.'
                  % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
            return False;
        return True;
2710
2711 #
2712 # Output
2713 #
2714
2715 def generateLicenseHeader(self):
2716 """
2717 Returns the lines for a license header.
2718 """
2719 return [
2720 '/*',
2721 ' * Autogenerated by $Id: IEMAllThrdPython.py 104195 2024-04-05 14:45:23Z vboxsync $ ',
2722 ' * Do not edit!',
2723 ' */',
2724 '',
2725 '/*',
2726 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2727 ' *',
2728 ' * This file is part of VirtualBox base platform packages, as',
2729 ' * available from https://www.virtualbox.org.',
2730 ' *',
2731 ' * This program is free software; you can redistribute it and/or',
2732 ' * modify it under the terms of the GNU General Public License',
2733 ' * as published by the Free Software Foundation, in version 3 of the',
2734 ' * License.',
2735 ' *',
2736 ' * This program is distributed in the hope that it will be useful, but',
2737 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2738 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2739 ' * General Public License for more details.',
2740 ' *',
2741 ' * You should have received a copy of the GNU General Public License',
2742 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2743 ' *',
2744 ' * The contents of this file may alternatively be used under the terms',
2745 ' * of the Common Development and Distribution License Version 1.0',
2746 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2747 ' * in the VirtualBox distribution, in which case the provisions of the',
2748 ' * CDDL are applicable instead of those of the GPL.',
2749 ' *',
2750 ' * You may elect to license modified versions of this file under the',
2751 ' * terms and conditions of either the GPL or the CDDL or both.',
2752 ' *',
2753 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2754 ' */',
2755 '',
2756 '',
2757 '',
2758 ];
2759
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry: (name suffix after kIemThreadedFunc_BltIn_, cUserArgs, fHaveNative).
    katBltIns = (
        ( 'Nop',                                              0, True  ),
        ( 'LogCpuState',                                      0, True  ),

        ( 'DeferToCImpl0',                                    2, True  ),
        ( 'CheckIrq',                                         0, True  ),
        ( 'CheckMode',                                        1, True  ),
        ( 'CheckHwInstrBps',                                  0, False ),
        ( 'CheckCsLim',                                       1, True  ),

        ( 'CheckCsLimAndOpcodes',                             3, True  ),
        ( 'CheckOpcodes',                                     3, True  ),
        ( 'CheckOpcodesConsiderCsLim',                        3, True  ),

        ( 'CheckCsLimAndPcAndOpcodes',                        3, True  ),
        ( 'CheckPcAndOpcodes',                                3, True  ),
        ( 'CheckPcAndOpcodesConsiderCsLim',                   3, True  ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb',         3, True  ),
        ( 'CheckOpcodesAcrossPageLoadingTlb',                 3, True  ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim',    2, True  ),

        ( 'CheckCsLimAndOpcodesLoadingTlb',                   3, True  ),
        ( 'CheckOpcodesLoadingTlb',                           3, True  ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim',              3, True  ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb',         2, True  ),
        ( 'CheckOpcodesOnNextPageLoadingTlb',                 2, True  ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim',    2, True  ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb',          2, True  ),
        ( 'CheckOpcodesOnNewPageLoadingTlb',                  2, True  ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim',     2, True  ),
    );
2796
    def generateThreadedFunctionsHeader(self, oOut, _):
        """
        Generates the threaded functions header file.
        Returns success indicator.

        Writes the IEMTHREADEDFUNCS enum (invalid entry, built-ins, then one
        entry per function variation in emit order) plus the extern
        declarations for the function/name/stats tables to oOut.  As a side
        effect it assigns oVariation.iEnumValue, which the source generator
        uses for its doc comments.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            '    kIemThreadedFunc_Invalid = 0,',
            '',
            '    /*',
            '     * Predefined',
            '     */',
        ];
        asLines += ['    kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                '    /*',
                '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                '     */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    # NOTE(review): the increment happens before the assignment,
                    # so iEnumValue starts at len(katBltIns) + 2; it is only
                    # used in generated doc comments - confirm intent.
                    iThreadedFunction += 1;
                    oVariation.iEnumValue = iThreadedFunction;
                    asLines.append('    ' + oVariation.getIndexName() + ',');
        asLines += [
            '    kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            '#if defined(IN_RING3)',
            'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
            '#endif',
        ];

        oOut.write('\n'.join(asLines));
        return True;
2851
    ## Maps a bit count to the C UINT64_C mask literal covering that many low
    ## bits; used by generateFunctionParameterUnpacking when emitting the
    ## masking expressions for packed sub-64-bit parameters.
    ksBitsToIntMask = {
        1:  "UINT64_C(0x1)",
        2:  "UINT64_C(0x3)",
        4:  "UINT64_C(0xf)",
        8:  "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2860
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams, uNoRefLevel = 0):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        Parameters:
            oVariation  - the ThreadedFunctionVariation whose dParamRefs
                          describe how values are packed into the parameters.
            oOut        - output stream the C declarations are written to.
            asParams    - names of the packed parameter variables to unpack
                          from (indexed by oRef.iNewParam).
            uNoRefLevel - 0: no RT_NOREF_PV statements; 1: only for pfnXxxx
                          variables (native); 2: for everything (liveness).
        Returns True.
        """
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef = aoRefs[0];
            # Pointer types ('P...') are passed as full 64-bit values; other
            # types get their bit count and C declaration type from g_kdTypeInfo.
            if oRef.sType[0] != 'P':
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            # Build the unpacking expression: plain copy/cast for 64-bit
            # values, mask for offset zero, shift+mask otherwise.
            if cBits == 64:
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                sUnpack = '(%s)((%s >> %s) & %s);' \
                        % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # First column is a 'param:offset' sort key so the declarations
            # come out in packing order; the remaining columns are emitted.
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
        # Compute per-column widths so the output lines up.
        acchVars = [0, 0, 0, 0, 0];
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = '    %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));

        if uNoRefLevel > 0 and aasVars:
            if uNoRefLevel > 1:
                # level 2: Everything. This is used by liveness.
                oOut.write('    ');
                for asVar in sorted(aasVars):
                    oOut.write(' RT_NOREF_PV(%s);' % (asVar[2],));
                oOut.write('\n');
            else:
                # level 1: Only pfnXxxx variables. This is used by native.
                for asVar in sorted(aasVars):
                    if asVar[2].startswith('pfn'):
                        oOut.write('    RT_NOREF_PV(%s);\n' % (asVar[2],));
        return True;
2915
    ## The three packed parameter names used in threaded function signatures.
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut, _):
        """
        Generates the threaded functions source file.

        Emits one IEM_DECL_IEMTHREADEDFUNC_DEF function per threaded function
        variation, then the parallel tables: function pointers, argument
        counts, function names and statistics names.  The unused second
        argument is the part number slot used by other generator methods.

        Returns success indicator.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big banner comment separating each variation section.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header (doxygen comment pointing back at the origin MC block).
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                              % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                 os.path.split(oMcBlock.sSrcFile)[1],
                                 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        # All four tables are indexed by the same enum value, so they are
        # appended to in lockstep to keep the entries aligned.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' /*Invalid*/ NULL,',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            ' 0, /*Invalid*/',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' "Invalid",',
        ];
        asStatTable = [
            '/**',
            ' * Function statistics name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
            '{',
            ' NULL,',
        ];
        aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);

        # The predefined (built-in) entries come first in every table.
        for asTable in aasTables:
            asTable.extend((
                '',
                ' /*',
                ' * Predefined.',
                ' */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
            asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
            asStatTable.append(' "BltIn/%s",' % (sFuncNm,));

        # Then the per-variation entries; the running index must match the
        # iEnumValue assigned to each variation during processing.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    ' /*',
                    ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    ' */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
                    asStatTable.append(' "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.  Name and statistics tables are only compiled in
        # ring-3 / logging builds, hence the #if wrappers.
        #
        oOut.write( '\n'
                  + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write( '\n'
                  + '\n'
                  + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write( '\n'
                  + '\n'
                  + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                  + '\n'
                  + '\n'
                  + '#if defined(IN_RING3)\n');
        oOut.write('\n'.join(asStatTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 */\n');

        return True;
3066
3067 def generateNativeFunctionsHeader(self, oOut, _):
3068 """
3069 Generates the native recompiler functions header file.
3070 Returns success indicator.
3071 """
3072 if not self.oOptions.fNativeRecompilerEnabled:
3073 return True;
3074
3075 asLines = self.generateLicenseHeader();
3076
3077 # Prototype the function table.
3078 asLines += [
3079 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
3080 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
3081 '',
3082 ];
3083
3084 # Emit indicators as to which of the builtin functions have a native
3085 # recompiler function and which not. (We only really need this for
3086 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
3087 for atBltIn in self.katBltIns:
3088 if atBltIn[1]:
3089 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
3090 else:
3091 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
3092
3093 # Emit prototypes for the builtin functions we use in tables.
3094 asLines += [
3095 '',
3096 '/* Prototypes for built-in functions used in the above tables. */',
3097 ];
3098 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3099 if fHaveRecompFunc:
3100 asLines += [
3101 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3102 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3103 ];
3104
3105 # Emit prototypes for table function.
3106 asLines += [
3107 '',
3108 '#ifdef IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES'
3109 ]
3110 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3111 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3112 asLines += [
3113 '',
3114 '/* Variation: ' + sVarName + ' */',
3115 ];
3116 for oThreadedFunction in self.aoThreadedFuncs:
3117 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3118 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3119 asLines.append('IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(' + oVariation.getNativeFunctionName() + ');');
3120 asLines += [
3121 '',
3122 '#endif /* IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES */',
3123 ]
3124
3125 oOut.write('\n'.join(asLines));
3126 return True;
3127
    def generateNativeFunctionsSource(self, oOut, idxPart):
        """
        Generates the native recompiler functions source file.

        The output is split over cParts (4) files; idxPart (0..3) selects
        which slice of the variation list goes into this file.  The function
        pointer table is emitted into part 0 only.

        Returns success indicator.
        """
        cParts = 4;
        assert(idxPart in range(cParts));
        # Nothing to generate unless native recompilation is enabled.
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # The files are split up by threaded variation as that's the simplest way to
        # do it, even if the distribution isn't entirely even (ksVariation_Default
        # only has the defer to cimpl bits and the pre-386 variants will naturally
        # have fewer instructions).
        #
        cVariationsPerFile = len(ThreadedFunctionVariation.kasVariationsEmitOrder) // cParts;
        idxFirstVar = idxPart * cVariationsPerFile;
        idxEndVar = idxFirstVar + cVariationsPerFile;
        # The last part picks up the remainder from the integer division.
        if idxPart + 1 >= cParts:
            idxEndVar = len(ThreadedFunctionVariation.kasVariationsEmitOrder);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder[idxFirstVar:idxEndVar]:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big banner comment separating each variation section.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Only variations with a working native recompiler get a function emitted.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                              % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                 os.path.split(oMcBlock.sSrcFile)[1],
                                 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 1);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table if this is the first file.
        #
        if idxPart == 0:
            oOut.write( '\n'
                      + '\n'
                      + '/*\n'
                      + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                      + ' */\n'
                      + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                      + '{\n'
                      + ' /*Invalid*/ NULL,'
                      + '\n'
                      + ' /*\n'
                      + ' * Predefined.\n'
                      + ' */\n'
                      );
            for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
                if fHaveRecompFunc:
                    oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
                else:
                    oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

            # Per-variation entries; running index must match each variation's iEnumValue.
            iThreadedFunction = 1 + len(self.katBltIns);
            for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
                oOut.write( ' /*\n'
                          + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                          + ' */\n');
                for oThreadedFunction in self.aoThreadedFuncs:
                    oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                    if oVariation:
                        iThreadedFunction += 1;
                        assert oVariation.iEnumValue == iThreadedFunction;
                        sName = oVariation.getNativeFunctionName();
                        if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                            oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                        else:
                            # Non-recompilable entries get a NULL slot so indexing stays aligned.
                            oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

            oOut.write( '};\n');

        oOut.write('\n');
        return True;
3238
    def generateNativeLivenessSource(self, oOut, _):
        """
        Generates the native recompiler liveness analysis functions source file.

        Mirrors generateNativeFunctionsSource, but emits the liveness variants
        (uNoRefLevel = 2, static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF) and is
        not split into parts, so the table is always written at the end.

        Returns success indicator.
        """
        # Nothing to generate unless native recompilation is enabled.
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big banner comment separating each variation section.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Only variations with a working native recompiler get a liveness function.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                              % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                 os.path.split(oMcBlock.sSrcFile)[1],
                                 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 2);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write( '\n'
                  + '\n'
                  + '/*\n'
                  + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                  + ' */\n'
                  + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
                  + '{\n'
                  + ' /*Invalid*/ NULL,'
                  + '\n'
                  + ' /*\n'
                  + ' * Predefined.\n'
                  + ' */\n'
                  );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # Per-variation entries; running index must match each variation's iEnumValue.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write( ' /*\n'
                      + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                      + ' */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getLivenessFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        # Non-recompilable entries get a NULL slot so indexing stays aligned.
                        oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write( '};\n'
                  + '\n');
        return True;
3335
3336
3337 def getThreadedFunctionByIndex(self, idx):
3338 """
3339 Returns a ThreadedFunction object for the given index. If the index is
3340 out of bounds, a dummy is returned.
3341 """
3342 if idx < len(self.aoThreadedFuncs):
3343 return self.aoThreadedFuncs[idx];
3344 return ThreadedFunction.dummyInstance();
3345
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.

        Replays each input parser's lines verbatim, splicing in the generated
        replacement code wherever an MC block was found.  idxFile selects
        which subset of input files (per g_aaoAllInstrFilesAndDefaultMapAndSet)
        goes into this output file.

        Returns success indicator.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?  (aoInfo[2] == -1 means "all sets".)
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                # Unknown files (fInclude still -1) would trip this assertion.
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the input lines, tracking the next MC block belonging to this file.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine = 0;
            while iLine < len(oParser.asLines):
                sLine = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block. Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    # Keep the text before the block, then the generated replacement.
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the block's last line and keep its tail text.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        # Sanity check the replacement looks like one of the known forms.
                        assert ( sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
3436
3437
3438 #
3439 # Main
3440 #
3441
    def main(self, asArgs):
        """
        C-like main function.

        Parses the command line, processes the input instruction files, and
        writes each requested output file ('-' means stdout).

        Returns exit code (0 on success, 1 on failure).
        """

        #
        # Parse arguments
        #
        sScriptDir = os.path.dirname(__file__);
        oParser = argparse.ArgumentParser(add_help = False);
        oParser.add_argument('asInFiles',
                             metavar = 'input.cpp.h',
                             nargs = '*',
                             default = [os.path.join(sScriptDir, aoInfo[0])
                                        for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
                             help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
        oParser.add_argument('--host-arch',
                             metavar = 'arch',
                             dest = 'sHostArch',
                             action = 'store',
                             default = None,
                             help = 'The host architecture.');

        # Output file options - one per generated file; default '-' is stdout.
        oParser.add_argument('--out-thrd-funcs-hdr',
                             metavar = 'file-thrd-funcs.h',
                             dest = 'sOutFileThrdFuncsHdr',
                             action = 'store',
                             default = '-',
                             help = 'The output header file for the threaded functions.');
        oParser.add_argument('--out-thrd-funcs-cpp',
                             metavar = 'file-thrd-funcs.cpp',
                             dest = 'sOutFileThrdFuncsCpp',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the threaded functions.');
        oParser.add_argument('--out-n8ve-funcs-hdr',
                             metavar = 'file-n8tv-funcs.h',
                             dest = 'sOutFileN8veFuncsHdr',
                             action = 'store',
                             default = '-',
                             help = 'The output header file for the native recompiler functions.');
        oParser.add_argument('--out-n8ve-funcs-cpp1',
                             metavar = 'file-n8tv-funcs1.cpp',
                             dest = 'sOutFileN8veFuncsCpp1',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 1.');
        oParser.add_argument('--out-n8ve-funcs-cpp2',
                             metavar = 'file-n8ve-funcs2.cpp',
                             dest = 'sOutFileN8veFuncsCpp2',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 2.');
        oParser.add_argument('--out-n8ve-funcs-cpp3',
                             metavar = 'file-n8ve-funcs3.cpp',
                             dest = 'sOutFileN8veFuncsCpp3',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 3.');
        oParser.add_argument('--out-n8ve-funcs-cpp4',
                             metavar = 'file-n8ve-funcs4.cpp',
                             dest = 'sOutFileN8veFuncsCpp4',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 4.');
        oParser.add_argument('--out-n8ve-liveness-cpp',
                             metavar = 'file-n8ve-liveness.cpp',
                             dest = 'sOutFileN8veLivenessCpp',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler liveness analysis functions.');
        oParser.add_argument('--native',
                             dest = 'fNativeRecompilerEnabled',
                             action = 'store_true',
                             default = False,
                             help = 'Enables generating the files related to native recompilation.');
        oParser.add_argument('--out-mod-input1',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput1',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 1.');
        oParser.add_argument('--out-mod-input2',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput2',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 2.');
        oParser.add_argument('--out-mod-input3',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput3',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 3.');
        oParser.add_argument('--out-mod-input4',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput4',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 4.');
        oParser.add_argument('--help', '-h', '-?',
                             action = 'help',
                             help = 'Display help and exit.');
        oParser.add_argument('--version', '-V',
                             action = 'version',
                             version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
                                     % (__version__.split()[1], iai.__version__.split()[1],),
                             help = 'Displays the version/revision of the script and exit.');
        self.oOptions = oParser.parse_args(asArgs[1:]);
        # Debug aid: dump the effective options to stderr.
        print("oOptions=%s" % (self.oOptions,), file = sys.stderr);

        if self.oOptions.sHostArch not in ('amd64', 'arm64'):
            print('error! Unsupported (or missing) host architecture: %s' % (self.oOptions.sHostArch,), file = sys.stderr);
            return 1;

        #
        # Process the instructions specified in the IEM sources.
        #
        if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
            #
            # Generate the output files.
            # Each entry: (output file name, generator method, part number).
            # Note: the native source parts are 0-based while the modified
            #       input parts are 1-based; each generator interprets its own.
            #
            aaoOutputFiles = (
                ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader, 0, ),
                ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource, 0, ),
                ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader, 0, ),
                ( self.oOptions.sOutFileN8veFuncsCpp1, self.generateNativeFunctionsSource, 0, ),
                ( self.oOptions.sOutFileN8veFuncsCpp2, self.generateNativeFunctionsSource, 1, ),
                ( self.oOptions.sOutFileN8veFuncsCpp3, self.generateNativeFunctionsSource, 2, ),
                ( self.oOptions.sOutFileN8veFuncsCpp4, self.generateNativeFunctionsSource, 3, ),
                ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource, 0, ),
                ( self.oOptions.sOutFileModInput1, self.generateModifiedInput, 1, ),
                ( self.oOptions.sOutFileModInput2, self.generateModifiedInput, 2, ),
                ( self.oOptions.sOutFileModInput3, self.generateModifiedInput, 3, ),
                ( self.oOptions.sOutFileModInput4, self.generateModifiedInput, 4, ),
            );
            fRc = True;
            for sOutFile, fnGenMethod, iPartNo in aaoOutputFiles:
                if sOutFile == '-':
                    fRc = fnGenMethod(sys.stdout, iPartNo) and fRc;
                else:
                    try:
                        oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
                    except Exception as oXcpt:
                        print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
                        return 1;
                    fRc = fnGenMethod(oOut, iPartNo) and fRc;
                    oOut.close();
            if fRc:
                return 0;

        return 1;
3595
3596
if __name__ == '__main__':
    # Script entry point: process argv and exit with the generator's status code.
    sys.exit(IEMThreadedGenerator().main(sys.argv));
3599
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette