VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 103762

Last change on this file since 103762 was 103757, checked in by vboxsync, 13 months ago

VMM/IEM: Remove IEM_MC_NATIVE_IF from the native emitter code blocks and liveness analysis, also morphing ARGS to LOCALS as appropriate. [tweak] bugref:10376

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 179.1 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 103757 2024-03-11 10:54:12Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 103757 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
# 'long' does not exist in Python 3; alias it to 'int' so code written for
# Python 2 keeps working unmodified.
if sys.version_info[0] >= 3:
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;
55
## Type information table: maps a C type name to a (cBits, fSigned, sCType)
## triple used when packing/unpacking threaded function parameters.
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type      )
    'int8_t':       (    8,    True, 'int8_t',    ),
    'int16_t':      (   16,    True, 'int16_t',   ),
    'int32_t':      (   32,    True, 'int32_t',   ),
    'int64_t':      (   64,    True, 'int64_t',   ),
    'uint4_t':      (    4,   False, 'uint8_t',   ),  # 4-bit value carried in a uint8_t.
    'uint8_t':      (    8,   False, 'uint8_t',   ),
    'uint16_t':     (   16,   False, 'uint16_t',  ),
    'uint32_t':     (   32,   False, 'uint32_t',  ),
    'uint64_t':     (   64,   False, 'uint64_t',  ),
    'uintptr_t':    (   64,   False, 'uintptr_t', ), # ASSUMES 64-bit host pointer size.
    'bool':         (    1,   False, 'bool',      ),
    'IEMMODE':      (    2,   False, 'IEMMODE',   ),
};

# Extended type info table - only for getTypeBitCount/variables (superset of
# g_kdTypeInfo, merged below).
g_kdTypeInfo2 = {
    'RTFLOAT32U':        (       32, False, 'RTFLOAT32U',      ),
    'RTFLOAT64U':        (       64, False, 'RTFLOAT64U',      ),
    'RTUINT64U':         (       64, False, 'RTUINT64U',       ),
    'RTGCPTR':           (       64, False, 'RTGCPTR',         ),
    'RTPBCD80U':         (       80, False, 'RTPBCD80U',       ),
    'RTFLOAT80U':        (       80, False, 'RTFLOAT80U',      ),
    'IEMFPURESULT':      (    80+16, False, 'IEMFPURESULT',    ),
    'IEMFPURESULTTWO':   ( 80+16+80, False, 'IEMFPURESULTTWO', ),
    'RTUINT128U':        (      128, False, 'RTUINT128U',      ),
    'X86XMMREG':         (      128, False, 'X86XMMREG',       ),
    'IEMSSERESULT':      (   128+32, False, 'IEMSSERESULT',    ),
    'IEMMEDIAF2XMMSRC':  (      256, False, 'IEMMEDIAF2XMMSRC',),
    'RTUINT256U':        (      256, False, 'RTUINT256U',      ),
    'IEMPCMPISTRXSRC':   (      256, False, 'IEMPCMPISTRXSRC', ),
    'IEMPCMPESTRXSRC':   (      384, False, 'IEMPCMPESTRXSRC', ),
}; #| g_kdTypeInfo; - requires 3.9
# Merge in the base table (the dict-union operator above would need Python 3.9).
g_kdTypeInfo2.update(g_kdTypeInfo);
91
def getTypeBitCount(sType):
    """
    Translate a type to its size in bits.

    Looks the type up in g_kdTypeInfo2 first; anything containing a '*' or
    following the 'P'-prefix pointer-typedef convention is assumed to be a
    pointer and thus 64 bits wide (64-bit host assumption).  Unknown types
    get a diagnostic printed and also default to 64 bits.
    """
    if sType in g_kdTypeInfo2:
        return g_kdTypeInfo2[sType][0];
    # Use startswith rather than sType[0] so an empty type string cannot
    # raise an IndexError; it falls through to the unknown-type path instead.
    if '*' in sType or sType.startswith('P'):
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
103
## Maps IEMCPU fields (referenced in MC blocks as pVCpu->iem.s.<field>) to the
## parameter type to use, or (None,) for fields that must not be referenced
## (see analyzeReferenceToType, which consults this table).
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte':  ( None, ),
    'cbInstrBuf':        ( None, ),
    'pbInstrBuf':        ( None, ),
    'uInstrBufPc':       ( None, ),
    'cbInstrBufTotal':   ( None, ),
    'offCurInstrStart':  ( None, ),
    'cbOpcode':          ( None, ),
    'offOpcode':         ( None, ),
    'offModRm':          ( None, ),
    # Okay ones.
    'fPrefixes':         ( 'uint32_t', ),
    'uRexReg':           ( 'uint8_t', ),
    'uRexB':             ( 'uint8_t', ),
    'uRexIndex':         ( 'uint8_t', ),
    'iEffSeg':           ( 'uint8_t', ),
    'enmEffOpSize':      ( 'IEMMODE', ),
    'enmDefAddrMode':    ( 'IEMMODE', ),
    'enmEffAddrMode':    ( 'IEMMODE', ),
    'enmDefOpSize':      ( 'IEMMODE', ),
    'idxPrefix':         ( 'uint8_t', ),
    'uVex3rdReg':        ( 'uint8_t', ),
    'uVexLength':        ( 'uint8_t', ),
    'fEvexStuff':        ( 'uint8_t', ),
    'uFpuOpcode':        ( 'uint16_t', ),
};

## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
## @{
g_ksFinishAnnotation_Advance      = 'Advance';
g_ksFinishAnnotation_RelJmp       = 'RelJmp';
g_ksFinishAnnotation_SetJmp       = 'SetJmp';
g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
## @}
139
140
class ThreadedParamRef(object):
    """
    A single reference to a parameter inside a statement of a threaded
    function, tracking both the original source expression and where the
    value ends up in the packed threaded-function parameters.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef     = sOrgRef;
        ## Normalized name; whitespace is stripped so macro invocations with
        ## spaces compare equal, unless the caller supplied an explicit one.
        if sStdRef:
            self.sStdRef = sStdRef;
        else:
            self.sStdRef = ''.join(sOrgRef.split());
        ## Set when an explicit sStdRef was given, meaning sOrgRef may not
        ## match the parameter.
        self.fCustomRef  = sStdRef is not None;
        ## The type (typically derived).
        self.sType       = sType;
        ## The statement making the reference.
        self.oStmt       = oStmt;
        ## The parameter containing the reference. None if implicit.
        self.iParam      = iParam;
        ## The offset in the parameter of the reference.
        self.offParam    = offParam;

        ## The variable name in the threaded function (placeholder for now).
        self.sNewName    = 'x';
        ## The threaded-function parameter this is packed into (placeholder).
        self.iNewParam   = 99;
        ## The bit offset in iNewParam (placeholder).
        self.offNewParam = 1024
169
170class ThreadedFunctionVariation(object):
171 """ Threaded function variation. """
172
173 ## @name Variations.
174 ## These variations will match translation block selection/distinctions as well.
175 ## @{
176 # pylint: disable=line-too-long
177 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
178 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
179 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
180 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
181 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
182 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
183 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
184 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
185 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
186 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
187 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
188 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
189 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
190 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
191 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
192 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
193 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
194 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
195 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
196 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
197 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
198 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide.
199 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide, eflags.
200 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
201 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
202 ksVariation_64 = '_64'; ##< 64-bit mode code.
203 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
204 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
205 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
206 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
207 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
208 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
209 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
210 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
211 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
212 # pylint: enable=line-too-long
213 kasVariations = (
214 ksVariation_Default,
215 ksVariation_16,
216 ksVariation_16f,
217 ksVariation_16_Jmp,
218 ksVariation_16f_Jmp,
219 ksVariation_16_NoJmp,
220 ksVariation_16f_NoJmp,
221 ksVariation_16_Addr32,
222 ksVariation_16f_Addr32,
223 ksVariation_16_Pre386,
224 ksVariation_16f_Pre386,
225 ksVariation_16_Pre386_Jmp,
226 ksVariation_16f_Pre386_Jmp,
227 ksVariation_16_Pre386_NoJmp,
228 ksVariation_16f_Pre386_NoJmp,
229 ksVariation_32,
230 ksVariation_32f,
231 ksVariation_32_Jmp,
232 ksVariation_32f_Jmp,
233 ksVariation_32_NoJmp,
234 ksVariation_32f_NoJmp,
235 ksVariation_32_Flat,
236 ksVariation_32f_Flat,
237 ksVariation_32_Addr16,
238 ksVariation_32f_Addr16,
239 ksVariation_64,
240 ksVariation_64f,
241 ksVariation_64_Jmp,
242 ksVariation_64f_Jmp,
243 ksVariation_64_NoJmp,
244 ksVariation_64f_NoJmp,
245 ksVariation_64_FsGs,
246 ksVariation_64f_FsGs,
247 ksVariation_64_Addr32,
248 ksVariation_64f_Addr32,
249 );
250 kasVariationsWithoutAddress = (
251 ksVariation_16,
252 ksVariation_16f,
253 ksVariation_16_Pre386,
254 ksVariation_16f_Pre386,
255 ksVariation_32,
256 ksVariation_32f,
257 ksVariation_64,
258 ksVariation_64f,
259 );
260 kasVariationsWithoutAddressNot286 = (
261 ksVariation_16,
262 ksVariation_16f,
263 ksVariation_32,
264 ksVariation_32f,
265 ksVariation_64,
266 ksVariation_64f,
267 );
268 kasVariationsWithoutAddressNot286Not64 = (
269 ksVariation_16,
270 ksVariation_16f,
271 ksVariation_32,
272 ksVariation_32f,
273 );
274 kasVariationsWithoutAddressNot64 = (
275 ksVariation_16,
276 ksVariation_16f,
277 ksVariation_16_Pre386,
278 ksVariation_16f_Pre386,
279 ksVariation_32,
280 ksVariation_32f,
281 );
282 kasVariationsWithoutAddressOnly64 = (
283 ksVariation_64,
284 ksVariation_64f,
285 );
286 kasVariationsWithAddress = (
287 ksVariation_16,
288 ksVariation_16f,
289 ksVariation_16_Addr32,
290 ksVariation_16f_Addr32,
291 ksVariation_16_Pre386,
292 ksVariation_16f_Pre386,
293 ksVariation_32,
294 ksVariation_32f,
295 ksVariation_32_Flat,
296 ksVariation_32f_Flat,
297 ksVariation_32_Addr16,
298 ksVariation_32f_Addr16,
299 ksVariation_64,
300 ksVariation_64f,
301 ksVariation_64_FsGs,
302 ksVariation_64f_FsGs,
303 ksVariation_64_Addr32,
304 ksVariation_64f_Addr32,
305 );
306 kasVariationsWithAddressNot286 = (
307 ksVariation_16,
308 ksVariation_16f,
309 ksVariation_16_Addr32,
310 ksVariation_16f_Addr32,
311 ksVariation_32,
312 ksVariation_32f,
313 ksVariation_32_Flat,
314 ksVariation_32f_Flat,
315 ksVariation_32_Addr16,
316 ksVariation_32f_Addr16,
317 ksVariation_64,
318 ksVariation_64f,
319 ksVariation_64_FsGs,
320 ksVariation_64f_FsGs,
321 ksVariation_64_Addr32,
322 ksVariation_64f_Addr32,
323 );
324 kasVariationsWithAddressNot286Not64 = (
325 ksVariation_16,
326 ksVariation_16f,
327 ksVariation_16_Addr32,
328 ksVariation_16f_Addr32,
329 ksVariation_32,
330 ksVariation_32f,
331 ksVariation_32_Flat,
332 ksVariation_32f_Flat,
333 ksVariation_32_Addr16,
334 ksVariation_32f_Addr16,
335 );
336 kasVariationsWithAddressNot64 = (
337 ksVariation_16,
338 ksVariation_16f,
339 ksVariation_16_Addr32,
340 ksVariation_16f_Addr32,
341 ksVariation_16_Pre386,
342 ksVariation_16f_Pre386,
343 ksVariation_32,
344 ksVariation_32f,
345 ksVariation_32_Flat,
346 ksVariation_32f_Flat,
347 ksVariation_32_Addr16,
348 ksVariation_32f_Addr16,
349 );
350 kasVariationsWithAddressOnly64 = (
351 ksVariation_64,
352 ksVariation_64f,
353 ksVariation_64_FsGs,
354 ksVariation_64f_FsGs,
355 ksVariation_64_Addr32,
356 ksVariation_64f_Addr32,
357 );
358 kasVariationsOnlyPre386 = (
359 ksVariation_16_Pre386,
360 ksVariation_16f_Pre386,
361 );
362 kasVariationsEmitOrder = (
363 ksVariation_Default,
364 ksVariation_64,
365 ksVariation_64f,
366 ksVariation_64_Jmp,
367 ksVariation_64f_Jmp,
368 ksVariation_64_NoJmp,
369 ksVariation_64f_NoJmp,
370 ksVariation_64_FsGs,
371 ksVariation_64f_FsGs,
372 ksVariation_32_Flat,
373 ksVariation_32f_Flat,
374 ksVariation_32,
375 ksVariation_32f,
376 ksVariation_32_Jmp,
377 ksVariation_32f_Jmp,
378 ksVariation_32_NoJmp,
379 ksVariation_32f_NoJmp,
380 ksVariation_16,
381 ksVariation_16f,
382 ksVariation_16_Jmp,
383 ksVariation_16f_Jmp,
384 ksVariation_16_NoJmp,
385 ksVariation_16f_NoJmp,
386 ksVariation_16_Addr32,
387 ksVariation_16f_Addr32,
388 ksVariation_16_Pre386,
389 ksVariation_16f_Pre386,
390 ksVariation_16_Pre386_Jmp,
391 ksVariation_16f_Pre386_Jmp,
392 ksVariation_16_Pre386_NoJmp,
393 ksVariation_16f_Pre386_NoJmp,
394 ksVariation_32_Addr16,
395 ksVariation_32f_Addr16,
396 ksVariation_64_Addr32,
397 ksVariation_64f_Addr32,
398 );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
436 kdVariationsWithEflagsCheckingAndClearing = {
437 ksVariation_16f: True,
438 ksVariation_16f_Jmp: True,
439 ksVariation_16f_NoJmp: True,
440 ksVariation_16f_Addr32: True,
441 ksVariation_16f_Pre386: True,
442 ksVariation_16f_Pre386_Jmp: True,
443 ksVariation_16f_Pre386_NoJmp: True,
444 ksVariation_32f: True,
445 ksVariation_32f_Jmp: True,
446 ksVariation_32f_NoJmp: True,
447 ksVariation_32f_Flat: True,
448 ksVariation_32f_Addr16: True,
449 ksVariation_64f: True,
450 ksVariation_64f_Jmp: True,
451 ksVariation_64f_NoJmp: True,
452 ksVariation_64f_FsGs: True,
453 ksVariation_64f_Addr32: True,
454 };
455 kdVariationsOnly64NoFlags = {
456 ksVariation_64: True,
457 ksVariation_64_Jmp: True,
458 ksVariation_64_NoJmp: True,
459 ksVariation_64_FsGs: True,
460 ksVariation_64_Addr32: True,
461 };
462 kdVariationsOnly64WithFlags = {
463 ksVariation_64f: True,
464 ksVariation_64f_Jmp: True,
465 ksVariation_64f_NoJmp: True,
466 ksVariation_64f_FsGs: True,
467 ksVariation_64f_Addr32: True,
468 };
469 kdVariationsOnlyPre386NoFlags = {
470 ksVariation_16_Pre386: True,
471 ksVariation_16_Pre386_Jmp: True,
472 ksVariation_16_Pre386_NoJmp: True,
473 };
474 kdVariationsOnlyPre386WithFlags = {
475 ksVariation_16f_Pre386: True,
476 ksVariation_16f_Pre386_Jmp: True,
477 ksVariation_16f_Pre386_NoJmp: True,
478 };
479 kdVariationsWithFlatAddress = {
480 ksVariation_32_Flat: True,
481 ksVariation_32f_Flat: True,
482 ksVariation_64: True,
483 ksVariation_64f: True,
484 ksVariation_64_Addr32: True,
485 ksVariation_64f_Addr32: True,
486 };
487 kdVariationsWithFlatStackAddress = {
488 ksVariation_32_Flat: True,
489 ksVariation_32f_Flat: True,
490 ksVariation_64: True,
491 ksVariation_64f: True,
492 ksVariation_64_FsGs: True,
493 ksVariation_64f_FsGs: True,
494 ksVariation_64_Addr32: True,
495 ksVariation_64f_Addr32: True,
496 };
497 kdVariationsWithFlat64StackAddress = {
498 ksVariation_64: True,
499 ksVariation_64f: True,
500 ksVariation_64_FsGs: True,
501 ksVariation_64f_FsGs: True,
502 ksVariation_64_Addr32: True,
503 ksVariation_64f_Addr32: True,
504 };
505 kdVariationsWithFlatAddr16 = {
506 ksVariation_16: True,
507 ksVariation_16f: True,
508 ksVariation_16_Pre386: True,
509 ksVariation_16f_Pre386: True,
510 ksVariation_32_Addr16: True,
511 ksVariation_32f_Addr16: True,
512 };
513 kdVariationsWithFlatAddr32No64 = {
514 ksVariation_16_Addr32: True,
515 ksVariation_16f_Addr32: True,
516 ksVariation_32: True,
517 ksVariation_32f: True,
518 ksVariation_32_Flat: True,
519 ksVariation_32f_Flat: True,
520 };
521 kdVariationsWithAddressOnly64 = {
522 ksVariation_64: True,
523 ksVariation_64f: True,
524 ksVariation_64_FsGs: True,
525 ksVariation_64f_FsGs: True,
526 ksVariation_64_Addr32: True,
527 ksVariation_64f_Addr32: True,
528 };
529 kdVariationsWithConditional = {
530 ksVariation_16_Jmp: True,
531 ksVariation_16_NoJmp: True,
532 ksVariation_16_Pre386_Jmp: True,
533 ksVariation_16_Pre386_NoJmp: True,
534 ksVariation_32_Jmp: True,
535 ksVariation_32_NoJmp: True,
536 ksVariation_64_Jmp: True,
537 ksVariation_64_NoJmp: True,
538 ksVariation_16f_Jmp: True,
539 ksVariation_16f_NoJmp: True,
540 ksVariation_16f_Pre386_Jmp: True,
541 ksVariation_16f_Pre386_NoJmp: True,
542 ksVariation_32f_Jmp: True,
543 ksVariation_32f_NoJmp: True,
544 ksVariation_64f_Jmp: True,
545 ksVariation_64f_NoJmp: True,
546 };
547 kdVariationsWithConditionalNoJmp = {
548 ksVariation_16_NoJmp: True,
549 ksVariation_16_Pre386_NoJmp: True,
550 ksVariation_32_NoJmp: True,
551 ksVariation_64_NoJmp: True,
552 ksVariation_16f_NoJmp: True,
553 ksVariation_16f_Pre386_NoJmp: True,
554 ksVariation_32f_NoJmp: True,
555 ksVariation_64f_NoJmp: True,
556 };
557 kdVariationsOnlyPre386 = {
558 ksVariation_16_Pre386: True,
559 ksVariation_16f_Pre386: True,
560 ksVariation_16_Pre386_Jmp: True,
561 ksVariation_16f_Pre386_Jmp: True,
562 ksVariation_16_Pre386_NoJmp: True,
563 ksVariation_16f_Pre386_NoJmp: True,
564 };
565 ## @}
566
567 ## IEM_CIMPL_F_XXX flags that we know.
568 ## The value indicates whether it terminates the TB or not. The goal is to
569 ## improve the recompiler so all but END_TB will be False.
570 ##
571 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
572 kdCImplFlags = {
573 'IEM_CIMPL_F_MODE': False,
574 'IEM_CIMPL_F_BRANCH_DIRECT': False,
575 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
576 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
577 'IEM_CIMPL_F_BRANCH_FAR': True,
578 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
579 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
580 'IEM_CIMPL_F_BRANCH_STACK': False,
581 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
582 'IEM_CIMPL_F_RFLAGS': False,
583 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
584 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
585 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
586 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
587 'IEM_CIMPL_F_STATUS_FLAGS': False,
588 'IEM_CIMPL_F_VMEXIT': False,
589 'IEM_CIMPL_F_FPU': False,
590 'IEM_CIMPL_F_REP': False,
591 'IEM_CIMPL_F_IO': False,
592 'IEM_CIMPL_F_END_TB': True,
593 'IEM_CIMPL_F_XCPT': True,
594 'IEM_CIMPL_F_CALLS_CIMPL': False,
595 'IEM_CIMPL_F_CALLS_AIMPL': False,
596 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
597 };
598
    def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
        """
        Instantiates a variation of the given threaded function.

        oThreadedFunction is the owning ThreadedFunction; sVariation is one
        of the ksVariation_Xxxx strings (defaults to ksVariation_Default).
        """
        ## The threaded function this is a variation of.
        self.oParent        = oThreadedFunction # type: ThreadedFunction
        ##< ksVariation_Xxxx.
        self.sVariation     = sVariation

        ## Threaded function parameter references.
        self.aoParamRefs    = []            # type: List[ThreadedParamRef]
        ## Unique parameter references.
        self.dParamRefs     = {}            # type: Dict[str, List[ThreadedParamRef]]
        ## Minimum number of parameters to the threaded function.
        self.cMinParams     = 0;

        ## List/tree of statements for the threaded function.
        self.aoStmtsForThreadedFunction = [] # type: List[McStmt]

        ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
        self.iEnumValue     = -1;

        ## Native recompilation details for this variation.
        self.oNativeRecomp  = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
625
626 def getThreadedFunctionName(self):
627 sName = self.oParent.oMcBlock.sFunction;
628 if sName.startswith('iemOp_'):
629 sName = sName[len('iemOp_'):];
630 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
631
632 def getNativeFunctionName(self):
633 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
634
635 def getLivenessFunctionName(self):
636 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
637
638 def getShortName(self):
639 sName = self.oParent.oMcBlock.sFunction;
640 if sName.startswith('iemOp_'):
641 sName = sName[len('iemOp_'):];
642 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
643
644 def getThreadedFunctionStatisticsName(self):
645 sName = self.oParent.oMcBlock.sFunction;
646 if sName.startswith('iemOp_'):
647 sName = sName[len('iemOp_'):];
648
649 sVarNm = self.sVariation;
650 if sVarNm:
651 if sVarNm.startswith('_'):
652 sVarNm = sVarNm[1:];
653 if sVarNm.endswith('_Jmp'):
654 sVarNm = sVarNm[:-4];
655 sName += '_Jmp';
656 elif sVarNm.endswith('_NoJmp'):
657 sVarNm = sVarNm[:-6];
658 sName += '_NoJmp';
659 else:
660 sVarNm = 'DeferToCImpl';
661
662 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
663
664 def isWithFlagsCheckingAndClearingVariation(self):
665 """
666 Checks if this is a variation that checks and clears EFLAGS.
667 """
668 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
669
670 #
671 # Analysis and code morphing.
672 #
673
    def raiseProblem(self, sMessage):
        """ Raises a problem. Delegates to the parent ThreadedFunction. """
        self.oParent.raiseProblem(sMessage);
677
    def warning(self, sMessage):
        """ Emits a warning. Delegates to the parent ThreadedFunction. """
        self.oParent.warning(sMessage);
681
682 def analyzeReferenceToType(self, sRef):
683 """
684 Translates a variable or structure reference to a type.
685 Returns type name.
686 Raises exception if unable to figure it out.
687 """
688 ch0 = sRef[0];
689 if ch0 == 'u':
690 if sRef.startswith('u32'):
691 return 'uint32_t';
692 if sRef.startswith('u8') or sRef == 'uReg':
693 return 'uint8_t';
694 if sRef.startswith('u64'):
695 return 'uint64_t';
696 if sRef.startswith('u16'):
697 return 'uint16_t';
698 elif ch0 == 'b':
699 return 'uint8_t';
700 elif ch0 == 'f':
701 return 'bool';
702 elif ch0 == 'i':
703 if sRef.startswith('i8'):
704 return 'int8_t';
705 if sRef.startswith('i16'):
706 return 'int16_t';
707 if sRef.startswith('i32'):
708 return 'int32_t';
709 if sRef.startswith('i64'):
710 return 'int64_t';
711 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
712 return 'uint8_t';
713 elif ch0 == 'p':
714 if sRef.find('-') < 0:
715 return 'uintptr_t';
716 if sRef.startswith('pVCpu->iem.s.'):
717 sField = sRef[len('pVCpu->iem.s.') : ];
718 if sField in g_kdIemFieldToType:
719 if g_kdIemFieldToType[sField][0]:
720 return g_kdIemFieldToType[sField][0];
721 elif ch0 == 'G' and sRef.startswith('GCPtr'):
722 return 'uint64_t';
723 elif ch0 == 'e':
724 if sRef == 'enmEffOpSize':
725 return 'IEMMODE';
726 elif ch0 == 'o':
727 if sRef.startswith('off32'):
728 return 'uint32_t';
729 elif sRef == 'cbFrame': # enter
730 return 'uint16_t';
731 elif sRef == 'cShift': ## @todo risky
732 return 'uint8_t';
733
734 self.raiseProblem('Unknown reference: %s' % (sRef,));
735 return None; # Shut up pylint 2.16.2.
736
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        sFnRef is the expression used for the call; it must start with 'p'
        (pointer).  Returns the function-pointer type name, or raises a
        problem via raiseProblem() when it cannot be determined.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple? A plain variable name (no '->' member access) is resolved
        # against the decoder function it appears in.
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first.
            # asDefArgs pairs up as (..., type, name, ...), hence iArg - 1 for the type.
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable declaration.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # 'const IEMOPXXX *' declarations become the 'PC' typedef form.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx: resolve the base pointer type
        # recursively, then map the member suffix to the matching
        # function-pointer typedef.
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            offBits   = sMember.rfind('U') + 1;  # offset of the bit-width suffix in the member name.
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8':    return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
788
    def analyze8BitGRegStmt(self, oStmt):
        """
        Gets the 8-bit general purpose register access details of the given statement.
        ASSUMES the statement is one accessing an 8-bit GREG.

        Returns an (idxReg, sOrgExpr, sStdRef) tuple: the parameter index of
        the register reference, a replacement C expression using the _EX8
        register indexing convention, and a standardized reference name.
        """
        # Fetch/ref/to-local statements have the register as the second
        # parameter rather than the first.
        idxReg = 0;
        if (   oStmt.sName.find('_FETCH_') > 0
            or oStmt.sName.find('_REF_') > 0
            or oStmt.sName.find('_TO_LOCAL') > 0):
            idxReg = 1;

        sRegRef = oStmt.asParams[idxReg];
        if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
            # Split the macro invocation into (macro, 'pVCpu', third-arg) and
            # rewrite it as the corresponding _EX8 variant.
            asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
            if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
                self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
            sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
        else:
            # Any other register expression: indices >= 4 address the high
            # byte registers (+12) unless a REX prefix is present.
            sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);

        # Standardized reference name for the register expression.
        if sRegRef.find('IEM_GET_MODRM_RM') >= 0:    sStdRef = 'bRmRm8Ex';
        elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
        elif sRegRef == 'X86_GREG_xAX':              sStdRef = 'bGregXAx8Ex';
        elif sRegRef == 'X86_GREG_xCX':              sStdRef = 'bGregXCx8Ex';
        elif sRegRef == 'X86_GREG_xSP':              sStdRef = 'bGregXSp8Ex';
        elif sRegRef == 'iFixedReg':                 sStdRef = 'bFixedReg8Ex';
        else:
            self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
                         % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
            sStdRef = 'bOther8Ex';

        #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
        #      % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
        return (idxReg, sOrgExpr, sStdRef);
823
824
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access. Only applied to access
    ## via ES, DS and SS. FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ##
    ## Each value is a (idxEffSeg, sFlatMcName) pair: idxEffSeg is the index of
    ## the effective segment parameter that gets dropped when morphing to the
    ## FLAT variant (-1 would mean no such parameter), and sFlatMcName is the
    ## replacement MC name.  See analyzeMorphStmtForThreaded.
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
        'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
        'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
        'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
        'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
        'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
        'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
        'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
        'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
        'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
        'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
        'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
        'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
        'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
        'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
        'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
926
    ## Maps stack push/pop MCs to their FLAT variants.
    ## Each value is indexed by int(fFlat64Stack): entry [0] is used for flat
    ## 32-bit stack addressing, entry [1] for flat 64-bit stack addressing
    ## (see the kdVariationsWithFlat64StackAddress test in
    ## analyzeMorphStmtForThreaded).  Entries mapping back to the original MC
    ## name have no dedicated flat variant for that mode.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
    };
936
    ## Maps each threaded function variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_*
    ## MC that replaces IEM_MC_CALC_RM_EFF_ADDR for it (see analyzeMorphStmtForThreaded).
    ## The choice is governed by the effective address size of the variation.
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
957
    def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.

        dState is a dictionary shared across the recursion levels; it is used
        here to record (keyed by 'IEM_MC_ASSERT_EFLAGS', value = iLevel of the
        IEM_MC_REF_EFLAGS statement) that an assertion MC must be emitted
        before the relevant *_AND_FINISH statement or when the level goes out
        of scope.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that is purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so the offParam offsets stay valid while substituting.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            # Splice the new (threaded parameter) name in place of the original reference text.
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (    oNewStmt.sName
                       in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                           'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                           'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
                    # All but the SET_RIP ones take the instruction length as an extra parameter.
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
                        and self.sVariation not in self.kdVariationsOnlyPre386):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    # Pick the _PCxx[_WITH_FLAGS] suffix matching the variation.
                    oNewStmt.sName += '_THREADED';
                    if self.sVariation in self.kdVariationsOnly64NoFlags:
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                    # This is making the wrong branch of conditionals break out of the TB.
                    if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
                        sExitTbStatus = 'VINF_SUCCESS';
                        if self.sVariation in self.kdVariationsWithConditional:
                            if self.sVariation in self.kdVariationsWithConditionalNoJmp:
                                if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                    sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                            elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                        oNewStmt.asParams.append(sExitTbStatus);

                    # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
                    # (Inserted just before the *_AND_FINISH statement we appended above.)
                    if 'IEM_MC_ASSERT_EFLAGS' in dState:
                        aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
                                               iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
                        del dState['IEM_MC_ASSERT_EFLAGS'];

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    # The threaded forms get cbInstr prepended, shifting the function and parameter indexes by one.
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.idxFn += 1;
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    # Drop the effective segment parameter (see kdMemMcToFlatInfo) and switch to the FLAT MC.
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatStackAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
                                                                                     self.kdVariationsWithFlat64StackAddress)];

                # Add EFLAGS usage annotations to relevant MCs.
                elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS', 'IEM_MC_FETCH_EFLAGS'):
                    oInstruction = self.oParent.oMcBlock.oInstruction;
                    oNewStmt.sName += '_EX';
                    oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle());   # Shall crash and burn if oInstruction is
                    oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.

                    # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
                    if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
                        dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;

                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
                                                                                       iParamRef, iLevel + 1);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
                                                                                              dState, iParamRef, iLevel + 1);

        # Insert an MC so we can assert the correctness of modified flags annotations
        # on IEM_MC_REF_EFLAGS if it goes out of scope.
        if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
            aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
            del dState['IEM_MC_ASSERT_EFLAGS'];

        return (aoThreadedStmts, iParamRef);
1131
1132
1133 def analyzeConsolidateThreadedParamRefs(self):
1134 """
1135 Consolidate threaded function parameter references into a dictionary
1136 with lists of the references to each variable/field.
1137 """
1138 # Gather unique parameters.
1139 self.dParamRefs = {};
1140 for oRef in self.aoParamRefs:
1141 if oRef.sStdRef not in self.dParamRefs:
1142 self.dParamRefs[oRef.sStdRef] = [oRef,];
1143 else:
1144 self.dParamRefs[oRef.sStdRef].append(oRef);
1145
1146 # Generate names for them for use in the threaded function.
1147 dParamNames = {};
1148 for sName, aoRefs in self.dParamRefs.items():
1149 # Morph the reference expression into a name.
1150 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1151 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1152 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1153 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1154 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1155 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1156 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1157 else:
1158 sName += 'P';
1159
1160 # Ensure it's unique.
1161 if sName in dParamNames:
1162 for i in range(10):
1163 if sName + str(i) not in dParamNames:
1164 sName += str(i);
1165 break;
1166 dParamNames[sName] = True;
1167
1168 # Update all the references.
1169 for oRef in aoRefs:
1170 oRef.sNewName = sName;
1171
1172 # Organize them by size too for the purpose of optimize them.
1173 dBySize = {} # type: Dict[str, str]
1174 for sStdRef, aoRefs in self.dParamRefs.items():
1175 if aoRefs[0].sType[0] != 'P':
1176 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1177 assert(cBits <= 64);
1178 else:
1179 cBits = 64;
1180
1181 if cBits not in dBySize:
1182 dBySize[cBits] = [sStdRef,]
1183 else:
1184 dBySize[cBits].append(sStdRef);
1185
1186 # Pack the parameters as best as we can, starting with the largest ones
1187 # and ASSUMING a 64-bit parameter size.
1188 self.cMinParams = 0;
1189 offNewParam = 0;
1190 for cBits in sorted(dBySize.keys(), reverse = True):
1191 for sStdRef in dBySize[cBits]:
1192 if offNewParam == 0 or offNewParam + cBits > 64:
1193 self.cMinParams += 1;
1194 offNewParam = cBits;
1195 else:
1196 offNewParam += cBits;
1197 assert(offNewParam <= 64);
1198
1199 for oRef in self.dParamRefs[sStdRef]:
1200 oRef.iNewParam = self.cMinParams - 1;
1201 oRef.offNewParam = offNewParam - cBits;
1202
1203 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
1204 if self.cMinParams >= 4:
1205 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1206 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1207
1208 return True;
1209
    ## Characters valid in the body of a C hexadecimal literal (after the 0x
    ## prefix); used by analyzeFindThreadedParamRefs when skipping numbers.
    ksHexDigits = '0123456789abcdefABCDEF';
1211
    def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
        """
        Scans the statements for things that have to be passed on to the threaded
        function (populates self.aoParamRefs).

        Recurses into conditional branches.  The traversal order here must be
        matched exactly by analyzeMorphStmtForThreaded, which walks
        self.aoParamRefs in parallel with the statements.
        """
        for oStmt in aoStmts:
            # Some statements we can skip altogether.
            if isinstance(oStmt, iai.McCppPreProc):
                continue;
            if oStmt.isCppStmt() and oStmt.fDecode:
                continue;
            if oStmt.sName in ('IEM_MC_BEGIN',):
                continue;

            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sValue is None:
                    continue;
                # Only the value parameter (index 2) can contain decoder references;
                # skip the others.  (Assumes the McStmtVar parameter layout - TODO confirm.)
                aiSkipParams = { 0: True, 1: True, 3: True };
            else:
                aiSkipParams = {};

            # Several statements have implicit parameters and some have different parameters.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                               'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
                               'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
                               'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
                               'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
                self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));

            if (    oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
                and self.sVariation not in self.kdVariationsOnlyPre386):
                self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));

            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                # This is being pretty presumptive about bRm always being the RM byte...
                assert len(oStmt.asParams) == 3;
                assert oStmt.asParams[1] == 'bRm';

                if self.sVariation in self.kdVariationsWithFlatAddr16:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo',
                                                             'uint16_t', oStmt, sStdRef = 'u16Disp'));
                elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
                    assert self.sVariation in self.kdVariationsWithAddressOnly64;
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
                                                             'uint4_t', oStmt, sStdRef = 'cbInstr'));
                aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.

            # 8-bit register accesses needs to have their index argument reworked to take REX into account.
            if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
                (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in self.kdVariationsWithFlatAddress
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
            # function pointer or function table pointer for it to work.
            if isinstance(oStmt, iai.McStmtCall):
                if oStmt.sFn[0] == 'p':
                    self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
                elif (    oStmt.sFn[0] != 'i'
                      and not oStmt.sFn.startswith('RT_CONCAT3')
                      and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
                      and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
                    self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
                aiSkipParams[oStmt.idxFn] = True;

            # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
            if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                assert oStmt.idxFn == 2;
                aiSkipParams[0] = True;

            # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
            if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
                aiSkipParams[0] = True;


            # Check all the parameters for bogus references.
            for iParam, sParam in enumerate(oStmt.asParams):
                if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
                    # The parameter may contain a C expression, so we have to try
                    # extract the relevant bits, i.e. variables and fields while
                    # ignoring operators and parentheses.
                    offParam = 0;
                    while offParam < len(sParam):
                        # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
                        ch = sParam[offParam];
                        if ch.isalpha() or ch == '_':
                            offStart = offParam;
                            offParam += 1;
                            while offParam < len(sParam):
                                ch = sParam[offParam];
                                if not ch.isalnum() and ch != '_' and ch != '.':
                                    # NOTE(review): the offParam + 1 lookahead assumes a '-' is never the
                                    # last character of a parameter expression - confirm or bound-check.
                                    if ch != '-' or sParam[offParam + 1] != '>':
                                        # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
                                        if (    ch == '('
                                            and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
                                            offParam += len('(pVM)->') - 1;
                                        else:
                                            break;
                                    offParam += 1;
                                offParam += 1;
                            sRef = sParam[offStart : offParam];

                            # For register references, we pass the full register indexes instead as macros
                            # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
                            # threaded function will be more efficient if we just pass the register index
                            # as a 4-bit param.
                            if (   sRef.startswith('IEM_GET_MODRM')
                                or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;
                                self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
                                                                         oStmt, iParam, offStart));

                            # We can skip known variables.
                            elif sRef in self.oParent.dVariables:
                                pass;

                            # Skip certain macro invocations.
                            elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
                                          'IEM_GET_GUEST_CPU_FEATURES',
                                          'IEM_IS_GUEST_CPU_AMD',
                                          'IEM_IS_16BIT_CODE',
                                          'IEM_IS_32BIT_CODE',
                                          'IEM_IS_64BIT_CODE',
                                          ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;

                                # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
                                if sRef not in ('IEM_IS_GUEST_CPU_AMD',
                                                'IEM_IS_16BIT_CODE',
                                                'IEM_IS_32BIT_CODE',
                                                'IEM_IS_64BIT_CODE',
                                                ):
                                    offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                    if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
                                        offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
                                        while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
                                            offParam += 1;

                            # Skip constants, globals, types (casts), sizeof and macros.
                            elif (   sRef.startswith('IEM_OP_PRF_')
                                  or sRef.startswith('IEM_ACCESS_')
                                  or sRef.startswith('IEMINT_')
                                  or sRef.startswith('X86_GREG_')
                                  or sRef.startswith('X86_SREG_')
                                  or sRef.startswith('X86_EFL_')
                                  or sRef.startswith('X86_FSW_')
                                  or sRef.startswith('X86_FCW_')
                                  or sRef.startswith('X86_XCPT_')
                                  or sRef.startswith('IEMMODE_')
                                  or sRef.startswith('IEM_F_')
                                  or sRef.startswith('IEM_CIMPL_F_')
                                  or sRef.startswith('g_')
                                  or sRef.startswith('iemAImpl_')
                                  or sRef.startswith('kIemNativeGstReg_')
                                  or sRef.startswith('RT_ARCH_VAL_')
                                  or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
                                               'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
                                               'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
                                               'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
                                               'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
                                               'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
                                               'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
                                               'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
                                               'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
                                               'NIL_RTGCPTR',) ):
                                pass;

                            # Skip certain macro invocations.
                            # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
                            elif (   (    '.' not in sRef
                                      and '-' not in sRef
                                      and sRef not in ('pVCpu', ) )
                                  or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
                                self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
                                                                         oStmt, iParam, offStart));
                        # Number.
                        elif ch.isdigit():
                            if (    ch == '0'
                                and offParam + 2 <= len(sParam)
                                and sParam[offParam + 1] in 'xX'
                                and sParam[offParam + 2] in self.ksHexDigits ):
                                offParam += 2;
                                while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
                                    offParam += 1;
                            else:
                                while offParam < len(sParam) and sParam[offParam].isdigit():
                                    offParam += 1;
                        # Comment?
                        elif (    ch == '/'
                              and offParam + 4 <= len(sParam)
                              and sParam[offParam + 1] == '*'):
                            offParam += 2;
                            offNext = sParam.find('*/', offParam);
                            if offNext < offParam:
                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
                            offParam = offNext + 2;
                        # Whatever else.
                        else:
                            offParam += 1;

            # Traverse the branches of conditionals.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
                self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
        return True;
1448
1449 def analyzeVariation(self, aoStmts):
1450 """
1451 2nd part of the analysis, done on each variation.
1452
1453 The variations may differ in parameter requirements and will end up with
1454 slightly different MC sequences. Thus this is done on each individually.
1455
1456 Returns dummy True - raises exception on trouble.
1457 """
1458 # Now scan the code for variables and field references that needs to
1459 # be passed to the threaded function because they are related to the
1460 # instruction decoding.
1461 self.analyzeFindThreadedParamRefs(aoStmts);
1462 self.analyzeConsolidateThreadedParamRefs();
1463
1464 # Morph the statement stream for the block into what we'll be using in the threaded function.
1465 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1466 if iParamRef != len(self.aoParamRefs):
1467 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1468
1469 return True;
1470
    def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
        """
        Produces generic C++ statements that emit a call to the threaded function
        variation and any subsequent checks that may be necessary after that.

        @param cchIndent    Indentation (in characters) for the generated C++
                            statements.
        @param sCallVarNm   The name of the variable with the threaded function
                            to call. This is for the case where all the variations
                            have the same parameters and only the threaded function
                            number differs.  When None, this variation's own index
                            name is used.
        @returns List of iai.McCppGeneric/McCppCall statement objects.
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        # Each of the generic 64-bit parameters is assembled by OR'ing together
        # the (shifted) source references that were packed into it.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags; # Every parameter slot must have at least one contributing reference.
            asCallArgs.append(' | '.join(asFrags));

        aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        # Close the emit-calls scope, passing along the accumulated CIMPL flags.
        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags      = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        # Conditional branches in the NoJmp variations didn't actually branch,
        # so skip iemThreadedSetBranched for those.
        if (   asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1546
1547
1548class ThreadedFunction(object):
1549 """
1550 A threaded function.
1551 """
1552
1553 def __init__(self, oMcBlock: iai.McBlock) -> None:
1554 self.oMcBlock = oMcBlock # type: iai.McBlock
1555 # The remaining fields are only useful after analyze() has been called:
1556 ## Variations for this block. There is at least one.
1557 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1558 ## Variation dictionary containing the same as aoVariations.
1559 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1560 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1561 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1562 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1563 ## and those determined by analyzeCodeOperation().
1564 self.dsCImplFlags = {} # type: Dict[str, bool]
1565 ## The unique sub-name for this threaded function.
1566 self.sSubName = '';
1567 #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
1568 # self.sSubName = '_%s' % (oMcBlock.iInFunction);
1569
1570 @staticmethod
1571 def dummyInstance():
1572 """ Gets a dummy instance. """
1573 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1574 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1575
1576 def hasWithFlagsCheckingAndClearingVariation(self):
1577 """
1578 Check if there is one or more with flags checking and clearing
1579 variations for this threaded function.
1580 """
1581 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1582 if sVarWithFlags in self.dVariations:
1583 return True;
1584 return False;
1585
1586 #
1587 # Analysis and code morphing.
1588 #
1589
1590 def raiseProblem(self, sMessage):
1591 """ Raises a problem. """
1592 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1593
1594 def error(self, sMessage, oGenerator):
1595 """ Emits an error via the generator object, causing it to fail. """
1596 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1597
1598 def warning(self, sMessage):
1599 """ Emits a warning. """
1600 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1601
    ## Used by analyzeAndAnnotateName for memory MC blocks.
    ## Maps IEM_MC_* statement names to a '__memNN...' sub-name suffix.  When
    ## several entries match a block, analyzeAndAnnotateName picks the
    ## lexicographically last statement name, so STORE beats MEM_MAP beats FETCH.
    kdAnnotateNameMemStmts = {
        'IEM_MC_FETCH_MEM16_U8': '__mem8',
        'IEM_MC_FETCH_MEM32_U8': '__mem8',
        'IEM_MC_FETCH_MEM_D80': '__mem80',
        'IEM_MC_FETCH_MEM_I16': '__mem16',
        'IEM_MC_FETCH_MEM_I32': '__mem32',
        'IEM_MC_FETCH_MEM_I64': '__mem64',
        'IEM_MC_FETCH_MEM_R32': '__mem32',
        'IEM_MC_FETCH_MEM_R64': '__mem64',
        'IEM_MC_FETCH_MEM_R80': '__mem80',
        'IEM_MC_FETCH_MEM_U128': '__mem128',
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': '__mem128',
        'IEM_MC_FETCH_MEM_U128_NO_AC': '__mem128',
        'IEM_MC_FETCH_MEM_U16': '__mem16',
        'IEM_MC_FETCH_MEM_U16_DISP': '__mem16',
        'IEM_MC_FETCH_MEM_U16_SX_U32': '__mem16sx32',
        'IEM_MC_FETCH_MEM_U16_SX_U64': '__mem16sx64',
        'IEM_MC_FETCH_MEM_U16_ZX_U32': '__mem16zx32',
        'IEM_MC_FETCH_MEM_U16_ZX_U64': '__mem16zx64',
        'IEM_MC_FETCH_MEM_U256': '__mem256',
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': '__mem256',
        'IEM_MC_FETCH_MEM_U256_NO_AC': '__mem256',
        'IEM_MC_FETCH_MEM_U32': '__mem32',
        'IEM_MC_FETCH_MEM_U32_DISP': '__mem32',
        'IEM_MC_FETCH_MEM_U32_SX_U64': '__mem32sx64',
        'IEM_MC_FETCH_MEM_U32_ZX_U64': '__mem32zx64',
        'IEM_MC_FETCH_MEM_U64': '__mem64',
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128': '__mem64',
        'IEM_MC_FETCH_MEM_U64_DISP': '__mem64',
        'IEM_MC_FETCH_MEM_U8': '__mem8',
        'IEM_MC_FETCH_MEM_U8_DISP': '__mem8',
        'IEM_MC_FETCH_MEM_U8_SX_U16': '__mem8sx16',
        'IEM_MC_FETCH_MEM_U8_SX_U32': '__mem8sx32',
        'IEM_MC_FETCH_MEM_U8_SX_U64': '__mem8sx64',
        'IEM_MC_FETCH_MEM_U8_ZX_U16': '__mem8zx16',
        'IEM_MC_FETCH_MEM_U8_ZX_U32': '__mem8zx32',
        'IEM_MC_FETCH_MEM_U8_ZX_U64': '__mem8zx64',
        'IEM_MC_FETCH_MEM_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_NO_AC': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32': '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64': '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',

        'IEM_MC_STORE_MEM_I16_CONST_BY_REF': '__mem16',
        'IEM_MC_STORE_MEM_I32_CONST_BY_REF': '__mem32',
        'IEM_MC_STORE_MEM_I64_CONST_BY_REF': '__mem64',
        'IEM_MC_STORE_MEM_I8_CONST_BY_REF': '__mem8',
        'IEM_MC_STORE_MEM_INDEF_D80_BY_REF': '__mem80',
        'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF': '__mem32',
        'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF': '__mem64',
        'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF': '__mem80',
        'IEM_MC_STORE_MEM_U128': '__mem128',
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE': '__mem128',
        'IEM_MC_STORE_MEM_U128_NO_AC': '__mem128',
        'IEM_MC_STORE_MEM_U16': '__mem16',
        'IEM_MC_STORE_MEM_U16_CONST': '__mem16c',
        'IEM_MC_STORE_MEM_U256': '__mem256',
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX': '__mem256',
        'IEM_MC_STORE_MEM_U256_NO_AC': '__mem256',
        'IEM_MC_STORE_MEM_U32': '__mem32',
        'IEM_MC_STORE_MEM_U32_CONST': '__mem32c',
        'IEM_MC_STORE_MEM_U64': '__mem64',
        'IEM_MC_STORE_MEM_U64_CONST': '__mem64c',
        'IEM_MC_STORE_MEM_U8': '__mem8',
        'IEM_MC_STORE_MEM_U8_CONST': '__mem8c',

        'IEM_MC_MEM_MAP_D80_WO': '__mem80',
        'IEM_MC_MEM_MAP_I16_WO': '__mem16',
        'IEM_MC_MEM_MAP_I32_WO': '__mem32',
        'IEM_MC_MEM_MAP_I64_WO': '__mem64',
        'IEM_MC_MEM_MAP_R32_WO': '__mem32',
        'IEM_MC_MEM_MAP_R64_WO': '__mem64',
        'IEM_MC_MEM_MAP_R80_WO': '__mem80',
        'IEM_MC_MEM_MAP_U128_ATOMIC': '__mem128a',
        'IEM_MC_MEM_MAP_U128_RO': '__mem128',
        'IEM_MC_MEM_MAP_U128_RW': '__mem128',
        'IEM_MC_MEM_MAP_U128_WO': '__mem128',
        'IEM_MC_MEM_MAP_U16_ATOMIC': '__mem16a',
        'IEM_MC_MEM_MAP_U16_RO': '__mem16',
        'IEM_MC_MEM_MAP_U16_RW': '__mem16',
        'IEM_MC_MEM_MAP_U16_WO': '__mem16',
        'IEM_MC_MEM_MAP_U32_ATOMIC': '__mem32a',
        'IEM_MC_MEM_MAP_U32_RO': '__mem32',
        'IEM_MC_MEM_MAP_U32_RW': '__mem32',
        'IEM_MC_MEM_MAP_U32_WO': '__mem32',
        'IEM_MC_MEM_MAP_U64_ATOMIC': '__mem64a',
        'IEM_MC_MEM_MAP_U64_RO': '__mem64',
        'IEM_MC_MEM_MAP_U64_RW': '__mem64',
        'IEM_MC_MEM_MAP_U64_WO': '__mem64',
        'IEM_MC_MEM_MAP_U8_ATOMIC': '__mem8a',
        'IEM_MC_MEM_MAP_U8_RO': '__mem8',
        'IEM_MC_MEM_MAP_U8_RW': '__mem8',
        'IEM_MC_MEM_MAP_U8_WO': '__mem8',
    };
    ## Used by analyzeAndAnnotateName for non-memory MC blocks.
    ## Maps IEM_MC_* statement names to a register-kind sub-name suffix
    ## (greg/sreg/fpu/mreg/xreg/yreg).  Only consulted when no memory MC
    ## statement matched; the lexicographically last hit wins.
    kdAnnotateNameRegStmts = {
        'IEM_MC_FETCH_GREG_U8': '__greg8',
        'IEM_MC_FETCH_GREG_U8_ZX_U16': '__greg8zx16',
        'IEM_MC_FETCH_GREG_U8_ZX_U32': '__greg8zx32',
        'IEM_MC_FETCH_GREG_U8_ZX_U64': '__greg8zx64',
        'IEM_MC_FETCH_GREG_U8_SX_U16': '__greg8sx16',
        'IEM_MC_FETCH_GREG_U8_SX_U32': '__greg8sx32',
        'IEM_MC_FETCH_GREG_U8_SX_U64': '__greg8sx64',
        'IEM_MC_FETCH_GREG_U16': '__greg16',
        'IEM_MC_FETCH_GREG_U16_ZX_U32': '__greg16zx32',
        'IEM_MC_FETCH_GREG_U16_ZX_U64': '__greg16zx64',
        'IEM_MC_FETCH_GREG_U16_SX_U32': '__greg16sx32',
        'IEM_MC_FETCH_GREG_U16_SX_U64': '__greg16sx64',
        'IEM_MC_FETCH_GREG_U32': '__greg32',
        'IEM_MC_FETCH_GREG_U32_ZX_U64': '__greg32zx64',
        'IEM_MC_FETCH_GREG_U32_SX_U64': '__greg32sx64',
        'IEM_MC_FETCH_GREG_U64': '__greg64',
        'IEM_MC_FETCH_GREG_U64_ZX_U64': '__greg64zx64',
        'IEM_MC_FETCH_GREG_PAIR_U32': '__greg32',
        'IEM_MC_FETCH_GREG_PAIR_U64': '__greg64',

        'IEM_MC_STORE_GREG_U8': '__greg8',
        'IEM_MC_STORE_GREG_U16': '__greg16',
        'IEM_MC_STORE_GREG_U32': '__greg32',
        'IEM_MC_STORE_GREG_U64': '__greg64',
        'IEM_MC_STORE_GREG_I64': '__greg64',
        'IEM_MC_STORE_GREG_U8_CONST': '__greg8c',
        'IEM_MC_STORE_GREG_U16_CONST': '__greg16c',
        'IEM_MC_STORE_GREG_U32_CONST': '__greg32c',
        'IEM_MC_STORE_GREG_U64_CONST': '__greg64c',
        'IEM_MC_STORE_GREG_PAIR_U32': '__greg32',
        'IEM_MC_STORE_GREG_PAIR_U64': '__greg64',

        'IEM_MC_FETCH_SREG_U16': '__sreg16',
        'IEM_MC_FETCH_SREG_ZX_U32': '__sreg32',
        'IEM_MC_FETCH_SREG_ZX_U64': '__sreg64',
        'IEM_MC_FETCH_SREG_BASE_U64': '__sbase64',
        'IEM_MC_FETCH_SREG_BASE_U32': '__sbase32',
        'IEM_MC_STORE_SREG_BASE_U64': '__sbase64',
        'IEM_MC_STORE_SREG_BASE_U32': '__sbase32',

        'IEM_MC_REF_GREG_U8': '__greg8',
        'IEM_MC_REF_GREG_U16': '__greg16',
        'IEM_MC_REF_GREG_U32': '__greg32',
        'IEM_MC_REF_GREG_U64': '__greg64',
        'IEM_MC_REF_GREG_U8_CONST': '__greg8',
        'IEM_MC_REF_GREG_U16_CONST': '__greg16',
        'IEM_MC_REF_GREG_U32_CONST': '__greg32',
        'IEM_MC_REF_GREG_U64_CONST': '__greg64',
        'IEM_MC_REF_GREG_I32': '__greg32',
        'IEM_MC_REF_GREG_I64': '__greg64',
        'IEM_MC_REF_GREG_I32_CONST': '__greg32',
        'IEM_MC_REF_GREG_I64_CONST': '__greg64',

        'IEM_MC_STORE_FPUREG_R80_SRC_REF': '__fpu',
        'IEM_MC_REF_FPUREG': '__fpu',

        'IEM_MC_FETCH_MREG_U64': '__mreg64',
        'IEM_MC_FETCH_MREG_U32': '__mreg32',
        'IEM_MC_STORE_MREG_U64': '__mreg64',
        'IEM_MC_STORE_MREG_U32_ZX_U64': '__mreg32zx64',
        'IEM_MC_REF_MREG_U64': '__mreg64',
        'IEM_MC_REF_MREG_U64_CONST': '__mreg64',
        'IEM_MC_REF_MREG_U32_CONST': '__mreg32',

        'IEM_MC_CLEAR_XREG_U32_MASK': '__xreg32x4',
        'IEM_MC_FETCH_XREG_U128': '__xreg128',
        'IEM_MC_FETCH_XREG_XMM': '__xreg128',
        'IEM_MC_FETCH_XREG_U64': '__xreg64',
        'IEM_MC_FETCH_XREG_U32': '__xreg32',
        'IEM_MC_FETCH_XREG_U16': '__xreg16',
        'IEM_MC_FETCH_XREG_U8': '__xreg8',
        'IEM_MC_FETCH_XREG_PAIR_U128': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_XMM': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',

        'IEM_MC_STORE_XREG_U32_U128': '__xreg32',
        'IEM_MC_STORE_XREG_U128': '__xreg128',
        'IEM_MC_STORE_XREG_XMM': '__xreg128',
        'IEM_MC_STORE_XREG_XMM_U32': '__xreg32',
        'IEM_MC_STORE_XREG_XMM_U64': '__xreg64',
        'IEM_MC_STORE_XREG_U64': '__xreg64',
        'IEM_MC_STORE_XREG_U64_ZX_U128': '__xreg64zx128',
        'IEM_MC_STORE_XREG_U32': '__xreg32',
        'IEM_MC_STORE_XREG_U16': '__xreg16',
        'IEM_MC_STORE_XREG_U8': '__xreg8',
        'IEM_MC_STORE_XREG_U32_ZX_U128': '__xreg32zx128',
        'IEM_MC_STORE_XREG_HI_U64': '__xreg64hi',
        'IEM_MC_STORE_XREG_R32': '__xreg32',
        'IEM_MC_STORE_XREG_R64': '__xreg64',
        'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX': '__xreg8zx',
        'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX': '__xreg16zx',
        'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX': '__xreg32zx',
        'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX': '__xreg64zx',
        'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX': '__xreg128zx',
        'IEM_MC_REF_XREG_U128': '__xreg128',
        'IEM_MC_REF_XREG_U128_CONST': '__xreg128',
        'IEM_MC_REF_XREG_U32_CONST': '__xreg32',
        'IEM_MC_REF_XREG_U64_CONST': '__xreg64',
        'IEM_MC_REF_XREG_R32_CONST': '__xreg32',
        'IEM_MC_REF_XREG_R64_CONST': '__xreg64',
        'IEM_MC_REF_XREG_XMM_CONST': '__xreg128',
        'IEM_MC_COPY_XREG_U128': '__xreg128',

        'IEM_MC_FETCH_YREG_U256': '__yreg256',
        'IEM_MC_FETCH_YREG_U128': '__yreg128',
        'IEM_MC_FETCH_YREG_U64': '__yreg64',
        'IEM_MC_FETCH_YREG_2ND_U64': '__yreg64',
        'IEM_MC_FETCH_YREG_U32': '__yreg32',
        'IEM_MC_STORE_YREG_U128': '__yreg128',
        'IEM_MC_STORE_YREG_U32_ZX_VLMAX': '__yreg32zx',
        'IEM_MC_STORE_YREG_U64_ZX_VLMAX': '__yreg64zx',
        'IEM_MC_STORE_YREG_U128_ZX_VLMAX': '__yreg128zx',
        'IEM_MC_STORE_YREG_U256_ZX_VLMAX': '__yreg256zx',
        'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX': '__yreg8',
        'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX': '__yreg16',
        'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX': '__yreg32',
        'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX': '__yreg64',
        'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX': '__yreg128',
        'IEM_MC_REF_YREG_U128': '__yreg128',
        'IEM_MC_REF_YREG_U128_CONST': '__yreg128',
        'IEM_MC_REF_YREG_U64_CONST': '__yreg64',
        'IEM_MC_COPY_YREG_U256_ZX_VLMAX': '__yreg256zx',
        'IEM_MC_COPY_YREG_U128_ZX_VLMAX': '__yreg128zx',
        'IEM_MC_COPY_YREG_U64_ZX_VLMAX': '__yreg64zx',
        'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX': '__yreg3296',
        'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX': '__yreg6464',
        'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX': '__yreg64hi64hi',
        'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX': '__yreg64lo64lo',
        'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
        'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
    };
1837 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1838 """
1839 Scans the statements and variation lists for clues about the threaded function,
1840 and sets self.sSubName if successfull.
1841 """
1842 dHits = {};
1843 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1844 if cHits > 0:
1845 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1846 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1847 else:
1848 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1849 if not cHits:
1850 return;
1851 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1852 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1853
1854 oStmt = iai.McStmt.findStmtByNames(aoStmts, {'IEM_MC_NATIVE_IF': True,});
1855 if oStmt and oStmt.asArchitectures:
1856 sName += '_n'; ## @todo check if enabled for the host architecture
1857 self.sSubName = sName;
1858 return;
1859
1860 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1861 """ Scans the statements for MC variables and call arguments. """
1862 for oStmt in aoStmts:
1863 if isinstance(oStmt, iai.McStmtVar):
1864 if oStmt.sVarName in self.dVariables:
1865 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1866 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1867
1868 # There shouldn't be any variables or arguments declared inside if/
1869 # else blocks, but scan them too to be on the safe side.
1870 if isinstance(oStmt, iai.McStmtCond):
1871 #cBefore = len(self.dVariables);
1872 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1873 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1874 #if len(self.dVariables) != cBefore:
1875 # raise Exception('Variables/arguments defined in conditional branches!');
1876 return True;
1877
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
        """
        Analyzes the code looking for clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        @param aoStmts          Statement list (or branch sublist) to scan.
        @param dEflStmts        Output dictionary collecting EFLAGS-related MC
                                statements by name for the caller to check.
        @param fSeenConditional Set when recursing into a conditional branch.

        Returns annotation on return style (g_ksFinishAnnotation_XXX or None).
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_IMPL_C_F_BRANCH if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements. At most one finish style is expected per branch.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_Advance;
            elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                                 'IEM_MC_REL_JMP_S32_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_RelJmp;
            elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                 'IEM_MC_SET_RIP_U64_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_SetJmp;
            elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_DeferToCImpl;

            # Collect MCs working on EFLAGS.  Caller will check this.
            if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS',
                               'IEM_MC_ARG_LOCAL_EFLAGS', ):
                dEflStmts[oStmt.sName] = oStmt;
            elif isinstance(oStmt, iai.McStmtCall):
                # CIMPL calls only count as EFLAGS-related when the flags mask says so.
                if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
                                   'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
                    if (   oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
                        or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
                        dEflStmts[oStmt.sName] = oStmt;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);

        return sAnnotation;
1950
    def analyze(self, oGenerator):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        Decodes and checks the MC block, collects variables and CIMPL flags,
        validates EFLAGS annotations, creates the variations and runs the
        per-variation analysis.

        Returns dummy True - raises exception on trouble.
        """

        #
        # Decode the block into a list/tree of McStmt objects.
        #
        aoStmts = self.oMcBlock.decode();

        #
        # Check the block for errors before we proceed (will decode it).
        #
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        #
        # Scan the statements for local variables and call arguments (self.dVariables).
        #
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        #
        # Scan the code for IEM_CIMPL_F_ and other clues.
        #
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        dEflStmts = {};
        self.analyzeCodeOperation(aoStmts, dEflStmts);
        # A block must use at most one of the three call styles.
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
            self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls', oGenerator);

        #
        # Analyse EFLAGS related MCs and @opflmodify and friends.
        #
        if dEflStmts:
            oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
            if (   oInstruction is None
                or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
                sMcNames = '+'.join(dEflStmts.keys());
                if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
                    self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
            elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts:
                if not oInstruction.asFlModify:
                    if oInstruction.sMnemonic not in [ 'not', ]:
                        self.error('Uses IEM_MC_COMMIT_EFLAGS but has no flags in @opflmodify!', oGenerator);
            elif (   'IEM_MC_CALL_CIMPL_0' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
                if not oInstruction.asFlModify:
                    self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
                               'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
            elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
                if not oInstruction.asFlTest:
                    if oInstruction.sMnemonic not in [ 'not', ]:
                        self.error('Expected @opfltest!', oGenerator);
            # @opflset and @opflclear entries must be a subset of @opflmodify.
            if oInstruction and oInstruction.asFlSet:
                for sFlag in oInstruction.asFlSet:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
            if oInstruction and oInstruction.asFlClear:
                for sFlag in oInstruction.asFlClear:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);

        #
        # Create variations as needed.
        #
        # Blocks deferring straight to a C implementation only need the default variation.
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        # Blocks doing effective-address calculation or memory access get the
        # address-mode variations, narrowed by the IEM_MC_F_XXX CPU-mode flags.
        elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
                                                   'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
                                                   'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
                                                   'IEM_MC_FETCH_MEM_U32' : True,
                                                   'IEM_MC_FETCH_MEM_U64' : True,
                                                   'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
                                                   'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
                                                   'IEM_MC_STORE_MEM_U32' : True,
                                                   'IEM_MC_STORE_MEM_U64' : True, }):
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        else:
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Conditional relative branches get split into _Jmp/_NoJmp variation pairs.
        if (    'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
            and 'IEM_CIMPL_F_BRANCH_RELATIVE'    in self.dsCImplFlags): # (latter to avoid iemOp_into)
            assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
                   '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
            asVariationsBase = asVariations;
            asVariations = [];
            for sVariation in asVariationsBase:
                asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
            assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);

        # Drop the EFLAGS checking+clearing variations when the block never finishes
        # an instruction (no RIP advance / branch-and-finish MCs present).
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U16_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U64_AND_FINISH': True,
                                          }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        #
        # Try annotate the threaded function name.
        #
        self.analyzeAndAnnotateName(aoStmts);

        #
        # Continue the analysis on each variation.
        #
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;
2109
    ## Used by emitThreadedCallStmts.
    ## Variations whose dispatch needs extra switch-value bits derived from the
    ## effective address-size mode and/or effective segment register prefix
    ## (see the sSwitchValue construction in emitThreadedCallStmts).
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs: True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
        ThreadedFunctionVariation.ksVariation_32_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat: True,
        ThreadedFunctionVariation.ksVariation_32f_Flat: True,
        ThreadedFunctionVariation.ksVariation_16_Addr32: True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
2123
2124 def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
2125 """
2126 Worker for morphInputCode that returns a list of statements that emits
2127 the call to the threaded functions for the block.
2128
2129 The sBranch parameter is used with conditional branches where we'll emit
2130 different threaded calls depending on whether we're in the jump-taken or
2131 no-jump code path.
2132 """
2133 # Special case for only default variation:
2134 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
2135 assert not sBranch;
2136 return self.aoVariations[0].emitThreadedCallStmts(0);
2137
2138 #
2139 # Case statement sub-class.
2140 #
2141 dByVari = self.dVariations;
2142 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            """
            One 'case' of the execution-mode switch emitted for the block.

            A Case constructed with sVarNm = None is a fall-thru label: it
            emits only the 'case X:' line and shares the body of the next
            case that has one.
            """
            def __init__(self, sCond, sVarNm = None):
                # sCond: the C expression fragment for the case label.
                self.sCond = sCond;
                # sVarNm: variation name (key into dByVari from the enclosing
                # scope), or None for a fall-thru case.
                self.sVarNm = sVarNm;
                # The ThreadedFunctionVariation object, None for fall-thru.
                self.oVar = dByVari[sVarNm] if sVarNm else None;
                # Pre-rendered case body statements, None for fall-thru.
                self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;

            def toCode(self):
                # Returns the case label plus body + 'break;'; fall-thru cases
                # emit only the label.
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                # Variant of toCode() used when all case bodies are identical:
                # the body is reduced to an enmFunction assignment + 'break;'.
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                # Compares the bodies of two cases, tolerating a difference in
                # parameter #1 of IEM_MC2_EMIT_CALL_* calls when both sides use
                # their own variation's index name (i.e. only the threaded
                # function enum value differs, as expected between variations).
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    # Safety net in case the assertion above is compiled out (-O).
                    if isinstance(oStmt, iai.McStmtCond):
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        if (    sParam != oThatStmt.asParams[iParam]
                            and (   iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;
2196
2197 #
2198 # Determine what we're switch on.
2199 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
2200 #
2201 fSimple = True;
2202 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
2203 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
2204 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
2205 # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
2206 # is not writable in 32-bit mode (at least), thus the penalty mode
2207 # for any accesses via it (simpler this way).)
2208 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
2209 fSimple = False; # threaded functions.
2210 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
2211 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
2212 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
2213
2214 #
2215 # Generate the case statements.
2216 #
2217 # pylintx: disable=x
2218 aoCases = [];
2219 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
2220 assert not fSimple and not sBranch;
2221 aoCases.extend([
2222 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
2223 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
2224 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
2225 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
2226 ]);
2227 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
2228 aoCases.extend([
2229 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
2230 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
2231 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
2232 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
2233 ]);
2234 elif ThrdFnVar.ksVariation_64 in dByVari:
2235 assert fSimple and not sBranch;
2236 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
2237 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
2238 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
2239 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
2240 assert fSimple and sBranch;
2241 aoCases.append(Case('IEMMODE_64BIT',
2242 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
2243 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
2244 aoCases.append(Case('IEMMODE_64BIT | 32',
2245 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
2246
2247 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
2248 assert not fSimple and not sBranch;
2249 aoCases.extend([
2250 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
2251 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
2252 Case('IEMMODE_32BIT | 16', None), # fall thru
2253 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2254 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
2255 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
2256 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
2257 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
2258 ]);
2259 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
2260 aoCases.extend([
2261 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
2262 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
2263 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
2264 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2265 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
2266 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
2267 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
2268 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
2269 ]);
2270 elif ThrdFnVar.ksVariation_32 in dByVari:
2271 assert fSimple and not sBranch;
2272 aoCases.extend([
2273 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2274 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2275 ]);
2276 if ThrdFnVar.ksVariation_32f in dByVari:
2277 aoCases.extend([
2278 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2279 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2280 ]);
2281 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
2282 assert fSimple and sBranch;
2283 aoCases.extend([
2284 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2285 Case('IEMMODE_32BIT',
2286 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
2287 ]);
2288 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
2289 aoCases.extend([
2290 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2291 Case('IEMMODE_32BIT | 32',
2292 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
2293 ]);
2294
2295 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
2296 assert not fSimple and not sBranch;
2297 aoCases.extend([
2298 Case('IEMMODE_16BIT | 16', None), # fall thru
2299 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
2300 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
2301 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
2302 ]);
2303 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
2304 aoCases.extend([
2305 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
2306 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
2307 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
2308 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
2309 ]);
2310 elif ThrdFnVar.ksVariation_16 in dByVari:
2311 assert fSimple and not sBranch;
2312 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
2313 if ThrdFnVar.ksVariation_16f in dByVari:
2314 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
2315 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
2316 assert fSimple and sBranch;
2317 aoCases.append(Case('IEMMODE_16BIT',
2318 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
2319 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
2320 aoCases.append(Case('IEMMODE_16BIT | 32',
2321 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
2322
2323
2324 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
2325 if not fSimple:
2326 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
2327 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
2328 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
2329 if not fSimple:
2330 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
2331 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
2332
2333 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
2334 assert fSimple and sBranch;
2335 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
2336 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
2337 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
2338 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
2339 assert fSimple and sBranch;
2340 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
2341 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
2342 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
2343
2344 #
2345 # If the case bodies are all the same, except for the function called,
2346 # we can reduce the code size and hopefully compile time.
2347 #
2348 iFirstCaseWithBody = 0;
2349 while not aoCases[iFirstCaseWithBody].aoBody:
2350 iFirstCaseWithBody += 1
2351 fAllSameCases = True
2352 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
2353 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
2354 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
2355 if fAllSameCases:
2356 aoStmts = [
2357 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
2358 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2359 iai.McCppGeneric('{'),
2360 ];
2361 for oCase in aoCases:
2362 aoStmts.extend(oCase.toFunctionAssignment());
2363 aoStmts.extend([
2364 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2365 iai.McCppGeneric('}'),
2366 ]);
2367 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
2368
2369 else:
2370 #
2371 # Generate the generic switch statement.
2372 #
2373 aoStmts = [
2374 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2375 iai.McCppGeneric('{'),
2376 ];
2377 for oCase in aoCases:
2378 aoStmts.extend(oCase.toCode());
2379 aoStmts.extend([
2380 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2381 iai.McCppGeneric('}'),
2382 ]);
2383
2384 return aoStmts;
2385
    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        aoStmts itself is never modified - every statement is deep copied.
        fIsConditional indicates a conditional branch block, where different
        threaded calls ('Jmp' vs 'NoJmp') are emitted depending on the path.
        cDepth is the McStmtCond recursion depth; sBranchAnnotation is the
        finish annotation (g_ksFinishAnnotation_*) of the branch currently
        being processed, if any.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            # Propagate the collected C-impl flags into the IEM_MC_BEGIN statement.
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    # Raising / finishing MC statements: insert the threaded call
                    # just before them, i.e. pop the copy, emit the call, re-append.
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            # Conditional branch: the advance statement is the no-jump path.
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        else:
                            # Conditional branch: relative jumps are the jump-taken path.
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                    elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                        # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                        if not sBranchAnnotation:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        assert fIsConditional;
                        aoDecoderStmts.pop();
                        if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                            # Sanity: an advance-finish must follow in this branch.
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                            # Sanity: a relative-jump-finish must follow in this branch.
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                              { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        else:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                # Plain C++ decoder statements: emit right after the final
                # IEMOP_HLP_DONE_* / IEMOP_HLP_DECODED_* helper invocation.
                elif (    not fIsConditional
                      and oStmt.fDecode
                      and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                           fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                 fCallEmitted, cDepth + 1,
                                                                                 oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                # The call only counts as emitted if both branches emitted it.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
2471
2472
2473 def generateInputCode(self):
2474 """
2475 Modifies the input code.
2476 """
2477 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2478
2479 if len(self.oMcBlock.aoStmts) == 1:
2480 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2481 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2482 if self.dsCImplFlags:
2483 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2484 else:
2485 sCode += '0;\n';
2486 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2487 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2488 sIndent = ' ' * (min(cchIndent, 2) - 2);
2489 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2490 return sCode;
2491
2492 # IEM_MC_BEGIN/END block
2493 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2494 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2495 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2496 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2497 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2498
# Short alias for ThreadedFunctionVariation.
# (Safe for the methods above to use, since they only execute after the
# module has finished loading.)
ThrdFnVar = ThreadedFunctionVariation;
2501
2502
2503class IEMThreadedGenerator(object):
2504 """
2505 The threaded code generator & annotator.
2506 """
2507
2508 def __init__(self):
2509 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2510 self.oOptions = None # type: argparse.Namespace
2511 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2512 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
2513 self.cErrors = 0;
2514
2515 #
2516 # Error reporting.
2517 #
2518
2519 def rawError(self, sCompleteMessage):
2520 """ Output a raw error and increment the error counter. """
2521 print(sCompleteMessage, file = sys.stderr);
2522 self.cErrors += 1;
2523 return False;
2524
2525 #
2526 # Processing.
2527 #
2528
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the input files, creates & analyzes threaded functions for all
        MC blocks, settles threaded function name suffixes, and gathers
        variable/argument statistics.  Returns True on success, False if any
        errors were reported (see self.cErrors).
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        dRawParamCounts = {};  # Histogram over raw (unpacked) parameter counts.
        dMinParamCounts = {};  # Histogram over minimal (packed) parameter counts.
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyze(self);
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        # Merge + dedup the keys of both histograms before printing.
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Do another pass over the threaded functions to settle the name suffix.
        # Runs of functions sharing the same oFunction whose sub-names aren't
        # all unique get a numeric '_N' suffix appended to disambiguate them.
        iThreadedFn = 0;
        while iThreadedFn < len(self.aoThreadedFuncs):
            oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
            assert oFunction;
            iThreadedFnNext = iThreadedFn + 1;
            dSubNames = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
            # Find the end of the run of functions belonging to oFunction.
            while (    iThreadedFnNext < len(self.aoThreadedFuncs)
                   and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
                dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
                iThreadedFnNext += 1;
            if iThreadedFnNext - iThreadedFn > len(dSubNames):
                # More entries than distinct sub-names: append '_0', '_1', ...
                iSubName = 0;
                while iThreadedFn + iSubName < iThreadedFnNext:
                    self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
                    iSubName += 1;
            iThreadedFn = iThreadedFnNext;

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction = 0;
        oThreadedFunction = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            # Skip past all the functions originating from this parser's file.
            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.displayStatistics(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs = 0;
        cMaxVars = 0;
        cMaxVarsAndArgs = 0;
        cbMaxArgs = 0;
        cbMaxVars = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.cLocals >= 0:
                # Counts.
                assert oThreadedFunction.oMcBlock.cArgs >= 0;
                cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
                cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
                cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
                # Calc stack allocation size:
                # (Each argument/variable is rounded up to a multiple of 8 bytes.)
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars = max(cbMaxVars, cbVars);
                cbMaxArgs = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        if self.cErrors > 0:
            print('fatal error: %u error%s during processing. Details above.'
                  % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
            return False;
        return True;
2628
2629 #
2630 # Output
2631 #
2632
2633 def generateLicenseHeader(self):
2634 """
2635 Returns the lines for a license header.
2636 """
2637 return [
2638 '/*',
2639 ' * Autogenerated by $Id: IEMAllThrdPython.py 103757 2024-03-11 10:54:12Z vboxsync $ ',
2640 ' * Do not edit!',
2641 ' */',
2642 '',
2643 '/*',
2644 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2645 ' *',
2646 ' * This file is part of VirtualBox base platform packages, as',
2647 ' * available from https://www.virtualbox.org.',
2648 ' *',
2649 ' * This program is free software; you can redistribute it and/or',
2650 ' * modify it under the terms of the GNU General Public License',
2651 ' * as published by the Free Software Foundation, in version 3 of the',
2652 ' * License.',
2653 ' *',
2654 ' * This program is distributed in the hope that it will be useful, but',
2655 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2656 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2657 ' * General Public License for more details.',
2658 ' *',
2659 ' * You should have received a copy of the GNU General Public License',
2660 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2661 ' *',
2662 ' * The contents of this file may alternatively be used under the terms',
2663 ' * of the Common Development and Distribution License Version 1.0',
2664 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2665 ' * in the VirtualBox distribution, in which case the provisions of the',
2666 ' * CDDL are applicable instead of those of the GPL.',
2667 ' *',
2668 ' * You may elect to license modified versions of this file under the',
2669 ' * terms and conditions of either the GPL or the CDDL or both.',
2670 ' *',
2671 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2672 ' */',
2673 '',
2674 '',
2675 '',
2676 ];
2677
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry is a (sFuncNm, cArgs, fHaveRecompFunc) tuple.
    katBltIns = (
        ( 'Nop', 0, True ),
        ( 'LogCpuState', 0, True ),

        ( 'DeferToCImpl0', 2, True ),
        ( 'CheckIrq', 0, True ),
        ( 'CheckMode', 1, True ),
        ( 'CheckHwInstrBps', 0, False ),
        ( 'CheckCsLim', 1, True ),

        ( 'CheckCsLimAndOpcodes', 3, True ),
        ( 'CheckOpcodes', 3, True ),
        ( 'CheckOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
    );
2714
2715 def generateThreadedFunctionsHeader(self, oOut):
2716 """
2717 Generates the threaded functions header file.
2718 Returns success indicator.
2719 """
2720
2721 asLines = self.generateLicenseHeader();
2722
2723 # Generate the threaded function table indexes.
2724 asLines += [
2725 'typedef enum IEMTHREADEDFUNCS',
2726 '{',
2727 ' kIemThreadedFunc_Invalid = 0,',
2728 '',
2729 ' /*',
2730 ' * Predefined',
2731 ' */',
2732 ];
2733 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2734
2735 iThreadedFunction = 1 + len(self.katBltIns);
2736 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2737 asLines += [
2738 '',
2739 ' /*',
2740 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2741 ' */',
2742 ];
2743 for oThreadedFunction in self.aoThreadedFuncs:
2744 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2745 if oVariation:
2746 iThreadedFunction += 1;
2747 oVariation.iEnumValue = iThreadedFunction;
2748 asLines.append(' ' + oVariation.getIndexName() + ',');
2749 asLines += [
2750 ' kIemThreadedFunc_End',
2751 '} IEMTHREADEDFUNCS;',
2752 '',
2753 ];
2754
2755 # Prototype the function table.
2756 asLines += [
2757 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2758 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2759 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2760 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2761 '#endif',
2762 '#if defined(IN_RING3)',
2763 'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
2764 '#endif',
2765 ];
2766
2767 oOut.write('\n'.join(asLines));
2768 return True;
2769
    ## Maps a bit width to the C mask constant used when unpacking packed
    ## parameter values (see generateFunctionParameterUnpacking).
    ksBitsToIntMask = {
        1: "UINT64_C(0x1)",
        2: "UINT64_C(0x3)",
        4: "UINT64_C(0xf)",
        8: "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2778
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        oVariation supplies the parameter references (dParamRefs); asParams
        names the generic function parameters to unpack from (e.g.
        kasThreadedParamNames).  Writes one local variable declaration per
        distinct reference to oOut and returns True.
        """
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef = aoRefs[0];
            # Pointer types ('P' prefix) take a full 64-bit parameter; other
            # types get their width and declaration type from g_kdTypeInfo.
            if oRef.sType[0] != 'P':
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            # Build the unpacking expression: a plain copy/cast for 64-bit
            # values, otherwise mask (and shift, if offset into the parameter).
            if cBits == 64:
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                sUnpack = '(%s)((%s >> %s) & %s);' \
                        % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # Row layout: [sort key 'param:offset', type, name, unpack, comment].
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
        # Compute column widths so the output lines up nicely.
        acchVars = [0, 0, 0, 0, 0];
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        # Sorting on the full rows orders primarily by the param:offset key.
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
        return True;
2820
    ## Names of the generic threaded function parameters.
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut):
        """
        Generates the threaded functions source file.
        Returns success indicator.

        Emits the license header, one function definition per threaded
        function variation (grouped by variation in emit order), and the
        function pointer / argument count / name / statistics name tables.
        The table positions must match the enum values assigned by
        generateThreadedFunctionsHeader (asserted below).
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big banner comment for each variation section.
            oOut.write( '\n'
                      + '\n'
                      + '\n'
                      + '\n'
                      + '/*' + '*' * 128 + '\n'
                      + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                      + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                              + '\n'
                              + '/**\n'
                              + ' * #%u: %s at line %s offset %s in %s%s\n'
                                % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                   os.path.split(oMcBlock.sSrcFile)[1],
                                   ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                              + ' */\n'
                              + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                              + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' /*Invalid*/ NULL,',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            ' 0, /*Invalid*/',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' "Invalid",',
        ];
        asStatTable = [
            '/**',
            ' * Function statistics name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
            '{',
            ' NULL,',
        ];
        aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);

        # Predefined (built-in) table entries come first.
        for asTable in aasTables:
            asTable.extend((
                '',
                ' /*',
                ' * Predefined.',
                ' */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
            asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
            asStatTable.append(' "BltIn/%s",' % (sFuncNm,));

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    ' /*',
                    ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    ' */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Table position must match the enum value assigned earlier.
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
                    asStatTable.append(' "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write( '\n'
                  + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write( '\n'
                  + '\n'
                  + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write( '\n'
                  + '\n'
                  + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                  + '\n'
                  + '\n'
                  + '#if defined(IN_RING3)\n');
        oOut.write('\n'.join(asStatTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 */\n');

        return True;
2971
2972 def generateNativeFunctionsHeader(self, oOut):
2973 """
2974 Generates the native recompiler functions header file.
2975 Returns success indicator.
2976 """
2977 if not self.oOptions.fNativeRecompilerEnabled:
2978 return True;
2979
2980 asLines = self.generateLicenseHeader();
2981
2982 # Prototype the function table.
2983 asLines += [
2984 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2985 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
2986 '',
2987 ];
2988
2989 # Emit indicators as to which of the builtin functions have a native
2990 # recompiler function and which not. (We only really need this for
2991 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
2992 for atBltIn in self.katBltIns:
2993 if atBltIn[1]:
2994 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
2995 else:
2996 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
2997
2998 # Emit prototypes for the builtin functions we use in tables.
2999 asLines += [
3000 '',
3001 '/* Prototypes for built-in functions used in the above tables. */',
3002 ];
3003 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3004 if fHaveRecompFunc:
3005 asLines += [
3006 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3007 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3008 ];
3009
3010 oOut.write('\n'.join(asLines));
3011 return True;
3012
    def generateNativeFunctionsSource(self, oOut):
        """
        Generates the native recompiler functions source file.

        Emits one static IEM_DECL_IEMNATIVERECOMPFUNC_DEF function per
        recompilable threaded function variation, then the
        g_apfnIemNativeRecompileFunctions table running parallel to
        g_apfnIemThreadedFunctions (NULL for non-recompilable entries).
        Does nothing (successfully) unless --native was given.

        Returns success indicator.
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # 130 column wide banner separating the variations; the '- 15' is the
            # length of the '*   Variation: ' prefix and keeps the right edge aligned.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Only emit a function body when a native recompiler exists for it;
                # the table below emits NULL for the rest.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header - doxygen comment pointing back at the MC block origin.
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters from the three generic call entry parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + '    /*Invalid*/ NULL,'
                   + '\n'
                   + '    /*\n'
                   + '     * Predefined.\n'
                   + '     */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write('    iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # Enum value 0 is Invalid and 1..N the builtins, hence the start offset;
        # the assertion below cross-checks against the assigned enum values.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  '    /*\n'
                       + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + '     */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getNativeFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
3108
    def generateNativeLivenessSource(self, oOut):
        """
        Generates the native recompiler liveness analysis functions source file.

        Structurally mirrors generateNativeFunctionsSource: one static
        IEM_DECL_IEMNATIVELIVENESSFUNC_DEF function per recompilable variation,
        followed by the g_apfnIemNativeLivenessFunctions table running parallel
        to g_apfnIemThreadedFunctions.  Does nothing (successfully) unless
        --native was given.

        Returns success indicator.
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # 130 column wide banner separating the variations; the '- 15' is the
            # length of the '*   Variation: ' prefix and keeps the right edge aligned.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                # Liveness functions are only emitted where a native recompiler exists.
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header - doxygen comment pointing back at the MC block origin.
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters from the three generic call entry parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));
                    # The liveness code typically does not touch the unpacked
                    # parameters, so emit RT_NOREF_PV for each to silence
                    # unused-variable warnings in the generated C code.
                    asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
                    for aoRefs in oVariation.dParamRefs.values():
                        asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
                    oOut.write('    %s\n' % (' '.join(asNoRefs),));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + '    /*Invalid*/ NULL,'
                   + '\n'
                   + '    /*\n'
                   + '     * Predefined.\n'
                   + '     */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write('    iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # Enum value 0 is Invalid and 1..N the builtins, hence the start offset;
        # the assertion below cross-checks against the assigned enum values.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  '    /*\n'
                       + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + '     */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getLivenessFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
3208
3209
3210 def getThreadedFunctionByIndex(self, idx):
3211 """
3212 Returns a ThreadedFunction object for the given index. If the index is
3213 out of bounds, a dummy is returned.
3214 """
3215 if idx < len(self.aoThreadedFuncs):
3216 return self.aoThreadedFuncs[idx];
3217 return ThreadedFunction.dummyInstance();
3218
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.

        Copies the parsed IEMAllInst*.cpp.h sources thru mostly verbatim,
        substituting each MC block with the replacement text produced by
        ThreadedFunction.generateInputCode().  Only parsers whose source file
        belongs to output file set idxFile (1..4, per
        iai.g_aaoAllInstrFilesAndDefaultMapAndSet) are emitted.

        Returns success indicator.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?  (aoInfo[2] == -1 means 'in all sets'.)
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude     = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                assert fInclude is False; # every input file must be present in the table.
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the file line by line, splicing in replacements as MC blocks
            # (in source order, starting at this parser's first function) come up.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine             = 0;
            while iLine < len(oParser.asLines):
                sLine  = oParser.asLines[iLine];
                iLine += 1;                 # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if (   iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block.  Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    # Sanity: the begin line should contain exactly one IEM_MC_ macro
                    # invocation (IEM_MC_F_ flags excluded) unless partially macro expanded.
                    assert (   (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the line the block ends on and emit what follows the block.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert (   sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    # Consume consecutive MC blocks starting on this same (expanded) line,
                    # tracking the character offset reached so far in offLine.
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        assert (   sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
3309
3310 def generateModifiedInput1(self, oOut):
3311 """
3312 Generates the combined modified input source/header file, part 1.
3313 Returns success indicator.
3314 """
3315 return self.generateModifiedInput(oOut, 1);
3316
3317 def generateModifiedInput2(self, oOut):
3318 """
3319 Generates the combined modified input source/header file, part 2.
3320 Returns success indicator.
3321 """
3322 return self.generateModifiedInput(oOut, 2);
3323
3324 def generateModifiedInput3(self, oOut):
3325 """
3326 Generates the combined modified input source/header file, part 3.
3327 Returns success indicator.
3328 """
3329 return self.generateModifiedInput(oOut, 3);
3330
3331 def generateModifiedInput4(self, oOut):
3332 """
3333 Generates the combined modified input source/header file, part 4.
3334 Returns success indicator.
3335 """
3336 return self.generateModifiedInput(oOut, 4);
3337
3338
3339 #
3340 # Main
3341 #
3342
3343 def main(self, asArgs):
3344 """
3345 C-like main function.
3346 Returns exit code.
3347 """
3348
3349 #
3350 # Parse arguments
3351 #
3352 sScriptDir = os.path.dirname(__file__);
3353 oParser = argparse.ArgumentParser(add_help = False);
3354 oParser.add_argument('asInFiles',
3355 metavar = 'input.cpp.h',
3356 nargs = '*',
3357 default = [os.path.join(sScriptDir, aoInfo[0])
3358 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3359 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3360 oParser.add_argument('--host-arch',
3361 metavar = 'arch',
3362 dest = 'sHostArch',
3363 action = 'store',
3364 default = None,
3365 help = 'The host architecture.');
3366
3367 oParser.add_argument('--out-thrd-funcs-hdr',
3368 metavar = 'file-thrd-funcs.h',
3369 dest = 'sOutFileThrdFuncsHdr',
3370 action = 'store',
3371 default = '-',
3372 help = 'The output header file for the threaded functions.');
3373 oParser.add_argument('--out-thrd-funcs-cpp',
3374 metavar = 'file-thrd-funcs.cpp',
3375 dest = 'sOutFileThrdFuncsCpp',
3376 action = 'store',
3377 default = '-',
3378 help = 'The output C++ file for the threaded functions.');
3379 oParser.add_argument('--out-n8ve-funcs-hdr',
3380 metavar = 'file-n8tv-funcs.h',
3381 dest = 'sOutFileN8veFuncsHdr',
3382 action = 'store',
3383 default = '-',
3384 help = 'The output header file for the native recompiler functions.');
3385 oParser.add_argument('--out-n8ve-funcs-cpp',
3386 metavar = 'file-n8tv-funcs.cpp',
3387 dest = 'sOutFileN8veFuncsCpp',
3388 action = 'store',
3389 default = '-',
3390 help = 'The output C++ file for the native recompiler functions.');
3391 oParser.add_argument('--out-n8ve-liveness-cpp',
3392 metavar = 'file-n8tv-liveness.cpp',
3393 dest = 'sOutFileN8veLivenessCpp',
3394 action = 'store',
3395 default = '-',
3396 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3397 oParser.add_argument('--native',
3398 dest = 'fNativeRecompilerEnabled',
3399 action = 'store_true',
3400 default = False,
3401 help = 'Enables generating the files related to native recompilation.');
3402 oParser.add_argument('--out-mod-input1',
3403 metavar = 'file-instr.cpp.h',
3404 dest = 'sOutFileModInput1',
3405 action = 'store',
3406 default = '-',
3407 help = 'The output C++/header file for modified input instruction files part 1.');
3408 oParser.add_argument('--out-mod-input2',
3409 metavar = 'file-instr.cpp.h',
3410 dest = 'sOutFileModInput2',
3411 action = 'store',
3412 default = '-',
3413 help = 'The output C++/header file for modified input instruction files part 2.');
3414 oParser.add_argument('--out-mod-input3',
3415 metavar = 'file-instr.cpp.h',
3416 dest = 'sOutFileModInput3',
3417 action = 'store',
3418 default = '-',
3419 help = 'The output C++/header file for modified input instruction files part 3.');
3420 oParser.add_argument('--out-mod-input4',
3421 metavar = 'file-instr.cpp.h',
3422 dest = 'sOutFileModInput4',
3423 action = 'store',
3424 default = '-',
3425 help = 'The output C++/header file for modified input instruction files part 4.');
3426 oParser.add_argument('--help', '-h', '-?',
3427 action = 'help',
3428 help = 'Display help and exit.');
3429 oParser.add_argument('--version', '-V',
3430 action = 'version',
3431 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3432 % (__version__.split()[1], iai.__version__.split()[1],),
3433 help = 'Displays the version/revision of the script and exit.');
3434 self.oOptions = oParser.parse_args(asArgs[1:]);
3435 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3436
3437 if self.oOptions.sHostArch not in ('amd64', 'arm64'):
3438 print('error! Unsupported (or missing) host architecture: %s' % (self.oOptions.sHostArch,), file = sys.stderr);
3439 return 1;
3440
3441 #
3442 # Process the instructions specified in the IEM sources.
3443 #
3444 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3445 #
3446 # Generate the output files.
3447 #
3448 aaoOutputFiles = (
3449 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
3450 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
3451 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
3452 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
3453 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource ),
3454 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
3455 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
3456 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
3457 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
3458 );
3459 fRc = True;
3460 for sOutFile, fnGenMethod in aaoOutputFiles:
3461 if sOutFile == '-':
3462 fRc = fnGenMethod(sys.stdout) and fRc;
3463 else:
3464 try:
3465 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3466 except Exception as oXcpt:
3467 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3468 return 1;
3469 fRc = fnGenMethod(oOut) and fRc;
3470 oOut.close();
3471 if fRc:
3472 return 0;
3473
3474 return 1;
3475
3476
if __name__ == '__main__':
    # Script entry point: run the generator and propagate its exit code.
    rcExit = IEMThreadedGenerator().main(sys.argv);
    sys.exit(rcExit);
3479
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette